Dataset schema (ranges as reported by the dataset viewer):

query            string   lengths 9 to 3.4k
document         string   lengths 9 to 87.4k
metadata         dict
negatives        list     lengths 4 to 101
negative_scores  list     lengths 4 to 101
document_score   string   lengths 3 to 10
document_rank    string   102 classes
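The rows below follow this schema. As a minimal sketch (assuming the dump comes from a Hugging Face dataset; the repository path below is a placeholder, not the real name), a row can be loaded and inspected like this:

```python
from datasets import load_dataset

# Placeholder repository path -- substitute the actual dataset name.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"][:80])             # natural-language docstring query
print(row["document"][:80])          # positive code snippet
print(len(row["negatives"]))         # 4-101 mined negative snippets
print(len(row["negative_scores"]))   # one score per negative, same order
print(row["document_score"], row["document_rank"])
```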
The name of the application that is associated with this environment.
def application_name(self) -> pulumi.Output[str]: return pulumi.get(self, "application_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def get_name():\n return config.APP_NAME", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def name(self):\n return self._env_name", "def name(self):\n return self.application_tree['name']", "def _app(self) -> str:\n return self.charm.app.name", "def _get_app_name(app):\n return app[APP_NAME_KEY]", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def getApplicationName(self) -> unicode:\n ...", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def get_name(self, name):\n return self.apps[name]['name']", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def app_name(self, value):\n self._app_name = value", "def app_name(self):\n return self._chromecast.app_display_name if self._chromecast else None", "def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_application_name(self, feed_id):\r\n return self._handler.get_application_name(feed_id)", "def fallback_application_name() -> str:\n # Import here instead of at the top to avoid an ImportError caused by an\n # import cycle. 
This can be removed once the import graph of id3c.cli is\n # less tangled.\n from ..cli.utils import running_command_name\n\n # \"The application_name can be any string of less than NAMEDATALEN\n # characters (64 characters in a standard build).\"¹\n #\n # psycopg2 / libpq will truncate for us, but they will issue a NOTICE log\n # message if they do. Avoid the cluttery notice by truncating ourselves.\n #\n # ¹ https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME\n max_len = 64\n appname = running_command_name()\n\n return shorten(appname, max_len, \"...\")", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_label(self) -> str:\n return self._environment_label", "def programName(self):\n return self._parser.prog", "def product(self):\n return self.appName", "def name(self):\n\n return self.manifest[\"name\"]", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def call_name(self):\n return str(self.executable.name)", "def master_name(self):\n return self._LAUNCHPAD_NAME", "def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")", "def get_app_label(app_module):\n return app_module.__name__.split('.')[-1]", "def appName(self, name: str) -> \"SparkSession.Builder\":\n return self.config(\"spark.app.name\", name)", "def app_id(self) -> str:\n return self._app_id", "def _generateApplicationName(self, obj, **args):\n result = []\n try:\n result.append(obj.getApplication().name)\n except:\n pass\n return result", "def get_app_name(i):\n return app_id + '-' + str(i)", "def app_label(cls):\n return cls.model_meta.app_label", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def get_app(self):\n\n app = APP\n return app", "def get_application(self):\n return self._silva_root", "def name(self):\n return self._config.get(CONF_NAME)", "def app_names(self):\n return self.get_app_names()", "def application(self):\n\n if not self._applicationDef:\n raise NotValidPlatformException(\n 'No application definition is available. 
Are you sure you are running on Platform.sh?'\n )\n return self._applicationDef", "def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")", "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def name(self):\n return self.config[\"name\"]", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def name(self):\n return self._config.backend_name", "def get_name():\n return __name__", "def getApp(self):\n return self.serviceClass.app", "def getWindowName(self):\n return self.__windowName", "def name(self):\n return self.appliance_name", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def name(self):\n return self._config_name", "def application(self):\n return self._application", "def application(self):\n return self._application", "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def _extract_appname(self, log):\n appname = \"\"\n if \"appLaunch\" in log:\n appname = log[\"appLaunch\"][\"appName\"]\n else:\n self.logger.info(\"no applaunch field\")\n self.logger.info(log[\"event\"])\n pass \n \n return appname", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "def _get_environment(cls):\n return cls.__name__.lower()", "def _get_base_app_name(value):\n value = os.path.basename(value)\n if (\n value.endswith(\".exe\")\n or value.endswith(\".dll\")\n or value.endswith(\".so\")\n ):\n value = os.path.splitext(value)[0]\n\n return value", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def name(self) -> str:\n return self.dev.label", "def __repr__(self):\n return '<Application({name})>'.format(name=self.name)", "def name(self):\n return self._path or '__main__'", "def python_name(self):\n return self.requirement.name", "def app_id(self):\n return self._app_id", "def set_name(self, application_name):\r\n self._name = application_name", "def get_wsgi_file_name(self):\n return self.wsgi", "def application_arn(self) -> Optional[str]:\n 
return pulumi.get(self, \"application_arn\")", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def package_name(self):\n return self._package_name", "def name(self):\r\n return self.setuptools_requirement.project_name", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def module_name(self):\n return self.name()", "def get_name(self):\n return self.settings.get(\"name\", None)", "def get_process_name(self):\n\n return self._args.t", "def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]", "def app_label(obj):\n try:\n return lower(obj._meta.object_name)\n except AttributeError:\n return ''", "def server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")" ]
[ "0.8760097", "0.8733922", "0.8601743", "0.8560242", "0.8511749", "0.8471325", "0.8393377", "0.8365738", "0.8314625", "0.8314625", "0.8314625", "0.8314625", "0.811715", "0.8087263", "0.8060348", "0.8050741", "0.80447793", "0.80227804", "0.80048144", "0.79946846", "0.7801479", "0.7620674", "0.7486029", "0.74473315", "0.7439717", "0.7420109", "0.73927796", "0.73738307", "0.73728824", "0.73548454", "0.731052", "0.72918355", "0.72287494", "0.72042644", "0.7155492", "0.711755", "0.7021532", "0.70004296", "0.69830894", "0.69782907", "0.69281316", "0.6889805", "0.6869799", "0.68548876", "0.6849303", "0.6846015", "0.6782077", "0.6779725", "0.6772325", "0.6726621", "0.672583", "0.6713291", "0.67121696", "0.66931224", "0.66595334", "0.6640766", "0.6620728", "0.6607435", "0.6601561", "0.65891176", "0.65886426", "0.6569739", "0.6555624", "0.6542724", "0.65163636", "0.6508764", "0.6499386", "0.64868534", "0.64868534", "0.6463859", "0.64598", "0.64585376", "0.64476854", "0.6445617", "0.6444565", "0.6436596", "0.6406096", "0.6403448", "0.6399389", "0.6387711", "0.63539016", "0.6342174", "0.6336588", "0.6320458", "0.6314963", "0.63128155", "0.62967336", "0.6279666", "0.6272645", "0.6271827", "0.6268512", "0.6266831", "0.6262811", "0.6262355", "0.6257191", "0.6252552", "0.6239351", "0.62301135", "0.6223576" ]
0.8621024
3
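The objective metadata above declares a single triplet objective over (query, document, negatives). A hedged sketch of expanding one parsed row into (anchor, positive, negative) training triplets; `row` is assumed to be a record shaped like the one above:

```python
from typing import Iterator, Tuple

def row_to_triplets(row: dict) -> Iterator[Tuple[str, str, str]]:
    """Yield (anchor, positive, negative) triplets for one record,
    following the "triplet": [["query", "document", "negatives"]] objective."""
    anchor = row["query"]        # docstring-style query
    positive = row["document"]   # matching code snippet
    for negative in row["negatives"]:
        yield anchor, positive, negative

# Usage (assuming `row` holds a parsed record):
# triplets = list(row_to_triplets(row))
```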
If specified, the environment attempts to use this value as the prefix for the CNAME in your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.
def cname_prefix(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "cname_prefix")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_env_prefix(instrument):\n return \"crds://\"", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' + env", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def env_var_aws_access_key_id():\n return 'AWS_ACCESS_KEY_ID'", "def staging():\n env.hosts = ['staging.example.com']", "def create_env_name(name):\n new_name = re.sub(r'''(?<=[a-z])([A-Z])''', '_\\\\1', name)\n new_name = re.sub(r'\\W+', '_', new_name)\n new_name = re.sub(r'_{2,}', '_', new_name)\n return new_name.upper().strip(\"_\")", "def get_consul_uri():\n if \"CONSUL_HOST\" in os.environ:\n # WARNING! TODO! Currently the env file does not include the port.\n # But some other people think that the port should be a part of that.\n # For now, I'm hardcoding 8500 until this gets resolved.\n return \"http://{0}:{1}\".format(os.environ[\"CONSUL_HOST\"], 8500)\n else:\n raise BadEnviornmentENVNotFound(\"CONSUL_HOST\")", "def env_prefix(self, path):\n if self.is_default:\n return self.root # FIXME: Is this guaranteed to be the right one?\n\n return os.sep.join([path, PROJECT_ENVS_FOLDER,\n self.default_environment])", "def cname_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname_prefix\")", "def bucket_website_domain_name(self) -> str:\n ...", "def RSA_KEYPAIR_PREFIX() :\n return os.environ.get( \"ATC_KEYPAIR_PREFIX\", \"atc-dev\" )", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def prepend_env(self, env_name, pattern):\n if not self.has_pattern(env_name, pattern):\n if env_name not in self.environ.keys():\n self.environ[env_name] = [pattern]\n else:\n self.environ[env_name].insert(0, pattern)\n if env_name not in self.env_name_changed:\n self.env_name_changed.append(env_name)", "def test_cluster_name_from_environment(env_config):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'gcp-defaults.ini'))\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n\n assert cfg.cluster.results == env_config['ELB_RESULTS']\n assert cfg.cluster.name == env_config['ELB_CLUSTER_NAME']", "def name(self):\n return self._env_name", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())", "def swap_cnames(profile, source_environment, destination_environment):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"SourceEnvironmentName\"] = source_environment\n params[\"DestinationEnvironmentName\"] = destination_environment\n return client.swap_environment_cnames(**params)", "def prepend_environment_variable(self, key, value):\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"$env:{k} = 
\\\"{v};$env:{k}\\\"\".format(**script_keys)\n self._printer(script)", "def bucket_domain_name(self) -> str:\n ...", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def cname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname\")", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def env_var_aws_secret_access_key():\n return 'AWS_SECRET_ACCESS_KEY'", "def set_dns_cname ( route53_conn, dns_name, cname_value ) :\n r53 = boto.route53.record.ResourceRecordSets( route53_conn, route_53_hosted_zoneid )\n monitor_dns = r53.add_change( 'UPSERT', dns_name, 'CNAME', ttl=60 )\n monitor_dns.add_value( cname_value )\n r53.commit( )", "def cname(self, cname):\n if (\n self.local_vars_configuration.client_side_validation and cname is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `cname`, must not be `None`\"\n ) # noqa: E501\n\n self._cname = cname", "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def _generate_cache_key(self, address):\n\n return re.sub(r'[^a-z0-9]', '', str(address).lower())", "def production_url(service_name):\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n project_url = f\"{project_id}.appspot.com\"\n if service_name == \"default\":\n return f\"https://{project_url}\"\n else:\n return f\"https://{service_name}-dot-{project_url}\"", "def prepend_environment_variable(parent, key, value):\n os.environ[key] = \"{0}{1}{2}\".format(str(value),\n os.pathsep,\n os.environ.get(key) or \"\")\n\n if parent:\n parent.prepend_environment_variable(key, value)", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def _get_athena_connection_string(db_name_env_var: str = \"ATHENA_DB_NAME\") -> str:\n ATHENA_DB_NAME: Optional[str] = os.getenv(db_name_env_var)\n ATHENA_STAGING_S3: Optional[str] = os.getenv(\"ATHENA_STAGING_S3\")\n\n if not ATHENA_DB_NAME:\n raise ValueError(\n f\"Environment Variable {db_name_env_var} is required to run integration tests against AWS Athena\"\n )\n\n if not ATHENA_STAGING_S3:\n raise ValueError(\n \"Environment Variable ATHENA_STAGING_S3 is required to run integration tests against AWS Athena\"\n )\n\n url = f\"awsathena+rest://@athena.us-east-1.amazonaws.com/{ATHENA_DB_NAME}?s3_staging_dir={ATHENA_STAGING_S3}\"\n\n return url", "def environment_label(self) -> str:\n return self._environment_label", "def prepend_environment_variable(self, key, value):\n value = BashParentEnvironment._format_environment_value(value)\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"export {k}=\\\"{v}:${k}\\\"\".format(**script_keys)\n self._printer(script)", "def production_url(service_name):\n project_id = os.environ.get('GOOGLE_CLOUD_PROJECT')\n project_url = '{}.appspot.com'.format(project_id)\n if service_name == 'default':\n return 'https://{}'.format(project_url)\n else:\n return 'https://{}-dot-{}'.format(service_name, project_url)", "def bucket_regional_domain_name(self) -> str:\n ...", "def get_url_prefix(config: Mapping[str, Any]) -> str:\n return _sanitize_url_prefix(config.get('url_prefix'))", "def environment_variable_string(self, name):\n 
return \"$(\" + name + \")\"", "def get_ami_keypath ( env_type ) :\n return \"/builds/esp/\" + env_type + \"/current/\"", "def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])", "def test_generated_cluster_name(env_config_no_cluster):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'gcp-defaults.ini'))\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n\n assert cfg.cluster.results == TEST_RESULTS_BUCKET\n user = getpass.getuser()\n digest = hashlib.md5(TEST_RESULTS_BUCKET.encode()).hexdigest()[0:9]\n assert cfg.cluster.name == f'elasticblast-{user.lower()}-{digest}'", "def _setup_friendly_environ(environ):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n space_name = \"frontpage\"\n else:\n space_name = determine_space(environ, http_host)\n\n recipe_name = determine_space_recipe(environ, space_name)\n environ['wsgiorg.routing_args'][1]['recipe_name'] = recipe_name.encode(\n 'UTF-8')", "def getSiteName():\n return os.environ['SITENAME']", "def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name", "def test_wsgi_script_name_on_aws_url(self):\n lh = LambdaHandler(\"tests.test_wsgi_script_name_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"1234567890.execute-api.us-east-1.amazonaws.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 200)\n self.assertEqual(\n response[\"body\"],\n \"https://1234567890.execute-api.us-east-1.amazonaws.com/dev/return/request/url\",\n )", "def get_server_url():\n try:\n url = os.environ['API_HOST']\n # print('[ OK ] Server url loaded: ', url)\n except KeyError:\n url = 'http://localhost:3300/'\n print('[ WARNING ] API_HOST environment variable was not found. 
default server url was set at: ', url)\n\n return url", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def add_argument(self, *args, **kwargs):\n env_var = kwargs.pop('env_var', None)\n if env_var is not None:\n if not env_var.startswith('COSA_'):\n env_var = f\"COSA_{env_var}\"\n ka = kwargs.get(\"help\", '')\n kwargs['help'] = f\"{ka} (Env: {env_var})\"\n default = kwargs.pop('default', None)\n super().add_argument(\n *args, default=os.environ.get(env_var, default), **kwargs)\n else:\n super().add_argument(*args, **kwargs)", "def test_wsgi_script_name_on_domain_url(self):\n lh = LambdaHandler(\"tests.test_wsgi_script_name_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"example.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 200)\n self.assertEqual(response[\"body\"], \"https://example.com/return/request/url\")", "def set_BucketName(self, value):\n super(PutBucketWebsiteRedirectInputSet, self)._set_input('BucketName', value)", "def ecr_image_name(dev_account_id, region, component_name, version):\n return '%s.dkr.ecr.%s.amazonaws.com/%s:%s' % (dev_account_id, region, component_name, 'dev' if version is None else version)", "def GetEnvironment(self):\n environ = super(ServiceHandlerTest, self).GetEnvironment()\n if self.remote_host:\n environ['REMOTE_HOST'] = self.remote_host\n if self.server_host:\n environ['SERVER_HOST'] = self.server_host\n return environ", "def get_admin_bucket_name ( location = None, region_name = None ) :\n if region_name :\n location = get_s3_location( region_to_location_map[ region_name ] )\n\n if not location or len( location ) < 1 :\n location = 'us-standard'\n\n return 'admin.mse-esp.com-' + location", "def build_bucket_url(bucket_name) -> str:\n return \"https://s3.console.aws.amazon.com/s3/buckets/{0}\".format(bucket_name)", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def get_name():\n return config.APP_NAME", "def SANDBOX(cls):\n\n return DataCenter.Environment(\"https://sandbox.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def cname(self):\n return self._cname()", "def host(self):\r\n return self._environ.get('HTTP_HOST', '')", "def get_hostname(config):\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint", "def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'", "def staging():\n env.settings = 'staging'\n env.hosts = ['db.beta.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media-beta.tribapps.com'", 
"def get_vpc_name ( base_name ) :\n return base_name + '-VPC'", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def _prefix_env_variable(environ, name, paths, subfolders):\n value = environ[name] if name in environ else ''\n environ_paths = [path for path in value.split(os.pathsep) if path]\n checked_paths = []\n for path in paths:\n if not isinstance(subfolders, list):\n subfolders = [subfolders]\n for subfolder in subfolders:\n path_tmp = path\n if subfolder:\n path_tmp = os.path.join(path_tmp, subfolder)\n # skip nonexistent paths\n if not os.path.exists(path_tmp):\n continue\n # exclude any path already in env and any path we already added\n if path_tmp not in environ_paths and path_tmp not in checked_paths:\n checked_paths.append(path_tmp)\n prefix_str = os.pathsep.join(checked_paths)\n if prefix_str != '' and environ_paths:\n prefix_str += os.pathsep\n return prefix_str", "def getJobName():\n return os.environ['LCATR_JOB']", "def environ_key(name=None):\n try:\n return os.environ[name]\n except KeyError:\n return None", "def create_cdn(tag_prefix, cdn_name=None, elb_domain=None,\n s3_logs_bucket=None,\n tls_priv_key=None, tls_fullchain_cert=None,\n region_name=None, dry_run=False):\n if not cdn_name:\n cdn_name = '%scloudfront' % _clean_tag_prefix(tag_prefix)\n cdn_client = boto3.client('cloudfront', region_name='us-east-1')\n domains = []\n\n default_cert_location = None\n if not default_cert_location:\n if tls_priv_key and tls_fullchain_cert:\n resp = _store_certificate(\n tls_fullchain_cert, tls_priv_key,\n tag_prefix=tag_prefix, region_name=region_name,\n dry_run=dry_run)\n default_cert_location = resp['CertificateArn']\n else:\n LOGGER.warning(\"default_cert_location is not set and there are no\"\\\n \" tls_priv_key and tls_fullchain_cert either.\")\n\n try:\n resp = cdn_client.create_distribution(\n DistributionConfig={\n 'CallerReference': datetime.datetime.now(),\n 'DefaultRootObject': 'index.html',\n 'Aliases': {\n 'Quantity': len(domains),\n 'Items': domains\n },\n 'Origins': {\n 'Quantity': 1,\n 'Items': [{\n 'Id': tag_prefix,\n 'DomainName': elb_domain,\n 'CustomOriginConfig': {\n 'HTTPPort': 80,\n 'HTTPSPort': 443,\n 'OriginProtocolPolicy': 'match-viewer',\n }\n }]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': tag_prefix,\n 'TrustedSigners': {\n 'Enabled': False,\n 'Quantity': 0\n },\n 'ViewerProtocolPolicy': 'redirect-to-https',\n },\n #pylint:disable=line-too-long\n #https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html\n 'PriceClass': 'XXX',\n 'Enabled': True,\n 'ViewerCertificate': {\n #https://aws.amazon.com/premiumsupport/knowledge-center/associate-ssl-certificates-cloudfront/\n 'CloudFrontDefaultCertificate': False,\n 'ACMCertificateArn': default_cert_location,\n 'SSLSupportMethod': 'sni-only'\n }\n })\n except botocore.exceptions.ClientError as err:\n raise", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def host(self):\n return self._environ.get('HTTP_HOST', '')", "def bucket_website_url(self) -> str:\n ...", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def baseurl(request):\n if request.is_secure():\n scheme = 'https://'\n else:\n scheme = 'http://'\n\n return {'BASE_URL': scheme + 
request.get_host(),}", "def set_normal_environment(self):\n if 'RUSTUP_DIST_SERVER' in os.environ:\n self._download_url = os.environ['RUSTUP_DIST_SERVER']\n else:\n self._download_url = 'https://static.rust-lang.org'", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def production():\n env.settings = 'production'\n env.hosts = ['db.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media.apps.chicagotribune.com'", "def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))", "def environment(self, environment):\n\n self._set_field(\"environment\", environment.get_json())", "def get_prefix(self) -> str:\n return self.env_type.value + '_'", "def elastic_cloud_sso_default_url(self) -> str:\n return pulumi.get(self, \"elastic_cloud_sso_default_url\")", "def get_ami_keyname ( app_name ) :\n return app_name + '.ami'", "def hostname_for_event(self, clean_server_name, agentConfig):\n uri = urlsplit(clean_server_name)\n if '@' in uri.netloc:\n hostname = uri.netloc.split('@')[1].split(':')[0]\n else:\n hostname = uri.netloc.split(':')[0]\n if hostname == 'localhost':\n hostname = self.hostname\n return hostname", "def test_metadata_cache_uri_set_via_env_vars(monkeypatch, caplog):\n ENV_METADATA_CACHE_URI = environ_names_and_sections[NAME_METADATA_CACHE_URI][0]\n ENV_AQUARIUS_URL = deprecated_environ_names[NAME_AQUARIUS_URL][0]\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI, raising=False)\n monkeypatch.delenv(ENV_AQUARIUS_URL, raising=False)\n config = Config()\n metadata_cache_uri = config.metadata_cache_uri\n assert metadata_cache_uri == \"https://aquarius.marketplace.oceanprotocol.com\"\n\n monkeypatch.setenv(ENV_METADATA_CACHE_URI, \"https://custom-aqua.uri\")\n config = Config()\n assert config.metadata_cache_uri == \"https://custom-aqua.uri\"\n\n monkeypatch.setenv(ENV_AQUARIUS_URL, \"https://another-aqua.url\")\n with pytest.raises(ValueError):\n Config()\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI)\n config = Config()\n assert config.metadata_cache_uri == \"https://another-aqua.url\"\n assert (\n \"Config: AQUARIUS_URL envvar is deprecated. Use METADATA_CACHE_URI instead.\"\n in caplog.text\n )", "def _key(\n service=None, # type: Optional[str]\n env=None, # type: Optional[str]\n ):\n # type: (...) 
-> str\n service = service or \"\"\n env = env or \"\"\n return \"service:\" + service + \",env:\" + env", "def flows_endpoint_envvar_callback(default_value: str) -> str:\n return os.getenv(\"GLOBUS_AUTOMATE_FLOWS_ENDPOINT\", default_value)", "def underlying_url(self):\n return 'http://{}:{}'.format(names.azure_url(self.dns_name), self.port)", "def escape_env_var(varname: str) -> str:\n varletters = list(varname.upper())\n if not varletters[0].isalpha():\n varletters[0] = \"_\"\n for i, c in enumerate(varletters):\n if not c.isalnum() and c != \"_\":\n varletters[i] = \"_\"\n return \"\".join(varletters)", "def set_platform_gs_prefix(self, gs_url):\n self.buildurl_gs_prefix = gs_url # pragma: no cover", "def build_endpoint_prefix(self):\n if not sanity.validate_api_hostname(self.api_host):\n error_message = \"Bad API hostname: %s\" % self.api_host\n raise CloudPassageValidation(error_message)\n prefix = \"https://\" + self.api_host + \":\" + str(self.api_port)\n return prefix", "def __get_host(self) -> str:\n\t\treturn os.getenv('FLASK_DRIVER_HOST', '0.0.0.0')", "def _set_from_env(name, context, default):\n if default is _DEFAULT_ARG and name not in os.environ:\n return\n\n context[name] = os.environ.get(name, default)", "def map_environment(env):\n if env in {'dev', 'develop', 'development'}:\n return 'dev'\n if env in {'prod', 'production'}:\n return 'prod'\n return env" ]
[ "0.6500855", "0.5895467", "0.5876529", "0.5587471", "0.55460733", "0.5484087", "0.5324461", "0.5320309", "0.5279635", "0.5274285", "0.52700907", "0.52646524", "0.5225509", "0.5187014", "0.5178442", "0.5177224", "0.5163091", "0.5162621", "0.5136999", "0.51282483", "0.5125458", "0.5118199", "0.5109413", "0.5075603", "0.50685436", "0.5043729", "0.5027756", "0.5015068", "0.5008953", "0.4999982", "0.49552843", "0.4925651", "0.49195904", "0.49177417", "0.49065322", "0.48967138", "0.48928395", "0.48927104", "0.48795167", "0.48704302", "0.48691452", "0.484089", "0.48392332", "0.48360285", "0.4831425", "0.48305753", "0.4826356", "0.4823251", "0.48198503", "0.4819316", "0.47960067", "0.47830042", "0.47824025", "0.47721824", "0.47572118", "0.47470617", "0.47431475", "0.474058", "0.47337422", "0.47337422", "0.4728016", "0.4727812", "0.47212115", "0.47211462", "0.4721107", "0.47193018", "0.47159624", "0.47153234", "0.47153032", "0.46963266", "0.46877128", "0.46784902", "0.46747673", "0.46745017", "0.46734145", "0.46697366", "0.46591136", "0.46579787", "0.46507567", "0.46507567", "0.46482933", "0.46428868", "0.46364358", "0.46354058", "0.4633805", "0.46304035", "0.4621492", "0.4620607", "0.4619445", "0.46139297", "0.46132544", "0.46115837", "0.45987839", "0.459226", "0.45918262", "0.45907626", "0.4584875", "0.45839295", "0.45825884", "0.45808125" ]
0.52051395
13
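Because every negative comes with a similarity score in the same order as the negatives list, the highest-scoring negatives can serve as hard negatives for contrastive training. A sketch under the assumption that the scores are parallel to the negatives and may be stored as strings, as they appear in this dump:

```python
def hardest_negatives(row: dict, k: int = 5) -> list:
    """Return the k negatives with the highest similarity scores."""
    scores = [float(s) for s in row["negative_scores"]]  # cast in case scores are strings
    ranked = sorted(zip(scores, row["negatives"]), key=lambda pair: pair[0], reverse=True)
    return [negative for _, negative in ranked[:k]]
```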
Your description for this environment.
def description(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "description")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )", "def description(self):\n pass", "def description(self):\n pass", "def description():", "def description(self) -> str:\n pass", "def describe(self):\n print(self.description)", "def describe(self):\n print(self.description)", "def help_description():\n pass", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def get_description(self) -> str:\n pass", "def description(self):\n return self.settings['description']", "def Description(self) -> str:", "def Description(self) -> str:", "def _description(self):\n return None", "def get_description(self):\n pass", "def describe(self) -> str:", "def description(self):", "def description(self) -> str:\n raise NotImplementedError", "def description(self) -> str:\n raise NotImplementedError", "def description(self) -> str:\n raise NotImplementedError", "def describe(self):\n return ''", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def description(cls) -> str:\n\n return cls.__doc__ or \"\"", "def define_description(self):\n self._description = 'NODDI-based processing of DWI datasets.'", "def description(self) -> str:\n return 
self.data['description']", "def get_description(self):\r\n return self.__description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self):\n return (self.__doc__ or \"\").strip()", "def get_description(self):\n print(\"This Iron door.\")", "def get_description():\n raise NotImplementedError", "def get_description(self):\n return self.__description", "def get_description():\n desc = dict()\n desc[\"cache\"] = 3600\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"This plot is not meant for interactive use, but a backend for\n SPS plots.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"text\",\n name=\"pid\",\n default=\"202012300005-KDVN-WWUS83-SPSDVN\",\n label=\"IEM generated up to 35 char product identifier:\",\n ),\n dict(\n type=\"int\",\n default=0,\n name=\"segnum\",\n label=\"Product Segment Number (starts at 0):\",\n ),\n ]\n return desc", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description" ]
[ "0.76594716", "0.7642881", "0.7642881", "0.7590907", "0.75673366", "0.754099", "0.754099", "0.74186075", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.73050547", "0.7232642", "0.7215585", "0.72022337", "0.72022337", "0.7193534", "0.7191506", "0.7190226", "0.71668494", "0.7061988", "0.7061988", "0.7061988", "0.70421666", "0.7006096", "0.7006096", "0.7006096", "0.69863814", "0.69656426", "0.69579184", "0.69471914", "0.6915571", "0.6915571", "0.6915571", "0.6915571", "0.6915571", "0.6915571", "0.6915571", "0.6915571", "0.6905591", "0.68884295", "0.68860555", "0.686865", "0.6867942", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828", "0.6863828" ]
0.0
-1
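In the row above, document_score is 0.0 and document_rank is -1, which plausibly marks a row where the gold document was not ranked at all; that sentinel reading is an assumption, not something stated in the dump. Under that assumption, a retrieval metric such as MRR could treat these rows as misses:

```python
def mean_reciprocal_rank(rows) -> float:
    """MRR over document_rank, treating -1 (assumed miss sentinel) as zero contribution.

    Assumes document_rank is a 1-based rank stored as a string.
    """
    total = 0.0
    for row in rows:
        rank = int(row["document_rank"])
        if rank > 0:          # skip assumed misses
            total += 1.0 / rank
    return total / len(rows) if rows else 0.0
```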
A unique name for the environment.
def environment_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "environment_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return self._env_name", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_name():\n return config.APP_NAME", "def environment_label(self) -> str:\n return self._environment_label", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def dir_name(self):\n name = get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars,\n build=True)\n return hashlib.md5(name.encode('utf-8')).hexdigest()", "def create_env_name(name):\n new_name = re.sub(r'''(?<=[a-z])([A-Z])''', '_\\\\1', name)\n new_name = re.sub(r'\\W+', '_', new_name)\n new_name = re.sub(r'_{2,}', '_', new_name)\n return new_name.upper().strip(\"_\")", "def unique_project_name(prefix: str = \"selenium-project\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'", "def exp_name() -> str:\n return 'test-' + str(uuid.uuid4())", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' 
+ env", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def uniqueName(self):\n return \"{0}::{1}\".format(self.name(), str(self.uid))", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def get_name():\n return __name__", "def scope_name_generator():\n return 'mock_' + str(uuid()).lower()[:16]", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name", "def name(self):\n return self._unique_id", "def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())", "def environment_variable_string(self, name):\n return \"$(\" + name + \")\"", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def fixture_make_unique_name():\n def _make_unique_name(prefix):\n return f\"{prefix}{time.time_ns()}\"\n return _make_unique_name", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def generate_unique_job_name(self, name='no_name_job'):\n # TODO: Make it more suitable for disk paths. 
(no *, -)\n from base64 import urlsafe_b64encode\n name = os.path.basename(name)\n return \"_\".join([os.path.split(name)[1], urlsafe_b64encode(os.urandom(3))])", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def secret_name(self) -> str:\n return self._secret_name", "def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")", "def app_name(self) -> str:\n return self._app_name", "def name() -> str:\n pass", "def unique_pipeline_name(self):\n return self._unique_pipeline_name", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def unique_id(self):\r\n name_slug = slugify(self._name)\r\n return f\"{name_slug}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def app_name(self):\n return self._app_name", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def unique_id(self):\n return self.config_entry.entry_id + \"stg\"", "def hub_name(self):\n return self._props[\"persistent_identifiers\"].get(self._hub_name_prop)", "def name(self):\n return self.config[\"name\"]", "def _get_environment(cls):\n return cls.__name__.lower()", "def unique_id(self) -> str:\n return \"_\".join([self._name, \"climate\"])", "def get_identity_name(identity_kind: str = GLOBAL_APPLICATION_CONFIGURATION) -> str:\n identity_name = os.environ.get(identity_kind)\n if identity_name:\n return identity_name\n # TODO: Add discovery here? 
This can probably be inferred.\n # Need to be careful because not all users may have IAM privileges.\n # -kmp 31-Aug-2022\n context = \"\"\n account_number = os.environ.get('ACCOUNT_NUMBER')\n if account_number:\n context = f\" in account {account_number}\"\n raise ValueError(f\"There is no default identity name available for {identity_kind}{context}.\")", "def uid():\n\n # Ambient variables for each operating system\n us = {'Windows': 'USERNAME', 'Linux': 'USER'}\n\n u = us.get(platform.system())\n return os.environ.get(u)", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def name(self):\n return self._config_name", "def name(self):\n return f\"{habitica.DOMAIN}_{self._name}_{self._sensor_name}\"", "def name(self):\n return \"docker_{}\".format(self._var_name.lower())", "def get_prefix(self) -> str:\n return self.env_type.value + '_'", "def name(self):\n return '{} {}'.format(self.client_name, self.variable)", "def getSiteName():\n return os.environ['SITENAME']", "def unique_id(self):\n return f\"bhyve:program:{self._program_id}\"", "def name(self):\n return 'Greenlet-%d' % (self.minimal_ident,)", "def get_daemon_name(cls):\n\n return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]", "def name(self):\n if self._name is None:\n return(self.default_name)\n else:\n return(self._name)", "def common_name(self) -> str:\n return pulumi.get(self, \"common_name\")", "def name(self):\n return self._config.get(CONF_NAME)", "def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name", "def generate_workflow_name(self) -> str:\n pass", "def name(self):\n\n return self.manifest[\"name\"]", "def name(self):\n return \"docker_{}_{}\".format(self._name, self._var_name)", "def scope_name():\n return tf.compat.v1.get_variable_scope().name", "def windows_name(self):\n return self._windows_name", "def systematic_name(self):\n\n return self._systematic_name", "def unique_id(self) -> str | None:\n return self._config[CONF_ID]", "def scope_name():\n return tf.get_variable_scope().name", "def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return 
pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.79474497", "0.75676024", "0.7346393", "0.71637094", "0.69561476", "0.69324404", "0.6864522", "0.6832018", "0.6827754", "0.66545093", "0.66468686", "0.65825635", "0.6506662", "0.6461384", "0.6451408", "0.64490986", "0.6430531", "0.6412154", "0.6346107", "0.6341232", "0.63376284", "0.63367367", "0.6329606", "0.6317618", "0.6284048", "0.6276035", "0.6266436", "0.62466586", "0.6245678", "0.62455535", "0.6235103", "0.6219821", "0.62020344", "0.6182158", "0.61811996", "0.61803746", "0.6128665", "0.61226356", "0.60999423", "0.6094516", "0.6077642", "0.6077642", "0.6067979", "0.60588527", "0.6057288", "0.60489494", "0.60260624", "0.60179424", "0.6016113", "0.60005075", "0.599657", "0.59915614", "0.59873265", "0.59847254", "0.5957554", "0.59502804", "0.594838", "0.594838", "0.59424794", "0.5940734", "0.5936716", "0.59356236", "0.59279215", "0.5927877", "0.5921673", "0.59179616", "0.59125125", "0.5907583", "0.59046775", "0.59041685", "0.59020925", "0.58988595", "0.58861744", "0.5882004", "0.5881382", "0.58779484", "0.58773154", "0.5875647", "0.58703846", "0.5870055", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384", "0.5863384" ]
0.7082336
4
The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role.
def operations_role(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "operations_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")", "def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def execution_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"execution_role_arn\")", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def alarm_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alarm_role_arn\")", "def get_redshift_iam_role_arn(iam, iam_role_name):\n return iam.get_role(RoleName=iam_role_name)['Role']['Arn']", "def operations_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"operations_role\")", "def account_role_arn(self, role, partition='aws'):\n if not role or role.startswith(\"arn:aws\"):\n return role\n if not role.startswith(\"role/\"):\n role = \"role/\" + role\n return \"arn:{0}:iam::{1}:{2}\".format(partition, self.account_id, role)", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def invocation_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"invocation_role\")", "def data_api_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_api_role_arn\")", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role_id\")", "def service_role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_role\")", "def role_arn_lookup(session, role_name):\n if session is None:\n return None\n\n client = session.client('iam')\n response = client.get_role(RoleName=role_name)\n if response is None:\n return None\n else:\n return response['Role']['Arn']", "def role(self):\n\n return self._role", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self):\n return self._role", "def role(self):\n 
return self._role", "def role(self):\n return self._role", "def create_arn_role(iam):\n print(\"Attaching policy to IAM role\")\n iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\")['ResponseMetadata']['HTTPStatusCode']\n roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']\n #print(\"ARN role:\", roleArn)\n return roleArn", "def target_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"target_role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def invocation_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"invocation_role\")", "def invocation_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"invocation_role\")", "def getRole(self):\n return _libsbml.ReferenceGlyph_getRole(self)", "def get_role(self):\n return self.role", "def action_role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get(\"action_role\")", "def resource_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def getRoleString(self):\n return _libsbml.SpeciesReferenceGlyph_getRoleString(self)", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")", "def get_name(self):\n return '-'.join(self._name_parts +\n [self.role.name, self.scenario.name])", "def role_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_id\")", "def service_role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_role\")", "def resource_arn(self) -> Optional[str]:\n return pulumi.get(self, \"resource_arn\")", "def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")", "def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")", "def management_account_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_arn\")", "def _generateRoleName(self, obj, **args):\n # Subclasses must override this.\n return []", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n 
return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def getRole(self):\n return _libsbml.SpeciesReferenceGlyph_getRole(self)", "def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")", "def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")", "def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")", "def _get_role(self):\n return self.__role", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def cloud_formation_execution_role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"cloud_formation_execution_role\")", "def __repr__(self):\n return '<Role %r>' % self.name" ]
[ "0.7611145", "0.7547483", "0.7543366", "0.7487498", "0.7469303", "0.7469303", "0.7371766", "0.7371766", "0.73421746", "0.73421746", "0.73421746", "0.73421746", "0.7280896", "0.7280896", "0.72515625", "0.70219666", "0.69590557", "0.6927607", "0.69078577", "0.6905639", "0.68344396", "0.6757659", "0.6477008", "0.6477008", "0.6477008", "0.64551973", "0.6322703", "0.6310421", "0.62816626", "0.6277826", "0.62161297", "0.6188797", "0.61849666", "0.6175045", "0.61677015", "0.616746", "0.616746", "0.616746", "0.6160481", "0.6153376", "0.6131818", "0.6116512", "0.6116512", "0.6102012", "0.60952556", "0.6040231", "0.6008038", "0.5987227", "0.5987227", "0.5987227", "0.5987227", "0.5987227", "0.5987227", "0.5987227", "0.59664136", "0.59388304", "0.59285486", "0.5918489", "0.5900492", "0.5891757", "0.5874583", "0.58455396", "0.58455396", "0.5822604", "0.57989955", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.5726453", "0.57109153", "0.5697217", "0.5677662", "0.5677662", "0.5649345", "0.56320834", "0.56320834", "0.56320834", "0.5629649", "0.56265545" ]
0.7117283
15
Key-value pairs defining configuration options for this environment, such as the instance type.
def option_settings(self) -> pulumi.Output[Optional[Sequence['outputs.EnvironmentOptionSetting']]]: return pulumi.get(self, "option_settings")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n 
\"real_time_rendering\": False\n }", "def _options(self):\n return", "def required_config_keys(self):\n return [\"options\", \"label_columns\", \"env\"]", "def define_options(self) -> Optional[Any]:\n return {\n 'basename': OptionDef(required=True, default_value='promtail', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='monitoring', allowed_types=[str]),\n 'config': {\n 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]),\n 'promtail_config': OptionDef(allowed_types=[str, ConfigFile]),\n 'loki_url': OptionDef(allowed_types=[str]),\n 'authorization': {\n 'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'serviceaccount_use': OptionDef(allowed_types=[str]),\n 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n },\n },\n 'container': {\n 'promtail': OptionDef(required=True, default_value='grafana/promtail:2.0.0', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'daemonset': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def config_pairs(self):\n return [(\"templater\", self.name), (\"dbt\", self.dbt_version)]", "def get_config(self):\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n if hasattr(self, 'dtype'):\n config['dtype'] = self.dtype\n return config", "def config(self) -> Dict[str, Any]:", "def default_options(cls) -> Dict:\n return {}", "def get_config(self):\n config = {\n 'window_length': self.window_length,\n 'ignore_episode_boundaries': self.ignore_episode_boundaries,\n }\n return config", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def get_config(self):\n return {}", "def instance_configuration(self) -> Optional[pulumi.Input['ServiceInstanceConfigurationArgs']]:\n return pulumi.get(self, \"instance_configuration\")", "def instance_configuration(self) -> Optional[pulumi.Input['ServiceInstanceConfigurationArgs']]:\n return pulumi.get(self, \"instance_configuration\")", "def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "def get_config(self):\n\n # these are all that is needed to rebuild this class\n config = dict(hidden_size=self.hidden_size,\n word_embedding=self.word_embedding,\n detection_embedding=self.detection_embedding,\n mode=self.mode,\n decoder_pos_emb=self.decoder_pos_emb,\n ** self.kwargs)\n\n base_config = super(RegionFeature, self).get_config()\n return dict(list(base_config.items()) +\n list(config.items()))", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def get_config(self) -> Dict[str, Any]:\n return {\n 'num_classes': self.num_classes,\n 'name': self.name,\n 'dtype': self.dtype,\n 'sparse_y_true': self.sparse_y_true,\n 'sparse_y_pred': self.sparse_y_pred,\n 'axis': self.axis,\n }", "def options(self):\n\t\treturn self.config_parser.options(self.section_name)", "def 
get_config(self) -> dict:\n out = {}\n for name in self.CONFIG_DEFAULTS:\n out[name] = self.__getattribute__(name)\n return out", "def default_options(cls) -> Dict:\n options = super().default_options()\n # scaling factor for temperature adaptation\n options['eta'] = 100\n # controls the adaptation degeneration velocity of the temperature\n # adaption.\n options['nu'] = 1e3\n\n return options", "def configuration(self):\n # type: () -> Dict[str, str]\n return {\n 'source': self.source,\n 'location': self.location,\n 'uri': self.uri,\n 'options': self.options,\n 'cache_dir': self.cache_dir\n }", "def settings(self) -> Dict[str, Any]:\n return {}", "def requested_config_vals():\n return {} # no extra values needed", "def _opt_config(self):\n return self._opt_method.config", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def _config(self):\r\n return (\r\n self.destructive,\r\n self.output_type,\r\n self.seed,\r\n )", "def options(self):\n return self.__options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\r\n return self._options", "def _set_instance_config(self):\n\t\t\n\t\tif \"PARAMETERS_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PARAMETERS_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PARAMETERS_NAME\"] = self._get_params_filepath()\n\t\t\n\t\tif \"FILTER_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own FILTER_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"FILTER_NAME\"] = self._get_conv_filepath()\n\t\t\n\t\t\n\t\tif \"CATALOG_NAME\" in self.config.keys():\n\t\t\tlogger.warning(\"You specified your own CATALOG_NAME, but I will *NOT* use it !\")\n\t\t\tdel self.config[\"CATALOG_NAME\"]\n\n\t\tif \"PSF_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PSF_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PSF_NAME\"] = self._get_psf_filepath()", "def settings(self):\n return {}", "def get_external_opts_configs(cls):\n return [\n ExternalOptConfig(\n name=\"auth_uri\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_user\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_password\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_tenant_name\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ]", "def test_options_structure(self):\r\n deploy = 
self.wsgiDeploy()\r\n expected_keys = self.DEFAULTS.keys()\r\n actual_keys = deploy.options.keys()\r\n self.assertListEqual(expected_keys, actual_keys)", "def getServerOptions(self):\n pass", "def config(self):\n return {}", "def setting(self):\n return {\n \"num_examples\": self.num_examples,\n \"dim_data\": self.dim_data,\n \"dim_target\": self.dim_target,\n \"info\": self.info,\n }", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def options(self):\n return list(self._moptions.keys())", "def config(self):\n raise NotImplementedError", "def options(self, parser, env):\n pass", "def _default_config(cls):\n return dict()", "def get_full_configuration(self) -> dict:\n\n return {\n input_instance.key: input_instance.argument_value\n for input_instance in self.all_input_instances\n }", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def __init__(self):\n super(t_var_size_Options, self).__init__()\n self.options = {\n t_var_size_Options.BOARD_ID : {'value' : '', 'name' : 'board_id' },\n t_var_size_Options.CURRENT_STATE : {'value' : '', 'name' : 'state' },\n t_var_size_Options.PATTERN_WAVE : {'value' : '', 'name' : 'pat_wav' }\n }", "def readOptions(self):\n get = command_line.CommandLineParser().get_option\n if get('nosplash')!=None:\n self.temp_configuration.showSplash = bool(get('nosplash'))\n if get('debugsignals')!=None:\n self.temp_configuration.debugSignals = bool(get('debugsignals'))\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')\n #in theory this should never happen because core.configuration.default()\n #should have done this already\n #if not self.configuration.check('dotVistrails'):\n # self.configuration.dotVistrails = system.default_dot_vistrails()\n # self.temp_configuration.dotVistrails = system.default_dot_vistrails()\n if get('multiheads')!=None:\n self.temp_configuration.multiHeads = bool(get('multiheads'))\n if get('maximized')!=None:\n self.temp_configuration.maximizeWindows = bool(get('maximized'))\n if get('movies')!=None:\n self.temp_configuration.showMovies = bool(get('movies'))\n if get('cache')!=None:\n self.temp_configuration.useCache = bool(get('cache'))\n if get('verbose')!=None:\n self.temp_configuration.verbosenessLevel = get('verbose')\n if get('noninteractive')!=None:\n self.temp_configuration.interactiveMode = \\\n not bool(get('noninteractive'))\n if get('workflowinfo') != None:\n self.temp_configuration.workflowInfo = str(get('workflowinfo'))\n if get('dumpcells') != None:\n self.temp_configuration.spreadsheetDumpCells = get('dumpcells')\n if get('pdf') != None:\n self.temp_configuration.spreadsheetDumpPDF = get('pdf')\n if get('workflowgraph') != None:\n 
self.temp_configuration.workflowGraph = str(get('workflowgraph'))\n if get('evolutiongraph') != None:\n self.temp_configuration.evolutionGraph = str(get('evolutiongraph'))\n if get('executeworkflows') != None:\n self.temp_configuration.executeWorkflows = \\\n bool(get('executeworkflows'))\n if get('showspreadsheetonly') != None:\n self.temp_configuration.showSpreadsheetOnly = \\\n bool(get('showspreadsheetonly'))\n # asking to show only the spreadsheet will force the workflows to\n # be executed\n if get('reviewmode') != None:\n self.temp_configuration.reviewMode = bool(get('reviewmode'))\n\n if self.temp_configuration.showSpreadsheetOnly and not self.temp_configuration.reviewMode:\n self.temp_configuration.executeWorkflows = True\n \n self.temp_db_options = InstanceObject(host=get('host'),\n port=get('port'),\n db=get('db'),\n user=get('user'),\n parameters=get('parameters')\n )\n if get('nologger')!=None:\n self.temp_configuration.nologger = bool(get('nologger'))\n if get('quickstart') != None:\n self.temp_configuration.staticRegistry = str(get('quickstart'))\n if get('detachHistoryView')!= None:\n self.temp_configuration.detachHistoryView = bool(get('detachHistoryView'))\n self.input = command_line.CommandLineParser().positional_arguments()", "def instance_configuration(self) -> pulumi.Output['outputs.ServiceInstanceConfiguration']:\n return pulumi.get(self, \"instance_configuration\")", "def configuration(self) -> Dict[str, Any]:\n return {self.__class__.__qualname__: self._param_names}", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def get_engine_conf():\n result = {}\n for k,v in pylons.config.iteritems():\n if not k.startswith('sqlalchemy.'):\n continue\n k = k[11:]\n if k in BOOL_OPTIONS:\n result[k] = asbool(v)\n elif k in INT_OPTIONS:\n try:\n result[k] = int(v)\n except ValueError:\n reason = 'config sqlalchemy.%s is not an integer: %s'\n raise ValueError(reason % (k,v))\n else:\n result[k] = v\n return result", "def config(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['Config']", "def get_options(cls, mode):\n return dict(\n (key, properties[mode])\n for key, properties in cls.__register.items()\n if mode in properties\n )", "def get_options(self) -> dict:\n assert self.task\n task_options = {\n **self.task.get_task_options(),\n **self.expr.task_expr_options,\n **self.task_options,\n }\n return task_options", "def process_config(self):\n driver_options = self.config['service']['options']\n process_config = {\n 'assembler_config': {\n 'driver_options': driver_options,\n 'teststep_config': self.teststep_config,\n 'testcase_config': self.config['reader_settings']['test_case']['keys'],\n },\n 'assembly_config': self.config['assembly_settings'],\n }\n return process_config", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config", "def config(self):\n return {\"input_dims\": self.dims, \"output_dims\": self.output_dims, \"mapping\": 
self.mapping}", "def config(self) -> Dict[str, Any]:\r\n attr_conf = {attr: getattr(self._qda, attr, None) for attr in self.attributes}\r\n return {\"params\": self._qda.get_params(), \"attributes\": attr_conf}", "def as_dict(self) -> dict:\n return self._config", "def _create_options(self):\n self._OPTIONS = {}", "def get_config(self):\n config = self._kwargs.copy()\n config.update({\n 'metric': self.__class__.__name__,\n 'name': self.name,\n 'output_names': self.output_names,\n 'label_names': self.label_names})\n return config", "def beaker_session_options(self):\n\n session_data_dir = os.path.join(self.APP_DIR, self.SESSION_DIR)\n\n # TODO: Options which should be made into PyWy application options\n options = dict(type='file',\n data_dir=session_data_dir,\n auto=True)\n\n # Standard options\n options.update(invalidate_corrupt=True, timeout=None,\n secret=None, log_file=None,)\n\n return options", "def configuration():", "def create_options(self):\n return []", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def config(self) -> dict:\n return self._configs", "def config(self) -> dict:\n return self._config", "def ssh_config(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n print(utils.config_ssh_string(self.config_ssh))", "def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}", "def declare_final_options(self):\n return {}", "def configuration_keys(self):\n return ['filter1', 'echangle', 'xdangle']", "def get_options(self):\n return []", "def config(self) -> pulumi.Input['ConfigArgs']:\n return pulumi.get(self, \"config\")", "def __init__(self) -> None:\n self.config: dict[str, str | int] = {}", "def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config", "def 
getConfiguration(self):\n raise NotImplementedError", "def expected_instance_datastore_configs(instance_id):\n instance = instance_info.dbaas.instances.get(instance_id)\n datastore_type = instance.datastore['type']\n datastore_test_configs = CONFIG.get(datastore_type, {})\n return datastore_test_configs.get(\"configurations\", {})", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def data_dict(self) -> dict:\n return self.design.renderers.gds.options", "def _store_options(self):\n logger.debug(\"Storing general ReplicaExchange options...\")\n self._reporter.write_dict('options', self.options)", "def config(self):\n pass", "def config(self):\n pass", "def buildersConf() :\n return dict(_buildersConf)", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "async def config_options(self, ctx):\n embeds = []\n for names in zip_longest(*(iter(sorted(self.bot.config.public_keys)),) * 15):\n description = \"\\n\".join(\n f\"`{name}`\" for name in takewhile(lambda x: x is not None, names)\n )\n embed = Embed(\n title=\"Available configuration keys:\",\n color=self.bot.main_color,\n description=description,\n )\n embeds.append(embed)\n\n session = EmbedPaginatorSession(ctx, *embeds)\n await session.run()", "def config(self):\n return self[CONFIG_KEY]", "def _config_classes(self):\n pass", "def get_global_config(self, **kwargs):\n return {}", "def add_options(_config):\n settings = [\n [\"cache_worker\", bool, lambda x: x in [True, False], False, False],\n [\n \"kube_deployment\",\n str,\n lambda x: x in [\"pod\", \"container\", \"file\", \"call\"],\n False,\n \"pod\",\n ],\n [\n \"kube_version\",\n str,\n lambda _: [\"v1.27.0\", \"v1.26.0\", \"v1.25.0\", \"v1.24.0\", \"v1.23.0\"],\n False,\n \"v1.27.0\",\n ],\n ]\n return settings" ]
[ "0.6605203", "0.65723705", "0.65408826", "0.65288186", "0.651911", "0.649711", "0.6480811", "0.64501303", "0.6391103", "0.6391103", "0.6391103", "0.6328672", "0.6306278", "0.63004625", "0.6244758", "0.62443846", "0.624185", "0.6222133", "0.6206737", "0.6206737", "0.6205525", "0.61864835", "0.6186336", "0.61830086", "0.61644673", "0.6143182", "0.6112726", "0.6107032", "0.60775065", "0.6063169", "0.6056793", "0.6040969", "0.60267204", "0.60085213", "0.6007031", "0.59947896", "0.59947896", "0.59947896", "0.59947896", "0.59947896", "0.59790456", "0.5962236", "0.595609", "0.5955992", "0.594203", "0.59309626", "0.5924089", "0.5919046", "0.59171754", "0.591247", "0.5912432", "0.59121287", "0.59021735", "0.58988833", "0.58829546", "0.58714974", "0.58684796", "0.5853242", "0.5852988", "0.5829189", "0.58147246", "0.57680666", "0.5767966", "0.576356", "0.57624006", "0.57612544", "0.57536364", "0.5752291", "0.5747326", "0.5744211", "0.5741688", "0.57313794", "0.5712367", "0.57101184", "0.570051", "0.5696397", "0.56959385", "0.5695333", "0.569473", "0.567763", "0.56698", "0.56666243", "0.5665097", "0.565763", "0.56574494", "0.5651892", "0.5651742", "0.5651383", "0.5644724", "0.56386936", "0.5623132", "0.5621247", "0.56162727", "0.56162727", "0.5616089", "0.5612185", "0.56108886", "0.56074643", "0.56071186", "0.5606702", "0.5606086" ]
0.0
-1
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
def platform_arn(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "platform_arn")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def platform_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"platform_arn\")", "def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")", "def platform():\n return \"micaz\"", "def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)", "def product(self):\n return self.appName", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def GetOSName():\n return Config.osName_", "def environment_label(self) -> str:\n return self._environment_label", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch", "def brand(self):\n return \"Nest Labs\"", "def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")", "def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")", "def os_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_name\")", "def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def platform(self):\n # type: () -> string_types\n return self._platform", "def architecture_name(self):\n return get_architecture_name(self.architecture)", "def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]", "def get_name():\n return config.APP_NAME", "def platform(self, return_str=True):\n architecture = self.arch(\"docker\")\n host_platform = self.osversion() + \"/\" + architecture\n if return_str:\n return host_platform.lower()\n return self.parse_platform(host_platform)", "def get_os_name(cls):\n return cls.get_os_type().name", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def get_ami_keyname ( app_name ) :\n return app_name + '.ami'", "def 
OverridePlatformName(name):\n global override_platform_name\n override_platform_name = name", "def getPlatform(self):\n\t\treturn None", "def product_name(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def brand(self) -> str:\n return self._config_entry.data.get(CONF_BRAND, DEFAULT_BRAND)", "def get_name(self):\n return \"catkin\"", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def master_name(self):\n return self._LAUNCHPAD_NAME", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def platform_info(self):\n return platform.uname()._asdict()", "def get_chromeos_platform_name():\r\n try:\r\n platform = cros_config.call_cros_config_get_output('/ name', utils.run)\r\n if platform == '':\r\n platform = get_board()\r\n return platform\r\n except:\r\n logging.info(\"Not found\")\r\n return -1", "def name(self) -> str:\n return self.dev.label", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname", "def getApplicationName(self) -> unicode:\n ...", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "def get_platform(self):\n return self._platform", "def platform():\n if 'OS' in gyp_defines():\n if 'android' in gyp_defines()['OS']:\n return 'android'\n else:\n return gyp_defines()['OS']\n elif IsWindows():\n return 'win'\n elif IsLinux():\n return 'linux'\n else:\n return 'mac'", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def os(self) -> str:\n os = None\n attributes_tag = self._get_tag(\"parallelcluster:attributes\")\n if attributes_tag:\n # tag is in the form \"{BaseOS}, {Scheduler}, {Version}, {Architecture}\"\n os = attributes_tag.split(\",\")[0].strip()\n return os", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, \"platform_version\")", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def resource_arn(self) -> Optional[str]:\n return pulumi.get(self, \"resource_arn\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def os_label(self):\n\n return self._os_label", "def application(self):\n\n if not self._applicationDef:\n raise NotValidPlatformException(\n 'No application definition is available. 
Are you sure you are running on Platform.sh?'\n )\n return self._applicationDef", "def name(self):\n return self._env_name", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def anki(self) -> str:\n\n return self._get_via_app_bundle(path=\"/Applications/Anki.app\")", "def name(self) -> str:\n return self._device.name or self._device.mac", "async def osname(self):\n\n await self.bot.say(box(system(), 'Bash'))", "def get_system_name(self):\n\n\t\treturn self.__system_name", "def name(self):\n return self._config.backend_name", "def management_account_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_arn\")", "def get_version_key(platform):\n\tversion_key = ''\n\tif platform.lower() == 'sdk':\n\t\tversion_key = 'sdk_version'\n\telif platform.lower() == 'server':\n\t\tversion_key = 'server_version'\n\telif platform.lower() == 'android' or platform.lower() == 'ios':\n\t\tversion_key = 'app_version'\n\treturn version_key", "def provider_name(self):\n return self.resource_class.name", "def provider_name(self):\n return self.resource_class.name", "def get_ami_keypath ( env_type ) :\n return \"/builds/esp/\" + env_type + \"/current/\"", "def _format_platform(platform, release, architecture=None):\n rep = f\"{_PLATFORMS[platform]} {release}\"\n if architecture is None or architecture == default.architecture:\n return rep\n return f\"{rep} ({architecture})\"", "def component_arn(self) -> Optional[str]:\n return pulumi.get(self, \"component_arn\")", "def component_arn(self) -> Optional[str]:\n return pulumi.get(self, \"component_arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")" ]
[ "0.7735306", "0.66278744", "0.6624211", "0.6606932", "0.6323558", "0.6283941", "0.6234539", "0.61513674", "0.6081606", "0.60462385", "0.60026896", "0.5988347", "0.598575", "0.59628206", "0.5962734", "0.59611076", "0.59324396", "0.59225625", "0.59225625", "0.5922316", "0.5922316", "0.59092885", "0.58717483", "0.5853615", "0.5838145", "0.58310544", "0.5810798", "0.577213", "0.5770021", "0.5745066", "0.5745066", "0.5745066", "0.5745066", "0.5745066", "0.57325476", "0.57261425", "0.5716356", "0.571532", "0.57071304", "0.5704987", "0.5700441", "0.5700441", "0.5700441", "0.5693006", "0.56789815", "0.56634223", "0.5662633", "0.5657371", "0.5650154", "0.56330764", "0.56224054", "0.56133753", "0.5608851", "0.5592761", "0.55727583", "0.55702496", "0.55702496", "0.55702496", "0.55702496", "0.5566541", "0.5558546", "0.55503935", "0.554941", "0.55341387", "0.55153906", "0.5513844", "0.5513844", "0.55093426", "0.55076003", "0.5503626", "0.5502875", "0.5499117", "0.54939014", "0.54865384", "0.5479545", "0.5475575", "0.5473094", "0.5473069", "0.5472761", "0.5457979", "0.54568017", "0.54568017", "0.5454219", "0.5453971", "0.54490453", "0.54490453", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054", "0.543054" ]
0.7835914
0
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
def solution_stack_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "solution_stack_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution_stack_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"solution_stack_name\")", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name", "def name(self):\n return self._env_name", "def kernel_name():\n return \"python3\"", "def stack_name(self) -> str:\n return self._values.get(\"stack_name\")", "def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_name():\n return config.APP_NAME", "def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_label(self) -> str:\n return self._environment_label", "def stack_name(self) -> typing.Optional[str]:\n return self._values.get(\"stack_name\")", "def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname", "def stackname(self):\n raise NotImplementedError", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def python_name(self):\n return self.requirement.name", "def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"", "def get_soc_name():\n return get_soc_spec(\"SOC_VERSION\")", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, 
\"bucketDualStackDomainName\")", "def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, \"bucketDualStackDomainName\")", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def get_stack(stack_name, region, cfn_client=None):\n if not cfn_client:\n cfn_client = boto3.client(\"cloudformation\", region_name=region)\n return cfn_client.describe_stacks(StackName=stack_name).get(\"Stacks\")[0]", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])", "def get_res_name():\n return os.getenv(\"RESOURCES_VERSION\", \"res_0.0\")", "def ecr_image_name(dev_account_id, region, component_name, version):\n return '%s.dkr.ecr.%s.amazonaws.com/%s:%s' % (dev_account_id, region, component_name, 'dev' if version is None else version)", "def get_version_name(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['version_name']\n\t\texcept:\n\t\t\treturn None", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def _k8s_service_name(self):\n return \"{}-ssh-service\".format(self.app.name)", "def stage_name(self) -> str:\n return self._stage_name", "def deploy_stack():\n build = \"sam build --use-container --manifest src/images/requirements.txt\"\n local(build)\n\n #package = f\"sam package --template-file template.yaml --output-template-file \\\n # packaged.yaml --s3-bucket {env.bucket_name} --region {env.aws_region}\"\n #local(package)\n\n deploy = f\"sam deploy --stack-name storge-machine-service \\\n --s3-bucket {env.bucket_name}\\\n --parameter-overrides env=dev --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region {env.aws_region}\"\n #deploy = \"sam deploy\"\n local(deploy)", "def get_stack_info():\n\n response = cloudformation.describe_stacks(\n StackName=config.CLOUDFORMATION_STACK_NAME\n )\n return response['Stacks'][0]", "def product(self):\n return self.appName", "def get_distrib_name():\n distrib, version, codename = _get_release_infos()\n \n if distrib.startswith('Red Hat Enterprise Linux'):\n return 'RHEL'\n elif distrib.startswith('CentOS'):\n return 'CentOS'\n else:\n abort(\"OS not supported.\")", "def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)", "def deployment_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"deployment_group_name\")", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' 
+ env", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def describe_stack(cfn, stack_name):\n try:\n stacks = cfn.describe_stacks(StackName=stack_name)[\"Stacks\"]\n return stacks[0]\n except ClientError as e:\n if \"does not exist\" not in e.response[\"Error\"][\"Message\"]:\n raise e\n return None", "def fhir_version_name(fhir_version):\n major_version = int(fhir_version.split('.')[0])\n\n if major_version < 3:\n return 'dstu2'\n elif (major_version >= 3) and (major_version < 4):\n return 'stu3'\n elif (major_version >= 4) and (major_version < 5):\n return 'r4'\n else:\n raise Exception(\n f'Invalid fhir version supplied: {fhir_version}! No name exists '\n 'for the supplied fhir version.'\n )", "def getSlavename():", "def python_branch():\n\n return _sys_version()[2]", "def GetOSName():\n return Config.osName_", "def compliance_pack_name(self) -> str:\n return pulumi.get(self, \"compliance_pack_name\")", "def _get_upgrade_stack():\n from resource_management.libraries.functions.default import default\n direction = default(\"/commandParams/upgrade_direction\", None)\n stack_name = default(\"/hostLevelParams/stack_name\", None)\n stack_version = default(\"/commandParams/version\", None)\n\n if direction and stack_name and stack_version:\n return (stack_name, stack_version)\n\n return None", "def stage_name(self) -> str:\n return self._values.get(\"stage_name\")", "def get_package_name(self):\n return self.name + '-' + self.version", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def name(self):\r\n return self.setuptools_requirement.project_name", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def storage_appliance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_appliance_name\")", "def deployment_environment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deployment_environment\")", "def name(self) -> str:\n return self.dev.label", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def brand(self):\n return \"Nest Labs\"", "def bucket_dual_stack_domain_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_dual_stack_domain_name')", "def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_name\")", "def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_name\")", "def get_image_name():\n try:\n return os.environ['AIRFLOW_IMAGE']\n except KeyError:\n raise 
Exception(\"Please provide docker image name to pytest using environment variable AIRFLOW_IMAGE\")", "def fullname(self):\n return \"{project}/{version}\".format(\n project=self.project.name, version=self.name\n )", "def _app(self) -> str:\n return self.charm.app.name", "def get_product_name(self):\n sushy_system = self._get_sushy_system()\n return sushy_system.model", "def get_stack_domain_name(self, stack_name):\n cf_stack = stack(self.session)\n resources = cf_stack.get_stack_resources(stack_name, 'AWS::ApiGateway::DomainName')\n if not resources:\n return False\n return resources[0]", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def getSiteName():\n return os.environ['SITENAME']", "def bundle_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"bundle_name\")", "def name(self):\n return self._config.backend_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def display_name(self) -> str:\n if self.is_verified:\n return f\"Verified Package {self.csharp_version}\"\n elif self.is_main:\n return \"main (unstable)\"\n else:\n return self.release_tag.replace(\"_\", \" \").title()", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def git_service_name(self):\n return self._git_service_name", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def get_target_group_name(self, short_name):\n app_env = self.get_current_env()\n full_name = self.get_target_group_fully_qualified_name(short_name)\n namespace = self.config['namespace']\n\n if len(full_name) <= 32:\n return full_name\n elif len(namespace) + 10 <= 32:\n env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]\n return '{}-{}'.format(namespace, env_target_hash)\n else:\n return hashlib.md5(full_name.encode()).hexdigest()", "def get_product_name(self):\n system = self._get_host_details()\n return system['Model']", "def name(self):\n return self.application_tree['name']", "def version():\n click.echo(u'shellfoundry version ' + pkg_resources.get_distribution(u'shellfoundry').version)", "def get_name():\n return __name__", "def get_vm_image_name(self):\n return self.virtual_environment[self.T_I][self.T_I_N] if self.is_vm_image() else None", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def platform():\n return \"micaz\"", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def container_name(self):\n pass", "def docker_image_name(self):\n raise NotImplementedError", "def _get_engine_name(self):", "def get_version_tag(self, version: str) -> str:\n return version", "def get_version(self):\n return self.cur_config['version']['name']" ]
[ "0.6874254", "0.68344885", "0.6518337", "0.65035015", "0.6400849", "0.62462306", "0.61668295", "0.6154686", "0.6143118", "0.6137554", "0.61008966", "0.6086461", "0.60863656", "0.60621977", "0.60092354", "0.5980697", "0.5922324", "0.5898759", "0.5881433", "0.58511484", "0.582938", "0.5816132", "0.58146447", "0.5811952", "0.57712144", "0.5765751", "0.57646734", "0.57646734", "0.5764073", "0.5694881", "0.5694881", "0.5683015", "0.56755537", "0.5674331", "0.56537825", "0.5648027", "0.5632168", "0.5627301", "0.5613054", "0.5610453", "0.55885655", "0.55877715", "0.555428", "0.55518955", "0.55429226", "0.55337536", "0.55179286", "0.5517808", "0.5508351", "0.55035245", "0.54945946", "0.5493815", "0.54816306", "0.5473949", "0.5473396", "0.5472469", "0.54690564", "0.5463126", "0.54624945", "0.54573435", "0.5439292", "0.5438999", "0.5429474", "0.5398176", "0.53919953", "0.5374883", "0.5351118", "0.5348846", "0.5348846", "0.5341566", "0.534141", "0.5340569", "0.53298855", "0.5320319", "0.53165543", "0.53152215", "0.5312524", "0.5310521", "0.5293119", "0.52806854", "0.5278518", "0.52697825", "0.5261944", "0.5259018", "0.5253117", "0.5252711", "0.52338445", "0.5231651", "0.5218436", "0.52141285", "0.5213663", "0.5213581", "0.52134746", "0.52099466", "0.52066475", "0.52029055", "0.520057", "0.52002406", "0.5196915", "0.5192101" ]
0.6848552
1
Specifies the tags applied to resources in the environment.
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.EnvironmentTag']]]: return pulumi.get(self, "tags")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def add_tags_to_resource(ResourceId=None, Tags=None):\n pass", "def tags(self, tags: List[Tag]):\n\n self._tags = tags", "def tags(self) -> Tags:\n return Tags(**dict(self.context.tags, **self.args.tags))", "def tags(self):\r\n return resources.Tags(self)", "def list_tags_for_resource(Resource=None):\n pass", "def describe_tags(resourceArns=None):\n pass", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def set_tags(self, tags, filename):\n return self.set_tags_batch(tags, [filename])", "def tag_resource(resourceArn=None, tags=None):\n pass", "def set_tags(self, tags):\n self.tags = []\n for tag in [t.strip() for t in tags.split(', ')]:\n self.tags.append(Tag(title=tag))", "def add_tags(ResourceArn=None, Tags=None):\n pass", "def set_tags(self, tags):\n self._tag.clear()\n\n for tag in tags:\n if tag not in self._tag:\n self._tag.append(tag)\n\n return self", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tags():", "def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)", "def create_tags(self, resource_name: str, **kwargs) -> dict:\n init_tags = self._global_tags\n init_tags.update(kwargs)\n tags = {}\n for key, value in init_tags.items():\n if key == \"business_unit\":\n self._check_business_unit(value, self._allowed_business_units)\n if key in [\"is_production\"]:\n raise KeyError(f\"{key} is not an allowed argument\")\n tags[key.replace(\"_\", \"-\").lower()] = value\n tags[\"is-production\"] = tags[\"environment-name\"] in [\"alpha\", \"prod\"]\n tags[\"Name\"] = resource_name\n return tags", "def set_tags(self, session, *tags):\n if not tags:\n return list()\n\n result = self._tag(session.put, tags=list(tags), session=session)\n return result['tags']", "def setTags(self,newtags):\n\t\tself.tags = newtags;", "def add_tags():\n\n tags = shallow_copy(e['ResourceProperties'].get('Tags', []))\n tags += [\n {'Key': 'cloudformation:' + 'logical-id', 'Value': e['LogicalResourceId']},\n {'Key': 'cloudformation:' + 'stack-id', 'Value': e['StackId']},\n {'Key': 'cloudformation:' + 'stack-name', 'Value': e['StackId'].split('/')[1]},\n {'Key': 'cloudformation:' + 'properties', 'Value': hash_func(e['ResourceProperties'])}\n ]\n\n acm.add_tags_to_certificate(**{'CertificateArn': e['PhysicalResourceId'], 'Tags': tags})", "def defined_tags(self, defined_tags):\n self._defined_tags = defined_tags", "def tag_resources(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n ) -> dds_20151201_models.TagResourcesResponse:\n runtime = 
util_models.RuntimeOptions()\n return self.tag_resources_with_options(request, runtime)", "def set_tags(self, tags):\n for task in self._tasks:\n task.set_tags(tags)\n\n return self", "def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)", "def setAddTags(self,value):\n self.PDFreactorConfiguration.in1[\"addTags\"] = value", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Sequence[str]:\n return pulumi.get(self, \"tags\")", "def add_tags(event):\n\n add_tags_from_presets()", "def create_tags(ResourceArn=None, Tags=None):\n pass", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def set_tags(self, tags):\n uniques = set()\n distinct = []\n for tag in tags:\n if tag not in uniques:\n distinct.append(tag)\n uniques.add(tag)\n self.__post_changes(distinct)", "def setTag(self, tag):\n\t\tself.config.TAG = tag", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def save_tags(context):\n items = context.response.json()['items']\n tags = set()\n for item in items:\n for tag in item['tags']:\n tags.add(tag)\n context.tags = list(tags)\n logging.debug('Saved all tags in context.tags:\\n%s', pformat(sorted(context.tags)))", "def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})", "def allocation_resource_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def allocation_resource_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def tags(self):\n raise BookInfoNotImplementedError('tags', self.__class__.__name__)", "def allocation_resource_tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def resource_name() -> str:\n return \"contactTags\"", "def __init__(self, tags=''):\n self.tags = tags", "def create_tags(configurationIds=None, tags=None):\n pass", "def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags 
per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))", "def tags(self, request, tag_list, group):\n return tag_list", "def __init__(__self__, *,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PackagingConfigurationTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def __get_tags(self, name):\n return Tags(\n Environment=\"ApiDev\",\n Name=\"ApiDev-Dev-\"+name,\n Owner=\"Foo industries\",\n Service=\"ServiceVPC\",\n VPC=\"Dev\",\n )", "def tags(self):\n return ['HostRoles/component_name', \\\n 'HostRoles/host_name', \\\n 'HostRoles/cluster_name']", "def tag_resources_with_options(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.TagResourcesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_id):\n query['ResourceId'] = request.resource_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.resource_type):\n query['ResourceType'] = request.resource_type\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='TagResources',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.TagResourcesResponse(),\n self.call_api(params, req, runtime)\n )", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def custom_tags(self, custom_tags):\n\n self._custom_tags = custom_tags", "def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()", "async def tag_resources_async(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n ) -> dds_20151201_models.TagResourcesResponse:\n runtime = util_models.RuntimeOptions()\n return await self.tag_resources_with_options_async(request, runtime)", "def add_tags(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n self.tags = self.tags.union(tags)", "def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp", "def list_tags_for_resource(ResourceId=None, NextToken=None, Limit=None):\n pass", "def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass", "def 
resources(self, value):\n self._resource_objects = value", "def tags(self):\r\n return Tags(self)", "def tags(self):\r\n return Tags(self)", "def __init__(self, tags):\n self.tags = tags", "def get_resources(self):\n client = self.client\n if self.resources:\n return self.resources\n\n response = client.list_buckets()\n for resource in response['Buckets']:\n resource_name = resource['Name']\n tags = client.get_bucket_tagging(\n Bucket=resource_name\n )\n self.resources.append({\n \"Name\": resource_name,\n \"Tags\": tags.get(\"TagSet\")\n })\n\n return self.resources", "def resources(self, resources):\n self._resources = resources", "def list_tags(ResourceArn=None):\n pass", "def hook_tags_for_projects(task):\n if task['project'] in TAGS_FOR_PROJECTS.keys():\n for tag in TAGS_FOR_PROJECTS[task['project']]:\n task['tags'].add(tag)", "def set_tag(self, scope, key, value):\r\n self._tags[scope][key] = value\r\n print 'SET', scope, key, value, self._tags", "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def tags(self, val: list):\n self._tags = []\n if val is not None:\n for item in val:\n self._tags.append(item)", "def tags(self):\n return self.get(\"tags\")", "def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags", "def tag_ids(self, tag_ids):\n\n self._tag_ids = tag_ids", "def list_tags_for_resource(ResourceArn=None):\n pass", "def tags(self) -> dict:\n return self._tags", "def __init__(__self__, *,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def tag(self, tag):\n self.tag = tag", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")", "def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()" ]
[ "0.72549057", "0.72549057", "0.72549057", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.71421885", "0.7037167", "0.6819578", "0.6523886", "0.65198076", "0.6484716", "0.63857216", "0.6372989", "0.63729405", "0.6371374", "0.635126", "0.63142556", "0.63089454", "0.62716", "0.62716", "0.62716", "0.6254411", "0.6238532", "0.619678", "0.6158898", "0.61558044", "0.6135329", "0.61127955", "0.60354793", "0.6013553", "0.6006397", "0.59759396", "0.59501755", "0.59501755", "0.59501755", "0.59501755", "0.5932594", "0.59277165", "0.5921238", "0.59199584", "0.59199584", "0.59151745", "0.5879354", "0.5861306", "0.5843479", "0.5840424", "0.5788822", "0.5788822", "0.5781244", "0.5769145", "0.5751966", "0.57486725", "0.57161826", "0.5701609", "0.57001436", "0.5700037", "0.56853807", "0.5674244", "0.56656384", "0.56655294", "0.56621104", "0.5658982", "0.5648485", "0.5638902", "0.5633441", "0.5625729", "0.56151754", "0.5610248", "0.5608261", "0.56049955", "0.5599392", "0.5599392", "0.55991405", "0.55924296", "0.5590507", "0.55787754", "0.55628186", "0.5559921", "0.55586183", "0.5558347", "0.5548408", "0.5543311", "0.55426985", "0.5540859", "0.5537586", "0.5533724", "0.55290866", "0.55246294", "0.5521952", "0.55152774", "0.55006576", "0.55006576", "0.54985636" ]
0.60345477
36
The name of the Elastic Beanstalk configuration template to use with the environment.
def template_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "template_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_configuration_template(self):\n return CONFIG_TEMPLATE", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def inspect_template_name(self) -> str:\n return pulumi.get(self, \"inspect_template_name\")", "def template(self):\n return self.conf.get(\"template\", None)", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def name(self):\n return self._config_name", "def health_check_template_name(self) -> str:\n return pulumi.get(self, \"health_check_template_name\")", "def name(self):\n return self._config.get(CONF_NAME)", "def template_name(self, template_type: Union[TemplateType, str]) -> str:\n return self.options.get(\"templates\", {}).get(template_type, template_type)", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")", "def getGenericConfigFileName(self):\n executePkgDir = lsst.utils.getPackageDir('ctrl_execute')\n\n name = \"config_with_%s.py.template\" % self.setup_using\n genericConfigName = os.path.join(executePkgDir,\n \"etc\", \"templates\", self.manager, name)\n if os.path.exists(genericConfigName):\n return genericConfigName\n raise RuntimeError(\"File %s not found; check etc/templates.\" %\n genericConfigName)", "def template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_name\")", "def get_config_template(self) -> cconfig.Config:", "def get_config_name(self): # pragma: no cover\n pass", "def template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def launch_template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def launch_template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def name(self):\n return self.config[\"name\"]", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def template_path(self):\n return self.get_config(\"templates\")", "def _getConfigName(self):\n pass", "def name(self):\n return self._env_name", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def name(self):\n return f'{self._config[CONF_NAME]} {self._typeconf[\"name\"]}'", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_name():\n return config.APP_NAME", "def _get_config_template(self, key):\n tmp_path = self._get_config_value('templates', 'path') + key\n return tmp_path", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_label(self) -> str:\n return self._environment_label", "def config_file_name(self):\n return self._config_file_name", "def get_context_template_name(self):\n return getattr(self, 'context_template_name', None)", "def template(self) -> Optional[pulumi.Input['InstanceTemplateSpecArgs']]:\n return pulumi.get(self, \"template\")", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n 
return config_file", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def access_configuration_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"access_configuration_name\")", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def sirsam_bs_conf(sirsam_bootstrap):\n return os.path.join(sirsam_bootstrap, 'bootstrapping.yaml')", "def _get_container_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_container_name = f\"{dirname}_{self.config_name}\"\n container_name = self.config_options.get(\"container_name\", default_container_name)\n return container_name", "def template_dir(self):\n return self.cm.get(YAML_CONFIG_TEMPLATE_DIR)", "def get_config_name():\n name = CONFIG_FILE_NAME\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--config'):\n if arg == '--config':\n return sys.argv[i + 1]\n else:\n name = sys.argv[i].split('--config')[1]\n if name[0] == '=':\n name = name[1:]\n return name\n\n return name", "def configFilename(self):\n return self.name()+'.py'", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def configuration_set_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"configuration_set_name\")", "def render_template(self):\n apps = [{\n 'name': container.name,\n 'image': container.image,\n 'environment': container.environment,\n 'memory': container.memory,\n 'portMappings': container.portmappings\n } for container in self.containers]\n\n t = self.templates.get_template('aws/containers.template')\n return t.render(apps=apps, family=self.family)", "def _make_config_file_name(environment, out=False):\n return os.path.join(PH_HOME_DIR, \"etc/config\", \"%s.conf\" % environment) if out else \\\n os.path.join(PH_HOME_DIR, \"config\", \"%s.conf.in\" % environment)", "def getNameTemplate(self):\n\n return self.nameTemplate", "def get_name(self):\n if 'label' in self.configs_:\n return self.configs_['label']\n return self.configs_['id']", "def config(self):\n state_file_id = \"{env}-{component}\".format(env=self.environment, component=self.component)\n\n grunt_config_template = \"\"\"lock = {{\nbackend = \"dynamodb\"\nconfig {{\nstate_file_id = \"{state_file_id}\"\naws_region = \"{region}\"\ntable_name = \"terragrunt_locks\"\nmax_lock_retries = 360\n}}\n}}\nremote_state = {{\nbackend = \"s3\"\nconfig {{\nencrypt = \"true\"\nbucket = \"{s3_bucket}\"\nkey = \"{env}/{component}/terraform.tfstate\"\nregion = \"{region}\"\n}}\n}}\"\"\"\n\n with open('.terragrunt', 'w') as f:\n f.write(grunt_config_template.format(\n state_file_id=state_file_id,\n region=self.metadata['REGION'],\n s3_bucket=self.s3_bucket,\n 
env=self.environment,\n component=self.component\n ))", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def get_jinja_filename_environment(templates) -> jinja2.Environment:\n loader = jinja2.DictLoader(\n {template.name: template.name for template in templates}\n )\n return jinja2.Environment(\n loader=loader, trim_blocks=True, lstrip_blocks=True\n )", "def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'", "def deploy_cfg():\n return '{buildout}.cfg'.format(buildout=env.host.split('.')[0])", "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def get_launch_template(lt_name):\n logger.info(f'Describing launch template for {lt_name}...')\n response = ec2_client.describe_launch_templates(LaunchTemplateNames=[lt_name])\n return response['LaunchTemplates'][0]", "def configuration_configmap_name(self) -> Optional[str]:\n return pulumi.get(self, \"configuration_configmap_name\")", "def access_configuration_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_configuration_name\")", "def container_name(self):\n pass", "def module_name(self):\n return self.config_section", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])", "def 
access_configuration_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"access_configuration_name\")", "def name(self):\n return self._config.backend_name", "def get_instance_template(self, name):\n return self.call_api('/global/instanceTemplates/%s' % name)", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def secrets_bucket_name(self):\n return self.config.secrets_bucket", "def configuration_set_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"configuration_set_name\")", "def get_sls_config_file(path, stage, region):\n for name in gen_sls_config_files(stage, region):\n if os.path.isfile(os.path.join(path, name)):\n return name\n return \"config-%s.json\" % stage # fallback to generic json name", "def _get_template():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/attributes/instance-template',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+instanceTemplates/(.+)', r'\\1', r.text)\n else:\n return ''", "def config_bundle(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"config_bundle\")", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def generate_haproxy_config(template=None, instances=None):\n\n return Template(filename=template).render(instances=instances)", "def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' 
+ context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def runtime_config(self) -> str:\n return self._node[\"app_data\"].get(\"runtime_config\")", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def configuration_set_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"configuration_set_name\")", "def template_dir(self):\n return os.path.join(Config().template_dir(), 'platform')", "def get_named_config(config_name: str = 'production'):\n if config_name in ['production', 'staging', 'default']:\n config = ProdConfig()\n elif config_name == 'testing':\n config = TestConfig()\n elif config_name == 'development':\n config = DevConfig()\n else:\n raise KeyError(f'Unknown configuration: {config_name}')\n return config", "def name(self):\n return self.__class__.get_setting_name(self.key, **self.get_kwargs())", "def get_named_config(config_name: str = 'production'):\n if config_name in ['production', 'staging', 'default']:\n config = ProdConfig()\n elif config_name == 'testing':\n config = TestConfig()\n elif config_name == 'development':\n config = DevConfig()\n else:\n raise KeyError(f\"Unknown configuration '{config_name}'\")\n return config", "def get_branded_template(self, brand, template_name, deprecated_template_name):\n\n # If the deprecated setting is defined, return it.\n try:\n return self.cfg.get(*deprecated_template_name)\n except configparser.NoOptionError:\n pass\n\n # If a brand hint is provided, attempt to use it if it is valid.\n if brand:\n if brand not in self.valid_brands:\n brand = None\n\n # If the brand hint is not valid, or not provided, fallback to the default brand.\n if not brand:\n brand = self.cfg.get(\"general\", \"brand.default\")\n\n root_template_path = self.cfg.get(\"general\", \"templates.path\")\n\n # Grab jinja template if it exists\n if os.path.exists(\n os.path.join(root_template_path, brand, template_name + \".j2\")\n ):\n return os.path.join(brand, template_name + \".j2\")\n else:\n return os.path.join(root_template_path, brand, template_name)", "def _k8s_service_name(self):\n return \"{}-ssh-service\".format(self.app.name)", "def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")", "def get(config_name):\n if config_name.lower() in GlobalConfig.__CONFIG__:\n return GlobalConfig.__CONFIG__[config_name.lower()]", "def getConfigFileName(self):\n return self._configFileName", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def kafka_configuration_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kafka_configuration_name\")", "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def container_name(self):\n prefix = 
get_service_prefix('core' if self.core else 'service')\n return f'{prefix}{self.data.get(\"name\")}'" ]
[ "0.674576", "0.65179414", "0.6502485", "0.64848953", "0.6403082", "0.6380132", "0.6355327", "0.63501006", "0.63139427", "0.6292424", "0.62733996", "0.6261383", "0.6251333", "0.61383086", "0.61321324", "0.610607", "0.60788184", "0.60778415", "0.60102946", "0.6002426", "0.5973798", "0.5952915", "0.5912567", "0.59034204", "0.5884839", "0.58675617", "0.58549315", "0.58266777", "0.5822361", "0.5802485", "0.57281196", "0.57253003", "0.5716861", "0.5652639", "0.56303316", "0.5618341", "0.5578043", "0.5571686", "0.5566381", "0.5562447", "0.5560981", "0.55317396", "0.5516366", "0.5516054", "0.5515234", "0.55061865", "0.55021983", "0.5496992", "0.5488545", "0.54762584", "0.5456194", "0.5443929", "0.5439461", "0.543752", "0.5437233", "0.54352164", "0.5411993", "0.54107755", "0.5410172", "0.54097515", "0.54047537", "0.53918403", "0.5379569", "0.5354739", "0.5347949", "0.5342511", "0.5329978", "0.53233755", "0.53185153", "0.5309818", "0.52760834", "0.5268382", "0.52471435", "0.52465785", "0.5244702", "0.5233224", "0.5224997", "0.5220919", "0.5213575", "0.52065694", "0.52065694", "0.5204231", "0.52022415", "0.51972014", "0.5186528", "0.51818925", "0.51778764", "0.5173139", "0.517167", "0.5167251", "0.51663584", "0.51636237", "0.5163243", "0.5161939", "0.5160489", "0.51591814", "0.51586914", "0.51466566", "0.5130574", "0.51205385" ]
0.6263441
11
Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles background-processing tasks.
def tier(self) -> pulumi.Output[Optional['outputs.EnvironmentTier']]: return pulumi.get(self, "tier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tier(self) -> Optional[pulumi.Input['EnvironmentTierArgs']]:\n return pulumi.get(self, \"tier\")", "def tier(self):\n\n if not hasattr(self, \"_tier\"):\n self._tier = self.opts.get(\"tier\")\n return self._tier", "def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)", "def tier(self) -> Optional[pulumi.Input['InstanceTier']]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def get_tier(self) -> str:\n tier = self.raw_param.get(\"tier\")\n if not tier:\n return \"\"\n\n tierStr = tier.lower()\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_FREE and self._get_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--uptime-sla\" and \"--tier free\" at the same time.'\n )\n\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD and self._get_no_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--no-uptime-sla\" and \"--tier standard\" at the same time.'\n )\n\n return tierStr", "def tier(self, tier):\n\n self._tier = tier", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[Union[str, 'VCoreSkuTier']]]:\n return pulumi.get(self, \"tier\")", "def tier(self):\n return self._tier", "def AddTier(parser, is_patch=False):\n help_text = (\n \"Machine type for a shared-core instance e.g. ``db-g1-small''. \"\n 'For all other instances, instead of using tiers, customize '\n 'your instance by specifying its CPU and memory. You can do so '\n 'with the `--cpu` and `--memory` flags. 
Learn more about how '\n 'CPU and memory affects pricing: '\n 'https://cloud.google.com/sql/pricing.'\n )\n if is_patch:\n help_text += ' WARNING: Instance will be restarted.'\n\n parser.add_argument('--tier', '-t', required=False, help=help_text)", "def access_tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_tier\")", "def configure_tiers(self, datacenter, tier):\n print \"Enabling tier %s...\" % tier\n tiers = datacenter.listTiers()\n\n tiers[0].setName(tier)\n tiers[0].update()\n\n for i in range(1, 4):\n tiers[i].setEnabled(False)\n tiers[i].update()\n\n return tiers[0]", "def tier_number(self, tier_number):\n\n self._tier_number = tier_number", "def run_on_tier(self, tier, tierY=None):\n raise NotImplementedError", "def post(self, tier):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n try:\n tier = tier.as_dict()\n LOG.debug(\"storage tier post dict= %s\" % tier)\n\n new_tier = _create(self, tier)\n except exception.SysinvException as e:\n LOG.exception(e)\n raise wsme.exc.ClientSideError(_(\"Invalid data: failed to create \"\n \"a storage tier object\"))\n\n return StorageTier.convert_with_links(new_tier)", "def tier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tier\")", "def price_tier(self):\n return self._safe_value(VAR_PRICETIER, str)", "def __init__(__self__, *,\n name: str,\n tier: str):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"tier\", tier)", "def get_pvp_tier(self, region, namespace, tier_id, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-tier/{0}'\n return self.get_resource(resource, region, *[tier_id], **filters)", "def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation", "def tier_2160p(self, tier_2160p):\n\n self._tier_2160p = tier_2160p", "def create(self, callback=None):\n\n parms = [{'budget': self.budget,\n 'deployment': {'deploymentId': self.deployment},\n 'description': self.description,\n 'name': self.name,\n 'minimumServers': self.minimum_servers,\n 'maximumServers': self.maximum_servers,\n 'breachIncrement': self.breach_increment,\n 'breachPeriodInMinutes': self.breach_period_in_minutes,\n 'cooldownPeriodInMinutes': self.cooldown_period_in_minutes,\n 'lowerCpuThreshold': self.lower_cpu_threshold,\n 'upperCpuThreshold': self.upper_cpu_threshold,\n 'lowerRamThreshold': self.lower_ram_threshold,\n 'upperRamThreshold': self.upper_ram_threshold}]\n\n payload = {'addTier':camel_keys(parms)}\n\n response=self.post(data=json.dumps(payload))\n if self.last_error is None:\n self.load()\n return response\n else:\n raise TierCreationException(self.last_error)", "async def addTier(self, ctx, tier):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n \n try:\n tierList.append(tier)\n self.save_data()\n await self.bot.say(\":white_check_mark: {0} added to tier list\".format(tier))\n except:\n await self.bot.say(\":x: Error adding {0} to the tier list\".format(tier))", "def GachaCraftNodeExcelAddTier(builder, Tier):\n return AddTier(builder, Tier)", "def patch(self, tier_uuid, patch):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n LOG.debug(\"patch_data: %s\" % patch)\n\n rpc_tier = objects.storage_tier.get_by_uuid(pecan.request.context,\n tier_uuid)\n\n patch_obj = jsonpatch.JsonPatch(patch)\n backend = dict(name='*unknown*')\n for p in patch_obj:\n if p['path'] == '/backend_uuid':\n p['path'] = '/forbackendid'\n backend = 
objects.storage_backend.get_by_uuid(pecan.request.context,\n p['value'])\n p['value'] = backend.id\n elif p['path'] == '/cluster_uuid':\n p['path'] = '/forclusterid'\n cluster = objects.cluster.get_by_uuid(pecan.request.context,\n p['value'])\n p['value'] = cluster.id\n otier = copy.deepcopy(rpc_tier)\n\n # Validate provided patch data meets validity checks\n _pre_patch_checks(rpc_tier, patch_obj)\n\n try:\n tier = StorageTier(**jsonpatch.apply_patch(rpc_tier.as_dict(),\n patch_obj))\n except utils.JSONPATCH_EXCEPTIONS as e:\n raise exception.PatchError(patch=patch, reason=e)\n\n # Semantic Checks\n _check(self, \"modify\", tier.as_dict())\n try:\n # Update only the fields that have changed\n for field in objects.storage_tier.fields:\n if rpc_tier[field] != getattr(tier, field):\n rpc_tier[field] = getattr(tier, field)\n\n # Obtain the fields that have changed.\n delta = rpc_tier.obj_what_changed()\n if len(delta) == 0:\n raise wsme.exc.ClientSideError(\n _(\"No changes to the existing tier settings were detected.\"))\n\n allowed_attributes = ['name']\n for d in delta:\n if d not in allowed_attributes:\n raise wsme.exc.ClientSideError(\n _(\"Cannot modify '%s' with this operation.\" % d))\n\n LOG.info(\"SYS_I orig storage_tier: %s \" % otier.as_dict())\n LOG.info(\"SYS_I new storage_tier: %s \" % rpc_tier.as_dict())\n\n if 'name' in delta:\n default_tier_name = constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]\n if rpc_tier.name == default_tier_name:\n raise wsme.exc.ClientSideError(\n _(\"Cannot modify tier '%s'. Name '%s' is used \"\n \"by the default tier\" % (otier.name, rpc_tier.name)))\n self._ceph.crushmap_tier_rename(otier.name, rpc_tier.name)\n\n # Save and return\n rpc_tier.save()\n return StorageTier.convert_with_links(rpc_tier)\n except (exception.HTTPNotFound, exception.CephFailure) as e:\n msg = _(\"Storage Tier update failed: backend %s storage tier %s : patch %s. 
\"\n \" Reason: %s\") % (backend['name'], otier['name'], patch, str(e))\n raise wsme.exc.ClientSideError(msg)", "def worker_tier_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"worker_tier_name\")", "def worker_tier_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"worker_tier_name\")", "def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n \"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n 
\"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)", "def free_tier():\n return AccountTier.objects.get(id=1)", "def create_tier_from_file():\n parser = ArgumentParser(description=\"Tier JSON Descriptor\")\n if is_valid_file(parser,filename):\n f=open(filename,'r')\n json_object = json.load(f)\n\n new_tier = Tier()\n for value in json_object.values():\n for v in range(0,len(value)):\n new_tier.deployment=value[v]['deployment']['deploymentId']\n new_tier.description = value[v]['description']\n new_tier.name = value[v]['name']\n new_tier.budget = value[v]['budget']\n new_tier.minimum_servers = value[v]['minimumServers']\n new_tier.maximum_servers = value[v]['maximumServers']\n new_tier.breach_increment = value[v]['breachIncrement']\n new_tier.breach_period_in_minutes = value[v]['breachPeriodInMinutes']\n new_tier.cooldown_period_in_minutes = value[v]['cooldownPeriodInMinutes']\n new_tier.lower_cpu_threshold = value[v]['lowerCpuThreshold']\n new_tier.upper_cpu_threshold = value[v]['upperCpuThreshold']\n new_tier.lower_ram_threshold = value[v]['lowerRamThreshold']\n new_tier.upper_ram_threshold = value[v]['upperRamThreshold']\n #result=new_tier.create()\n #print new_tier.current_job", "def __init__(self, tier_2160p=None, tier_1440p=None, tier_1080p=None, tier_720p=None, tier_audio_only=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration.get_default_copy()\n self.local_vars_configuration = local_vars_configuration\n\n self._tier_2160p = None\n self._tier_1440p = None\n self._tier_1080p = None\n self._tier_720p = None\n self._tier_audio_only = None\n self.discriminator = None\n\n if tier_2160p is not None:\n self.tier_2160p = tier_2160p\n if tier_1440p is not None:\n self.tier_1440p = tier_1440p\n if tier_1080p is not None:\n self.tier_1080p = tier_1080p\n if tier_720p is not None:\n self.tier_720p = tier_720p\n if tier_audio_only is not None:\n self.tier_audio_only = tier_audio_only", "def __init__(__self__, *,\n name: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]] = None,\n tier: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def tier_720p(self, tier_720p):\n\n self._tier_720p = tier_720p", "def __init__(__self__, *,\n name: pulumi.Input[str],\n capacity: Optional[pulumi.Input[int]] = None,\n tier: Optional[pulumi.Input[Union[str, 'VCoreSkuTier']]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not 
None:\n pulumi.set(__self__, \"capacity\", capacity)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def external_ip_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]:\n return pulumi.get(self, \"external_ip_egress_bandwidth_tier\")", "def test_tiers_tier_level_tier_name_put(self):\n pass", "def tier_1080p(self, tier_1080p):\n\n self._tier_1080p = tier_1080p", "def __init__(__self__, *,\n external_ip_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']] = None,\n total_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']] = None):\n if external_ip_egress_bandwidth_tier is not None:\n pulumi.set(__self__, \"external_ip_egress_bandwidth_tier\", external_ip_egress_bandwidth_tier)\n if total_egress_bandwidth_tier is not None:\n pulumi.set(__self__, \"total_egress_bandwidth_tier\", total_egress_bandwidth_tier)", "def __init__(self, name_i, tier, size):\r\n self.name_i = name_i\r\n self.tier = tier\r\n self.size = size", "def get_one(self, tier_uuid):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n rpc_tier = objects.storage_tier.get_by_uuid(pecan.request.context,\n tier_uuid)\n return StorageTier.convert_with_links(rpc_tier)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n capacity: Optional[pulumi.Input[int]] = None,\n tier: Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def get_pvp_tier_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-tier/index'\n return self.get_resource(resource, region, **filters)", "def PRODUCTION(cls):\n\n return DataCenter.Environment(\"https://www.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def get_tier_config(self, tier_config_id):\n text, code = ApiClient(self._config, 'tier/configs/' + tier_config_id).get()\n return TierConfig.deserialize(text)", "def __init__(__self__, *,\n tier: str,\n email: Optional[str] = None,\n link: Optional[str] = None,\n name: Optional[str] = None):\n pulumi.set(__self__, \"tier\", tier)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if link is not None:\n pulumi.set(__self__, \"link\", link)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def get_env_class(environment_type):\n if environment_type == \"vanilla\":\n return city.CityGridEnv\n elif environment_type == \"distraction\":\n return city.DistractionGridEnv\n elif environment_type == \"map\":\n return city.MapGridEnv\n elif environment_type == \"cooking\":\n return cooking.CookingGridEnv\n elif environment_type == \"miniworld_sign\":\n # Dependencies on OpenGL, so only load if absolutely necessary\n from envs.miniworld import sign\n return sign.MiniWorldSign\n else:\n raise ValueError(\n \"Unsupported environment type: {}\".format(environment_type))", "def train_tier(args: argparse.Namespace, hp: HParams, tier: int, extension_architecture: str,\n timestamp: str, tensorboardwriter: TensorboardWriter,\n logger: logging.Logger) -> None:\n logger.info(f\"Start training of tier {tier}/{hp.network.n_tiers}\")\n\n # Setup the data ready to be consumed\n train_dataloader, test_dataloader, num_samples = 
get_dataloader(hp)\n\n # Setup tier\n # Calculate size of FREQ dimension for this tier\n tier_freq = tierutil.get_size_freqdim_of_tier(n_mels=hp.audio.mel_channels,\n n_tiers=hp.network.n_tiers,\n tier=tier)\n\n if tier == 1:\n model = Tier1(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n else:\n model = Tier(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n model = model.to(hp.device)\n model.train()\n\n # Setup loss criterion and optimizer\n criterion = GMMLoss()\n optimizer = torch.optim.RMSprop(params=model.parameters(),\n lr=hp.training.lr,\n momentum=hp.training.momentum)\n\n # Check if training has to be resumed from previous checkpoint\n if args.checkpoint_path is not None:\n model, optimizer = resume_training(args, hp, tier, model, optimizer, logger)\n else:\n logger.info(f\"Starting new training on dataset {hp.data.dataset} with configuration file \"\n f\"name {hp.name}\")\n\n # Train the tier\n total_iterations = 0\n loss_logging = 0 # accumulated loss between logging iterations\n loss_save = 0 # accumulated loss between saving iterations\n prev_loss_onesample = 1e8 # used to compare between saving iterations and decide whether or not\n # to save the model\n\n for epoch in range(hp.training.epochs):\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - Starting\")\n for i, (waveform, utterance) in enumerate(train_dataloader):\n\n # 1.1 Transform waveform input to melspectrogram and apply preprocessing to normalize\n waveform = waveform.to(device=hp.device, non_blocking=True)\n spectrogram = transforms.wave_to_melspectrogram(waveform, hp)\n spectrogram = audio_normalizing.preprocessing(spectrogram, hp)\n # 1.2 Get input and output from the original spectrogram for this tier\n input_spectrogram, output_spectrogram = tierutil.split(spectrogram=spectrogram,\n tier=tier,\n n_tiers=hp.network.n_tiers)\n length_spectrogram = input_spectrogram.size(2)\n # 2. Clear the gradients\n optimizer.zero_grad()\n # 3. Compute the model output\n if tier == 1:\n # generation is unconditional so there is only one input\n mu_hat, std_hat, pi_hat = model(spectrogram=input_spectrogram)\n else:\n # generation is conditional on the spectrogram generated by previous tiers\n mu_hat, std_hat, pi_hat = model(spectrogram=output_spectrogram,\n spectrogram_prev_tier=input_spectrogram)\n # 4. Calculate the loss\n loss = criterion(mu=mu_hat, std=std_hat, pi=pi_hat, target=output_spectrogram)\n del spectrogram\n del mu_hat, std_hat, pi_hat\n\n # 4.1 Check if loss has exploded\n if torch.isnan(loss) or torch.isinf(loss):\n error_msg = f\"Loss exploded at Epoch: {epoch}/{hp.training.epochs} - \" \\\n f\"Iteration: {i * hp.training.batch_size}/{num_samples}\"\n logger.error(error_msg)\n raise Exception(error_msg)\n\n # 5. Perform backpropagation\n loss_cpu = loss.item()\n loss.backward()\n optimizer.step()\n\n # 6. 
Logging and saving model\n loss_oneframe = loss_cpu / (length_spectrogram * hp.training.batch_size)\n loss_logging += loss_oneframe # accumulated loss between logging iterations\n loss_save += loss_oneframe # accumulated loss between saving iterations\n\n # 6.1 Save model (if is better than previous tier)\n if (total_iterations + 1) % hp.training.save_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_save / hp.training.save_iterations)\n # if loss_onesample of these iterations is lower, the tier is better and we save it\n if loss_onesample < prev_loss_onesample:\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_loss{loss_onesample}.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': i,\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n prev_loss_onesample = loss_onesample\n loss_save = 0\n\n # 6.2 Logging\n if (total_iterations + 1) % hp.logging.log_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_logging / hp.logging.log_iterations)\n tensorboardwriter.log_training(hp, loss_onesample, total_iterations)\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - \"\n f\"Iteration: {i * hp.training.batch_size}/{num_samples} - \"\n f\"Loss: {loss_onesample}\")\n loss_logging = 0\n\n # 6.3 Evaluate\n if (total_iterations + 1) % hp.training.evaluation_iterations == 0:\n evaluation(hp, tier, test_dataloader, model, criterion, logger)\n total_iterations += 1\n\n # After finishing training: save model, hyperparameters and total loss\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_final.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': evaluation(hp, tier, test_dataloader, model, criterion,\n logger),\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n tensorboardwriter.log_end_training(hp=hp, loss=-1)\n logger.info(\"Finished training\")", "def setUp(self) -> None:\n self.ec2 = boto3.resource('ec2')\n self.ec2_client = boto3.client('ec2')\n self.sts = boto3.client('sts')\n self.iam = boto3.client('iam')\n self.autoscaling = boto3.client('autoscaling')\n\n self.prod_env = prod_env", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def test_tiers_tier_level_tier_name_get(self):\n pass", "def tier_1440p(self, tier_1440p):\n\n self._tier_1440p = tier_1440p", "def _delete(self, tier_uuid):\n\n tier = objects.storage_tier.get_by_uuid(pecan.request.context, tier_uuid)\n\n # Semantic checks\n _check(self, \"delete\", tier.as_dict())\n\n # update the crushmap by removing the tier\n try:\n self._ceph.crushmap_tier_delete(tier.name)\n except exception.CephCrushMapNotApplied:\n # If crushmap has not been applied then there is no rule to update.\n pass\n\n try:\n pecan.request.dbapi.storage_tier_destroy(tier.id)\n except exception.HTTPNotFound:\n msg = _(\"Failed to delete storage tier %s.\" % tier.name)\n raise wsme.exc.ClientSideError(msg)", 
"def test_add_tier(self, mock_client):\n\n productRelease = collections.OrderedDict([(u'productName', PRODUCT1),\n (u'version', VERSION)])\n tierDto = collections.OrderedDict([(u'name', \"TIER\"), (u'flavour', \"flavour\"),\n (u'image', \"image\"),\n (u'productReleaseDtos', productRelease)])\n template = Template(TEMPLATE_NAME, TEMPLATE_DESCRIPTION)\n template.template_id = \"ID\"\n\n class Object(object):\n pass\n newtemplate = Object()\n newtemplate.id = \"ID\"\n\n mock_client.create_env_template.return_value = newtemplate\n mock_client.get_image_name.return_value = \"image\"\n Config.Clients = mock_client\n\n template.add_tiers(tierDto)\n self.assertEquals(len(template.tiers), 1)", "def total_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]:\n return pulumi.get(self, \"total_egress_bandwidth_tier\")", "def _env_switch(environment: str, prod_value: T, qa_value: T) -> T:\n if environment == PROD:\n return prod_value\n return qa_value", "def __init__(__self__, *,\n resource_group: pulumi.Input[str],\n access_tier: Optional[pulumi.Input[str]] = None,\n data_lake_enabled: Optional[pulumi.Input[bool]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n network_rule: Optional[pulumi.Input['StorageAccountSpecNetworkRuleArgs']] = None,\n sku: Optional[pulumi.Input['StorageAccountSpecSkuArgs']] = None,\n supports_https_traffic_only: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"resource_group\", resource_group)\n if access_tier is not None:\n pulumi.set(__self__, \"access_tier\", access_tier)\n if data_lake_enabled is not None:\n pulumi.set(__self__, \"data_lake_enabled\", data_lake_enabled)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if network_rule is not None:\n pulumi.set(__self__, \"network_rule\", network_rule)\n if sku is not None:\n pulumi.set(__self__, \"sku\", sku)\n if supports_https_traffic_only is not None:\n pulumi.set(__self__, \"supports_https_traffic_only\", supports_https_traffic_only)", "def set_mode(self, pardus_profile):\n\n #TODO: How to determine mode (adhoc or infrastructure) from old profile settings\n return \"infrastructure\"", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def tier_trading(self, item):\r\n\r\n # Initial tier is the item the user has going into the store and final\r\n # tier is the item the user has when leaving the store\r\n initial_tier = self.item.tier\r\n final_tier = item.tier\r\n\r\n # Not allowing items that are too large to be carried\r\n if item.size is False:\r\n self.add = False\r\n print(\"The\", item.name_i, \"is too big to carry around the mall.\" +\r\n \"\\nPlease select a different item.\\n\\nAfter you have\" +\r\n \" checked all items, if no item of the proper tier\" +\r\n \" exists\\nplease type [4] to leave the store.\")\r\n\r\n # Standard jumping of tier trading and checking to make sure the final\r\n # tier is one tier higher than the initial tier\r\n elif final_tier == initial_tier + 1:\r\n self.add = True\r\n\r\n # Jumping exceptions; if the initial item is earrings, that can jump\r\n # to purse, and if the initial item is iPod_Shuffle, that can jump\r\n # to Air_Jordan_Space_Jam_11\r\n elif self.item.name_i == 'Earrings' and item.name_i == 'Purse':\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip 
a tier!\")\r\n\r\n elif (self.item.name_i == 'iPod_Shuffle' and\r\n item.name_i == 'Air_Jordan_Space_Jam_11'):\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip a tier!\")\r\n\r\n # If the tier is not acceptable we have to set self.add back to False\r\n else:\r\n self.add = False\r\n print(\"You are not allowed to select items in that tier.\"\r\n \"\\n\\nPlease pick another item one tier higher than your\" +\r\n \" current tier.\\n\\nAfter you have checked all items,\" +\r\n \" if no item of the proper tier exists,\\nplease type [4]\" +\r\n \" to leave the store.\")", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is None:\n name = 'S0'\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is None:\n tier = 'Standard'\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def _get_tier(pkmn_id):\n if pkmn_id in tiers.TIERS[\"0\"]:\n return 0\n elif pkmn_id in tiers.TIERS[\"1\"]:\n return 1\n elif pkmn_id in tiers.TIERS[\"2\"]:\n return 2\n elif pkmn_id in tiers.TIERS[\"3\"]:\n return 3\n else:\n return 4", "def deployment_environment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deployment_environment\")", "def get_current_environment(self):\n for env in self.indicators:\n if self._is_env_indicator_in_url(self.indicators[env]):\n return env\n\n return Environment.PRODUCTION", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def __init__(__self__, *,\n disabled: Optional[pulumi.Input[bool]] = None,\n load_balancer_type: Optional[pulumi.Input['CloudRunConfigLoadBalancerType']] = None):\n if disabled is not None:\n pulumi.set(__self__, \"disabled\", disabled)\n if load_balancer_type is not None:\n pulumi.set(__self__, \"load_balancer_type\", load_balancer_type)", "def dl_tier(self, tier):\n\n tier_df = pd.DataFrame()\n\n for t in self.tier_tables[tier]:\n\n for y in self.years:\n\n df = get_GHGRP_records(y, t)\n\n tier_df = tier_df.append(df, sort=True, ignore_index=True)\n\n tier_df.columns = [x.lower() for x in tier_df.columns]\n\n # Fix issues with natural gas HHV reporting\n # Other fuel HHVs were exammined manually. There's a wide range for\n # wood and wood residuals, but not other fuels.\n if tier == 't2_hhv':\n\n tier_df['high_heat_value'] = \\\n tier_df.high_heat_value.astype('float32')\n\n natgas_st_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value_uom == 'mmBtu/short ton')\n ].index\n\n tier_df.loc[natgas_st_index, 'high_heat_value_uom'] = 'mmBtu/scf'\n\n m_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value.between(1, 1.2))\n ].index\n\n tier_df.high_heat_value.update(\n tier_df.loc[m_index, 'high_heat_value'].divide(1000)\n )\n\n drop_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. 
Average)') &\n (tier_df.high_heat_value.between(0.0012, 0.0014))\n ].index\n\n tier_df = tier_df[~tier_df.index.isin(drop_index)]\n\n return tier_df", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool", "def _production():\n env.environment = 'production'\n env.server_name = 'project-production.dimagi.com'\n env.hosts = [settings.PRODUCTION_HOST]", "def type(self):\n return EB.EnvType.ROBOSUITE_TYPE", "def _create_deployment(self) -> aws.apigateway.Stage:\n deployment = aws.apigateway.Deployment(\n f\"{self.rest_api._name}-deployment\",\n rest_api=self.rest_api.id,\n # TODO: Still want to have a triggers function\n opts=pulumi.ResourceOptions(\n parent=self, depends_on=[p.lambda_integration for p in self.proxies]\n ),\n )\n\n stage = aws.apigateway.Stage(\n f\"{self.rest_api._name}-prod-stage\",\n deployment=deployment.id,\n rest_api=self.rest_api.id,\n stage_name=\"prod\",\n opts=pulumi.ResourceOptions(parent=self),\n )\n\n return stage", "def tiers(self, args):\n parser = OptionParser(usage=\"vdc tiers <options>\")\n parser.add_option(\"-n\", \"--name\",\n help=\"The name of the virtual datacenter\", dest=\"name\")\n (options, args) = parser.parse_args(args)\n name = options.name\n if not name:\n parser.print_help()\n return\n\n # Once user input has been read, find the virtual datacenter\n try:\n cloud = self._context.getCloudService()\n vdc = cloud.findVirtualDatacenter(\n VirtualDatacenterPredicates.name(name))\n if vdc:\n tiers = vdc.listStorageTiers()\n pprint_tiers(tiers)\n else:\n print \"No virtual datacenter found with name: %s\" % name\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "def _get_environment(cls):\n return cls.__name__.lower()", "def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)", "def __init__(__self__, *,\n application_name: pulumi.Input[str],\n cname_prefix: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n environment_name: Optional[pulumi.Input[str]] = None,\n operations_role: Optional[pulumi.Input[str]] = None,\n option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]] = None,\n platform_arn: Optional[pulumi.Input[str]] = None,\n solution_stack_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]] = None,\n template_name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['EnvironmentTierArgs']] = None,\n version_label: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"application_name\", application_name)\n if cname_prefix is not None:\n pulumi.set(__self__, \"cname_prefix\", cname_prefix)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if environment_name is not None:\n pulumi.set(__self__, \"environment_name\", environment_name)\n if operations_role is not None:\n pulumi.set(__self__, 
\"operations_role\", operations_role)\n if option_settings is not None:\n pulumi.set(__self__, \"option_settings\", option_settings)\n if platform_arn is not None:\n pulumi.set(__self__, \"platform_arn\", platform_arn)\n if solution_stack_name is not None:\n pulumi.set(__self__, \"solution_stack_name\", solution_stack_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if template_name is not None:\n pulumi.set(__self__, \"template_name\", template_name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)\n if version_label is not None:\n pulumi.set(__self__, \"version_label\", version_label)", "def get_environment_class_by_name(environment_type):\n for cls in util.iter_subclasses(Environment):\n if cls.tool_name == environment_type:\n return cls\n raise EnvironmentUnavailable(\n f\"Unknown environment type '{environment_type}'\")", "def isolationforest(data_set, types):\n if types == 'Global':\n clf = IsolationForest(random_state=19,n_estimators=200)\n else:\n clf = IForest(random_state=10,n_estimators=200)\n clf.fit(data_set)\n pred = clf.predict(data_set)\n return pred", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] = os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = os.environ.get(ENV_WEATHER_API_URL)", "def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n 
resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)", "def detail(self, tier_uuid=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != 'storage_tiers':\n raise exception.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['storage_tiers', 'detail'])\n return self._get_tiers_collection(tier_uuid, marker, limit,\n sort_key, sort_dir, expand,\n resource_url)", "def test_create_storage_tiered_rate(self):\n storage_rates = (\n metric_constants.OCP_METRIC_STORAGE_GB_REQUEST_MONTH,\n metric_constants.OCP_METRIC_STORAGE_GB_USAGE_MONTH,\n )\n for storage_rate in storage_rates:\n ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"rates\": [\n {\n \"metric\": {\"name\": storage_rate},\n \"tiered_rates\": [\n {\"unit\": \"USD\", \"value\": 0.22, \"usage\": {\"usage_start\": None, \"usage_end\": 10.0}},\n {\"unit\": \"USD\", \"value\": 0.26, \"usage\": {\"usage_start\": 10.0, \"usage_end\": None}},\n ],\n }\n ],\n \"currency\": \"USD\",\n }\n\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)", "def create_infrastructure_storage(config, context, dc):\n print \"### Configuring storage ###\"\n storage = InfrastructureStorage(context)\n tier = storage.configure_tiers(dc, config.get(\"tier\", \"name\"))\n try: \n user = config.get(\"device\", \"user\")\n password= config.get(\"device\", \"password\")\n except NoOptionError:\n user = None\n password = None\n device = storage.create_device(dc, config.get(\"device\", \"name\"),\n StorageTechnologyType.valueOf(config.get(\"device\", \"type\")),\n config.get(\"device\", \"address\"),\n config.get(\"device\", \"address\"),\n user, password)\n\n storage.create_pool(device, tier, config.get(\"pool\", \"name\"))", "def run_on_tier(self, tier, tier_y=None):\n if tier_y is None:\n return None\n\n logging.info(\"Apply sppasFilter() on tier: {:s}\".format(tier.get_name()))\n sfilter = sppasTierFilters(tier)\n\n logging.debug(\"Data in RelationFilterProcess: {:s}\".format(self.data))\n ann_set = sfilter.rel(tier_y,\n *(self.data[0]),\n **{self.data[1][i][0]: self.data[1][i][1] for i in range(len(self.data[1]))})\n\n # convert the annotations set into a tier\n filtered_tier = ann_set.to_tier(name=self.tier_name,\n annot_value=self.annot_format)\n\n return filtered_tier", "def pricing_tiers(self, pricing_tiers):\n\n self._pricing_tiers = pricing_tiers", "def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'", "def set_sizing_environment():\n # Creates a sizing executor factory to output communication cost\n # after the training finishes. Note that sizing executor only provides an\n # estimate (not exact) of communication cost, and doesn't capture cases like\n # compression of over-the-wire representations. 
However, it's perfect for\n # demonstrating the effect of compression in this tutorial.\n sizing_factory = tff.framework.sizing_executor_factory()\n\n # TFF has a modular runtime you can configure yourself for various\n # environments and purposes, and this example just shows how to configure one\n # part of it to report the size of things.\n context = tff.framework.ExecutionContext(executor_fn=sizing_factory)\n tff.framework.set_default_context(context)\n\n return sizing_factory", "def delete(self, tier_uuid):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n _delete(self, tier_uuid)", "def get_environment():\n # Auto-set settings object based on App Engine dev environ\n if 'SERVER_SOFTWARE' in os.environ:\n if os.environ['SERVER_SOFTWARE'].startswith('Dev'):\n return Config.ENV_LOCAL\n elif os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/'):\n #For considering an environment staging we assume the version id\n # contains -staging and the URL\n current_version_id = str(os.environ['CURRENT_VERSION_ID']) if (\n 'CURRENT_VERSION_ID') in os.environ else ''\n if '-staging' in current_version_id:\n return Config.ENV_STAGING\n #If not local or staging then is production TODO: really?\n return Config.ENV_PRODUCTION\n return Config.ENV_LOCAL", "def environment(self, environment):\n\n self._set_field(\"environment\", environment.get_json())", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})" ]
[ "0.7523294", "0.697658", "0.64992577", "0.6451572", "0.6353276", "0.6353276", "0.6353276", "0.62764174", "0.62093973", "0.61662465", "0.61662465", "0.61662465", "0.61662465", "0.6111787", "0.6032573", "0.6006158", "0.59823996", "0.5843192", "0.5832442", "0.5829042", "0.57770836", "0.5765916", "0.57025033", "0.5682047", "0.56009716", "0.5564439", "0.55300856", "0.5363026", "0.5319493", "0.5293209", "0.5183178", "0.51632047", "0.5016507", "0.49579123", "0.49579123", "0.4911184", "0.4874531", "0.48511827", "0.48459488", "0.47647476", "0.47644153", "0.47634897", "0.47346604", "0.46940607", "0.46924618", "0.4672637", "0.46346137", "0.4617467", "0.46133536", "0.46034032", "0.45922667", "0.45717043", "0.45659924", "0.4553579", "0.45298445", "0.45254958", "0.4499112", "0.44979954", "0.44963014", "0.44899964", "0.44176343", "0.44095024", "0.4393759", "0.4384048", "0.43696824", "0.43544057", "0.43495476", "0.43427253", "0.43398833", "0.4316781", "0.4281352", "0.42520702", "0.42480493", "0.4239123", "0.42323732", "0.42190653", "0.42179006", "0.4214587", "0.42091942", "0.4182904", "0.41808444", "0.41634664", "0.41555676", "0.4150356", "0.4149918", "0.41441107", "0.414035", "0.41401136", "0.41390646", "0.413224", "0.41296744", "0.41249216", "0.41230598", "0.41202947", "0.41161293", "0.4101178", "0.4090122", "0.40800697", "0.40767127", "0.40658197" ]
0.68612045
2
The name of the application version to deploy.
def version_label(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "version_label")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def get_package_name(self):\n return self.name + '-' + self.version", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def get_name():\n return config.APP_NAME", "def get_version(self):\n return self.cur_config['version']['name']", "def get_version_name(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['version_name']\n\t\texcept:\n\t\t\treturn None", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def get_res_name():\n return os.getenv(\"RESOURCES_VERSION\", \"res_0.0\")", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def getApplicationReleaseName(self) -> unicode:\n ...", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def get_version_tag(self, version: str) -> str:\n return version", "def getApplicationVersion(self) -> unicode:\n ...", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def get_version(cls):\n if Config.ENV_TYPE == PRD:\n return Config.version + \"/\" + Config.build\n return Config.version + \"/\" + Config.build + \"/\" + Config.generate + ' (' + Config.ENV_NAME + ')'", "def app_name(self) -> str:\n return self._app_name", "def fullname(self):\n return \"{project}/{version}\".format(\n project=self.project.name, version=self.name\n )", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])", "def print_app_version(app_name):\n print_file('{}/current/version.txt'.format(get_app_basedir(app_name)))", "def get_version(self) -> str:\n return versioning.get_version()", "def app_name(self):\n return self._app_name", "def get_application_version(self):\n return self.connector.request('GET', '/app/version')", "def kms_key_version_name(self) -> str:\n return pulumi.get(self, \"kms_key_version_name\")", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version() -> str:\n with open(join(dirname(__file__), 'resources', 'VERSION')) as f:\n return f.read().strip()", "def name(self):\n return self.application_tree['name']", "def 
name(self):\n\n return self.manifest[\"name\"]", "def product(self):\n return self.appName", "def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])", "def version(self) -> str:\n return '0.1'", "def version(self):\n return self.proto.details.appDetails.versionString", "def version(self):\n\n return self.manifest[\"version\"]", "def get_version():\n return '.'.join(map(str, VERSION))", "def path_name(self):\n return u'{0}-{1}'.format(self.plugin.name, self._major_version)", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version():\n from app import get_version\n\n return render_template(\"version.html\", version=get_version())", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def getApplicationName(self) -> unicode:\n ...", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def app_version_id(self):\n return self._app_version_id", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))", "def get_version() -> str:\n return __version__", "def name(self):\n return self._env_name", "def _get_app_name(app):\n return app[APP_NAME_KEY]", "def version_code(self) -> str:\n return pulumi.get(self, \"version_code\")", "def display_name(self) -> str:\n if self.is_verified:\n return f\"Verified Package {self.csharp_version}\"\n elif self.is_main:\n return \"main (unstable)\"\n else:\n return self.release_tag.replace(\"_\", \" \").title()", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"", "def join_app_version(appname,version,platform):\n return \"%s-%s.%s\" % (appname,version,platform,)", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 
'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def get_version_string():\n\n version_string = get_version()\n if not version_string:\n version_string = \"unknown\"\n\n return \"ImageSplit version \" + version_string", "def compute_name(self):\n version_id_str = DELIMITER.join(\n sorted(v.kf_id for v in self.versions.all())\n )\n return DELIMITER.join([NAME_PREFIX, version_id_str])", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname", "def getXsdVersionName(self):\n vers = self.getVersion()\n if vers is None:\n return None\n\n # Determine the filename\n bname = os.path.basename(self.__pathin)\n dname = bname.split(\".\")[0]\n\n dc = DictConfig()\n prefix = dc.get_prefix(dname)\n if prefix:\n vout = \"%s-v%s.xsd\" % (prefix, vers)\n return vout\n\n return None", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def makeReleaseFileName(cls, version: str) -> str:\n\n from peek_platform import PeekPlatformConfig\n\n return os.path.join(\n PeekPlatformConfig.config.platformSoftwarePath,\n 'peek-release-%s.tar.gz' % version)", "def version_string():\n git_hash = current_git_hash()\n if git_hash:\n return \"pyhole v%s (%s) - https://github.com/jk0/pyhole\" % (\n __VERSION__, git_hash)\n\n return \"pyhole v%s - https://github.com/jk0/pyhole\" % __VERSION__", "def string(self) -> str:\n version = RE_VERSION.match(str(self._version)).group(2)\n if version.endswith(\".\"):\n version = version[:-1]\n return version", "def get_name(self, name):\n return self.apps[name]['name']", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def launch_template_version(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_version\")", "def version_string(self):\n return self.server_version", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def version(self) -> pulumi.Output[str]:\n return 
pulumi.get(self, \"version\")", "def fhir_version_name(fhir_version):\n major_version = int(fhir_version.split('.')[0])\n\n if major_version < 3:\n return 'dstu2'\n elif (major_version >= 3) and (major_version < 4):\n return 'stu3'\n elif (major_version >= 4) and (major_version < 5):\n return 'r4'\n else:\n raise Exception(\n f'Invalid fhir version supplied: {fhir_version}! No name exists '\n 'for the supplied fhir version.'\n )", "def version(self):\n return self._get(\"version\")", "def model_version(self) -> str:\n return pulumi.get(self, \"model_version\")" ]
[ "0.79674184", "0.752541", "0.7301922", "0.7259704", "0.72298706", "0.71760947", "0.7097202", "0.7067314", "0.7017301", "0.69222665", "0.69211406", "0.6917922", "0.6909662", "0.6909662", "0.6832326", "0.6825434", "0.67636746", "0.6756408", "0.67505723", "0.67250407", "0.6709316", "0.66898406", "0.6672872", "0.6670698", "0.6662288", "0.66378605", "0.66133124", "0.65875953", "0.6581486", "0.6575103", "0.65523434", "0.65523434", "0.65523434", "0.65523434", "0.6533765", "0.65285647", "0.6518388", "0.648232", "0.64703166", "0.64598465", "0.64508444", "0.6450417", "0.6447388", "0.6446992", "0.6432312", "0.6432312", "0.6432312", "0.6432312", "0.6432312", "0.64164263", "0.6399473", "0.6399473", "0.6399473", "0.6399473", "0.6398507", "0.6389303", "0.6388356", "0.63853484", "0.6381071", "0.63645464", "0.63626695", "0.6343655", "0.63384575", "0.63114053", "0.63109255", "0.6306938", "0.6305258", "0.6300399", "0.62983495", "0.62837726", "0.627433", "0.62692416", "0.6266828", "0.62655336", "0.6263612", "0.6263115", "0.6245801", "0.6244584", "0.6244584", "0.6244584", "0.6244584", "0.6244584", "0.62333244", "0.6231607", "0.6229841", "0.6226172", "0.62246937", "0.6213213", "0.62120986", "0.61964244", "0.6195447", "0.6195356", "0.6195356", "0.6195356", "0.6195356", "0.6195356", "0.61916304", "0.61874485", "0.6180664", "0.61708236" ]
0.6369655
59
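The version_label getter in the record above (and the pulumi.set calls in many of the negative snippets) follows the Pulumi SDK's attribute get/set pattern. Below is a minimal, dependency-free sketch of that pattern, assuming a plain per-instance attribute store; the class and helper names are illustrative and are not part of the Pulumi API.

from typing import Any, Optional


def _set(obj: Any, name: str, value: Any) -> None:
    # Store the value on the instance, roughly what pulumi.set does in the snippets above.
    obj.__dict__[name] = value


def _get(obj: Any, name: str) -> Any:
    # Read the stored value back, roughly what pulumi.get does in the snippets above.
    return obj.__dict__.get(name)


class EnvironmentArgs:
    # Illustrative stand-in for a Pulumi args/resource class.
    def __init__(self, version_label: Optional[str] = None) -> None:
        if version_label is not None:
            _set(self, "version_label", version_label)

    @property
    def version_label(self) -> Optional[str]:
        # The name of the application version to deploy.
        return _get(self, "version_label")


args = EnvironmentArgs(version_label="my-app-v42")
print(args.version_label)  # prints: my-app-v42

A real Pulumi program would declare this through the SDK's own resource classes rather than a hand-rolled store; the sketch only shows why the getters in these records delegate to a lookup by property name.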
The game algorithm, using continuations for pure linear Python code
def go(self, comp):
        self.attempt = 1
        number = random.randint(1, 20)

        comp.call(util.Confirm('I choose a number between 1 and 20. Try to guess it'))

        while True:
            x = comp.call(util.Ask('Try #%d: ' % self.attempt))
            if not x.isdigit():
                continue

            x = int(x)

            if x > number:
                comp.call(util.Confirm('Choose a lower number'))

            if x < number:
                comp.call(util.Confirm('Choose a greater number'))

            if x == number:
                comp.call(util.Confirm(self.final_text % self.attempt))
                break

            self.attempt += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result", "def __run_game(game):\n\n while not game['lost']:\n\n game['move'], game['moveScores'] = my_algorithm(game)\n\n check_game_lost(game)\n\n # I changed the order of this loop to record information\n # about the algorithm before the board is moved so\n # in the move log csv the initial board will show scores and\n # a planned move instead of having the scores off by 1\n record_move(game)\n\n move(game)\n\n return game", "def playGame(b, px, po):\n \n nextPieceToMove = [\"X\",\"O\"] \n nextPlayerToMove = [px,po]\n n=0\n # FILL IN CODE HERE\n while True:\n if b.isFull() == False and nextPlayerToMove[n%2] == \"human\":\n move = askformove(b)\n b.addMove(move, nextPieceToMove[n%2])\n print(b)\n if b.winsFor(nextPieceToMove[n%2]):\n nextPieceToMove = nextPieceToMove[n%2]\n print(nextPieceToMove + \" wins!\")\n break\n elif not b.isFull():\n move = nextPlayerToMove[n%2].nextMove(b)\n b.addMove(move, nextPieceToMove[n%2])\n print(b)\n if b.winsFor(nextPieceToMove[n%2]):\n nextPieceToMove = nextPieceToMove[n%2]\n print(nextPieceToMove + \" wins!\")\n break\n else:\n nextPieceToMove = \"D\"\n break\n n = 1+n \n return(b.data, nextPieceToMove)", "def play(game, pick):\n\n # btn1\n if pick == 1:\n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n\n # btn2\n if pick == 2:\n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n\n # btn3\n if pick == 3:\n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n\n # btn4\n if pick == 4:\n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n\n # btn4\n if pick == 5:\n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n\n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if pick == 6:\n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if pick == 7:\n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n \n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 
1\n \n if pick == 8:\n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n \n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if pick == 9:\n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n return(game)", "def algorithm_loop(self):", "def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def start_game(table_index):\n initiate_game(table_index)\n\n uId = uIds[table_index]\n current_map = maps[table_index]\n current_alg = applied_algs[table_index].func\n game_over = False\n previousmoves = []\n\n while True:\n try:\n if not game_over:\n # print('Move scores')\n # print(evaluate.evaluate(current_map, 0))\n # print(evaluate.evaluate(current_map, 1))\n # print(evaluate.evaluate(current_map, 2))\n # print(evaluate.evaluate(current_map, 3))\n\n move = current_alg(current_map)\n\n print(previousmoves)\n # check for bug\n # first case\n if len(previousmoves) == 0:\n previousmoves.append(move)\n # same movement\n elif previousmoves[-1] != move:\n previousmoves = []\n elif previousmoves[-1] == move:\n previousmoves.append(move)\n # 10 same movement\n\n\n if len(previousmoves) >= 4 and move == previousmoves[-1]:\n wrongdirection = previousmoves[-1]\n previousmoves = []\n moves = [\"w\", \"a\", \"s\", \"d\"]\n moves.remove(wrongdirection)\n\n move = random.choice(moves)\n\n print(move)\n\n request = requests.post(url=base_URL + \"/api/play_the_game\",\n json={'direction': move,\n 'uId': uId})\n current_map = request.json()['board']\n\n print(request.json())\n\n # TODO: Type checking, error handling (HTTP response?)\n game_over = request.json()[\"game_over\"]\n\n else:\n c_score = request.json()[\"c_score\"]\n SESSION_NAME = TEAM_NAME + \"_\" + applied_algs[table_index].label\n\n with open('score_data.txt', 'a') as f:\n f.write(datetime.datetime.now().strftime('%H:%M:%S') + \" \" + SESSION_NAME + \" \" 
+ '%d' % c_score + \"\\n\")\n\n print()\n print(f\"Game Over. Your score is: {c_score}\")\n print()\n\n initiate_game(table_index)\n game_over = False\n uId = uIds[table_index]\n current_map = maps[table_index]\n except:\n print(\"Error\")", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def comp_turn():\n global red_turn,board_array,die_1_num,die_2_num\n roll()\n red_turn = False\n value,move = backgammon_AI.choose_move(board_array,die_1_num,die_2_num,doubles)\n print value,move\n if(value != -1000):\n for sub_move in move:\n board_array[sub_move[0]][1] -= 1\n board_array[sub_move[1]][1] += 1\n if(board_array[sub_move[1]][0] == 1): #Handle hits\n board_array[sub_move[1]][0] -= 1\n board_array[0][0] += 1\n die_1_num = 0\n die_2_num = 0\n update_dice()\n draw_draughts()\n red_turn = True", "def main():\n even_game()", "def test_endgameStrategy(self):\n self.result = \"\"\"\n 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 1 2 3 3 2 1 0 0\n 0 0 1 3 x x x x 1 0 0\n 0 0 2 x x 6 x 5 2 0 0\n 0 0 3 x 4 4 x x 2 0 0\n 0 0 3 x 5 5 x x 2 0 0\n 0 0 2 x x x x 3 1 0 0\n 0 0 1 2 3 3 2 1 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0\n \"\"\"", "def play(strategy0, strategy1, score0=0, score1=0, dice=six_sided,\n goal=GOAL_SCORE, say=silence):\n player = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n # BEGIN PROBLEM 5\n n = 0 #turn number\n change_turn = 0 #0 - change turn, 1 - extra turn, 2 - just gave extra turn and cannot have another\n \"*** YOUR CODE HERE ***\"\n while (score0 < goal) and (score1 < goal):\n if player == 0:\n if n % 5 == strategy0(score0, score1) and change_turn != 2:\n change_turn = 1\n score0 += take_turn(strategy0(score0, score1), score1, dice)\n else:\n if n % 5 == strategy1(score1, score0) and change_turn != 2:\n change_turn = 1\n score1 += take_turn(strategy1(score1, score0), score0, dice)\n if is_swap(score0, score1):\n score0, score1 = score1, score0\n\n if change_turn == 1:\n change_turn = 2\n else:\n player = other(player)\n change_turn = 0\n say = say(score0, score1)\n n += 1 #increment turn\n # END PROBLEM 5\n # (note that the indentation for the problem 6 prompt (***YOUR CODE HERE***) might be misleading)\n # BEGIN PROBLEM 6\n \"*** YOUR CODE HERE ***\"\n # END PROBLEM 6\n return score0, score1", "def main():\r\n turn_left()\r\n move_three_times()\r\n turn_right()\r\n move_three_times()\r\n turn_right()\r\n move_three_times()\r\n turn_left()", "def turn_around():\n for i in range(2):\n turn_left()", "def betterEvaluationFunction(currentGameState):\n\n # Useful information you can extract from a GameState (pacman.py)\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newCapsules = currentGameState.getCapsules()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # Volem que s'apropi a les fruites i s'allunyi dels fantasmes en cas que aquests ens puguin matar, si no, hem d'intentar menjar-nos-els, pensant en seguir optant a la fruita.\n\n foodDistance = [util.manhattanDistance(newPos, food) for food in newFood.asList()]\n if foodDistance:\n foodMinima = min(foodDistance)\n else:\n foodMinima = -1 # perque si la llista esta buida vol dir que hem hem d'anar cap aquesta direcció, i per tant necessitem un valor molt gran.\n\n newScaredTimes = 
[ghostState.scaredTimer for ghostState in newGhostStates]\n\n ghostDistance = [util.manhattanDistance(newPos, ghostState.getPosition()) for ghostState in newGhostStates]\n\n distanciaFantasmes = 0\n fantasmaMoltAprop = 0\n\n for i in range(len(ghostDistance)):\n if newScaredTimes[i] >= 2:\n distanciaFantasmes -= ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop -= 1\n else:\n distanciaFantasmes += ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop += 1\n\n if distanciaFantasmes == 0:\n distanciaFantasmes = -1 # perque aixo voldra dir que tenim els fantasmes al voltant, i per tant ens en volem allunyar si o si d'aquesta direcció\n\n capsulesDistances = [util.manhattanDistance(newPos, capsuleState) for capsuleState in newCapsules]\n\n if capsulesDistances:\n capsulaMinima = min(capsulesDistances)\n itemMinim = min(capsulaMinima, foodMinima)\n else:\n itemMinim = foodMinima\n\n result = currentGameState.getScore() + 1 / float(itemMinim) - 1 / float(distanciaFantasmes) - fantasmaMoltAprop\n\n\n return result", "def the_counting_game(number_of_players=10, total=100):\n # a b c d e f g h i j\n # 1 2 3 4 5 6 7\n # 13 12 11 10 9 8\n # 14\n # 15 16 17 18 19 20 21\n # 27 26 25 24 23 22\n # 28\n # 29\n # print \"total\", total\n player_number = 1 # first player will say the number 1\n dir = 'right' # we start off counting to the right\n num_said = 1 # the number said by the first player\n while num_said < total:\n if dir == 'right':\n print dir\n # if we're at the last player, go back to the first player\n # which is last player minus total number of players minus 1\n if player_number == number_of_players:\n player_number = number_of_players - 1\n print \"p\", player_number, \"said: \", num_said\n else:\n print \"p\", player_number, \"said: \", num_said\n player_number += 1\n # if the next number will be a multiple of 7, time to switch directions\n if (num_said + 1) % 7 == 0:\n print \"this should switch\", dir\n dir = switch_direction(dir)\n print \"this should switch\", dir\n elif dir == 'left':\n print dir\n # if this is the first player, going left means going to the last player\n # which is total number of players\n if player_number == 1:\n player_number += (number_of_players - 1)\n else:\n print \"p\", player_number, \"said: \", num_said\n player_number -= 1\n # if the next number will be a multiple of 7, time to switch directions\n if (num_said + 1) % 7 == 0:\n print \"this should switch\", dir\n dir = switch_direction(dir)\n print \"this should switch\", dir\n num_said += 1\n return \"Player to say the total: \" + str(player_number)", "def GAME_LOOP():\n pass", "def run():\n game = Game()\n i = 0\n while True:\n print(i, \"\\n\\n\" + str(game))\n i += 1\n actions = game.possible_moves()\n if actions == []:\n return game.score()\n else:\n game_state = replace_none(np.array(game.state))\n action = h_min_max(game_state)[0]\n if action == UP:\n game.up()\n elif action == DOWN:\n game.down()\n elif action== LEFT:\n game.left()\n elif action== RIGHT:\n game.right()\n else:\n print(\"Didn't move\")\n return game", "def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))", "def opt_play():\n global piles\n global 
num_piles \n nim_sum = game_nim_sum()\n pile_sum = list(piles)\n for x in range(len(piles)):\n pile_sum[x] = nim_sum^piles[x]\n \n for y in range(len(piles)):\n if pile_sum[y] < piles[y]:\n return (y, piles[y]-pile_sum[y])\n\n for z in range(len(piles)):\n if piles[z] != 0:\n return (z,1)", "def GAMEOVER_LOOP():\n pass", "def main():\n move()\n move()\n pick_beeper()\n move()\n turn_left()\n for i in range(2):\n move()\n put_beeper()\n turn_around()\n move_to_wall()\n turn_right()\n move_to_wall()\n turn_around()", "def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) 
#Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def test_simple():\n game = Game(3, [0, 0], -1, 5, -5, 10, 1, [[0, 1]], [0.0])\n\n print(f\"Check the baby exists\\n{game.baby}\")\n\n print(\"\\nCheck the berry exists\")\n for berry in game.get_berries():\n print(berry)\n\n print(f\"\\nHere is the board\\n{game.get_board()}\")\n\n print(\"First let's perform an illegal move Northwards\")\n board, reward, done = game.step(\"N\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"E\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow we will move back to the original place and then eat the berry\")\n board, reward, done = game.step(\"W\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"S\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")", "def play_game():\n pass", "def main():\n ans 
= random_word()\n run_game(ans, N_TURNS)", "def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break", "def phase_8(self):\n\n def problem_1():\n test_board_1 = board(5, 5, snake_init_coordinates = [4, 2], fruit_init_coordinates = [0, 2])\n render = Render_engine('terminal', test_board_1)\n\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move up\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"up\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_2():\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [3, 2])\n test_board_1.Snake_init_from_lst([[3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n def problem_3():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n test_board_1.Snake_init_from_lst([[3,4], [3, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_4():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n test_board_1.Snake_init_from_lst([[3, 3], [3, 2], [3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n 
render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n problem_1()\n problem_2()\n problem_3()\n problem_4()", "def main():\n\n print('R-In-A-Row')\n print()\n\n while True:\n if play == 'human vs human':\n human1Tile, human2Tile = enterHuman1Tile()\n\n turn = whoGoesFirst()\n print('The %s player will got first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'human vs computer':\n human1Tile, computer1Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'computer vs computer':\n computer1Tile, computer2Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n\n\n while True:\n if play == 'human vs human':\n if turn == 'human1':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n\n makeMove(mainBoard, human1Tile, move)\n\n if isWinner(mainBoard, human1Tile):\n winner = 'human1'\n\n break\n turn = 'human2'\n if turn == 'human2':\n drawBoard(mainBoard)\n move2 = getHuman2Move(mainBoard)\n makeMove(mainBoard, human2Tile, move2)\n if isWinner(mainBoard, human2Tile):\n winner = 'human2'\n break\n turn = 'human1'\n\n elif play == 'human vs computer' :\n if turn == 'human':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n makeMove(mainBoard, human1Tile, move)\n if isWinner(mainBoard, human1Tile):\n winner = 'human'\n\n break\n turn ='computer'\n\n elif turn == 'computer':\n drawBoard(mainBoard)\n print('The computer is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer'\n break\n turn = 'human'\n elif play == 'computer vs computer':\n if turn == 'computer1':\n drawBoard(mainBoard)\n print('computer1 is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer1'\n break\n turn = 'computer2'\n elif turn == 'computer2':\n drawBoard(mainBoard)\n print('computer2 is thinking...')\n move = getComputer2Move(mainBoard, computer2Tile)\n makeMove(mainBoard, computer2Tile, move)\n if isWinner(mainBoard, computer2Tile):\n winner = 'computer2'\n break\n turn = 'computer1'\n\n\n if isBoardFull(mainBoard):\n winner = 'tie'\n break\n\n drawBoard(mainBoard)\n print('Winner is: %s' % winner)\n if not playAgain():\n break", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for 
all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def selfplay():\n agent2 = Agent(0.99, 0.1, 0.003, 42, train_games, 7, eps_dec)\n agent2.load_checkpoint()\n global win_cntr\n global done\n g = Game()\n turn = random.choice([PLAYER, AI])\n done = False\n transitions_agent = []\n transitions_agent2 = []\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n # row = input('{}\\'s turn: '.format('Red'))\n # g.insert(int(row), turn)\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent2.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent2 += [(observation, action, observation_, done)]\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if 
turn == PLAYER else PLAYER\n if g.getWinner() == Tie:\n reward_agent = 0\n else:\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n win_cntr += 1\n if vertical_win:\n reward_agent = 5\n else:\n reward_agent = 20\n\n else:\n reward_agent = -20\n\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward_agent, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n pos = currentGameState.getPacmanPosition()\n ghostState = currentGameState.getGhostStates()\n scaredTimes = [gs.scaredTimer for gs in ghostState]\n\n # get all the positions\n ghostPositions = []\n for ghost in ghostState:\n ghostPositions.append(ghost.getPosition())\n foodPosition = currentGameState.getFood()\n wallPosition = currentGameState.getWalls()\n capsulePosition = currentGameState.getCapsules()\n width = wallPosition.width\n length = wallPosition.height\n\n if currentGameState.isWin():\n return float('inf')\n if currentGameState.isLose():\n return -float('inf') \n \n # score for the over all grid\n score = currentGameState.getScore()\n\n # use bfs for food to help avoid the block that bothers the manhattan distance\n # depth limit at 5\n food_depth = -1\n max_depth = 3\n ghost_dis = -1\n # cur_depth\n frontier = []\n frontier.append((pos[0], pos[1], 0)) # depth of the pos is 0\n while (len(frontier) > 0 and frontier[0][2] < max_depth):\n if food_depth >= 0 and ghost >= 0: # if already have find food and ghost, break the loop\n break\n cur = frontier.pop(0)\n cur_x = cur[0]\n cur_y = cur[1]\n cur_depth = cur[2]\n if cur_y != 0 and wallPosition[cur_x][cur_y - 1] == False: # up\n if ghost_dis < 0 and (cur_x, cur_y - 1) in ghostPositions and scaredTimes[ghostPositions.index((cur_x, cur_y - 1))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x, cur_y - 1) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x, cur_y - 1, cur_depth + 1))\n if cur_y != length - 1 and wallPosition[cur_x][cur_y + 1] == False: # down\n if ghost_dis < 0 and (cur_x, cur_y + 1) in ghostPositions and scaredTimes[ghostPositions.index((cur_x, cur_y + 1))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x, cur_y + 1) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x, cur_y + 1, cur_depth + 1))\n if cur_x != 0 and wallPosition[cur_x - 1][cur_y] == False: # left\n if ghost_dis < 0 and (cur_x - 1, cur_y) in ghostPositions and scaredTimes[ghostPositions.index((cur_x - 1, cur_y))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x - 1, cur_y) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x - 1, cur_y, cur_depth + 1))\n if cur_x != width - 1 and wallPosition[cur_x + 1][cur_y] == False: # left\n if ghost_dis < 0 and (cur_x + 1, cur_y) in ghostPositions and scaredTimes[ghostPositions.index((cur_x + 1, cur_y))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x + 1, cur_y) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x + 1, cur_y, cur_depth + 
1))\n\n for cp in capsulePosition:\n cp_dis = manhattanDistance(cp, pos)\n score -= cp_dis\n\n if food_depth < 0: # does not find food with bfs\n all_heu = []\n for food in foodPosition.asList():\n all_heu.append(manhattanDistance(pos, food))\n food_depth = min(all_heu)\n \n # if ghost_dis < 0 and ghost_dis == 0:\n # return 1 / food_depth\n # print('food_depth', food_depth)\n # print('ghost_dis', ghost_dis)\n # if ghost_dis == 0:\n # return float('inf')\n # return ((1 / food_depth) - (1 / ghost_dis))\n\n return (score - food_depth + ghost_dis)", "def betterEvaluationFunction(currentGameState):\r\n \"*** YOUR CODE HERE ***\"\r\n util.raiseNotDefined()", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def make_turn(self):\n # if play first, start in the middle\n if np.count_nonzero(self.board) == 0:\n self.place_disc(self.board.shape[1] / 2)\n return 1\n\n\n # win if possible\n for try_column in range(0,self.board.shape[1]):\n if 0 == self.board[0, try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id())\n if dhw.did_he_win(new_board, self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't loose if in danger\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id())\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't fall in trap!\n forbidden_columns = []\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id()) # my move\n new_board = self.simulate_place_disc(new_board, try_column, 3 - self.id()) # enemy move\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # don't ruin my trap\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id()) # 'my' move\n new_board = self.simulate_place_disc(new_board, try_column, self.id()) # my move\n if dhw.did_he_win(new_board, self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # allow forbidden_columns if no other choice\n if np.count_nonzero(self.board[0, :]) == self.board.shape[1] - len(forbidden_columns):\n forbidden_columns = []\n\n # otherwise, play randomly\n rannum = random.randrange(7)\n while 0 != self.board[0, rannum] or rannum in forbidden_columns:\n rannum = random.randrange(7)\n self.place_disc(rannum)\n return 1", "def test_endgameStrategy2(self):\n\n self.result = \"\"\"\n 1 x 1 0 0 2 x 2 1 x 1 0 0 1 x x 1\n 1 1 1 0 0 2 x 3 2 1 1 0 0 1 3 4 3\n 0 0 0 0 0 1 2 x 1 0 0 0 0 0 1 x x\n 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 2 2\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n 1 1 1 0 0 0 0 0 0 0 0 0 0 1 2 x 1\n 1 x 1 0 0 0 0 0 0 0 0 0 0 1 x 2 1\n \"\"\"", "def evaluationFunction(self, 
currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [\n ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # print \"newScaredTimes\", newScaredTimes\n # print successorGameState.getCapsules()\n\n newGhostPos = newGhostStates[0].getPosition()\n ghost_dist = ghost_distance(newPos, newGhostPos)\n capsules = successorGameState.getCapsules()\n # food_dist = food_distance(newPos, newFood)\n\n # approach 1: 2/4 win = 10, average < 500\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood)\n\n # approach 2: 2/4 win = 10, average < 500 but close to 500\n # if newScaredTimes[0] == 0:\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood) -capsule_distance(newPos, capsules)\n\n # final approach: 4/4 win = 10, average = 1310.5\n if newScaredTimes[0] == 0:\n if ghost_dist <= 1:\n return -999999\n return -food_distance(newPos, newFood) * .01 - food_num(newFood) - capsule_distance(newPos, capsules)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n ghostPosition1 = newGhostStates[0].getPosition() \n #ghostPosition2 = newGhostStates[1].getPosition()\n\n score = successorGameState.getScore()\n if newFood[newPos[0]][newPos[1]]:\n score += 1\n if newFood[newPos[0]+1][newPos[1]]:\n score += 0.6\n elif newFood[newPos[0]-1][newPos[1]]:\n score += 0.6\n elif newFood[newPos[0]][newPos[1]+1]:\n score += 0.6\n elif newFood[newPos[0]][newPos[1]-1]:\n score += 0.6\n if currentGameState.getPacmanPosition()[0] == newPos[0] and currentGameState.getPacmanPosition()[1] == newPos[1]:\n score -= 0.5\n foodList = newFood.asList()\n FoodDistance = [(manhattanDistance(food,newPos),food) for food in foodList]\n if len(FoodDistance) != 0:\n score += 1.5/min(FoodDistance[0])\n i_nearestFood = FoodDistance[0].index(min(FoodDistance[0]))\n for i in range(newPos[0],FoodDistance[i_nearestFood][1][0]):\n if successorGameState.getWalls()[i][newPos[1]] :\n score -= 0.8\n break\n #print \"YYYYYY\"\n for j in range(newPos[1],FoodDistance[i_nearestFood][1][1]): \n if successorGameState.getWalls()[newPos[0]][j] :\n score -= 0.8\n break\n #print \"XXXXX\"\n\n \n if newScaredTimes[0] == 0 :\n if ghostPosition1 == newPos:\n #print newPos,\"ghostPosition (close!!):\", ghostPosition1\n score -= 1500\n elif manhattanDistance(newPos,ghostPosition1) <=1:\n #print newPos,\"ghostPosition :\", ghostPosition1\n score -= 100\n # if newPos[0] == ghostPosition2[0] and newPos[1] == ghostPosition2[1]:\n # score -= 1500\n # elif manhattanDistance(newPos,ghostPosition2) <=1:\n #print newPos,\"ghostPosition :\", ghostPosition1\n score -= 100\n elif newScaredTimes[0] >=2:\n if newPos[0] == ghostPosition1[0] and newPos[1] == ghostPosition1[1]:\n score += 300\n elif manhattanDistance(newPos,ghostPosition1):\n score += 30\n #elif newScaredTimes[1] >=2:\n # if newPos[0] == ghostPosition2[0] and newPos[1] == 
ghostPosition2[1]:\n # score += 300\n # elif manhattanDistance(newPos,ghostPosition2):\n # score += 30\n #if newPos[0] == ghostPosition2[0] and newPos[1] == ghostPosition2[1]:\n # score -= 500\n return score", "def main():\n\n board = [[\".\"] * grid_size for i in range(grid_size)]\n ship_row = random_row(board)\n ship_col = random_col(board) - 1\n ships = 0\n turn = 0\n\n print_board(board)\n while turn < total_turns:\n\n guess_col = get_col()\n guess_row = get_row()\n\n print(\"-\" * 35)\n print(\n f\"You entered: {letter_and_index_conversion(guess_col, grid_size)}{guess_row} \\n\"\n )\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row - 1][guess_col - 1] = \"X\"\n print(\"Congratulations Captain! You got a hit!\")\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n ships += 1\n ship_row = random_row(board)\n ship_col = random_col(board)\n if ships == 10:\n print(\"Congratulations Captain! You won!\")\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n else:\n if (\n board[guess_row - 1][guess_col - 1] == \"X\" or\n board[guess_row - 1][guess_col - 1] == \"*\"\n ):\n print(\"You already guessed this one -_-\")\n print(\"-\" * 35)\n else:\n print(\"Your aim is WAY off! \\n\")\n board[guess_row - 1][guess_col - 1] = \"*\"\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n if turn == total_turns:\n print(\"Game Over! You ran out of turns\")\n print(\"-\" * 35)\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n\n print(f\"Turn {turn + 1} of {total_turns}\")\n print(f\"You have {10 - ships} ships left\")", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def player_stage(niv): \n playing = True\n a = niv[0][0] \n b = niv[0][1] \n (x, y) = (a, b) \n state = [[a, b]] #Create a list with the starting point of the selected level patern.\n sense.stick.get_events()\n while playing:\n for event in sense.stick.get_events(): #It moves the pixel with the player moves and add the point passed by the player in the state[].\n if event.action == 'pressed':\n if event.direction == 'left':\n if x > 0:\n x = min(x-1, 7)\n state.append([x, y])\n elif event.direction == 'right':\n if x < 7:\n x = max(x+1, 0)\n state.append([x, y])\n if event.direction == 'down':\n if y < 7:\n y = min(y+1, 7)\n state.append([x, y])\n elif event.direction == 'up':\n if y > 0:\n y = max(y-1, 0)\n state.append([x, y])\n elif event.direction == 'middle':\n playing = False\n sense.set_pixel(x, y, RED)\n if state[:] == niv[:]: #Compare the way choosen by the player with the selected level patern. Results of the try.\n sense.show_message(\"WINNER !\",\n text_colour=LEMON, scroll_speed=0.05)\n sleep(2)\n main() #brings back to the level selection.\n else:\n sense.show_message(\"LOSER !\",\n text_colour=BLUE, scroll_speed=0.05)\n sleep(2)\n try_again(niv) #cf. 
try_again() function", "def main():\n play_game(progression)", "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)", "def play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):\n player = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n dice_swapped = False # Whether 4-sided dice have been swapped for 6-sided\n my_score = score0\n opponent_score = score1\n while(score0<goal and score1<goal): # 'While' loop that ends when game ends\n if(player == 0): # If it is Player0's turn...\n num_rolls = strategy0(my_score,opponent_score) # strategy for Player0 implemented\n if num_rolls == -1 and dice_swapped == False: # if strategy is Pork Chop, and current die is six sided\n my_score+=1\n dice_swapped = True\n elif num_rolls == -1 and dice_swapped == True: # if strategy is Pork Chop, and current die is four sided\n my_score+=1\n dice_swapped = False\n else: #if strategy is not Pork Chop\n dice = select_dice(my_score, opponent_score, dice_swapped)\n my_score += take_turn(num_rolls, opponent_score, dice)\n player = other(player)\n else: # If it is Player1's turn...\n num_rolls = strategy1(opponent_score,my_score)\n if num_rolls == -1 and dice_swapped == False:\n opponent_score+=1\n dice_swapped = True\n elif num_rolls == -1 and dice_swapped == True:\n opponent_score+=1\n dice_swapped = False\n else:\n dice = select_dice(opponent_score, my_score, dice_swapped)\n opponent_score = opponent_score + take_turn(strategy1(opponent_score, my_score), my_score, dice)\n player = other(player)\n if(my_score*2 == opponent_score or opponent_score*2 == my_score): #Swine Swap implementation via placeholders\n zerocounter = my_score\n onecounter = opponent_score\n score0 = onecounter\n my_score = onecounter\n score1 = zerocounter\n opponent_score = zerocounter\n else: #Final reassignments to original score variables before return statement\n score0 = my_score\n score1 = opponent_score\n# END PROBLEM 5\n return score0, score1", "def calculate_unfinished_game(test_string=\"\"):\n # get user input\n print \"No old game entered, starting new Game\"\n # which frame we're on,\n # which throw we're on in the frame,\n # and the number of throws so far\n # throw_number checks which throw in the frame you're on,\n # throw_idx tracks the number of throws so far total.\n frame_idx = throw_number = throw_idx = 0\n\n # track if there's an extra throw this frame, only used in 10th frame\n bonus_throw = False\n # create a list of throws as they come in\n throw_list = []\n # create a list of each frame, and what they scored -\n frame_list = []\n # and add the first 
9 frames\n for _ in range(0, 9):\n frame_list.append([0, 0])\n\n # add the special 10th frame\n frame_list.append([0, 0, 0])\n # create a list of the total scores for the frame\n score_list = [0]*10\n # track the number of pins standing\n pins = reset_pins()\n # track which throw goes to which frame\n throw_to_frame = {}\n\n if test_string:\n test_list = list(test_string)\n\n # while - a game of bowling has only 10 frames\n while frame_idx < 10:\n print\n # just to add two lines between throws\n print\n\n print_scoreboard(frame_list, score_list, frame_idx, throw_number)\n print \"\"\"You are on frame {} and are on your throw {}.\nYour running score is {}. There are {} pins standing.\"\"\".format(\n # show which frame\n frame_idx+1,\n # show throw they're on\n throw_number+1,\n # show their score\n sum(score_list),\n # show how many pins are standing\n pins,\n )\n\n print\n # just to add two lines between throws\n print\n\n # just for testing: convert X to 10\n if test_string:\n throw = test_list.pop(0)\n if throw == 'X':\n throw = '10'\n else:\n # get user input\n throw = raw_input(\"After your throw, enter a number 0-10. > \")\n # along with numbers for pins, we can also accept characters\n # x, X & / (for strikes and spares)\n if not throw.isdigit():\n # covert X to strike\n if is_strike(str(throw)):\n throw = '10'\n # covert / to spare\n if is_spare(str(throw)):\n # if we're on the first throw\n if not throw_number:\n print 'Cannot get spare on first throw!'\n continue\n else:\n throw = str(10-throw_list[throw_idx-1])\n\n # check that the number isn't greater than the number of pins abailable\n if not check_valid_throw(throw, pins):\n print \"Enter only numbers, please.\"\n # if it's invalid, try again\n continue\n # cast to int\n throw = int(throw)\n # save throw in throw_list\n throw_list.append(throw)\n # save which throw was in which frame\n throw_to_frame[str(throw_idx)] = frame_idx\n # save throw score into the throw score sheet\n frame_list[frame_idx][throw_number] = throw\n # save throw into the frame score sheet\n score_list[frame_idx] += throw\n # update how many pins are standing after the throw\n pins -= throw\n\n # check for strikes and spares\n # if we can check last throw\n if len(throw_list) > 1:\n # if last throw was a strike\n if throw_list[throw_idx-1] == 10:\n # if last frame was in the first 9\n if throw_to_frame[str(throw_idx-1)] < 9:\n # add this throw to the frame for that throw\n score_list[throw_to_frame[str(throw_idx-1)]] += throw\n elif throw_number < 1:\n # if last frame was a spare\n if score_list[frame_idx-1] == 10:\n # add this throw to last frame\n score_list[frame_idx-1] += throw\n # if we can check two throws ago\n if len(throw_list) > 2:\n # if second to last throw was a strike\n if throw_list[throw_idx-2] == 10:\n # if second to last frame was in the first 9\n if throw_to_frame[str(throw_idx-2)] < 9:\n # add this throw to the score from that frame\n score_list[throw_to_frame[str(throw_idx-2)]] += throw\n\n # what to do after the throw depends on several factors\n # first nine frames\n if frame_idx < 9:\n # first throw\n if throw_number < 1:\n # if there are pins left\n if pins > 0:\n # go to second throw in frame\n throw_number += 1\n # if there are no pins left\n else:\n print \"Strike!\"\n print\n # reset pins\n pins = reset_pins(pins)\n # go to next frame\n frame_idx += 1\n throw_number = 0\n # second throw\n else:\n # if there are pins left\n if pins == 0:\n print \"Spare!\"\n print\n # go to next frame\n frame_idx += 1\n # reset throw_number to 
0\n throw_number = 0\n # reset pins\n pins = reset_pins(pins)\n # final 10th frame\n else:\n # first throw\n if throw_number < 1:\n # if there no are pins left\n if pins == 0:\n print \"Strike!\"\n print\n # reset pins\n pins = reset_pins(pins)\n # you get a bonus 3rd throw\n bonus_throw = True\n # second throw\n elif throw_number < 2:\n # if there no are pins left\n if pins == 0:\n # if last throw was a strike,\n if throw_list[throw_idx-1] == 10:\n print \"Strike!\"\n print\n # if last throw was not a strike,\n else:\n print \"Spare!\"\n print\n # either way, you get a bonus 3rd throw\n bonus_throw = True\n # reset pins\n pins = reset_pins(pins)\n # if you don't have a bonus throw\n if not bonus_throw:\n # go to next frame, ends the game\n frame_idx += 1\n # third throw\n elif throw_number < 3:\n # go to next frame, ends the game\n frame_idx += 1\n # increment throw_number\n throw_number += 1\n # increment throw_idx\n throw_idx += 1\n\n print_scoreboard(frame_list, score_list, frame_idx, throw_number)\n\n if not test_string:\n _ = raw_input(\"Game Over!\")\n print \" {}\".format(_)\n print\n print \"final score: {}\".format(sum(score_list))\n if raw_input(\n \"\"\"Play again? Enter 'Y' to play again,\n or press enter to quit. \"\"\").lower() == 'y':\n Game() # pragma: no cover\n return sum(score_list)", "def game_loop(brains: Tuple[BrainType, BrainType]) -> \\\n Tuple[Dict[str, Any], Game]:\n def get_turn_brain(player: Player) -> BrainType:\n if player == S_DOG:\n return brains[0]\n return brains[1]\n\n def try_brain_move(game: Game, ev: EventType) -> bool:\n \"\"\"\n Picking the brain for current player, tries to update game with the received move\n Returns true if a valid move is performed\n \"\"\"\n # print(\"Try brain move\")\n\n brain = get_turn_brain(game.state.turn)\n ui_change, state_change = perform_brain_move(game, ev, brain)\n\n # print(f\"Tryingmove: UI - {ui_change} | State - {state_change}\")\n\n return ui_change or state_change\n\n game = Game(State(Table(None), S_DOG, []), [])\n\n game_start_time = time.time()\n last_move_time = time.time()\n changed_game = False\n last_turn = game.state.turn\n\n print(\" ------ Starea initiala\")\n game.console_draw()\n\n dog_data: List[float] = []\n rabbit_data: List[float] = []\n dog_nodes: List[int] = []\n rabbit_nodes: List[int] = []\n\n\n while not game.state.isFinal():\n changed_game = False\n for ev in pygame.event.get():\n game.draw(pygame.display.get_surface())\n # print(f\"Event type: {ev}\")\n if ev.type == pygame.QUIT:\n print(\" Intrerupt abrupt -------- \")\n show_stats({'winner': \"Intrerupt / Invalid\",\n 'dog_data': dog_data, \n 'rabbit_data': rabbit_data,\n 'dog_nodes': dog_nodes,\n 'rabbit_nodes': rabbit_nodes}, game)\n pygame.display.quit()\n pygame.quit()\n sys.exit()\n \n if ev.type == pygame.MOUSEMOTION:\n continue\n\n\n changed_game = try_brain_move(game, ev.type)\n \n if changed_game:\n break\n\n\n if not changed_game:\n changed_game = try_brain_move(game, pygame.K_DELETE)\n \n game.draw(pygame.display.get_surface())\n \n if changed_game: \n if game.state.turn == last_turn:\n continue\n # Calcule la schimbarea jucatorului\n last_turn = game.state.turn\n time_move = time.time() - last_move_time\n last_move_time = time.time()\n # Logging la shimbarea jucatorului\n print(f\"{name_player(other_player(last_turn))} time for move: {time_move:.2f} s\")\n if other_player(last_turn) == S_DOG:\n rabbit_data.append(time_move)\n if is_DOG_AI:\n dog_nodes.append(get_computed_nodes())\n print(f\"Numar stari calculate la mutare: 
{get_computed_nodes()}\")\n else:\n dog_data.append(time_move)\n if is_RAB_AI:\n rabbit_nodes.append(get_computed_nodes())\n print(f\"Numar stari calculate la mutare: {get_computed_nodes()}\")\n \n print(\" --------- \")\n game.console_draw()\n\n print(\" ========================== \")\n print(f\"Total game time: {time.time() - game_start_time:.2f} s\")\n print(f\"Numar mutari Dog: {len(dog_data)}\")\n print(f\"Numar mutari Rabbit: {len(rabbit_data)}\")\n\n print(\"\\n Finished game\")\n winner = S_DOG\n if game.state.rabbits_win():\n winner = S_RAB\n return {'winner': name_player(winner),\n 'dog_data': dog_data, \n 'rabbit_data': rabbit_data,\n 'dog_nodes': dog_nodes,\n 'rabbit_nodes': rabbit_nodes}, game", "def main():\r\n clean()\r\n h_choice = '2' # \r\n c_choice = '1' # \r\n first = '' # if human is the first\r\n\r\n # Human may starts first\r\n clean()\r\n while first != 'Y' and first != 'N':\r\n try:\r\n print(\" $$\\ $$\\ $$$$$$\\ $$$$$$$\\ $$$$$$$\\ $$$$$$$$\\ $$$$$$$\\ $$$$$$\\ \") \r\n print(\" $$ | $$ |$$ __$$\\ $$ __$$\\ $$ __$$\\ $$ _____|$$ __$$\\ $$ __$$\\ \")\r\n print(\" $$ | $$ |$$ / $$ |$$ | $$ |$$ | $$ |$$ | $$ | $$ |$$ / \\__|\")\r\n print(\" $$$$$$$$ |$$ | $$ |$$$$$$$ |$$$$$$$ |$$$$$\\ $$$$$$$ |\\$$$$$$\\ \")\r\n print(\" $$ __$$ |$$ | $$ |$$ ____/ $$ ____/ $$ __| $$ __$$< \\____$$\\ \")\r\n print(\" $$ | $$ |$$ | $$ |$$ | $$ | $$ | $$ | $$ |$$\\ $$ |\")\r\n print(\" $$ | $$ | $$$$$$ |$$ | $$ | $$$$$$$$\\ $$ | $$ |\\$$$$$$ |\")\r\n print(\" \\__| \\__| \\______/ \\__| \\__| \\________|\\__| \\__| \\______/ \") \r\n \r\n first = input('First to start?[y/n]: ').upper()\r\n except (EOFError, KeyboardInterrupt):\r\n print('Bye')\r\n exit()\r\n except (KeyError, ValueError):\r\n print('Bad choice')\r\n\r\n # Main loop of this game\r\n while len(empty_cells(board)) > 0 and not game_over(board):\r\n \r\n if first == 'N':\r\n print(\"Step\")\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n first = ''\r\n render(board, c_choice, h_choice)\r\n print(\"Hope\")\r\n xi = int (input(\"Initial row HUMAN(0-9): \"))\r\n yi = int (input(\"Initial column HUMAN(0-9): \"))\r\n human_turn(c_choice, h_choice,xi,yi)\r\n render(board, c_choice, h_choice)\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n\r\n # Game over message\r\n if wins(board, HUMAN):\r\n clean()\r\n print(f'Human turn [{h_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU WIN!')\r\n elif wins(board, COMP):\r\n clean()\r\n print(f'Computer turn [{c_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU LOSE!')\r\n else:\r\n clean()\r\n render(board, c_choice, h_choice)\r\n print('DRAW!')\r\n\r\n exit()", "def __init__(self, n: int):\n        self.rows = [[n, -1] for _ in range(n)]\n        self.cols = [[n, -1] for _ in range(n)]\n        self.diag = [[n, -1], [n, -1]] # 0 for normal, 1 for anti\n        \n    def move(self, row: int, col: int, player: int) -> int:\n        r1, r2 = self.check(self.rows, row, player), self.check(self.cols, col, player)\n        r3, r4 = 0, 0\n        if(row == col):\n            r3 = self.check(self.diag, 0, player)\n        if(row + col == len(self.rows)-1):\n            r4 = self.check(self.diag, 1, player)\n        \n        return max(r1,r2,r3,r4)\n    def check(self, arr, i, player):\n        arr[i][0] -= 1\n        \n        if(arr[i][1] == -1):\n            arr[i][1] = 
player\n        elif(arr[i][1] != player):\n            arr[i][1] = 0\n        \n        if(arr[i][0] == 0 and arr[i][1] != 0):\n            return player\n        return 0\n        \n        \"\"\"\n       Player {player} makes a move at ({row}, {col}).\n       @param row The row of the board.\n       @param col The column of the board.\n       @param player The player, can be either 1 or 2.\n       @return The current winning condition, can be either:\n               0: No one wins.\n               1: Player 1 wins.\n               2: Player 2 wins.\n       \"\"\"\n        ", "def strategy(self, game, args=()):", "def game():\n indexes = set_index_list(-1)\n score = 0\n a = get_person(indexes, -1)\n while True:\n b = get_person(indexes, a)\n print(game_art.logo)\n if score != 0:\n print(f\"You're right! Current score: {score}\")\n print('Compare A:', end=' ')\n followers_a = display_person(game_data.data[a])\n print(game_art.vs)\n print('Against B:', end=' ')\n followers_b = display_person(game_data.data[b])\n choice = input(\"Who has more followers? Type 'A' or 'B': \").lower()\n correct = compare_choice(followers_a, followers_b, choice == 'a')\n clear_screen()\n if correct:\n score += 1\n else:\n return score\n a = b", "def main():\n\n can_continue = True\n suma_jugador = 0\n suma_maquina = 0\n\n print SALUDO # Saludamos al usuario\n\n cards = barajar_cartas() # Barajamos las cartas\n\n print PLAYER_TIME\n\n while can_continue:\n\n card = dar_carta(cards) # Preguntamos al usuario si quiere carta\n\n if card:\n print \"It is the card %s and costs %.1f\" %(str(card), CARTAS[card])\n suma_jugador += CARTAS[card] # Sumamos los puntos de la carta tocada.\n \n if suma_jugador > 7.5: # Si se pasa del limite, el usuario pierde y acaba el juego.\n print GAME_OVER\n return\n \n else: #Si no quiere carta, turno de la maquina\n can_continue = False\n\n can_continue = True\n\n # Turno de la maquina\n print MACHINE_TIME\n\n while can_continue:\n\n card = cards.pop() # Cogemos carta\n\n if (suma_maquina + CARTAS[card] > 7.5):\n can_continue = False # Si se pasa del limite, la maquina para de jugar\n else:\n print \"The machine takes the card %s that it costs %.1f\" %(str(card), CARTAS[card])\n suma_maquina += CARTAS[card]\n \n delay(2) # Esperamos 2 segundos para que escoja de nuevo otra carta la maquina\n\n # Anunciamos el ganador\n print WINNER_TIME\n print \"And the winner is...\",\n if (suma_jugador > suma_maquina):\n print \"the player 1 with %.1f points vs %.1f points of the machine!!\" %(suma_jugador, suma_maquina)\n elif (suma_jugador == suma_maquina):\n print \"That was a draw!!! 
No winner this time, more lucky next time :) \"\n else:\n print \"the machine with %.1f points vs %.1f points of the player 1!!\" %(suma_maquina, suma_jugador)", "def run_game(player_board, user_guess, computer_board, computer_guess):\n player_turn = 0 # Ensures player goes first\n computer_turn = 1 # Computer can only go once player score is equal\n # Life counter decrements each time a ship is hit\n player_lives = 15\n computer_lives = 15\n while True:\n if player_turn < computer_turn:\n user_guess.print_board()\n column, row = player_board.attack_input()\n if user_guess.board[row][column] == GUESSED:\n print('\\nYOU HAVE ALREADY GUESSED THIS CO-ORDINATE\\n')\n elif user_guess.board[row][column] == HITSHIP:\n print('\\nYOU HAVE ALREADY HIT A SHIP IN THIS CO-ORDINATE\\n')\n elif computer_board.board[row][column] == SHIP:\n print(' ')\n print(PHASE)\n print('\\nCONGRATULATIONS, YOU HIT A SHIP!\\n')\n user_guess.board[row][column] = HITSHIP\n player_turn += 1\n user_guess.lives_counter()\n user_guess.print_board()\n computer_lives -= 1\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_lives == 0:\n print('\\nTHE COMPUTER HAS NO LIVES LEFT!')\n print('YOU WIN!')\n print(' ')\n print(PHASE)\n break\n else:\n print(' ')\n print(PHASE)\n print('\\nYOU MISSED!\\n')\n user_guess.board[row][column] = GUESSED\n player_turn += 1\n user_guess.print_board()\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_turn == player_turn:\n row, column = computer_guess.attack_input()\n if computer_guess.board[row][column] == GUESSED:\n pass\n elif computer_guess.board[row][column] == HITSHIP:\n pass\n elif player_board.board[row][column] == SHIP:\n print('THE COMPUTER HIT YOUR SHIP!\\n')\n computer_turn += 1\n player_lives -= 1\n computer_guess.column_arry.append(column)\n computer_guess.row_arry.append(row)\n computer_guess.board[row][column] = HITSHIP\n player_board.board[row][column] = HITSHIP\n player_board.lives_counter()\n player_board.print_board()\n computer_guess.attk_arry.append(0)\n time.sleep(3)\n if player_lives == 0:\n print('\\nYOU HAVE NO LIVES LEFT!')\n print('YOU LOSE!')\n print(' ')\n print(PHASE)\n break\n else:\n print('COMPUTER MISSED!\\n')\n computer_guess.board[row][column] = GUESSED\n computer_turn += 1\n player_board.print_board()\n computer_guess.attk_arry.append(1)\n computer_guess.check_miss_count()\n time.sleep(3)", "def evaluate_against_bot(self, opponent_bot, num_games, \n num_white_pieces = None, \n num_black_pieces = None,\n max_num_of_turns = 1000):\n zero_bot_player = 1\n score = 0\n num_games_won_as_black = 0\n num_games_won_as_white = 0\n \n # Play 'num_games' games of brandubh\n for i in range(num_games):\n print('\\rPlaying game {0}, score: w = {1}, b = {2}.'.format(i, \n num_games_won_as_white, num_games_won_as_black),end='')\n \n # If a maximum number of white or black pieces is given, then\n # use a random starting position for the game.\n if num_white_pieces or num_black_pieces:\n starting_board = random_starting_position(num_white_pieces, \n num_black_pieces)\n game = GameState.new_game(starting_board)\n else:\n game = GameState.new_game()\n \n # Get both bots to play a game of brandubh.\n turns_taken = 0\n while game.is_not_over() and turns_taken < max_num_of_turns:\n if game.player == zero_bot_player:\n action = self.select_move(game)\n else:\n action = opponent_bot.select_move(game) \n game.take_turn_with_no_checks(action)\n turns_taken += 1\n \n \n # At the end of the game, increment counts keeping track of how\n # many games 
the current bot won against the opponent bot and \n # get the bots to switch sides for the next game.\n if turns_taken < max_num_of_turns:\n score += zero_bot_player*game.winner\n if zero_bot_player == game.winner:\n if zero_bot_player == 1:\n num_games_won_as_white += 1\n else:\n num_games_won_as_black += 1 \n zero_bot_player *= -1\n \n else:\n score -= 1\n zero_bot_player *= -1\n \n print(' done.')\n # Return the evaluation score of the bot along with fraction of games\n # won as black/white, the total number of games and the number of\n # epochs the bot has trained for before being evaluated.\n return [score/num_games, 2*num_games_won_as_white/num_games,\n 2*num_games_won_as_black/num_games, \n num_games, len(self.loss_history)]", "def main():\n game_of_life(10, 20)", "def play_pig(A, B):\n # your code here\n strategies = [A, B]\n state = (0, 0, 0, 0)\n while True:\n (p, me, you, pending) = state\n if me >= goal:\n return strategies[p]\n elif you >= goal:\n return strategies[other[p]]\n elif strategies[p](state) == 'hold':\n state = hold(state)\n else:\n state = roll(state, random.randint(1, 6))", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n\n \"*** YOUR CODE HERE ***\"\n current_food_list = list(currentGameState.getFood().asList())\n food_list = list(newFood.asList())\n wall_list = list(currentGameState.getWalls().asList())\n score = successorGameState.getScore()\n x_newPos = newPos[0]\n y_newPos = newPos[1]\n neighbors = [((x_newPos-1),y_newPos),((x_newPos+1),y_newPos),(x_newPos,(y_newPos-1)),(x_newPos,(y_newPos+1))]\n food_distance = []\n if len(food_list)<>0: \n for food in food_list:\n food_distance.append(util.manhattanDistance(newPos, food))\n min_food_distance = min(food_distance)\n if action <> 'Stop':\n score += (30-min_food_distance)*10/30\n else:\n score -= 10\n if newPos in current_food_list:\n score +=5\n \n for neighbor in neighbors:\n if neighbor in current_food_list:\n score +=1 \n for ghost_index in range(len(newGhostStates)):\n if newScaredTimes[ghost_index]>5 and util.manhattanDistance(newPos, newGhostStates[ghost_index].getPosition())<=20:\n dis=util.manhattanDistance(newPos, newGhostStates[ghost_index].getPosition())\n score +=(30-dis)*200/30\n elif newScaredTimes[ghost_index]<=5 and util.manhattanDistance(newPos, newGhostStates[ghost_index].getPosition())<=2:\n dis=util.manhattanDistance(newPos, newGhostStates[ghost_index].getPosition())\n score -=(30-dis)*200/30 \n \n return score", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage 
result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. 
if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i 
in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove", "def player(board):\n total = 0\n for i in range(len(board)):\n for j in range(len(board)):\n total = total + utility_map[board[i][j]]\n\n # If they cancel out then equal number so X's turn\n if total == 0:\n return X\n else:\n return O", "def inning(plays = play, outs = 0, strikes = 0, inning_runs = 0, bases = []):\r\n while(outs < 3):\r\n current_play = random.choice(plays)\r\n if(current_play == 'Hr' or current_play == 'trip'):\r\n inning_runs = inning_runs + len(bases)\r\n if(current_play == 'Hr'):\r\n inning_runs = inning_runs + 1\r\n bases = []\r\n if(current_play == 'trip'):\r\n bases = [3]\r\n strikes = 0\r\n\r\n elif(current_play == 'double'):\r\n if(2 in bases):\r\n inning_runs += 1\r\n bases.remove(2)\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n if(1 in bases):\r\n bases.remove(1)\r\n bases.append(3)\r\n strikes = 0\r\n bases.append(2)\r\n bases.sort()\r\n\r\n elif(current_play == 'single' or current_play == 'be'):\r\n if(2 in bases):\r\n inning_runs += 1\r\n bases.remove(2)\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n if(1 in bases):\r\n bases.remove(1)\r\n bases.append(2)\r\n strikes = 0\r\n bases.append(1)\r\n bases.sort()\r\n\r\n elif(current_play == 'bb'):\r\n if(1 in bases):\r\n if(2 in bases):\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n bases.append(3)\r\n bases.remove(2)\r\n bases.append(2)\r\n bases.remove(1)\r\n strikes = 0\r\n bases.append(1)\r\n bases.sort()\r\n\r\n elif (current_play == 'st'):\r\n strikes += 1\r\n if(strikes > 2):\r\n outs += 1\r\n strikes = 0\r\n\r\n elif(current_play == 'fo' or current_play == 'of' or current_play == 'fout' or current_play == 'dp'):\r\n outs += 1\r\n strikes = 0\r\n if(outs < 3):\r\n if(current_play == 'fout'):\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n if(current_play == 'of'):\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n if(2 in bases):\r\n bases.remove(2)\r\n bases.append(3)\r\n if(1 in bases):\r\n bases.remove(1)\r\n bases.append(2)\r\n\r\n if(current_play == 'dp'):\r\n if bases:\r\n outs+= 1\r\n if(outs < 3):\r\n if(1 in bases):\r\n bases.remove(1)\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n if(2 in bases):\r\n bases.remove(2)\r\n bases.append(3)\r\n elif(2 in bases and 1 not in bases):\r\n if(3 in bases):\r\n inning_runs += 1\r\n bases.remove(3)\r\n bases.remove(2)\r\n elif((3 in bases) and (1 not in bases) and (2 not in bases)):\r\n bases = []\r\n bases.sort()\r\n return inning_runs", "def play(self):\n\n value = 0 #player dictionary key\n player = {0: 'O', 1: 'X'}\n\n moveCount = 0 #how many moves have occurred. also doubles as the self.order index.\n turn = \"\"\n while moveCount < self.n**2 and self.go == \"Tie\":\n value = not value\n turn = player[value] #X starts\n key = self.order[moveCount]\n i = key[0]\n j = key[1]\n\n\n# self.rows[i][0] == homogenous?\n# self.rows[i][1] == X/O?\n# self.rows[i][2] == count of X's/O's?\n\n# Check to see if row i is 'homogenous' (contains only X's or O's):\n if self.rows[i][0]:\n\n# Check to see if any square in row i has been played. 
If it has been played,\n# check to see if it was the same person who's current turn it is.\n if self.rows[i][1] == \"\" or player[value] == self.rows[i][1]:\n\n# Mark the column with the current person's token (X or O).\n# Admittedly, this could be improved to not update every time.\n self.rows[i][1] = turn\n\n# Update the count by one.\n self.rows[i][2] += 1\n\n# If the count is equal to the board size, end the game and return who won and how.\n if self.rows[i][2] == self.n:\n self.go = (turn, 'row ' + str(i))\n\n# If the current person who's turn it is,\n# is not the same as the previous player who played this row,\n# set this row's 'homogenous' attribute to false.\n else:\n self.rows[i][0] = False\n\n if self.cols[j][0]:\n if self.cols[j][1] == \"\" or player[value] == self.cols[j][1]:\n self.cols[j][1] = turn\n self.cols[j][2] += 1\n if self.cols[j][2] == self.n:\n self.go = (turn, 'column ' + str(j))\n else:\n self.cols[j][0] = False\n\n# On boards of odd-sized 'n' (n = 3,5,7,etc...)\n# the middle square is part of both diagonals: 'step' and 'same':\n if i == j:\n if self.diags['same'][0]:\n if self.diags['same'][1] == \"\" or player[value] == self.diags['same'][1]:\n self.diags['same'][1] = turn\n self.diags['same'][2] += 1\n if self.diags['same'][2] == self.n:\n self.go = (turn, 'diagonal from 0,0 to n-1,n-1')\n else:\n self.diags['same'][0] = False\n\n if i + j + 1 == self.n:\n if self.diags['step'][0]:\n if self.diags['step'][1] == \"\" or player[value] == self.diags['step'][1]:\n self.diags['step'][1] = turn\n self.diags['step'][2] += 1\n if self.diags['step'][2] == self.n:\n self.go = (turn, 'diagonal from n-1,0 to 0,n-1')\n else:\n self.diags['step'][0] = False\n\n moveCount += 1\n print(turn, key)\n else:\n return self.go", "def main2():\n\n output = []\n joystick = 0\n paddle_x_pos = None\n ball_x_pos = None\n score = 0\n\n def inp():\n \"\"\"Return the current joystick position.\"\"\"\n return joystick\n\n def out(value):\n \"\"\"Wait until a triple has been read, then adjust position/joystick.\"\"\"\n nonlocal paddle_x_pos\n nonlocal ball_x_pos\n nonlocal joystick\n nonlocal output\n nonlocal score\n output.append(value)\n if len(output) == 3:\n i, j, tile_id = output\n output = []\n if i == -1 and j == 0:\n score = tile_id\n elif tile_id == 3:\n paddle_x_pos = i\n elif tile_id == 4:\n ball_x_pos = i\n\n if paddle_x_pos and ball_x_pos:\n if paddle_x_pos < ball_x_pos:\n joystick = 1\n elif paddle_x_pos > ball_x_pos:\n joystick = -1\n else:\n joystick = 0\n\n return False\n\n computer = Computer(\"day13-input\", inp, out)\n computer.memory[0] = 2\n computer.evaluate()\n\n print(score)", "def main():\n\tcolorama.init()\n\n\n\n\tgrid = get_start_grid(*map(int,sys.argv[1:]))\n\tprint_grid(grid)\n\n\twhile True:\n\t\tgrid_copy = copy.deepcopy(grid)\n\t\tget_input = getch(\"Enter direction (w/a/s/d/n/r/q): \")\n\t\tif get_input in functions:\t\n\t\t\tfunctions[get_input](grid)\n\t\telif get_input == \"n\":\n\t\t\tif get_next_action(grid) == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\tfunctions[get_next_action(grid)](grid)\n\t\telif get_input == \"r\":\n\t\t\tbreak\n\t\telif get_input == \"q\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\nInvalid choice.\")\n\t\t\tcontinue\n\t\tif grid != grid_copy:\n\t\t\tif not prepare_next_turn(grid):\n\t\t\t\tprint_grid(grid)\n\t\t\t\tprint(\"Well played!\")\n\t\t\t\tbreak\n\t\tprint_grid(grid)\n\t\n\tif get_input == \"r\":\n\t\twhile True:\n\t\t\tgrid_copy = copy.deepcopy(grid)\n\n\t\t\tnext_action = get_next_action(grid)\n\t\t\tif 
next_action == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\t\n\t\t\tfunctions[next_action](grid)\n\t\t\tif grid != grid_copy:\n\t\t\t\tif not prepare_next_turn(grid):\n\t\t\t\t\tprint_grid(grid)\n\t\t\t\t\tprint(\"Well played!\")\n\t\t\t\t\tbreak\n\t\t\tprint_grid(grid)\n\n\tprint(\"Thanks for playing.\")", "def solveOneStep(self):\n ### Student code goes here\n if (self.currentState.state == self.victoryCondition) or (self.currentState not in self.visited):\n self.visited[self.currentState] = True\n win_or_not = self.currentState.state == self.victoryCondition\n return win_or_not\n\n if not self.currentState.nextChildToVisit: \n its = 0\n for movable in self.gm.getMovables():\n its += 1\n # time test\n # too long \n if its == \"too long\":\n return \"too long\"\n #make every move in movable\n self.gm.makeMove(movable)\n new = self.gm.getGameState()\n new_gs = GameState(new, self.currentState.depth+1, movable)\n \n if new_gs not in self.visited:\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.gm.reverseMove(movable) \n \n num_children = len(self.currentState.children)\n if self.currentState.nextChildToVisit < num_children:\n new = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.makeMove(new.requiredMovable)\n self.currentState = new\n #recurse\n return self.solveOneStep()\n else:\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n #recurse\n return self.solveOneStep()", "def play(self, turn):\n # global black_prompt, white_prompt, res, pi, board\n if turn % 2 == 0:\n prompt, requests_add, responses_add, color_to_play = self.bp, self.bp, self.wp, BLACK\n print(\"pure\")\n res = pure_MCTS.UCTAlg(json=prompt).run(time_limit=1)\n else:\n prompt, requests_add, responses_add, color_to_play = self.wp, self.wp, self.bp, WHITE\n print(\"alpha\")\n res = mcts.uctAlg.UCTAlg(predict_model=player, json=prompt, mode='comp').run(time_limit=1)[0]\n print(res)\n self.board.disc_place(color_to_play, res[0], res[1]) # record steps to board\n\n dct = {'x': res[0], 'y': res[1]}\n requests_add[\"responses\"].append(dct)\n responses_add[\"requests\"].append(dct)", "def play(self): # TODO -- batches of games\n if self.terminal:\n raise Exception(\"This pit has already been played!\")\n\n # Let the models play a number of games\n for duel in range(self.num_duels):\n # Initialize a new game\n state = self.game_setup(self.args)\n current_player = 0\n\n # Store which model corresponds to which player\n # Let the models take turns in who is the starting player\n models = {duel % 2: (self.m1, self.mcst1),\n (duel + 1) % 2: (self.m2, self.mcst2)}\n\n # Play the game\n while not state.is_terminal():\n model, tree = models[current_player]\n # Perform a number of Monte Carlo searches\n for _ in range(self.num_sims):\n tree.search(state, model)\n # Determine an action by sampling from the policy as defined by the tree\n a, _ = tree.action(state, temperature=0)\n # Perform the move\n state.do_move(a)\n current_player = 1 - current_player\n # Add the game result to the win counter (taking player perspective into account)\n if duel % 2 == 0:\n self.wins += state.get_scores()\n else:\n self.wins += np.roll(state.get_scores(), 1)\n self.terminal = True\n\n return self.wins", "def main():\n\n game = JanggiGame()\n game.display_board()\n 
print(game.make_move('a7','a6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('a10','a7'))\n print(game.make_move('b1','d4'))\n print(game.make_move('a7','b7'))\n print(game.make_move('c1','a2'))\n print(game.make_move('b7','b3'))\n print(game.make_move('h3','b3'))\n print(game.make_move('e7','e6'))\n print(game.make_move('i1','h1'))\n print(game.make_move('i7','h7'))\n print(game.make_move('a2','b4'))\n print(game.make_move('b10','d7'))\n print(game.make_move('b4','a6'))\n print(game.make_move('i10','i9'))\n print(game.make_move('a6','b8'))\n print(game.make_move('c10','b8'))\n print(game.make_move('b3','b9'))\n print(game.make_move('i9','i6'))\n print(game.make_move('a1','b1'))\n print(game.make_move('b8','c6'))\n print(game.make_move('b1','b8'))\n print(game.make_move('h8','h1'))\n print(game.make_move('g3','h1'))\n print(game.make_move('e6','d6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('d6','d5'))\n print(game.make_move('d4','b1'))\n print(game.make_move('i6','e6'))\n print(game.make_move('i4','i5'))\n print(game.make_move('c6','d4'))\n print(game.make_move('c4','d4'))\n print(game.make_move('d5','d4'))\n print(game.make_move('g4','f4'))\n print(game.make_move('d4','e4'))\n print(game.make_move('f4','e4'))\n print(game.make_move('e6','e4'))\n print(game.make_move('g3','e4'))\n print(game.make_move('h10','i8'))\n print(game.make_move('e4','f6'))\n print(game.make_move('g7','g6'))\n print(game.make_move('b1','d4'))\n print(game.make_move('g6','f6'))\n print(game.make_move('d4','f7'))\n print(game.make_move('d7','f4'))\n print(game.make_move('f7','c9'))\n print(game.make_move('d10','d9'))\n print(game.make_move('a4','a5'))\n print(game.make_move('f6','f5'))\n print(game.make_move('g1','e4'))\n print(game.make_move('c7','c6'))\n print(game.make_move('b8','i8'))\n print(game.make_move('f5','e5'))\n print(game.make_move('e4','g1'))\n print(game.make_move('e5','d5'))\n print(game.make_move('i8','i9'))\n print(game.make_move('f10','f9'))\n print(game.make_move('a5','a6'))\n print(game.make_move('d5','d4'))\n print(game.make_move('a6','a7'))\n print(game.make_move('d4','d3'))\n print(game.make_move('e2','d3'))\n print(game.make_move('e9','e8'))\n print(game.make_move('i9','f9'))\n print(game.make_move('h7','h6'))\n print(game.make_move('a7','b7'))\n print(game.make_move('g10','e7'))\n print(game.make_move('f9','f7'))\n print(game.make_move('d9','d10'))\n print(game.make_move('f7','e7'))\n print(game.make_move('e8','f8'))\n print(game.make_move('b7','c7'))\n print(game.make_move('h6','h5'))\n print(game.make_move('e7','e10'))\n print(game.make_move('h5','h4'))\n print(game.make_move('c7','d7'))\n print(game.make_move('h4','h3'))\n print(game.make_move('d7','e7'))\n print(game.make_move('h3','h2'))\n print(game.make_move('e7','f7'))\n game.display_board()\n print('Red in check: '+str(game.is_in_check('red')))\n print('Blue in check: '+str(game.is_in_check('blue')))\n print(game.get_game_state())", "def counter_opponent_adv(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n opaffinity = self.get_opponent().get_affinity()\n\n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n\n # get advantageous blocks\n blocks_advn = board.get_adv_blocks(opaffinity)\n best_moves = []\n best_move = None\n\n # sort the blocks which may be countered\n for block in blocks_advn:\n if block.direction == 'horizontal':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] \n x2 = block.tiles[2][0] + 1\n y2 
= block.tiles[2][1] \n if x1 < 0 or x2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n elif block.direction == 'vertical':\n x1 = block.tiles[0][0] \n y1 = block.tiles[0][1] - 1 \n x2 = block.tiles[2][0]\n y2 = block.tiles[2][1] + 1\n if y1 < 0 or y2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x2,y2))\n elif block.direction == 'diagonal(\\)':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] - 1\n x2 = block.tiles[2][0] + 1\n y2 = block.tiles[2][1] + 1\n if x1 < 0 or y1 < 0 or x2 >= 7 or y2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n elif block.direction == 'diagonal(/)':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] + 1\n x2 = block.tiles[2][0] + 1\n y2 = block.tiles[2][1] - 1\n if x1 < 0 or y1 >= 7 or x2 >= 7 or y2 < 0: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n\n # pick the best move in the best block to counter\n for move in best_moves:\n print('considered advantageous move:'+str(move))\n if best_move is None: best_move = move \n elif move[0] < best_move[0] and move[1] == best_move[1]:\n best_move = move\n elif move[0] == best_move[0] and move[1] > best_move[1]:\n best_move = move\n elif move[0] < best_move[0] and move[1] > best_move[1]:\n best_move = move\n\n return best_move", "def play_game():\n pass", "def test_play_game(self):\r\n\r\n \r\n a_players = [RandomPlayer(1), RandomPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n #Game is played to competion\r\n a_game.play_game()\r\n\r\n a_history = a_game.get_history()\r\n\r\n #Go through each move and check to be sure it's valid\r\n for i in range(1,len(a_history)):\r\n #Get copy of the board\r\n prev_board = a_history[i-1]\r\n cur_board = a_history[i]\r\n\r\n #Check if the board chosen is in valid states\r\n self.assertTrue(cur_board in prev_board.get_states(a_players[0].get_id()) or cur_board in prev_board.get_states(a_players[1].get_id()),\\\r\n \"An invalid board state was added to the history\")\r\n\r\n if i == len(a_history) - 1:\r\n self.assertTrue(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())\r\n else: \r\n self.assertFalse(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())", "def hangman_figure(attempt_left):\n if attempt_left == N_TURNS:\n print('___________')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 1:\n print('___________')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 2:\n print('___________')\n print('| |')\n print('| O')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 3:\n print('___________')\n print('| |')\n print('| O')\n print('| |')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 4:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|')\n 
print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 5:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 6:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| /')\n print('| |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 7:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 8:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| |')\n print('|_____')\n if attempt_left == N_TURNS - 9:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 10:\n print('___________')\n print('| |')\n print('| -O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 11:\n print('___________')\n print('| |')\n print('| -O-')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')", "def play_game(self):\n TF = self.TF\n # keep updating\n actions = collections.defaultdict(dict)\n for i in range(10):\n for j in range(self.N):\n actions[i][j] = 0\n\n sums = []\n for time in range(self.MAX):\n print(\"begin time epoch: \" + str(time))\n train_state_pool = collections.defaultdict(dict)\n flow_num = 0\n sum_all = 0\n for i in TF.keys():\n for j in TF[i].keys():\n for agent in self.Ns:\n actions[flow_num][agent.id] = random.randint(0, agent.n_actions - 1)\n\n # update states to ss_\n sum_all = self.update_state(flow_num, actions)\n\n flow_num += 1\n\n sums.append(sum_all)\n print('cut-random: ' + str(sum_all))\n if time % 10000 == 0 and time != 0:\n str1 = 'cut-mini-random' + str(time) + '.txt'\n file = open(str1, 'w')\n file.write(str(sums))\n file.close()", "def main():\r\n\r\n movetwotimes()\r\n pick_beeper()\r\n move()\r\n turn_left()\r\n movetwotimes()\r\n put_beeper()\r\n turn_left()\r\n turn_left()\r\n movetwotimes()\r\n rotatethreetimes()\r\n movetwotimes()\r\n move()\r\n turn_left()\r\n turn_left()", "def play(self):\n utilities = {\n player: []\n for player\n in self.players\n }\n start_time = time.time()\n prev_print = 0\n for j in range(self.n_games):\n random.shuffle(self.players)\n initial_state = self.Game(\n self.players\n )\n contr = ahorn.Controller(\n initial_state\n )\n final_state = contr.play()\n for player in self.players:\n utilities[player].append(final_state.get_utility(player))\n\n elapsed = time.time()-start_time\n elapsed_since_print = time.time()-prev_print\n if self.verbose and ((elapsed_since_print > self.verbose_seconds) or j == self.n_games-1):\n prev_print = time.time()\n print(\"{}\".format(str(self.Game)))\n print(\n \"Game {} out of {} in {:2.1f}s ({:2.1f}s per game)\".format(\n j+1,\n self.n_games,\n elapsed,\n elapsed/(j+1)\n )\n )\n\n print(\"=\"*25)\n for player in sorted(self.players):\n low, mid, high = Arena.bootstrap(\n utilities[player],\n func=statistics.mean,\n confidence=self.confidence\n )\n print(\"{}\\t|\\t{:2.3f}/{:2.3f}/{:2.3f}\".format(\n str(player),\n low,\n mid,\n high\n ))\n print(\"\")\n result = {\n player: 
Arena.bootstrap(\n utility,\n func=statistics.mean,\n confidence=self.confidence\n )[1]\n for player, utility\n in utilities.items()\n }\n return result", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n pacmanPosition = currentGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n capsulePositions = currentGameState.getCapsules()\n \n #print \"current pacman position \", pacmanPosition\n\n score = 0\n \n numGhostsNear = 0\n numGhostsScared = 0\n \n if currentGameState.isLose():\n score -= 1000000000\n if currentGameState.isWin() and numGhostsScared is 0:\n score += 100000000\n \n for ghost in newGhostStates: \n ghostScareTimer = ghost.scaredTimer\n if ghostScareTimer > 2:\n numGhostsScared += 1\n \n for ghost in newGhostStates:\n ghostPosition = ghost.getPosition()\n ghostScareTimer = ghost.scaredTimer\n ghostDistanceToPacman = util.manhattanDistance(ghostPosition, pacmanPosition)\n \n #if the ghost is near pacman...\n if ghostDistanceToPacman < 5:\n #check to see if the ghost is scared\n if ghostScareTimer >= ghostDistanceToPacman:\n \n #if the ghost will be scared long enough for pacman to eat it, chase\n #this will reward pacman for being closer to the scared ghost\n \n if ghostDistanceToPacman is 0:\n #right ontop of the ghost, lots of points!\n #KILL IT WITH FIRE\n score += 100000000000000000000000000000000000000000000000\n else:\n score += 80000/(ghostDistanceToPacman+1)\n \n else:\n #the ghost is either scared but too far, or not scared\n #mm... before running, check to see if there's a nearby power pellet...\n if pacmanPosition in capsulePositions:\n #is pacman on a power pellet? how convenient!\n score += 200000\n \n noNearbyPellets = True\n for pellet in capsulePositions:\n pelletDistanceToPacman = util.manhattanDistance(pellet, pacmanPosition)\n if ghostDistanceToPacman >= pelletDistanceToPacman:\n #the ghost is further away from this pellet\n #head to the pellet!\n if pelletDistanceToPacman is 0:\n score += 100400\n else:\n score += 100000/pelletDistanceToPacman\n if noNearbyPellets:\n if ghostDistanceToPacman < 4:\n numGhostsNear += 1\n \n score += ghostPosition[0] * 1000 - ghostPosition[1] * 1500\n \n if numGhostsNear > 1:\n for ghost in newGhostStates:\n ghostPosition = ghost.getPosition()\n ghostDistanceToPacman = util.manhattanDistance(ghostPosition, pacmanPosition)\n if ghostDistanceToPacman is 0:\n score -= 1000000000\n else:\n if ghostDistanceToPacman is 0:\n score -= 1000000000\n else:\n score -= 1000000000/(ghostDistanceToPacman+1)\n \n #look for the nearest food pellet (which is not at pacman's location) and the furthest\n nearestFoodDistance = float(\"inf\")\n \n temp = oldFood.asList()\n import random\n #random.shuffle(temp)\n temp.sort()\n for foodPosition in temp:\n foodDistanceToPacman = util.manhattanDistance(foodPosition, pacmanPosition)\n #print foodPosition, pacmanPosition, foodDistanceToPacman\n if foodDistanceToPacman is 0:\n #pacman is on some food, reward him!\n score += 20000\n continue\n if (foodDistanceToPacman < nearestFoodDistance) and (foodDistanceToPacman > 0):\n nearestFoodDistance = foodDistanceToPacman\n \n score += 8000/(pacmanPosition[1]+1)\n score += 8000/(pacmanPosition[0]+1)\n \n #print \"nearestFoodDistance \", nearestFoodDistance\n \n import time\n #time.sleep(1000)\n temp = (100000 / nearestFoodDistance)\n #print \"AGHGHHGHGHGHGHGHGG \", temp\n score += temp\n \n if currentGameState.getScore() < 500:\n if currentGameState.isLose():\n 
score -= 100000000000000000\n \n score += currentGameState.getScore()*1000000\n \n score -= 10000 * len(oldFood.asList())\n score -= 50000 * len(capsulePositions)\n \n \n #print score\n \n \n \n return score", "def step(self, action):\n if np.abs(action[0][0]-action[1][0])==2:\n self.board[(action[0][0]+action[1][0])//2,(action[0][1]+action[1][1])//2]=0\n self.board[action[1]] = self.board[action[0]] \n if action[1][0]==0 or action[1][0]==7:\n self.board[action[1]] = 2*np.sign(self.board[action[0]])\n self.board[action[0]] = 0\n self.turn = (self.turn + 1)%2\n self.actions=[]\n for i in range(8):\n for j in range(8):\n if np.sign(self.board[i,j])==(-1)**self.turn:\n moves=(self.bdiag(i,j),self.badiag(i,j),self.fdiag(i,j),self.fadiag(i,j))\n for r in range(4):\n if moves[r] is not None:\n self.actions.append(moves[r])\n winner = self.winner(action)\n if winner is not None:\n rewards = np.array([winner,(-1)*winner])\n else:\n rewards = np.array([0,0])\n self.done = winner is not None\n return self.board.copy(), rewards, self.done, self.turn", "def play_against_minimax():\n global FIRST_MOVE\n global done\n done = False\n g = Game()\n turn = np.random.randint(2)\n # if turn == RED:\n # FIRST_MOVE = False\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n # print(g.board)\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n obs = np.zeros((6, 7))\n for row, sublist in enumerate(g.board):\n for col, i in enumerate(sublist):\n observation.append(i)\n obs[col, row] = i\n\n observation = np.asarray(observation)\n action, _ = minimax(np.flipud(obs), 5, -math.inf, math.inf, True)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = (turn + 1) % 2\n return", "def test_endgame4(self):\n self.result = \"\"\"\n 0 0 0 1 x 1 1 x 1 0 0 0 0 0 1 1 1 0 0 1 x 3 x 3 1 2 1\n 1 1 0 1 1 1 1 1 1 0 0 0 0 0 1 x 1 1 1 2 1 3 x 3 x 2 x\n x 2 1 1 0 0 0 0 0 0 1 1 1 0 1 1 1 1 x 1 0 2 2 3 1 3 2\n 1 2 x 1 0 0 0 0 0 0 1 x 1 0 0 0 0 1 1 1 0 1 x 2 1 2 x\n 0 1 1 1 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 2 3 x 2 1\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 x 2 1 0\n \"\"\"", "def run_game():\n\n #global correct\n correct = False\n\n code = create_code()\n show_instructions()\n\n turns = 0\n while not correct and turns < 12:\n #print(code)\n correct_digits_and_position = take_turn(code)\n turns += 1\n #print(correct_digits_and_position[0])\n correct = check_correctness(turns, correct_digits_and_position[0])\n #print(correct)\n\n show_code(code)", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n 
get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n 
opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def custom_score_2(game, player):\n\n '''\n =========\n STRATEGY:\n =========\n Staged player: starts aggressively, but becomes increasingly defensive. 
Uses a shallow beam\n search of own or opponent's moves depending on game stage. Ignores alignment with knight's\n tour path or centrality.\n\n Could play better with deeper beam search of own position (and opponent's for defensive\n purposes), but this would require a longer timeout.\n Maximising the number of moves searched at depth 2 is a reasonable proxy.\n\n Cache is disabled throughout game - doesn't seem to help, lookup misses are probably stealing\n from opportunity to deepen search.\n '''\n\n # Cache. Accumulates across games. Best used with higher search depths, longer timeouts.\n # If specified, USE_TRANSFORMATIONS will check for matches by rotating and flipping the board.\n USE_EVAL_DICT = False\n USE_TRANSFORMATIONS = False\n\n # Captures stats to global dict.\n GET_STATS = False\n\n # Get rough progress to endgame (0 to 100) based on the number of empty cells remaining.\n # Pessimistic (tends to over-estimate by about 20%), scales with board size\n progress = 100.0 * (exp(game.move_count / (game.width * game.height)) - 1.0)\n\n if progress <= 20.0:\n '''\n ==============================================================================================================\n EARLY GAME\n ...\n ==============================================================================================================\n '''\n\n OPP_ISOL_SEARCH_PATHS = 1 # ignored if search depth < 2\n OPP_ISOL_SEARCH_DEPTH = 1\n PLAYER_SAFE_SEARCH_PATHS = 1 # ignored if search depth < 2\n PLAYER_SAFE_SEARCH_DEPTH = 2\n OPP_ISOL_MOVES_WEIGHT = 250.0\n PLAYER_SAFE_MOVES_WEIGHT = 150.0\n KT_WEIGHT = 0.0\n LATE_CENTRALITY_WEIGHT = 0.0\n KT_PROGRESS_MEASURED_TO = 0.0\n\n elif progress <= 80.0:\n '''\n ==============================================================================================================\n MID GAME\n ...\n ==============================================================================================================\n '''\n\n OPP_ISOL_SEARCH_PATHS = 1 # ignored if search depth < 2\n OPP_ISOL_SEARCH_DEPTH = 1\n PLAYER_SAFE_SEARCH_PATHS = 2 # ignored if search depth < 2\n PLAYER_SAFE_SEARCH_DEPTH = 2\n OPP_ISOL_MOVES_WEIGHT = 150.0\n PLAYER_SAFE_MOVES_WEIGHT = 250.0\n KT_WEIGHT = 0.0\n LATE_CENTRALITY_WEIGHT = 0.0\n KT_PROGRESS_MEASURED_TO = 0.0\n\n else:\n '''\n ==============================================================================================================\n END GAME\n ...\n ==============================================================================================================\n '''\n USE_EVAL_DICT = False\n\n OPP_ISOL_SEARCH_PATHS = 1 # ignored if search depth < 2\n OPP_ISOL_SEARCH_DEPTH = 2\n PLAYER_SAFE_SEARCH_PATHS = 2 # ignored if search depth < 2\n PLAYER_SAFE_SEARCH_DEPTH = 2\n OPP_ISOL_MOVES_WEIGHT = 100.0\n PLAYER_SAFE_MOVES_WEIGHT = 300.0\n KT_WEIGHT = 0.0\n LATE_CENTRALITY_WEIGHT = 0.0\n KT_PROGRESS_MEASURED_TO = 0.0\n\n\n\n\n return custom_score_generator(game, player, progress, use_eval_dict=USE_EVAL_DICT, use_transformations=USE_TRANSFORMATIONS, get_stats=GET_STATS,\n # opening_book_weight=OPENING_BOOK_WEIGHT,\n kt_weight=KT_WEIGHT, kt_progress_measured_to=KT_PROGRESS_MEASURED_TO,\n late_centrality_weight=LATE_CENTRALITY_WEIGHT,\n player_safe_moves_weight=PLAYER_SAFE_MOVES_WEIGHT,\n player_safe_search_depth=PLAYER_SAFE_SEARCH_DEPTH,\n player_safe_search_paths=PLAYER_SAFE_SEARCH_PATHS,\n opp_isol_moves_weight=OPP_ISOL_MOVES_WEIGHT,\n opp_isol_search_depth=OPP_ISOL_SEARCH_DEPTH,\n opp_isol_search_paths=OPP_ISOL_SEARCH_PATHS)", "def main():\n grid_size = ''\n 
pokemons_num = ''\n\n #input grid_size\n while True:\n grid_size = input('Please input the size of the grid: ')\n if grid_size.isdigit() == True and 1 <= int(grid_size) <= 26:\n break\n #input pokemons_num\n while pokemons_num.isdigit() == False:\n pokemons_num = input('Please input the number of pokemons: ')\n grid_size = int(grid_size)\n pokemons_num = int(pokemons_num)\n\n #initalize game\n pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n #print(pokemon_locations)\n game = UNEXPOSED*(grid_size**2)\n \n display_game(game,grid_size)\n\n #loop until win or lose\n while True:\n print('')\n user_input = input('Please input action: ')\n #no input\n if len(user_input) == 0:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #help\n if user_input == 'h':\n print(HELP_TEXT)\n display_game(game,grid_size)\n continue\n #quit\n if user_input == 'q':\n input_tmp = input('You sure about that buddy? (y/n): ')\n if input_tmp == 'y':\n print('Catch you on the flip side.')\n break\n elif input_tmp == 'n':\n print(\"Let's keep going.\")\n display_game(game,grid_size)\n continue\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #restart\n if user_input == ':)':\n game = UNEXPOSED*(grid_size**2)\n pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n print(\"It's rewind time.\")\n display_game(game,grid_size)\n continue\n #flag\n if user_input[0] == 'f':\n user_input = user_input[2:]\n position = parse_position(user_input,grid_size)\n if position != None:\n index_tmp = position_to_index(position,grid_size)\n game = flag_cell(game, index_tmp)\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n else:\n position = parse_position(user_input,grid_size)\n if position != None:\n #valid action\n index_tmp = position_to_index(position,grid_size)\n #if position flagged\n if game[index_tmp] == FLAG:\n display_game(game,grid_size)\n continue\n #lose\n if position_to_index(position,grid_size) in pokemon_locations:\n for loc in pokemon_locations:\n game = replace_character_at_index(game,loc,POKEMON)\n display_game(game,grid_size)\n print('You have scared away all the pokemons.')\n break\n #next step\n positions_to_show = big_fun_search(game, grid_size, pokemon_locations, position_to_index(position,grid_size))\n game = replace_character_at_index(game, index_tmp, str(number_at_cell(game, pokemon_locations, grid_size, index_tmp)))\n for posi in positions_to_show:\n #if flagged\n if game[posi] == FLAG:\n continue\n game = replace_character_at_index(game, posi, str(number_at_cell(game, pokemon_locations, grid_size, posi)))\n else:#not valid action\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n #check win\n if check_win(game, pokemon_locations) == True:\n print('You win.')\n break", "def _run_one_game(self):\n sum_reward = 0\n done = False\n state = torch.tensor(self.env.reset(), device=device).view(1, -1)\n losses = list()\n\n while not done:\n\n # Choose action in function of observation and play it\n action = self._select_action(state)\n next_state, reward, done, _ = self.env.step(action.item())\n\n sum_reward += reward\n next_state = torch.tensor(next_state, device=device).view(1, -1)\n reward = torch.tensor([reward], device=device)\n done = torch.tensor([done], device=device)\n \n # Add transition to memory\n self._add_to_memory(state, action, next_state, reward, done)\n\n # Compute loss\n loss = self._optimize_model()\n losses += [loss]\n \n # 
Prepare next state\n state = next_state\n\n # Wait time_to_sleep second so the user can view the state\n sleep(self.time_to_sleep)\n \n\n return sum_reward, mean(losses)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n pacmanPosition = currentGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n capsulePositions = currentGameState.getCapsules()\n \n #print \"current pacman position \", pacmanPosition\n\n score = 0\n \n numGhostsNear = 0\n numGhostsScared = 0\n \n if currentGameState.isLose():\n score -= 1000000000\n if currentGameState.isWin() and numGhostsScared is 0:\n score += 100000000\n \n for ghost in newGhostStates: \n ghostScareTimer = ghost.scaredTimer\n if ghostScareTimer > 2:\n numGhostsScared += 1\n \n for ghost in newGhostStates:\n ghostPosition = ghost.getPosition()\n ghostScareTimer = ghost.scaredTimer\n ghostDistanceToPacman = util.manhattanDistance(ghostPosition, pacmanPosition)\n \n #if the ghost is near pacman...\n if ghostDistanceToPacman < 5:\n #check to see if the ghost is scared\n if ghostScareTimer >= ghostDistanceToPacman:\n \n #if the ghost will be scared long enough for pacman to eat it, chase\n #this will reward pacman for being closer to the scared ghost\n \n if ghostDistanceToPacman is 0:\n #right ontop of the ghost, lots of points!\n #KILL IT WITH FIRE\n score -= 1000000000000\n else:\n score -= 8000000/(ghostDistanceToPacman+1)\n \n else:\n #the ghost is either scared but too far, or not scared\n #mm... before running, check to see if there's a nearby power pellet...\n if pacmanPosition in capsulePositions:\n #is pacman on a power pellet? how convenient!\n score += 200000\n \n noNearbyPellets = True\n for pellet in capsulePositions:\n pelletDistanceToPacman = util.manhattanDistance(pellet, pacmanPosition)\n if ghostDistanceToPacman >= pelletDistanceToPacman:\n #the ghost is further away from this pellet\n #head to the pellet!\n if pelletDistanceToPacman is 0:\n score += 100400\n else:\n score += 100000/pelletDistanceToPacman\n if noNearbyPellets:\n if ghostDistanceToPacman < 4:\n numGhostsNear += 1\n \n score += ghostPosition[0] * 1000 - ghostPosition[1] * 1500\n \n if numGhostsNear > 1:\n for ghost in newGhostStates:\n ghostPosition = ghost.getPosition()\n ghostDistanceToPacman = util.manhattanDistance(ghostPosition, pacmanPosition)\n if ghostDistanceToPacman is 0:\n score -= 1000000000\n else:\n if ghostDistanceToPacman is 0:\n score -= 1000000000\n else:\n score -= 1000000000/(ghostDistanceToPacman+1)\n \n #look for the nearest food pellet (which is not at pacman's location) and the furthest\n nearestFoodDistance = float(\"inf\")\n \n temp = oldFood.asList()\n import random\n #random.shuffle(temp)\n temp.sort()\n for foodPosition in temp:\n foodDistanceToPacman = util.manhattanDistance(foodPosition, pacmanPosition)\n #print foodPosition, pacmanPosition, foodDistanceToPacman\n if foodDistanceToPacman is 0:\n #pacman is on some food, reward him!\n score += 20000\n continue\n if (foodDistanceToPacman < nearestFoodDistance) and (foodDistanceToPacman > 0):\n nearestFoodDistance = foodDistanceToPacman\n \n score += 8000/abs(20-pacmanPosition[1]+1)\n score += 8000/(pacmanPosition[0]+1)\n \n #print \"nearestFoodDistance \", nearestFoodDistance\n \n import time\n #time.sleep(1000)\n temp = (100000 / nearestFoodDistance)\n #print \"AGHGHHGHGHGHGHGHGG \", temp\n score += temp\n \n if currentGameState.getScore() < 500:\n if currentGameState.isLose():\n score -= 
100000000000000000\n \n score += currentGameState.getScore()*1000000\n \n score -= 10000 * len(oldFood.asList())\n score -= 50000 * len(capsulePositions)\n \n \n #print score\n \n \n \n return score", "def check_strategy(strategy, goal=GOAL_SCORE):\n # BEGIN PROBLEM 6\n score = 0\n opponent_score = 0\n while(score<goal):\n opponent_score=0\n while(opponent_score<goal):\n check_strategy_roll(score,opponent_score,strategy(score,opponent_score))\n opponent_score = opponent_score+1\n score+=1\n # END PROBLEM 6", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def the_game(t):\n d = create_dictionary()\n stopplay = False\n player1_pos = 0\n player2_pos = 0\n while stopplay == False:\n tuple_pos1_rollnum = track_player1(player1_pos, d, t, player2_pos)\n player1_pos = tuple_pos1_rollnum[0]\n roll_num1 = tuple_pos1_rollnum[1]\n stopplay = winner (player1_pos,\"player 1\")\n player1_pos = positions1(player1_pos,player2_pos,d,t,roll_num1)\n if stopplay == False: \n tuple_pos2_rollnum = track_player2(player2_pos, d, t, player1_pos)\n player2_pos = tuple_pos2_rollnum[0]\n roll_num2 = tuple_pos2_rollnum[1]\n stopplay = winner (player2_pos,\"player 2\")\n player2_pos = positions2(player2_pos,player1_pos,d,t,roll_num2)\n \n print(\"That's the end of the game.\")", "def UCTPlayGame():\n #start_time = time.time()\n state = GameState()\n m = UCT(rootstate=state, itermax=750, verbose=False) # play with values for itermax and verbose = True\n print str(m[0][0])+\" \"+str(m[0][1])+\" \"+str(m[1][0])+\" \"+str(m[1][1])\n state.DoMove(m)\n #print state #for user vs bot\n #print(\"--- %s seconds ---\" % (time.time() - start_time))\n \"\"\"if state.GetResult(state.whosemove) == 1.0:\n print \"Player \" + str(1 - state.whosemove) + \" wins!\"\n elif state.GetResult(state.whosemove) == 0.0:\n print \"Player \" + str(state.whosemove) + \" wins!\"\n else:\n print \"Nobody wins!\"\"\"", "def main():\n p.init() # Initializing pygame object\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = ChessEngine.GameState()\n\n valid_moves = gs.get_valid_moves()\n\n # Flag to control the number of times get valid moves is called\n # Only if the user makes a valid move, it is called\n move_made = False\n\n load_images()\n game_running = True\n\n sq_selected = tuple() # (row, col), keeps track of user click\n player_clicks = list() # 2 tuples in the list, [(row, col), (row, col)]\n\n while game_running:\n\n for e in p.event.get():\n if e.type == p.QUIT:\n game_running = False\n\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z: # undo when 'z' is pressed\n gs.undo_move()\n move_made = True # On undo we need to generate all valid moves again\n\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # Gets (col, row) location of mouse click\n row = location[1] // SQ_SIZE\n col = location[0] // SQ_SIZE\n\n # If user clicks on the same square again, i.e. 
as source and destination,\n # then we deselect it and reset player clicks\n if sq_selected == (row, col):\n sq_selected = tuple()\n player_clicks = list()\n else:\n if not (len(player_clicks) == 0 and gs.board[row][col] == gs.EMPTY_SQ):\n sq_selected = (row, col)\n player_clicks.append(sq_selected) # Append both first and second clicks\n\n # After second click only\n if len(player_clicks) == 2:\n move = ChessEngine.Move(start_sq=player_clicks[0], end_sq=player_clicks[1], board=gs.board)\n # move.print_move()\n for i in range(len(valid_moves)):\n\n if move == valid_moves[i]:\n gs.make_move(valid_moves[i])\n move_made = True\n\n player_clicks = list() # Resetting to restart the 2 click move logic\n sq_selected = tuple()\n if not move_made:\n player_clicks = [sq_selected]\n\n if move_made:\n valid_moves = gs.get_valid_moves()\n move_made = False\n\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()", "def get_action_score(x, y, player, occupied):\n\n vertical_num = 1\n horizontal_num = 1\n diagonal_num = 1\n antidiagonal_num = 1\n\n dictionary = {}\n\n # Calculate the number of pieces in a roll on a vertical line, and how many sides are blocked.\n vertical_blocked_1 = 0\n vertical_blocked_2 = 0\n for i in range(1, min(5, x)):\n if (x - i, y) in occupied:\n if occupied[(x - i, y)] == player:\n vertical_num += 1\n if x - i ==1:\n vertical_blocked_1 =1\n else:\n vertical_blocked_1 = 1\n break\n else:\n break\n if x == 1:\n vertical_blocked_1 = 1 \n\n for i in range(1, min(5, size - x + 1)):\n if (x + i, y) in occupied:\n \n if occupied[(x + i, y)] == player:\n vertical_num += 1\n if x + i==15:\n vertical_blocked_2 = 1 \n else:\n vertical_blocked_2 = 1\n break\n else:\n break\n if x == 15:\n vertical_blocked_2 = 1 \n\n if (vertical_num, vertical_blocked_1 + vertical_blocked_2) in dictionary:\n dictionary[(vertical_num, vertical_blocked_1 + vertical_blocked_2)] += 1\n else:\n dictionary[(vertical_num, vertical_blocked_1 + vertical_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll on a horizontal line, and how many sides are blocked.\n horizontal_blocked_1 = 0\n horizontal_blocked_2 = 0\n for i in range(1, min(5, y)):\n if (x, y - i) in occupied:\n if occupied[(x, y - i)] == player:\n horizontal_num += 1\n if y - i == 1:\n horizontal_blocked_1 = 1\n else:\n horizontal_blocked_1 = 1\n break\n else:\n break\n if y == 1:\n horizontal_blocked_1 = 1 \n for i in range(1, min(5, size - y + 1)):\n if (x, y + i) in occupied:\n if occupied[(x, y + i)] == player:\n horizontal_num += 1\n if y + i == 15:\n horizontal_blocked_2 = 1\n else:\n horizontal_blocked_2 = 1\n break\n else:\n break\n if y == 15:\n horizontal_blocked_1 = 1\n\n if (horizontal_num, horizontal_blocked_1 + horizontal_blocked_2) in dictionary:\n dictionary[(horizontal_num, horizontal_blocked_1 + horizontal_blocked_2)] += 1\n else:\n dictionary[(horizontal_num, horizontal_blocked_1 + horizontal_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll through the diagonal, and how many sides are blocked.\n diagonal_blocked_1 = 0\n diagonal_blocked_2 = 0\n for i in range(1, min(5, x, y)):\n if (x - i, y - i) in occupied:\n if occupied[(x - i, y - i)] == player:\n diagonal_num += 1\n if x - i==1 or y-i==1:\n diagonal_blocked_1 = 1\n else:\n diagonal_blocked_1 = 1\n break\n else:\n break\n if x == 1 or y == 1:\n diagonal_blocked_1 = 1\n\n \n for i in range(1, min(5, size - x + 1, size - y + 1)):\n if (x + i, y + i) in occupied:\n if occupied[(x + i, y + i)] == player:\n diagonal_num += 1\n if x + i == 15 or y 
+ i==15:\n diagonal_blocked_2 = 1\n else:\n diagonal_blocked_2 = 1\n break\n else:\n break\n if x == 15 or y < 15:\n diagonal_blocked_2 = 1 \n\n if (diagonal_num, diagonal_blocked_1 + diagonal_blocked_2) in dictionary:\n dictionary[(diagonal_num, diagonal_blocked_1 + diagonal_blocked_2)] += 1\n else:\n dictionary[(diagonal_num, diagonal_blocked_1 + diagonal_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll through the antidiagonal, and how many sides are blocked.\n antidiagonal_blocked_1 = 0\n antidiagonal_blocked_2 = 0\n for i in range(1, min(5, size - x + 1, y)):\n if (x + i, y - i) in occupied:\n if occupied[(x + i, y - i)] == player:\n antidiagonal_num += 1\n if x + i==1 or y - i==1:\n antidiagonal_blocked_1 = 1\n else:\n antidiagonal_blocked_1 = 1\n break\n else:\n break\n if x == 1 or y == 1:\n antidiagonal_blocked_1 = 1 \n\n if x < 5 or size - y < 4:\n antidiagonal_blocked_2 = 1\n for i in range(1, min(5, x, size - y + 1)):\n if (x - i, y + i) in occupied:\n if occupied[(x - i, y + i)] == player:\n antidiagonal_num += 1\n if x - i==15 or y + i==15:\n antidiagonal_blocked_2 = 1\n else:\n antidiagonal_blocked_2 = 1\n break\n else:\n break\n if x == 15 or y == 15:\n antidiagonal_blocked_2 = 1 \n\n if (antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2) in dictionary:\n dictionary[(antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2)] += 1\n else:\n dictionary[(antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2)] = 1\n\n # Return the score\n if ((5, 0) in dictionary) or ((5, 1) in dictionary) or ((5, 2) in dictionary):\n return 100\n elif ((4, 0) in dictionary) or ((4, 1) in dictionary and dictionary[(4, 1)] > 1) or (\n (4, 1) in dictionary and (3, 0) in dictionary):\n return 90\n elif (4, 1) in dictionary:\n return 80\n elif ((3, 0) in dictionary) and (dictionary[(3, 0)] > 1):\n return 70\n elif ((3, 0) in dictionary) and ((3, 1) in dictionary):\n return 60\n elif (3, 0) in dictionary:\n return 50\n elif ((2, 0) in dictionary) and (dictionary[(2, 0)] > 1):\n return 40\n elif (3, 1) in dictionary:\n return 30\n elif (2, 0) in dictionary:\n return 20\n elif (2, 1) in dictionary:\n return 10\n else:\n return 0", "def run_ai():\n print(\"Vivian\") # First line is the name of this AI \n color = int(input()) # Then we read the color: 1 for dark (goes first), \n # 2 for light. \n\n while True: # This is the main loop \n # Read in the current game status, for example:\n # \"SCORE 2 2\" or \"FINAL 33 31\" if the game is over.\n # The first number is the score for player 1 (dark), the second for player 2 (light)\n next_input = input() \n status, dark_score_s, light_score_s = next_input.strip().split()\n dark_score = int(dark_score_s)\n light_score = int(light_score_s)\n\n if status == \"FINAL\": # Game is over. \n print \n else: \n board = eval(input()) # Read in the input and turn it into a Python\n # object. The format is a list of rows. 
The \n # squares in each row are represented by \n # 0 : empty square\n # 1 : dark disk (player 1)\n # 2 : light disk (player 2)\n \n # Select the move and send it to the manager \n# movei, movej = select_move_minimax(board, color)\n movei, movej = select_move_alphabeta(board, color)\n print(\"{} {}\".format(movei, movej))", "def main():\n number_of_players = get_number_of_players()\n number_of_decks = get_number_of_decks()\n game_data = setup_game(number_of_players)\n\n player_list = game_data[0]\n play_shoe = game_data[2]\n play_dealer = game_data[1]\n play_again = True\n\n while play_again:\n replay = play_game(play_shoe, player_list, play_dealer, number_of_decks)\n if replay:\n play_shoe = replay[1]\n else:\n play_again = False\n \n print(\"Thanks for playing\")", "def iterate():\n # States are of the form (coordinates, word so far, used spots)\n # Load the initial states into the stack\n global theStack\n for r,layer in enumerate(honeycomb):\n for e,el in enumerate(layer):\n theStack.append( ((e,r), [el],set([(e,r)])) )\n \n while (len(theStack) != 0):\n #pop the next run\n (e,r),soFar,used=theStack[-1]\n theStack=theStack[:-1]\n #run it!\n step((e,r),soFar,used)", "def play(the_game):\n\n print('-' * 50)\n print('')\n player = the_game.player_list[the_game.turn]\n print(' Turn {0} as {1}'.format(player.name, player.piece.name))\n print(' Rolling...\\n')\n die1, die2, roll = the_game.roll()\n\n print(' {0} + {1} = {2}!'.format(die1, die2, roll))\n\n if the_game.dice.doubles:\n print('** D O U B L E S ! **\\n')\n if player.in_jail:\n print('*** GET OUT OF JAIL ***')\n player.leave_jail()\n player.doubles = 0\n\n if player.doubles == 2:\n player.doubles = 0\n player.go_to_jail()\n print('*** DOUBLES THIRD TIME. GO TO JAIL! ***\\n')\n the_game.next_turn()\n else:\n player.doubles += 1\n if player.doubles == 1:\n print('Doubles First time')\n elif player.doubles == 2:\n print('Doubles Second time')\n else:\n player.doubles = 0\n\n if player.in_jail:\n player.position = 10\n\n if player.passed_go and not (player.doubles == 2 and the_game.dice.doubles):\n print('\\n $$$ {0} Passed GO! 
$$$\\n'.format(player.name))\n player.passed_go = False\n player.receive(200)\n\n print(' {0} Landed on {1}.'.format(\n player.name, the_game.board.location(player.position).name))", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n\n score = 10000\n if successorGameState.isWin():\n return 100000000\n for ghost in newGhostStates:\n ghostPos = ghost.getPosition()\n if util.manhattanDistance(ghostPos, newPos) < 2:\n score -= 10000\n else:\n score += util.manhattanDistance(ghostPos, newPos) * 1\n \n nearFood = 1000\n farFood = 1000\n for foodPos in oldFood.asList():\n dist = util.manhattanDistance(foodPos, newPos)\n if (dist < nearFood):\n nearFood = dist\n if (dist > farFood):\n farFood = dist\n if (currentGameState.getNumFood() < successorGameState.getNumFood()):\n score += 5\n\n if action == Directions.WEST:\n score -= 1\n if action == Directions.STOP:\n score -= 2\n \n for scareTime in newScaredTimes:\n score += scareTime * 1\n\n score -= 2 * farFood\n score -= 5 * nearFood\n capsuleplaces = currentGameState.getCapsules()\n if successorGameState.getPacmanPosition() in capsuleplaces:\n score += 5\n return max(score, 0)\n \n #their original return\n #return successorGameState.getScore()" ]
[ "0.66346675", "0.6546741", "0.6394211", "0.63758504", "0.636938", "0.6360782", "0.6356706", "0.6337687", "0.633712", "0.63219684", "0.6298109", "0.6297704", "0.629413", "0.62893116", "0.6288498", "0.6279202", "0.6278614", "0.62572694", "0.6235934", "0.6230041", "0.6225665", "0.62153363", "0.6210761", "0.6192105", "0.61906594", "0.6188423", "0.61770684", "0.61686856", "0.6139998", "0.6139754", "0.613291", "0.6126824", "0.61256593", "0.61243224", "0.6121138", "0.61128557", "0.61100465", "0.61077017", "0.60995466", "0.6084475", "0.6081376", "0.6081376", "0.6081376", "0.6081376", "0.6081376", "0.6081376", "0.6081038", "0.6051002", "0.6041752", "0.6031754", "0.6031506", "0.602482", "0.6023188", "0.60225636", "0.60196203", "0.60105973", "0.5996149", "0.59947354", "0.59886795", "0.5987498", "0.59866405", "0.5983636", "0.5975963", "0.59727216", "0.59646904", "0.59642315", "0.59641457", "0.5960546", "0.59593415", "0.5957324", "0.5956857", "0.59505606", "0.5946702", "0.59379345", "0.5936879", "0.59349334", "0.5930475", "0.593019", "0.5927281", "0.5926742", "0.59203386", "0.5918908", "0.59172136", "0.5915371", "0.59149563", "0.59094006", "0.59088415", "0.590747", "0.59060854", "0.5900197", "0.58973294", "0.5896795", "0.5889134", "0.5887589", "0.5884713", "0.5882916", "0.5881461", "0.58803767", "0.58732677", "0.5870396", "0.5864757" ]
0.0
-1
open the router's port to enable external connection
def check_and_open_port_by_upnp(external_port, internal_port, protocol): request_url = cast_rooter_request() if request_url is None: log.debug("node is not in local network protected by a router") return soap_url = get_soap_url(request_url) internal_client = get_localhost_ip() # check existence for mapping in soap_get_mapping(soap_url): if mapping.enabled == 1 and \ mapping.external_port == external_port and \ mapping.internal_port == internal_port and \ mapping.protocol == protocol and \ mapping.external_client == internal_client: return # open port soap_add_mapping(soap_url, external_port, internal_port, internal_client, protocol) log.info(f"open port by upnp {internal_port} -> {external_port}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: TCP port opening started...\" % \\\n self.__class__.__name__)\n self._tcp_socket.bind(tuple(['',self._port]))\n self._tcp_socket.listen(self._max)\n logger.debug(\"%s: ...TCP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Instantiate router\n self._platform = router.HorizonRouteable()\n self._platform._version = self._version\n self._platform.message_routed = self.message_received\n def tmp():\n return self.__str__()\n self._platform.__str__ = tmp\n self._router = router.HorizonRouter(platform = self._platform, \n clients = [], \n send_all = self._send_all)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...TCP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"TCP Port open failed!\\n\" + str(ex))", "def open(self):\n self.__port.open()", "def open_tunnel(self, serial_no, port=19020):\n return self.open(ip_addr='tunnel:' + str(serial_no) + ':' + str(port))", "def open_port(self):\n if self.ser.is_open:\n print(\"ERASynth: port is already open\")\n else:\n self.ser.open()\n while not self.ser.is_open:\n time.sleep(50e-3)", "def open(self):\n self._server = socketserver.ThreadingTCPServer(\n server_address=('localhost', self._requested_local_port),\n RequestHandlerClass=self._create_handler(self._ssh_client, self._remote_host, self._remote_port),\n )\n\n threading.Thread(target=self.serve_forever).start()\n\n print('Forwarding local port {} to remote {}:{}'.format(self.local_port, self.remote_host, self.remote_port))", "def open_rtcp_port(self):\n self.rtcp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: TCP port opening started...\" % \\\n self.__class__.__name__)\n errno = 115\n while errno == 115:\n try:\n self._tcp_socket.connect(self._addr)\n errno = 0\n except socket.error as fx:\n if fx.args[0] != 115:\n raise fx\n self._socket = HorizonTransport_Socket(sock = self._tcp_socket,\n host = self._addr[0],\n port = self._addr[1],\n name = \"%s:%d\" % self._addr,\n encryption =self._encryption,\n key = self._key,\n store_timeout = self.store_timeout,\n version = self.version)\n self._socket.opened = True\n logger.debug(\"%s: ...TCP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...TCP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"TCP Port open failed!\\n\" + str(ex))", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def open(self):\n try:\n if self.verbose:\n print \"Trying to open connection to Leica at \",self.IP_address,\":\",str(self.port)\n self.leicasocket = socket.socket()\n self.leicasocket.connect((self.IP_address,self.port))\n if self.verbose:\n print(\"Connected.\")\n self.connected=True\n return True\n except:\n if self.verbose:\n print \"Error opening connection to \", self.IP_address\n self.connected=False\n return False", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def turn_on_internet(verbose=False):\n if verbose:\n print(\"Internet access enabled\")\n socket.create_connection = socket_create_connection\n socket.socket.bind = 
socket_bind\n socket.socket.connect = socket_connect\n return socket", "def open_tcp_port():\n \n # Open an incoming tcp port to access the cluster endpoint\n try:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)", "def open_rtp_port(self):\n self.rtp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.rtp_socket.settimeout(0.005)\n try:\n self.rtp_socket.bind((\"127.0.0.1\", self.rtp_port))\n except:\n QMessageBox.critical(self, 'Unable to Bind', 'Unable to bind PORT=%d' % self.rtp_port,\n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)", "def opensock(ipaddr,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((ipaddr,port))\n \n return s", "def open(self):\n device_type = \"mikrotik_routeros\"\n if self.transport == \"telnet\":\n device_type = \"mikrotik_routeros_telnet\"\n self.device = self._netmiko_open(\n device_type, netmiko_optional_args=self._netmiko_optional_args\n )", "def open(self):\n self.device = ConnectHandler(\n device_type='vyos',\n host=self.hostname,\n username=self.username,\n password=self.password,\n timeout=self.timeout,\n port=self.port\n )", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n # Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def connect(self, host, port):\n pass", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def open_socket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n self.server.bind((self.host,self.port))\n self.server.listen(5)\n self.server.setblocking(0)\n except socket.error, (value,message):\n if self.server:\n self.server.close()\n print \"Could not open socket: \" + message\n sys.exit(1)", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: UDP port opening started...\" % \\\n self.__class__.__name__)\n self._udp_socket.bind(tuple(['',self._port]))\n logger.debug(\"%s: ...UDP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Instantiate router\n self._platform = router.HorizonRouteable()\n self._platform._version = self._version\n self._platform.message_routed = self.message_received\n def tmp():\n return self.__str__()\n self._platform.__str__ = tmp\n self._router = router.HorizonRouter(platform = self._platform, \n clients = [], \n send_all = self._send_all)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...UDP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"UDP Port open failed!\\n\" + str(ex))", "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, 
socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "def connect(self,ip,port):\n return self.network.connect(ip,port)", "def openCircuit(srv):", "def setup_logical_port_connectivity(self, context, port_db):\n pass", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def _lowLevelOpen(self):\n import socket\n self.socket_reference = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def connect_port(self, iface):\n self.iface_config(iface, adminMode='Up')", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def open(self):\n if self._connected:\n try:\n self.native.find_prompt()\n except: # noqa E722 pylint: disable=bare-except\n self._connected = False\n\n if not self._connected:\n self.native = ConnectHandler(\n device_type=\"cisco_asa\",\n ip=self.host,\n username=self.username,\n password=self.password,\n port=self.port,\n global_delay_factor=self.global_delay_factor,\n secret=self.secret,\n verbose=False,\n )\n self._connected = True\n\n log.debug(\"Host %s: Connection to controller was opened successfully.\", self.host)", "def is_port_open(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n return sock.connect_ex(('127.0.0.1', port)) == 0", "def port():", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def connect(self):\n print(\"Connecting\")\n self.socket.connect((self.ip, self.port))\n self.startReading()", "def connect(self):\n\n import serial\n\n if self.addr == None:\n self.addr = self.get_EFu_addr()\n\n self.ser = serial.Serial(self.addr, 115200, timeout=1)\n if self.ser.isOpen():\n print('Opened port: {}'.format(self.addr))\n else:\n raise RuntimeError('Failed to open the serial port: {}'.format(self.addr))", "def connect(self):\n try:\n # Port and packet handler set up\n self.port_handler = port_h.PortHandler(self.port_name)\n self.packet_handler = 
packet_h.PacketHandler(self.protocol_version)\n\n # Set up port and baud rate\n self.port_handler.openPort()\n self.port_handler.setBaudRate(self.baud_rate)\n self.__find_motors()\n except rospy.ROSInterruptException: pass\n\n self.running = True", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def is_port_open(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((host, port))\n sock.close()\n return True\n except:\n return False", "def connect(self):\n self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)\n self.sock.connect((self.remote_host,\n self.remote_port))", "def connect(self, device_ip, device_port=DEFAULT_PORT):\n return", "def openProxy(self):\n self.state = ZenProcessTask.STATE_CONNECTING\n if (self.snmpProxy is None or\n self.snmpProxy.snmpConnInfo != self.snmpConnInfo):\n self.snmpProxy = self.snmpConnInfo.createSession()\n self.snmpProxy.open()", "def connect( self, str_address, port_no ):\r\n\r\n self._socket.connect( str_address, port_no )\r\n\r\n # return None \r", "def connect():", "def __init__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.PORT = 2222\n # connect on construction,\n # use for duration of a game,\n # close connection on destruction later\n self.sock.connect((\"192.168.43.180\", self.PORT))", "def telnet(self):\n self.log.info(\"connect-via-telnet\")\n telnet = distutils.spawn.find_executable(\"telnet\")\n os.execv(telnet, (\"telnet\", \"localhost\", str(self.qemu.monitor_port)))", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def open_port():\n global steering_port\n\n try:\n steering_port = serial.Serial(find_usb(STEERING_PORT_NAME), writeTimeout=0)\n steering_port.baudrate = BAUDRATE_STEERING\n except serial.serialutil.SerialException as exc:\n print(\"Failed to open steering port: %s\" % exc)", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def openSocket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.host, self.port))\n self.server.listen(self.backlog)\n except socket.error as e:\n raise ErrorSocketOpen(self.feederName, e.strerror)\n if self.verbosity >= 1:\n print('Starting config server for %s at %s, port %s.' 
% (self.feederName, self.host, self.port))", "def __init__(self,\n host_name='127.0.0.1',\n port=ControlServer.CONTROL_PORT):\n\n self._socket = QtNetwork.QTcpSocket()\n self._socket.connected.connect(self._connected)\n self._socket.disconnected.connect(self._disconnected)\n self.connected = False\n self._socket.connectToHost(host_name, port)", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: UDP port opening started...\" % \\\n self.__class__.__name__)\n self._udp_socket.bind(('',self._port))\n self._socket = HorizonTransport_Socket(sock = self._udp_socket,\n host = self._addr[0],\n port = self._addr[1],\n name = \"%s:%d\" % self._addr,\n store_timeout = self.store_timeout,\n version = self.version)\n self._socket.opened = True\n logger.debug(\"%s: ...UDP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...UDP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"UDP Port open failed!\\n\" + str(ex))", "def port_is_open(port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', port))\n is_open = result == 0\n sock.close()\n return is_open", "def __init__(self, address=\"lex\", port=8000, **kwargs):\n self.connect(address, port)", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for listening \")", "def connect(self, address, port):\n address = socket.getfqdn(address)\n self.channel = \"http://\" + str(address) + \":\" + str(port)", "def connect(self,ip,port):\n import time\n import socket\n\n try:\n self.socket_reference.connect((ip, port))\n except socket.error:\n self.close()\n reload(socket)\n raise CommClientException(\"Cannot connect to \" + ip + \":\" + str(port))", "def connect(self):\n # open serial port\n try:\n #device = self.get_device_name(self.serial_number)\n device = \"/dev/ttyAMA0\"\n self.serial.port = device\n # Set RTS line to low logic level\n self.serial.rts = False\n self.serial.open()\n except Exception as ex:\n self.handle_serial_error(ex)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def _connect_to_hardware(self):\n if False: # !!!TEMP:need to validate config...\n if len(self.config['ports']) > 1:\n self.log.fatal(\"only one slave com port is supported\")\n if len(self.config['ports']) == 0:\n self.log.warning(\"no communication port setted!\")\n return\n port = self.config['ports'][0]\n self.communicator = RaspSerialCommunicator(\n platform=self, port=port,\n baud=self.config['baud'])\n self.communicator = RaspSerialCommunicator(\n platform=self, port='/dev/ttyAMA0',\n baud=115200)", "def open(self):\n # Move all of the connection arguments into connect_args\n connect_args = {}\n\n # check for mode\n if self.get_option('port') is None:\n if 
self.get_option('mode') == 'telnet':\n connect_args['port'] = 23\n elif self.get_option('mode') == 'serial':\n connect_args['port'] = '/dev/ttyUSB0'\n else:\n connect_args['port'] = 830\n else:\n connect_args['port'] = self.get_option('port')\n\n if (self.get_option('mode') == 'telnet' or\n self.get_option('mode') == 'serial'):\n if self.get_option('baud') is None:\n # Default baud if serial or telnet mode\n connect_args['baud'] = 9600\n if self.get_option('attempts') is None:\n # Default attempts if serial or telnet mode\n connect_args['attempts'] = 10\n\n connect_args['host'] = self.get_option('host')\n # connect_args['port'] = self.get_option('port')\n connect_args['user'] = self.get_option('remote_user')\n connect_args['passwd'] = self.get_option('password')\n connect_args['ssh_private_key_file'] = self.get_option('private_key_file')\n connect_args['ssh_config'] = self.get_option('pyez_ssh_config')\n connect_args['timeout'] = self.get_option('persistent_connect_timeout')\n try:\n log_connect_args = dict(connect_args)\n log_connect_args[\"passwd\"] = \"NOT_LOGGING_PARAMETER\"\n\n self.queue_message(\"vvvv\", \"Creating device parameters: %s\" % log_connect_args)\n timeout = connect_args.pop(\"timeout\")\n self.dev = jnpr.junos.device.Device(**connect_args)\n self.queue_message(\"vvvv\", \"Opening device.\")\n self.dev.open()\n self.queue_message(\"vvvv\", \"Device opened.\")\n\n self.dev.timeout = self.get_option('persistent_command_timeout')\n self.queue_message(\"vvvv\", \"Setting default device timeout to %d.\" % timeout)\n # Exceptions raised by close() or open() are all sub-classes of\n # ConnectError, so this should catch all connection-related exceptions\n # raised from PyEZ.\n except pyez_exception.ConnectError as ex:\n raise AnsibleError(\"Unable to make a PyEZ connection: %s\" % (str(ex)))", "def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def start(self):\n # create socket\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 10 minutes for timeout\n self._socket.settimeout(600)\n except socket.error as msg:\n logging.error(\"Can't create socket. 
Error code: {}, msg: {}\".format(*msg))\n raise\n\n # Open TCP connection\n try:\n self._socket.connect(self.address)\n except socket.error:\n logging.error(\"Can't connect to the server on {}:{}\".format(*self.address))\n raise", "def connect(self):\n\n self.tello.connect()\n self.tello.wait_for_connection(60.0)", "def connectionMade(self):\n \t#print \"[K] Connect effettuata\", self.port\n \tself.factory.state=\"0 open\"\n \tif (self.port in self.portfForHTTPGet):\n\t\tself.transport.write(\"GET / HTTP/1.1\\r\\n\\r\\n\")\n \t\t#self.transport.write(\"GET /index.html HTTP/1.1\\r\\n\\r\\n\")", "def main():\r\n if len(sys.argv) != 2:\r\n sys.exit(\"Usage: python router-python.py [Router Port]\")\r\n router_port = int(sys.argv[1])\r\n router(router_port)", "def socket_port(ip, port):\n socket.setdefaulttimeout(3) \n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = s.connect_ex((ip, port))\n if result == 0:\n print(ip, u':', port, u'port is occupied')\n return False\n return True\n except Exception as error:\n print('error:', error)\n return False", "def web():\n env['remote_port'] = env['port_map']['8000']\n\n sys.stdout.write('Launching browser on remote port %(remote_port)s\\n' % env)\n\n run('open http://%(relay_server)s:%(remote_port)s' % env)", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self, addr):\n self._outbound = True\n rules = firewall.DefaultRule()\n self._state = SocketState(self._got_remote)\n self._endpoint = SocketEndpoint(rules, None)\n self._i2cp = client.Connection(self._endpoint)\n self._i2cp.open()\n while not self._state.is_connected():\n time.sleep(0.1)", "def start_socket(ip, port):\n try:\n # initiate socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # connect to server\n print(\"socket connected at ip {} and port {}\".format(ip, port))\n sock.connect((ip, port))\n return sock\n except Exception as e:\n print(\"Error start_socket\", e)\n #exit()", "def connect(self, port=None, options=None):\n pass", "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. 
Try again!',e\n sys.exit(1)", "def new_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n for i in range(12042, 16042):\n try:\n s.bind(('127.0.0.1', i))\n s.close()\n return i\n except socket.error, e:\n pass\n raise Exception('No local port available')", "def __init__(self, host=\"localhost\", port=60151, verbose=False):\n super(IGVSocketRobot, self).__init__(verbose=verbose)\n\n self.host = host\n self.port = port", "def tcp_socket_open(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n try:\n return sock.connect_ex((host, port)) == 0\n except socket.timeout:\n return False", "def openSocket():\n sock = socket.socket()\n sock.settimeout(2)\n sock.connect((IRC_HOST, IRC_PORT))\n sock.send(\"PASS {}\\r\\n\".format(priv.PASS).encode(\"utf-8\"))\n sock.send(\"NICK {}\\r\\n\".format(priv.NICK).encode(\"utf-8\"))\n\n sock.send(\"CAP REQ :twitch.tv/membership\\r\\n\".encode(\"utf-8\"))\n sock.send(\"CAP REQ :twitch.tv/tags\\r\\n\".encode(\"utf-8\"))\n sock.send(\"CAP REQ :twitch.tv/commands\\r\\n\".encode(\"utf-8\"))\n\n return sock", "def __enter__(self):\n if self.__open_tunnel is False:\n self.open(serial_no=self.__serial_no, ip_addr=self.__ip_addr)\n elif self.__open_tunnel is True:\n self.open_tunnel(serial_no=self.__serial_no)\n return self", "def mux_virtual_openconn(desthost, destport, virtualport, localip=None,localport=None,timeout=15):\r\n # Get the key to the existing multiplexer\r\n key = \"IP:\"+desthost+\":\"+str(destport)\r\n \r\n if key in MULTIPLEXER_OBJECTS:\r\n # Since a multiplexer already exists, lets just use that objects builtin method\r\n mux = MULTIPLEXER_OBJECTS[key]\r\n\r\n try:\r\n return mux.openconn(desthost, virtualport, localip,localport,timeout)\r\n except AttributeError, err:\r\n if str(err) == \"Multiplexer is not yet initialized or is closed!\":\r\n # There has been a fatal error in this multiplexer, delete it\r\n del MULTIPLEXER_OBJECTS[key]\r\n \r\n raise EnvironmentError, \"Connection Refused!\"\r\n \r\n else:\r\n raise ValueError, \"There is no pre-existing connection to the requested host!\"", "def OpenSocket(ip_address, port, timeout):\r\n # Create the socket.\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client_socket.settimeout(timeout)\r\n\r\n # Connect to the Smart Car ESP.\r\n try:\r\n client_socket.connect((ip_address, port))\r\n except socket.timeout:\r\n print('Connection timed out connecting to {0}:{1}'.format(ip_address, port))\r\n quit()\r\n except:\r\n print('Error connecting to {0}:{1}: {2}'.format(ip_address, port, sys.exc_info()[0]))\r\n quit()\r\n\r\n return client_socket", "def _create_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def connect(self):\n if not self.debug:\n self.socket.connect((self.host, self.port))\n logging.info(\n \"Connect to real controller at host = %s:%d\" % (\n self.host, self.port))\n else:\n logging.info(\"Controller created in debug mode, pretent to CONNECT host = %s:%d\" %(self.host, self.port))", "def connect(self,addr=None,port=None):\n\n self.type = 'connect'\n\n if addr != None:\n self.remote_location = (addr,int(port))\n try:\n s = socket(AF_INET,SOCK_STREAM)\n s.settimeout(1.0)\n s.connect(self.remote_location)\n self.status = 'connected'\n s.settimeout(0.0)\n self.sock = s\n except error as e:\n self.errno = e.errno\n self.status = 'closed'", "def open(self):\n logging.debug('Connecting to device %s' % self.paramiko_cfg.get('hostname'))\n 
self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(**self.paramiko_cfg)", "def connect(self):\n try:\n if not self.serial.isOpen():\n self.serial = serial.Serial(\n self.port, \n self.baudrate, \n timeout=self.timeout, \n rtscts=self.hardware_flagging, \n xonxoff=self.software_flagging\n )\n print(\"connected to %s\") % (self.port)\n except serial.SerialException as e:\n msg = \"unable to connect to %s\" % (self.port)\n raise Exception(msg, e)", "def port_in_use(port_num):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port_num))\n except OSError:\n return True\n else:\n return False", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def cmd_port(args):", "def start_server(self):\n server_port = 8800\n incoming_addr = \"\"\n address = (incoming_addr, server_port)\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.SO_REUSEADDR)\n server_socket.bind(address)\n server_socket.listen(5)\n\n print(\"\\nServer Listening\\n\")\n return server_socket", "def get_open_port(host=\"localhost\"):\n temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n temp_sock.bind((host, 0))\n port = temp_sock.getsockname()[1]\n temp_sock.close()\n del temp_sock\n return port", "def __init__(self, ip, port, automatic = True, control_buf_size = 32, data_buf_size = 128, \\\n m_to = 0.01, socket_to = 0.005):\n self.conn = Connector(ip, port, control_buf_size, data_buf_size, socket_to)\n self.conn.connect()\n self.m_to = m_to\n self.status = Modem.Status.IDLE\n self.node_status = 0\n self.automatic = automatic\n self.interpreter = Interpreter()\n self.mainPID = os.getpid()\n self.error_status = Modem.ErrorDict.NONE\n self.commands_queue = \"\".split(Interpreter.END_COMMAND)\n if automatic:\n thread.start_new_thread(self.run,())" ]
[ "0.70109576", "0.7003254", "0.6999157", "0.694705", "0.6878419", "0.67956185", "0.6750625", "0.67262423", "0.66802317", "0.6659476", "0.65642065", "0.65545535", "0.6504106", "0.6492532", "0.64708275", "0.6456751", "0.6438551", "0.6426962", "0.6417681", "0.63918823", "0.63559794", "0.63186234", "0.63125044", "0.63008493", "0.6262931", "0.61957324", "0.614679", "0.61424875", "0.61273474", "0.61147237", "0.61006063", "0.60968244", "0.60916907", "0.60820043", "0.60795885", "0.60685426", "0.6063675", "0.6062978", "0.60622585", "0.605744", "0.604934", "0.60480416", "0.6047992", "0.6045713", "0.6019426", "0.6015799", "0.6007235", "0.6001902", "0.59997255", "0.5997215", "0.59938854", "0.5987555", "0.5967172", "0.5966734", "0.5954981", "0.59519255", "0.5943251", "0.59412134", "0.594021", "0.5936795", "0.5931228", "0.59219605", "0.59219605", "0.59105426", "0.58981633", "0.5897305", "0.58971095", "0.5893146", "0.58841944", "0.5874696", "0.5866849", "0.58654803", "0.58645564", "0.5855055", "0.5847287", "0.5837623", "0.58349276", "0.5832551", "0.5830878", "0.58307815", "0.5826062", "0.5819059", "0.5805178", "0.57914317", "0.57882464", "0.57839245", "0.5783822", "0.5774912", "0.5772353", "0.5759592", "0.5756655", "0.5738998", "0.5735864", "0.5731549", "0.5731516", "0.5712634", "0.5711096", "0.5705525", "0.57033914", "0.5699507" ]
0.6510023
12
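For context on the row above: the document's check_and_open_port_by_upnp() relies on helpers defined elsewhere in its source module (cast_rooter_request, get_soap_url, soap_get_mapping, soap_add_mapping, plus a module-level log). A minimal, self-contained sketch of the SSDP discovery step such a helper is assumed to perform before any SOAP call — the function name, timeout and search target here are illustrative assumptions, not the dataset's actual implementation:

import socket

def discover_gateway(timeout=2.0):
    """Broadcast an SSDP M-SEARCH and return the LOCATION header of the
    first responding internet gateway device, or None if nothing answers."""
    msg = "\r\n".join([
        "M-SEARCH * HTTP/1.1",
        "HOST: 239.255.255.250:1900",
        'MAN: "ssdp:discover"',
        "MX: 2",
        "ST: urn:schemas-upnp-org:device:InternetGatewayDevice:1",
        "", "",
    ]).encode("ascii")
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.settimeout(timeout)
        # 239.255.255.250:1900 is the standard SSDP multicast address/port.
        sock.sendto(msg, ("239.255.255.250", 1900))
        try:
            data, _addr = sock.recvfrom(4096)
        except socket.timeout:
            return None
    for line in data.decode(errors="replace").splitlines():
        if line.lower().startswith("location:"):
            return line.split(":", 1)[1].strip()
    return None

The LOCATION URL returned here is what a helper like get_soap_url() would presumably fetch to locate the WANPPPConnection control URL used by the SOAP calls in the document above.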
get external ip address
def get_external_ip(soap_url) -> str: s_o_a_p = '<?xml version="1.0"?>\r\n' s_o_a_p += '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle=' \ '"http://schemas.xmlsoap.org/soap/encoding/">\r\n' s_o_a_p += '<s:Body>\r\n' s_o_a_p += '<u:GetExternalIPAddress xmlns:u="urn:schemas-upnp-org:service:WANPPPConnection:1">\r\n' s_o_a_p += '</u:GetExternalIPAddress>\r\n' s_o_a_p += '</s:Body>\r\n' s_o_a_p += '</s:Envelope>\r\n' try: req = Request(soap_url) req.add_header('Content-Type', 'text/xml; charset="utf-8"') req.add_header('SOAPACTION', '"urn:schemas-upnp-org:service:WANPPPConnection:1#GetExternalIPAddress"') req.data = s_o_a_p.encode('utf8') result = xmltodict.parse(urlopen(req).read().decode()) return result['s:Envelope']['s:Body']['u:GetExternalIPAddressResponse']['NewExternalIPAddress'] except Exception: log.debug("get_external_ip exception", exc_info=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def external_IP(self):\r\n return self._external_ip", "def get_ip(self):", "def get_local_host_ip(self) -> str:", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_host_ip_addr():\n return nova_conf.my_ip", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def get_external_ip():\n try:\n r = requests.get(\n METADATA_NETWORK_INTERFACE_URL,\n headers={'Metadata-Flavor': 'Google'},\n timeout=2)\n return r.text\n except requests.RequestException:\n logging.info('Metadata server could not be reached, assuming local.')\n return 'localhost'", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def get_ip_address(self):\n return self.adb.get_ip_address()", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def get_ip_address(self):\n raise NotImplementedError", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def address(self):\n \n return self.__ip", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")", "def publicIP(self):\n return self.query('https://plex.tv/:/ip')", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def detect_ip_address():\n # Rather hackish way to get the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n 
csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self):\n return self.address", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip", "def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_ip_address(self):\n return self.__ip_address", "def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']", "def get_internal_ip(self, external_ip):\n if external_ip[-1:] == '2':\n return external_ip[:-1] + '1'\n else:\n # not a proper ip of a neighbor\n return ''", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_self_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()\n s.close()\n return ip[0]", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', '^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def ip_addr(self):\n return self.ip_addresses[0]", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def test_get_node_internal_ip_address(self):\n pass", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def internal_IP(self):\r\n return self._internal_ip", "def get_ip() -> str:\n for ip in socket.gethostbyname_ex(socket.gethostname())[2]:\n if not ip.startswith(\"127.\"):\n return ip\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]:\n s.connect((\"8.8.8.8\", 53))\n ip, port = s.getsockname()\n s.close()\n if not ip.startswith(\"127.\"):\n return ip\n raise ConnectionError(\"Can not get a suitable IP\")", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def getIp(self):\n raise NotImplementedError", "def host_ip(host):\n return host.cmd('ip addr show {}-eth1 | awk \\'/inet / {{ print $2 }}\\' | cut -d\\'/\\' -f1'.format(host.name, host.name), stdout=sp.PIPE).strip()", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def internalIP(self):\r\n return self._internalIP", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def public_ip_address(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"public_ip_address\")", "def ip(self):\n if not self._ip:\n if 'ip' in self.config:\n ip = self.config['ip']\n else:\n ip = self.protocol.transport.get_extra_info('sockname')[0]\n ip = ip_address(ip)\n if ip.version == 4:\n self._ip = ip\n else: # pragma: no cover\n response = urlopen('http://ipv4.icanhazip.com/')\n ip = response.read().strip().decode()\n ip = ip_address(ip)\n self._ip = ip\n return self._ip", "def get_ip_address():\n\n # Windows\n if _IS_WINDOWS:\n local_ip = socket.gethostbyname(socket.gethostname())\n else:\n # Linux and MacOS\n local_ip = None\n try:\n # First way, tested in Ubuntu and MacOS\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n local_ip = s.getsockname()[0]\n s.close()\n except:\n # Second way, tested in CentOS\n try:\n local_ip = socket.gethostbyname(socket.gethostname())\n except:\n pass\n\n if local_ip == None or local_ip == '127.0.0.1' or local_ip == '127.0.1.1':\n logger.warning(\n 'get_ip_address failed, please set ip address manually.')\n return None\n\n return local_ip", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def _get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('10.255.255.255', 1))\n ip = sock.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n sock.close()\n\n return ip", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "def get_ext_ip_addr(self, node_name):\n node = self._cloud.get_server(node_name)\n if node is None:\n raise CloudError('Cannot retrieve node/IP information. Is `node_name` set correctly?')\n return node.accessIPv4", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def getIP():\n try:\n page = urlopen(\"http://www.whatismyip.com/automation/n09230945.asp\")\n IP = page.read()\n page.close()\n return IP\n except:\n return \"Could not retrieve the IP address.\"", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def checkIP(self):\n\t\tself.get(\"https://ifconfig.me/\")\n\t\treturn self.findId(\"ip_address\").text", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def ip_address(self):\n return self._ip_address", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def ip(self) -> Optional[str]:\n return pulumi.get(self, \"ip\")", "def ip(self):\n return os.environ.get('REMOTE_ADDR')", "def _GetIpAddress(self):\n ingress_name = '%s-ingress' % self.name\n get_cmd = [\n 'get', 'ing', 
ingress_name, '-o',\n 'jsonpath={.status.loadBalancer.ingress[*].ip}'\n ]\n stdout, _, _ = RunKubectlCommand(get_cmd)\n ip_address = stdout\n if ip_address:\n self.ip_address = ip_address", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('8.8.8.8', 1))\n ip = sock.getsockname()[0]\n except:\n ip = '127.0.0.1'\n finally:\n sock.close()\n return ip", "def get_local_ip():\n try:\n ip_task = os.popen(\"ifconfig | grep -Eo 'inet (addr:)?(Adresse:)?([0-9]*\\.){3}[0-9]*' | grep -Eo '([0-9]*\\.){3}[0-9]*' | grep -v '127.0.0.1'\")\n local_ip = ip_task.read().strip()\n ip_task.close()\n if '\\n' in local_ip:\n local_ip = local_ip.split('\\n')[0]\n print ' >> got local ip:', local_ip\n return local_ip\n except:\n return '0.0.0.0'", "def get_internal_host(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return ip\n return fqdn", "def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP", "def get_ipaddress():\n try:\n if request.headers.get('Cf-Connecting-Ip') == None \\\n and request.headers.get('X-Forwarded-For') == None:\n raise TypeError\n elif request.headers.get('Cf-Connecting-Ip') != None:\n return request.headers.get('Cf-Connecting-Ip')\n else:\n return request.headers.get('X-Forwarded-For')\n except TypeError:\n return request.get('REMOTE_ADDR')", "def _get_ipaddress(node):\n if \"ipaddress\" not in node:\n with settings(hide('stdout'), warn_only=True):\n output = sudo('ohai ipaddress')\n if output.succeeded:\n node['ipaddress'] = json.loads(output)[0]\n return True\n return False", "def urlToIp(self, url):\n return str(socket.gethostbyname(url))", "def ip_info():\n return str(getIP())", "def get_ip(self, node_id):\n return self.get_ip_network()[node_id]", "def get_tun_ip(ip_addr, username):\n cmd = \"ifconfig tun0 | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}'\" \n tun_ip = remote_fetch(ip_addr, username, cmd)[0].strip()\n return tun_ip", "def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)", "def getIp(name):\n tmp = []\n ips = socket.getaddrinfo(socket.gethostbyname(name), None)\n for x in ips:\n tmp.append(x[4][0])\n\n return tmp", "def public_ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"public_ip_address\")", "def ip(self):\n return self._ip", "def ip(self):\n return self._ip", "def public_address() -> str:\n check_timeout = float(CONFIG['network']['check_timeout'])\n check_host_list = CONFIG.get_list('network', 'check_host_list')\n try:\n for check_url in check_host_list:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n return None\n except Exception as error:\n return None", "def 
getPublicIP():\n try:\n # Try to get the internet-facing IP by attempting a connection\n # to a non-existent server and reading what IP was used.\n with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:\n # 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so\n # there is guaranteed to be no one listening on the other\n # end (and we won't accidentally DOS anyone).\n sock.connect(('203.0.113.1', 1))\n ip = sock.getsockname()[0]\n return ip\n except:\n # Something went terribly wrong. Just give loopback rather\n # than killing everything, because this is often called just\n # to provide a default argument\n return '127.0.0.1'", "def getPublicIpAddress() :\n f = urllib.urlopen(\"http://www.canyouseeme.org/\")\n html_doc = f.read()\n f.close()\n ipAddress = re.search('(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',html_doc)\n\n #response = urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0) + '&position=true').read()\n return urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0)).read()", "def get_host_ip(self, obj, host):\n\n server = self.nova(obj).server_get(host)\n return server.access_ipv4", "async def get_ip(self) -> Union[IPv4Address, IPv6Address]:\n xff = await self.get_x_forwarded_for()\n if xff: return xff[0]\n ip_addr = self._request.transport.get_extra_info('peername')[0]\n return ip_address(ip_addr)", "def get_redirect_ipaddress(self):\n\t\treturn call_sdk_function('PrlPortFwd_GetRedirectIPAddress', self.handle)", "def get_ip_address(ifname):\n # I did not write this function I give credit to this site\n # for it:\n # hpython-mysqldbttp://code.activestate.com/recipes/439094/\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])", "def get_server_ip(srv):\n pass", "def getip(self):\n if configIpAddress == \"none\":\n strngtoXmit = 'M-SEARCH * HTTP/1.1' + '\\r\\n' + \\\n 'HOST: 239.255.255.250:1900' + '\\r\\n' + \\\n 'MAN: \"ssdp:discover\"' + '\\r\\n' + \\\n 'MX: 2' + '\\r\\n' + \\\n 'ST: urn:schemas-upnp-org:device:MediaRenderer:1' + '\\r\\n' + '\\r\\n'\n\n bytestoXmit = strngtoXmit.encode()\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(3)\n gotstr = 'notyet'\n found = False\n ipaddress = None\n sock.sendto(bytestoXmit, ('239.255.255.250', 1900))\n try:\n gotbytes, addressport = sock.recvfrom(512)\n gotstr = gotbytes.decode()\n except:\n sock.sendto(bytestoXmit, ('239.255.255.250', 1900))\n if re.search('LG', gotstr):\n ipaddress, _ = addressport\n found = True\n self._state = STATE_PLAYING\n else:\n gotstr = 'notyet'\n sock.close()\n if not found:\n print(\"LG TV not found\")\n ipaddress = None\n self._state = STATE_OFF\n lgtv[\"ipaddress\"] = ipaddress\n else:\n lgtv[\"ipaddress\"] = configIpAddress\n if self.isOnline():\n self._state = STATE_PLAYING\n else:\n self._state = STATE_OFF" ]
[ "0.83233213", "0.8125571", "0.7973399", "0.7969459", "0.7911862", "0.79069215", "0.78181463", "0.78089017", "0.7789853", "0.7779208", "0.7745145", "0.7729852", "0.77084523", "0.769121", "0.76832366", "0.76812357", "0.7676216", "0.7659212", "0.76183903", "0.7610075", "0.760447", "0.76023066", "0.76016754", "0.7596061", "0.7591735", "0.75833136", "0.7574649", "0.74947", "0.7463196", "0.7463196", "0.7453787", "0.7445903", "0.7442026", "0.7433374", "0.74049926", "0.73851913", "0.7382546", "0.73761964", "0.7371889", "0.735528", "0.73524606", "0.7349649", "0.7342884", "0.73245096", "0.73211664", "0.73156565", "0.7308117", "0.72998196", "0.7286604", "0.7285198", "0.7273089", "0.7255237", "0.72407377", "0.72338355", "0.72198606", "0.7211459", "0.7200192", "0.71821636", "0.7180396", "0.71517456", "0.71467453", "0.7137281", "0.7136102", "0.71325004", "0.71325004", "0.71325004", "0.71325004", "0.7116318", "0.7102897", "0.7102657", "0.70984924", "0.70808035", "0.7078264", "0.70563376", "0.7053892", "0.7053108", "0.7052464", "0.704576", "0.7043162", "0.703647", "0.7029935", "0.7024803", "0.702166", "0.7018681", "0.70183265", "0.70181966", "0.70170766", "0.6997927", "0.69964534", "0.6995715", "0.6995715", "0.6988665", "0.69857484", "0.6964808", "0.69555193", "0.6948965", "0.6948394", "0.6940076", "0.69265664", "0.6916643" ]
0.7011273
87
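The get_external_ip() document above builds the GetExternalIPAddress SOAP envelope by hand and parses the reply with xmltodict. Below is a stdlib-only sketch of the same call, parsing with xml.etree.ElementTree instead; the control URL and the WANPPPConnection service type are assumptions carried over from that document, not independent facts:

import urllib.request
import xml.etree.ElementTree as ET

SERVICE = "urn:schemas-upnp-org:service:WANPPPConnection:1"

def get_external_ip_stdlib(soap_url, timeout=5):
    body = (
        '<?xml version="1.0"?>\r\n'
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">\r\n'
        '<s:Body>\r\n'
        f'<u:GetExternalIPAddress xmlns:u="{SERVICE}">\r\n'
        '</u:GetExternalIPAddress>\r\n'
        '</s:Body>\r\n'
        '</s:Envelope>\r\n'
    ).encode("utf8")
    req = urllib.request.Request(soap_url, data=body)
    req.add_header("Content-Type", 'text/xml; charset="utf-8"')
    req.add_header("SOAPACTION", f'"{SERVICE}#GetExternalIPAddress"')
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        tree = ET.fromstring(resp.read())
    # The argument element is usually un-namespaced, but match on the local
    # tag name to be safe.
    for el in tree.iter():
        if el.tag.split("}")[-1] == "NewExternalIPAddress":
            return el.text
    return None

The behaviour should match the xmltodict version in the document; only the parsing dependency differs.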
get local ip address
def get_localhost_ip(): try: return [ (s.connect((NAME_SERVER, 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] ][0][1] except Exception: return '127.0.0.1'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_local_host_ip(self) -> str:", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def get_local_ip():\n try:\n ip_task = os.popen(\"ifconfig | grep -Eo 'inet (addr:)?(Adresse:)?([0-9]*\\.){3}[0-9]*' | grep -Eo '([0-9]*\\.){3}[0-9]*' | grep -v '127.0.0.1'\")\n local_ip = ip_task.read().strip()\n ip_task.close()\n if '\\n' in local_ip:\n local_ip = local_ip.split('\\n')[0]\n print ' >> got local ip:', local_ip\n return local_ip\n except:\n return '0.0.0.0'", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def _get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('10.255.255.255', 1))\n ip = sock.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n sock.close()\n\n return ip", "def localip(self) :\n\t\ttry :\n\t\t\treturn self._localip\n\t\texcept Exception as e:\n\t\t\traise e", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('8.8.8.8', 1))\n ip = sock.getsockname()[0]\n except:\n ip = '127.0.0.1'\n finally:\n sock.close()\n return ip", "def get_local_ip():\n\n return os.environ[LOCAL_IP_KEY]", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def get_ip_address():\n\n # Windows\n if _IS_WINDOWS:\n local_ip = socket.gethostbyname(socket.gethostname())\n else:\n # Linux and MacOS\n local_ip = None\n try:\n # First way, tested in Ubuntu and MacOS\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n local_ip = s.getsockname()[0]\n s.close()\n except:\n # Second way, tested in CentOS\n try:\n local_ip = socket.gethostbyname(socket.gethostname())\n except:\n pass\n\n if local_ip == None or local_ip == '127.0.0.1' or local_ip == '127.0.1.1':\n logger.warning(\n 'get_ip_address failed, please set ip address manually.')\n return None\n\n return local_ip", "def get_ip(self):", "def detect_ip_address():\n # Rather hackish way to get the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n 
s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']", "def local_ip(self) -> Optional[str]:\n if not self._send_parse_reply(b\"AT+IPADDR\", b\"+IPADDR:\"):\n return None\n return self._buf", "def print_local_ip():\n spacer = '-' * 50\n local_ip = gethostbyname(gethostname())\n print('\\n{}\\nLocal IP address is: {}\\n{}'.format(spacer, local_ip, spacer))", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_self_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()\n s.close()\n return ip[0]", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_ip_address(self):\n raise NotImplementedError", "def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)", "def local_ip():\n sys_name = system()\n if sys_name == 'Darwin':\n # OSX\n route = Command('route')\n ifconfig = Command('ifconfig')\n\n iface = [\n line.strip()\n for line in route('-n', 'get', 'default')\n if line.strip().startswith('interface')\n ][0].split(':')[1].strip()\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n elif sys_name == 'Linux':\n try:\n ip = Command('ip')\n iface = [\n line.strip()\n for line in ip('route')\n if line.strip().startswith('default ')\n ][0].split(' ')[4]\n except CommandNotFound:\n route = Command('route')\n iface = [\n line.strip()\n for line in route('-n')\n if line.startswith('0.0.0.0')\n ][0].split(' ').pop()\n\n try:\n # try with IP\n ip = Command('ip')\n return [\n line.strip()\n for line in ip('addr', 'show', iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1].split('/')[0]\n except CommandNotFound:\n pass\n\n # fallback to ifconfig\n ifconfig = Command('ifconfig')\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n\n return None", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def get_ip_address(self):\n return self.adb.get_ip_address()", "def address(self):\n \n return self.__ip", "def ip_address(self) -> str:\n return pulumi.get(self, 
\"ip_address\")", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_ip_address(self):\n return self.__ip_address", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def ip(self):\n if not self._ip:\n if 'ip' in self.config:\n ip = self.config['ip']\n else:\n ip = self.protocol.transport.get_extra_info('sockname')[0]\n ip = ip_address(ip)\n if ip.version == 4:\n self._ip = ip\n else: # pragma: no cover\n response = urlopen('http://ipv4.icanhazip.com/')\n ip = response.read().strip().decode()\n ip = ip_address(ip)\n self._ip = ip\n return self._ip", "def get_ip(request):\n ip1 = request.META.get('REMOTE_ADDR', '')\n ip2 = request.META.get('HTTP_X_FORWARDED_FOR', '').split(\",\")[0].strip()\n ip = ip1 or ip2 or '0.0.0.0'\n return ip", "def ip(self):\n return os.environ.get('REMOTE_ADDR')", "def get_local_ip(self):\n # Get the local IP address used to communicate with the GNS3\n # server. Not the GNS3 server's address, but rather the local\n # machine's address that we use to send messages to the GNS3\n # server. If that address isn't 127.0.0.1 (localhost), use it.\n server_local_ip = self.server.get_local_ip()\n if server_local_ip != '127.0.0.1':\n return server_local_ip\n else:\n # Otherwise, find the first interface on the first cloud node (if it exists)\n try:\n first_cloud_node = next(node for node in self.nodes() if node['node_type'] == 'cloud')\n interface = first_cloud_node['properties']['ports_mapping'][0]['interface']\n\n # If the interface is virtual, find and record its\n # mate's first IP address, which is the address we can\n # send to.\n\n ip_proc = subprocess.Popen(['ip', 'link', 'show', interface], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n first_field = ip_proc.stdout.read().decode().split()[1].split('@')\n if first_field[0] == interface:\n paired_interface = first_field[1].split(':')[0]\n return ni.ifaddresses(paired_interface)[ni.AF_INET][0]['addr']\n except (StopIteration, ValueError):\n # StopIteration if there are no cloud nodes\n # ValueError if there are no IP addresses on the paired interface\n pass\n\n return None", "def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP", "def getMyIP():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\r\n return s.getsockname()[0]", "def get_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip", "def ip_addr(self):\n return self.ip_addresses[0]", "def address_local(self):\n if self.local_ip is None or self.port is None:\n return None\n return URL_API.format(ip=self.local_ip, port=self.port)", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to 
get PC ip; %s' % str(e)) \n return pc_ip", "def get_remote_ip(request):\n return request.META.get(\"HTTP_REMOTE_ADDR\", request.META.get(\"REMOTE_ADDR\", \"\"))", "def ip_address(self):\n return self.address", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def get_ip() -> str:\n for ip in socket.gethostbyname_ex(socket.gethostname())[2]:\n if not ip.startswith(\"127.\"):\n return ip\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]:\n s.connect((\"8.8.8.8\", 53))\n ip, port = s.getsockname()\n s.close()\n if not ip.startswith(\"127.\"):\n return ip\n raise ConnectionError(\"Can not get a suitable IP\")", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def getIp(self):\n raise NotImplementedError", "def getPublicIP():\n try:\n # Try to get the internet-facing IP by attempting a connection\n # to a non-existent server and reading what IP was used.\n with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:\n # 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so\n # there is guaranteed to be no one listening on the other\n # end (and we won't accidentally DOS anyone).\n sock.connect(('203.0.113.1', 1))\n ip = sock.getsockname()[0]\n return ip\n except:\n # Something went terribly wrong. 
Just give loopback rather\n # than killing everything, because this is often called just\n # to provide a default argument\n return '127.0.0.1'", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def get_global_ip():\n network_info_providers = [\n 'http://api.ipify.org/',\n 'http://myip.dnsomatic.com',\n 'http://inet-ip.info/ip',\n 'http://v4.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ip')\n return \"\"", "def ip_info():\n return str(getIP())", "def get_server_ip(srv):\n pass", "async def get_ip(self) -> Union[IPv4Address, IPv6Address]:\n xff = await self.get_x_forwarded_for()\n if xff: return xff[0]\n ip_addr = self._request.transport.get_extra_info('peername')[0]\n return ip_address(ip_addr)", "def localhost_IP(self):\r\n return self._localhost_ip", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def get_client_ip(request):\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n i_p = x_forwarded_for.split(\",\")[0]\n else:\n i_p = request.META.get(\"REMOTE_ADDR\")\n return i_p", "def ip(self):\n return self._ip", "def ip(self):\n return self._ip", "def publicIP(self):\n return self.query('https://plex.tv/:/ip')", "def get_my_ip_address(remote_server=\"google.com\"):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: \n s.connect((remote_server, 80))\n return s.getsockname()[0]", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', '^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def get_local_ip_and_mac_address():\n if platform.system() == \"Windows\":\n command = os.popen(\"ipconfig /all\").read()\n local_ip = re.search(r\"(IPv4 Address)(\\D+)(\\d{1,3})(\\.\\d{1,3}){3}\", command)\n local_ip = re.search(r\"(\\d{1,3})(\\.\\d{1,3}){3}\", local_ip.group())\n mac_address = re.search(r\"(Physical Address)(\\D+)(\\w{2})([-]\\w{2}){5}\", command)\n mac_address = re.search(r\"\\w{2}([-]\\w{2}){5}\", mac_address.group())\n return local_ip.group(), mac_address.group()\n elif platform.system() == \"Linux\":\n command = os.popen(\"ifconfig\").read()\n local_ip = re.search(r\"(inet)(\\D+)(\\d{1,3})(\\.\\d{1,3}){3}\", command)\n local_ip = re.search(r\"(\\d{1,3})(\\.\\d{1,3}){3}\", local_ip.group())\n mac_address = 
re.search(r\"(ether)(\\D+)(\\w{2})([:]\\w{2}){5}\", command)\n mac_address = re.search(r\"\\w{2}([:]\\w{2}){5}\", mac_address.group())\n return local_ip.group(), mac_address.group()\n else:\n return", "def get_ip(request):\n\n # if neither header contain a value, just use local loopback\n ip_address = request.META.get('HTTP_X_FORWARDED_FOR',\n request.META.get('REMOTE_ADDR', '127.0.0.1'))\n if ip_address:\n # make sure we have one and only one IP\n try:\n ip_address = IP_RE.match(ip_address)\n if ip_address:\n ip_address = ip_address.group(0)\n else:\n # no IP, probably from some dirty proxy or other device\n # throw in some bogus IP\n ip_address = '10.0.0.1'\n except IndexError:\n pass\n return ip_address", "def get_IP_address(request):\n # Catchs the case when the user is on a proxy\n ip = request.META.get('HTTP_X_FORWARDED_FOR', '')\n if ip == '' or ip.lower() in ('unkown', ):\n ip = request.META.get('REMOTE_ADDR', '') # User is not on a proxy\n if ip == '' or ip.lower() in ('unkown', ):\n ip = request.META.get('HTTP_X_REAL_IP')\n return ip", "def ip(self) -> Optional[str]:\n return pulumi.get(self, \"ip\")", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def local(self):\n return self.server.server_address", "def ip_address(self):\n return self._ip_address", "def get_client_ip_address(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip_address = x_forwarded_for.split(',')[-1].strip()\n else:\n ip_address = request.META.get('REMOTE_ADDR')\n return ip_address", "def get_ip_address(ifname):\n # I did not write this function I give credit to this site\n # for it:\n # hpython-mysqldbttp://code.activestate.com/recipes/439094/\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def get_ip(request):\n ip = request.META.get(\"HTTP_X_FORWARDED_FOR\", None)\n if ip:\n # X_FORWARDED_FOR returns client1, proxy1, proxy2,...\n ip = ip.split(\", \")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\", \"\")\n return ip", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def external_IP(self):\r\n return self._external_ip", "def get_ipaddress():\n try:\n if request.headers.get('Cf-Connecting-Ip') == None \\\n and request.headers.get('X-Forwarded-For') == None:\n raise TypeError\n elif request.headers.get('Cf-Connecting-Ip') != None:\n return request.headers.get('Cf-Connecting-Ip')\n else:\n return request.headers.get('X-Forwarded-For')\n except TypeError:\n return request.get('REMOTE_ADDR')", "def public_ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"public_ip_address\")", "def get_main_ipv4():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect( ('8.8.8.8', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv4 address: %s\" % e)\n return None", "def get_ip(self, node_id):\n 
return self.get_ip_network()[node_id]", "def get_external_ip():\n try:\n r = requests.get(\n METADATA_NETWORK_INTERFACE_URL,\n headers={'Metadata-Flavor': 'Google'},\n timeout=2)\n return r.text\n except requests.RequestException:\n logging.info('Metadata server could not be reached, assuming local.')\n return 'localhost'", "def get_public_ip(self):\n return self.public_ip" ]
[ "0.8890761", "0.86365914", "0.85778904", "0.846305", "0.84425664", "0.8404775", "0.8378487", "0.8368668", "0.8304376", "0.8284258", "0.80766225", "0.806916", "0.8051856", "0.79896796", "0.79758286", "0.79521865", "0.79032815", "0.78887117", "0.78794074", "0.7868217", "0.78568035", "0.7856203", "0.7838334", "0.7837733", "0.78184116", "0.7808097", "0.7806146", "0.77391356", "0.77197", "0.7676875", "0.76750916", "0.7659391", "0.764628", "0.76357186", "0.7627159", "0.75993156", "0.75972015", "0.7580217", "0.7535543", "0.75297505", "0.7517403", "0.7495834", "0.7492798", "0.7445231", "0.74318695", "0.74215865", "0.74142605", "0.738535", "0.73841256", "0.7369193", "0.73666525", "0.73444325", "0.7331149", "0.7328993", "0.73278534", "0.7323006", "0.7318234", "0.7304538", "0.73030126", "0.72988814", "0.7292182", "0.72917736", "0.72917736", "0.72892445", "0.728784", "0.7277261", "0.72771466", "0.7270985", "0.7258692", "0.7253486", "0.7243098", "0.7228599", "0.7226557", "0.721931", "0.7205084", "0.71924615", "0.71924615", "0.718697", "0.71745664", "0.7169328", "0.71631235", "0.7154421", "0.7153529", "0.7152938", "0.7142903", "0.71418905", "0.7135403", "0.7132132", "0.71192425", "0.7116136", "0.71129256", "0.7098042", "0.70958763", "0.7091023", "0.7083281", "0.7082951", "0.70656765", "0.7064488", "0.7064361", "0.7058232" ]
0.74135506
47
get global ip address
def get_global_ip():
    network_info_providers = [
        'http://api.ipify.org/',
        'http://myip.dnsomatic.com',
        'http://inet-ip.info/ip',
        'http://v4.ident.me/',
    ]
    random.shuffle(network_info_providers)
    for url in network_info_providers:
        try:
            return requests.get(url).text.lstrip().rstrip()
        except Exception:
            continue
    else:
        log.info('cannot find global ip')
        return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_ip(self):", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_local_host_ip(self) -> str:", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_ip_address(self):\n raise NotImplementedError", "def get_local_ip():\n\n return os.environ[LOCAL_IP_KEY]", "def address(self):\n \n return self.__ip", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def get_ip_address(self):\n return self.adb.get_ip_address()", "def ip(self):\n return os.environ.get('REMOTE_ADDR')", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_ip_address(self):\n return self.__ip_address", "def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip", "def getIp(self):\n raise NotImplementedError", "def ip_addr(self):\n return self.ip_addresses[0]", "def get_self_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()\n s.close()\n return ip[0]", "def get_global_ip_ipv6():\n network_info_providers = [\n 'http://v6.ipv6-test.com/api/myip.php',\n 'http://v6.ident.me/',\n ]\n 
random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ipv6 ip')\n return \"\"", "def _get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('10.255.255.255', 1))\n ip = sock.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n sock.close()\n\n return ip", "def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP", "def detect_ip_address():\n # Rather hackish way to get the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address", "def ip(self):\n if not self._ip:\n if 'ip' in self.config:\n ip = self.config['ip']\n else:\n ip = self.protocol.transport.get_extra_info('sockname')[0]\n ip = ip_address(ip)\n if ip.version == 4:\n self._ip = ip\n else: # pragma: no cover\n response = urlopen('http://ipv4.icanhazip.com/')\n ip = response.read().strip().decode()\n ip = ip_address(ip)\n self._ip = ip\n return self._ip", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('8.8.8.8', 1))\n ip = sock.getsockname()[0]\n except:\n ip = '127.0.0.1'\n finally:\n sock.close()\n return ip", "def get_ip_address():\n\n # Windows\n if _IS_WINDOWS:\n local_ip = socket.gethostbyname(socket.gethostname())\n else:\n # Linux and MacOS\n local_ip = None\n try:\n # First way, tested in Ubuntu and MacOS\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n local_ip = s.getsockname()[0]\n s.close()\n except:\n # Second way, tested in CentOS\n try:\n local_ip = socket.gethostbyname(socket.gethostname())\n except:\n pass\n\n if local_ip == None or local_ip == '127.0.0.1' or local_ip == '127.0.1.1':\n logger.warning(\n 'get_ip_address failed, please set ip address manually.')\n return None\n\n return local_ip", "def ip_address(self):\n return self.address", "def localip(self) :\n\t\ttry :\n\t\t\treturn self._localip\n\t\texcept Exception as e:\n\t\t\traise e", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def getMyIP():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\r\n return s.getsockname()[0]", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', '^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def 
GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)", "def get_ip(request):\n ip1 = request.META.get('REMOTE_ADDR', '')\n ip2 = request.META.get('HTTP_X_FORWARDED_FOR', '').split(\",\")[0].strip()\n ip = ip1 or ip2 or '0.0.0.0'\n return ip", "def get_local_ip():\n try:\n ip_task = os.popen(\"ifconfig | grep -Eo 'inet (addr:)?(Adresse:)?([0-9]*\\.){3}[0-9]*' | grep -Eo '([0-9]*\\.){3}[0-9]*' | grep -v '127.0.0.1'\")\n local_ip = ip_task.read().strip()\n ip_task.close()\n if '\\n' in local_ip:\n local_ip = local_ip.split('\\n')[0]\n print ' >> got local ip:', local_ip\n return local_ip\n except:\n return '0.0.0.0'", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def ip(self) -> Optional[str]:\n return pulumi.get(self, \"ip\")", "def siteip(self) :\n\t\ttry :\n\t\t\treturn self._siteip\n\t\texcept Exception as e:\n\t\t\traise e", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def internal_IP(self):\r\n return self._internal_ip", "def internalIP(self):\r\n return self._internalIP", "def get_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def get_server_ip(srv):\n pass", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def ip(self):\n return self._ip", "def ip(self):\n return self._ip", "def random_public_ip():\n anip = random_ip()\n while not is_global(anip):\n anip = random_ip()\n return anip", "def IP(self):\r\n return self._ip", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def ip_address(self):\n return self._ip_address", "def get_remote_ip(request):\n return request.META.get(\"HTTP_REMOTE_ADDR\", request.META.get(\"REMOTE_ADDR\", \"\"))", "def get_main_ipv4():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect( ('8.8.8.8', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv4 address: %s\" % e)\n return None", "def get_default_ip_address():\r\n gws = netifaces.gateways() # get all gateways\r\n default = gws['default'] # get the default gw\r\n adapter = default[2][1] # get the adapter identifier\r\n realadapter = netifaces.ifaddresses(adapter) # get the adapter\r\n addr_dict = realadapter[2][0] # get the first ipv4 address tuple\r\n return addr_dict['addr']", "def ip_info():\n return str(getIP())", "def get_client_ip(request):\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n i_p = x_forwarded_for.split(\",\")[0]\n else:\n i_p = request.META.get(\"REMOTE_ADDR\")\n return i_p", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n 
s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def get_IP_address(request):\n # Catchs the case when the user is on a proxy\n ip = request.META.get('HTTP_X_FORWARDED_FOR', '')\n if ip == '' or ip.lower() in ('unkown', ):\n ip = request.META.get('REMOTE_ADDR', '') # User is not on a proxy\n if ip == '' or ip.lower() in ('unkown', ):\n ip = request.META.get('HTTP_X_REAL_IP')\n return ip", "def sitepublicip(self) :\n\t\ttry :\n\t\t\treturn self._sitepublicip\n\t\texcept Exception as e:\n\t\t\traise e", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def get_ipaddress():\n try:\n if request.headers.get('Cf-Connecting-Ip') == None \\\n and request.headers.get('X-Forwarded-For') == None:\n raise TypeError\n elif request.headers.get('Cf-Connecting-Ip') != None:\n return request.headers.get('Cf-Connecting-Ip')\n else:\n return request.headers.get('X-Forwarded-For')\n except TypeError:\n return request.get('REMOTE_ADDR')", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def external_IP(self):\r\n return self._external_ip", "def get_ip(self, node_id):\n return self.get_ip_network()[node_id]", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def GlobalIpv6Address(self):\n if self.force_auto_sync:\n self.get('GlobalIpv6Address')\n return self._GlobalIpv6Address", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def server_ip(self) -> str:\n return pulumi.get(self, \"server_ip\")", "def get_client_ip_address(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip_address = x_forwarded_for.split(',')[-1].strip()\n else:\n ip_address = request.META.get('REMOTE_ADDR')\n return ip_address", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. 
HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def get_ip(self, request):\n\n return request.META.get(self.META_AE_IP) \\\n if self.META_AE_IP in request.META else request.META.get(self.FORWARDED_FOR)", "def get_client_address(self,environ):\n try:\n return environ['HTTP_X_FORWARDED_FOR'].split(',')[-1].strip()\n except KeyError:\n return environ['REMOTE_ADDR']", "def get_ip() -> str:\n for ip in socket.gethostbyname_ex(socket.gethostname())[2]:\n if not ip.startswith(\"127.\"):\n return ip\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]:\n s.connect((\"8.8.8.8\", 53))\n ip, port = s.getsockname()\n s.close()\n if not ip.startswith(\"127.\"):\n return ip\n raise ConnectionError(\"Can not get a suitable IP\")", "def checkIP(self):\n\t\tself.get(\"https://ifconfig.me/\")\n\t\treturn self.findId(\"ip_address\").text", "def _get_user_ip(request):\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n return ip", "def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']", "def ip(self) -> str:\n return self._ip", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_ip(request):\n ip = request.META.get(\"HTTP_X_FORWARDED_FOR\", None)\n if ip:\n # X_FORWARDED_FOR returns client1, proxy1, proxy2,...\n ip = ip.split(\", \")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\", \"\")\n return ip", "def get_client_ip(self, request):\n xforward_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if xforward_for:\n return xforward_for.split(',')[0]\n return request.META.get('REMOTE_ADDR')", "def get_gateway_ip(timeout=10):\n\n return get_default_route(timeout)[0]" ]
[ "0.8177861", "0.78713524", "0.7836615", "0.7808784", "0.7742288", "0.7727455", "0.7672631", "0.76301765", "0.7592437", "0.7556169", "0.7501851", "0.749981", "0.74838996", "0.7455439", "0.7448763", "0.7436739", "0.74339193", "0.74294287", "0.7428558", "0.73813033", "0.73690134", "0.73610294", "0.7331656", "0.73195267", "0.730993", "0.73076195", "0.73062843", "0.7304257", "0.7274842", "0.7255138", "0.7254264", "0.7253322", "0.7243973", "0.7223625", "0.7195904", "0.71913445", "0.71776664", "0.71743506", "0.7159605", "0.7157877", "0.71331143", "0.71286523", "0.7123905", "0.7121084", "0.7096922", "0.7081212", "0.707926", "0.706005", "0.70575595", "0.7056483", "0.705111", "0.70442134", "0.7043874", "0.7043874", "0.7039973", "0.70375097", "0.70354074", "0.7019931", "0.7012373", "0.7002889", "0.7000384", "0.6998216", "0.6998216", "0.69887966", "0.69635105", "0.6953924", "0.6939459", "0.692482", "0.6924205", "0.69120044", "0.6907726", "0.6887269", "0.6875298", "0.68687236", "0.685999", "0.6856297", "0.6853342", "0.6852072", "0.6837144", "0.6814176", "0.6808782", "0.6795917", "0.6789015", "0.6785048", "0.67757255", "0.67638963", "0.67556655", "0.67509735", "0.67423576", "0.6733617", "0.6728267", "0.672706", "0.672322", "0.6718045", "0.6713937", "0.6712576", "0.6712555", "0.67108095", "0.66944945", "0.6693944" ]
0.82571846
0
get global ipv6 address
def get_global_ip_ipv6():
    network_info_providers = [
        'http://v6.ipv6-test.com/api/myip.php',
        'http://v6.ident.me/',
    ]
    random.shuffle(network_info_providers)
    for url in network_info_providers:
        try:
            return requests.get(url).text.lstrip().rstrip()
        except Exception:
            continue
    else:
        log.info('cannot find global ipv6 ip')
        return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_main_ipv6():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n s.connect( ('2001:4860:4860::8888', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv6 address: %s\" % e)\n return None", "def GlobalIpv6Address(self):\n if self.force_auto_sync:\n self.get('GlobalIpv6Address')\n return self._GlobalIpv6Address", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address\")", "def find_ipv6():\n\n test_host = '2600::' # Sprint.net\n try:\n with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:\n s.connect((test_host, 53))\n ipv6 = s.getsockname()[0]\n except:\n if cfg['debug']:\n print(\"Couldn't create a socket to %s\" % test_host)\n print(\"Check that you have a valid IPv6 default route\")\n ipv6 = None\n\n return ipv6", "def get_if_addr6(iff):\n return next((x[0] for x in in6_getifaddr()\n if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)", "def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def LinkLocalIpv6Address(self):\n if self.force_auto_sync:\n self.get('LinkLocalIpv6Address')\n return self._LinkLocalIpv6Address", "def ipv6_address(self) -> str:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def PrefixIpv6Address(self):\n if self.force_auto_sync:\n self.get('PrefixIpv6Address')\n return self._PrefixIpv6Address", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def ipv6_address_space(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ipv6_address_space\")", "def get_intf_address(self, intf, pod, v6=False):\n if v6:\n cmd = [\"ifconfig \" + intf + \" | grep Global\"]\n output = pod.run_cmd_on_vm(cmd)\n ip6 = re.search(\n r'inet6\\s+addr\\s*:\\s*(\\S*)',\n output['ifconfig eth0 | grep Global'])\n ip6_addr = ip6.group(1)\n return ip6_addr\n cmd = [\"ifconfig \" + intf + \" | grep inet\"]\n output = pod.run_cmd_on_vm(cmd)\n ip = re.search(\n r'inet\\s+addr\\s*:\\s*(\\d+.\\d+.\\d+.\\d+)',\n output['ifconfig eth0 | grep inet'])\n ip_addr = ip.group(1)\n return ip_addr", "def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def local_address(self) -> T_SockAddr:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getsockname())", "def ipv6_address_space(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address_space\")", "def _get_virtual_oper_VipV6_address(self):\n return self.__virtual_oper_VipV6_address", "def get_if_raw_addr6(iff):\n ip6 = get_if_addr6(iff)\n if ip6 is not None:\n return inet_pton(socket.AF_INET6, ip6)\n\n return None", "def ipv6_addresses(self) -> Dict[str, List[IPv6Address]]:\n log.debug(\"Host %s: ipv6 addresses of the devices interfaces %s.\", self.host, self._get_ipv6_addresses(\"self\"))\n return self._get_ipv6_addresses(\"self\")", "def toV6(self):\n return V6Address.fromV4(self)", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def get_ip_address(ifname, family=socket.AF_INET):\n if family == socket.AF_INET:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n ip = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15]))[20:24])\n except IOError:\n return None\n return ip\n elif family == socket.AF_INET6:\n try:\n with open(\"/proc/net/if_inet6\", \"r\") as f:\n if6lines = f.readlines()\n for line in if6lines:\n val = line.split()\n # filter LINKLOCAL address\n if val[3] != '20' and val[-1] == str(ifname):\n return Convert.format_proc_address(val[0])\n return None\n except Exception as e:\n SysTools.logger.error(\"can not get the ipv6 address of %s : %s\", str(ifname), str(e))\n return None\n else:\n return None", "def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ipv6_addresses\")", "def remote_address(self) -> T_SockAddr:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getpeername())", "def get_global_ip():\n network_info_providers = [\n 'http://api.ipify.org/',\n 'http://myip.dnsomatic.com',\n 'http://inet-ip.info/ip',\n 'http://v4.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ip')\n return \"\"", "def get_ipv6_list():\n ipv6 = __grains__.get(\"ipv6\")\n\n return \" \".join([\"[\" + ip + \"]\" for ip in ipv6])", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def ipv6_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"ipv6_addresses\")", "def local_ipv6_network_cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"local_ipv6_network_cidr\")", "def Ipv6Flag(self):\r\n\t\treturn self._get_attribute('ipv6Flag')", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def get_local_host_ip(self) -> str:", "def get_my_ip():\r\n try:\r\n return [x[4] for x in 
conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def associate_ipv6_address(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"associate_ipv6_address\")", "def get_main_ipv4():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect( ('8.8.8.8', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv4 address: %s\" % e)\n return None", "def get_self_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()\n s.close()\n return ip[0]", "def OSSupportsIPv6(self) -> bool:", "def local_ipv6_network_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"local_ipv6_network_cidr\")", "def local_ipv6_network_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"local_ipv6_network_cidr\")", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def remote_address(self) -> IPSockAddrType:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getpeername())", "def ipv6_cmd(args):\n r = requete(\"Devices:get\")\n for i in r['status']:\n a = \"-\"\n if 'IPv6Address' in i:\n for j in i['IPv6Address']:\n if j['Scope'] != 'link':\n a = j['Address']\n b = \"-\"\n if 'IPAddress' in i: b = i['IPAddress']\n if a == \"-\": continue\n print(\"%4s %-32s %-5s %-16s %s\" % (i['Index'], i['Name'], i['Active'], b, a))", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def SupportsIPv6(self) -> bool:", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def get_ipv6_zone_connection(self):\n return self.m_connection.ipv6_zones", "def detect_ip_address():\n # Rather hackish way to get the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def ipv6_gateway_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ipv6_gateway_id\")", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def get_ip_address2(ifname):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n except:\n return None", "async def get_ip(self) -> Union[IPv4Address, IPv6Address]:\n xff = await 
self.get_x_forwarded_for()\n if xff: return xff[0]\n ip_addr = self._request.transport.get_extra_info('peername')[0]\n return ip_address(ip_addr)", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_ip_address(ifname):\n # I did not write this function I give credit to this site\n # for it:\n # hpython-mysqldbttp://code.activestate.com/recipes/439094/\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])", "def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def Ipv6Srh(self):\r\n\t\treturn self._get_attribute('ipv6Srh')", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def address(self):\n \n return self.__ip", "def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n sock.connect(('8.8.8.8', 1))\n ip = sock.getsockname()[0]\n except:\n ip = '127.0.0.1'\n finally:\n sock.close()\n return ip", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def _get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('10.255.255.255', 1))\n ip = sock.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n sock.close()\n\n return ip", "def private_ipv6_google_access(self) -> Optional[pulumi.Input['NetworkConfigPrivateIpv6GoogleAccess']]:\n return pulumi.get(self, \"private_ipv6_google_access\")", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def ip_address(self) -> Union[IPv4Address, IPv6Address]:\n try:\n ip_add = ip_address(self.host)\n except ValueError:\n # Assume hostname was used, and retrieve resolved IP from paramiko transport\n log.error(\"Host %s: value error for ip address used to establish connection.\", self.host)\n ip_add = ip_address(self.native.remote_conn.transport.getpeername()[0])\n log.debug(\"Host %s: ip address used to establish connection %s.\", self.host, ip_add)\n return ip_add", "def get_ip(self):", "def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', 
'^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def get_default_ip_address():\r\n gws = netifaces.gateways() # get all gateways\r\n default = gws['default'] # get the default gw\r\n adapter = default[2][1] # get the adapter identifier\r\n realadapter = netifaces.ifaddresses(adapter) # get the adapter\r\n addr_dict = realadapter[2][0] # get the first ipv4 address tuple\r\n return addr_dict['addr']", "def get_ip_address(self):\n raise NotImplementedError", "def ipv6_gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_gateway_id\")", "def getMyIP():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\r\n return s.getsockname()[0]", "def get_remote_addr(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT remote_addr FROM sessions WHERE id = ?;', \\\n (self.sid,))\n remote_addr = cursor.fetchone()\n cursor.close()\n connection.close()\n return remote_addr[0]", "def get_ipv6_host(self, host):\n\n try:\n host = u'{0}'.format(host)\n return IPv6Network(host, strict=False)\n except ValueError as e:\n error_msg = \"Given host {0} is an invalid IPv6 format -- \" \\\n \"error {1}\".format(host, str(e))\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP", "def get_interface_ip_address(peer=\"8.8.8.8\"):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((peer, 1))\n interface_ip_address = sock.getsockname()[0]\n sock.close()\n\n return interface_ip_address", "def get_address(ifname='ib0'):\n # https://stackoverflow.com/a/24196955/1889400\n ifname = ifname.encode()\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])", "def _rloc_ip_net_addr(self):\n self.net_addr = ':'.join(self.rloc.split(':')[:-1]) + ':'\n return self.net_addr", "def ipv6_connected_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ipv6_connected_prefix\")", "def get_ip_address(interface):\n try:\n import netifaces\n detail = netifaces.ifaddresses(interface)\n if netifaces.AF_INET in detail:\n ip = detail[netifaces.AF_INET][0]['addr']\n else:\n ip = 'unavailable'\n if netifaces.AF_LINK in detail:\n mac = detail[netifaces.AF_LINK][0]['addr']\n else:\n mac = 'unknown'\n except ImportError:\n import socket\n import fcntl\n import struct\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n ip = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', interface[:15]))[20:24])\n mac = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', interface[:15]))\n return ip, mac", "def remote_ipv6_network_cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"remote_ipv6_network_cidr\")", "def ipv6_connected_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"ipv6_connected_prefix\")", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def ipv6_native(self) -> bool:\n return pulumi.get(self, \"ipv6_native\")", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def multicast_address(self):\n\n return self.config.dict[\"ssdp\"][\"multicast_address\"]", "def get_rug_address():\n net = netaddr.IPNetwork(ULA_PREFIX)\n return str(netaddr.IPAddress(net.first + 1))", "def get_interface_ip_address(device, interface, address_family,\r\n return_all=False):\r\n if address_family not in [\"ipv4\", \"ipv6\", \"inet\", \"inet6\"]:\r\n log.info('Must provide one of the following address families: '\r\n '\"ipv4\", \"ipv6\", \"inet\", \"inet6\"')\r\n return\r\n\r\n if address_family == \"ipv4\":\r\n address_family = \"inet\"\r\n elif address_family == \"ipv6\":\r\n address_family = \"inet6\"\r\n\r\n try:\r\n out = device.parse('show interfaces terse {interface}'.format(\r\n interface=interface))\r\n except SchemaEmptyParserError:\r\n return\r\n\r\n # Example dictionary structure:\r\n # {\r\n # \"ge-0/0/0.0\": {\r\n # \"protocol\": {\r\n # \"inet\": {\r\n # \"10.189.5.93/30\": {\r\n # \"local\": \"10.189.5.93/30\"\r\n # }\r\n # },\r\n # \"inet6\": {\r\n # \"2001:db8:223c:2c16::1/64\": {\r\n # \"local\": \"2001:db8:223c:2c16::1/64\"\r\n # },\r\n # \"fe80::250:56ff:fe8d:c829/64\": {\r\n # \"local\": \"fe80::250:56ff:fe8d:c829/64\"\r\n # }\r\n # },\r\n # }\r\n # }\r\n # }\r\n\r\n found = Dq(out).contains(interface).contains(address_family). \\\r\n get_values(\"local\")\r\n if found:\r\n if return_all:\r\n return found\r\n return found[0]\r\n return None", "def get_local_ip(self):\n # Get the local IP address used to communicate with the GNS3\n # server. Not the GNS3 server's address, but rather the local\n # machine's address that we use to send messages to the GNS3\n # server. 
If that address isn't 127.0.0.1 (localhost), use it.\n server_local_ip = self.server.get_local_ip()\n if server_local_ip != '127.0.0.1':\n return server_local_ip\n else:\n # Otherwise, find the first interface on the first cloud node (if it exists)\n try:\n first_cloud_node = next(node for node in self.nodes() if node['node_type'] == 'cloud')\n interface = first_cloud_node['properties']['ports_mapping'][0]['interface']\n\n # If the interface is virtual, find and record its\n # mate's first IP address, which is the address we can\n # send to.\n\n ip_proc = subprocess.Popen(['ip', 'link', 'show', interface], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n first_field = ip_proc.stdout.read().decode().split()[1].split('@')\n if first_field[0] == interface:\n paired_interface = first_field[1].split(':')[0]\n return ni.ifaddresses(paired_interface)[ni.AF_INET][0]['addr']\n except (StopIteration, ValueError):\n # StopIteration if there are no cloud nodes\n # ValueError if there are no IP addresses on the paired interface\n pass\n\n return None", "def ipv6_networks(view):\n return \"ipv6network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def get_addr(self):\n return Server.t_addresses.get(threading.get_ident())", "def get_localhost_ip():\n try:\n return [\n (s.connect((NAME_SERVER, 80)), s.getsockname()[0], s.close())\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]\n ][0][1]\n except Exception:\n return '127.0.0.1'", "def get_ip(ifn):\n sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(\n fcntl.ioctl(sck.fileno(), 0x8915, struct.pack(\"256s\", ifn[:15]))[20:24]\n )", "def get_ip_address(self):\n return self.adb.get_ip_address()" ]
[ "0.8276269", "0.82484275", "0.7695294", "0.7630343", "0.7439502", "0.7352564", "0.7306042", "0.7292912", "0.72007406", "0.7183485", "0.71534765", "0.71408135", "0.7075447", "0.7072936", "0.7067728", "0.700647", "0.6943251", "0.6901189", "0.68371254", "0.68331796", "0.6820401", "0.6760036", "0.6730715", "0.6719501", "0.67041224", "0.6695512", "0.6682327", "0.6625417", "0.6603928", "0.66029817", "0.65684956", "0.6550792", "0.6547065", "0.64740133", "0.64735675", "0.6459359", "0.64574474", "0.6452222", "0.64407533", "0.64407533", "0.6434536", "0.6430543", "0.64030975", "0.64010423", "0.6384213", "0.63658094", "0.6351179", "0.6322102", "0.63145614", "0.63134265", "0.6299404", "0.62895316", "0.6277661", "0.62702525", "0.62538093", "0.6251821", "0.62356097", "0.6234074", "0.62202156", "0.6215303", "0.6197451", "0.61773527", "0.6176969", "0.6174858", "0.6172188", "0.6162614", "0.6159436", "0.6156845", "0.6150364", "0.61500007", "0.6145393", "0.6140281", "0.612225", "0.61177146", "0.6114508", "0.61073136", "0.61064786", "0.6083144", "0.6070769", "0.6068557", "0.604649", "0.6035091", "0.6020621", "0.6018503", "0.601226", "0.5996274", "0.5992185", "0.59920067", "0.5978826", "0.5977829", "0.5949104", "0.5947153", "0.59438777", "0.59405357", "0.5936735", "0.59331095", "0.5932968", "0.5930886", "0.59229225", "0.59206617" ]
0.8327801
0
The column labels of the DataFrame.

Returns
-------
orca.Index
    The column labels of each column.
def columns(self):
    return self._columns  # return Index(self._data_columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def labels(self):\n \n return self.column_labels", "def column_labels(self):\n return tuple(self._columns.keys())", "def get_labels(self) -> {int: str}:\n return {x: col.label for x, col in self._cols.items()}", "def get_label_indices(df: DataFrame, labels: list):\n return [idx for idx, name in enumerate(df.columns) if name in labels]", "def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels", "def get_labels(df):\n labels = []\n for i in df.index:\n label = sample_label_from_sample_name(i)\n labels.append(label)\n return labels", "def label_columns(mapping):\n columns = []\n for name, column in mapping.items():\n columns.append(column.label(name))\n return columns", "def column_names(self):\n return self.data.columns.values", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def columns(self):\n if self._default_index:\n return list(self._df.columns)\n return list(self._df.index.names) + list(self._df.columns)", "def cat_labels(self):\n try:\n return list(self.cats.columns)\n except AttributeError:\n return []", "def names(self):\n return self._names_to_cols.keys()", "def labels(self) -> pd.Series:\n return self.data.apply(to_label, axis=1)", "def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName", "def get_data_labels(answer_mapping_df, column):\n labels = []\n for i in answer_mapping_df[column].columns.values:\n labels.append(answer_mapping_df.xs((column, i), level=('q_code', 'a_code'), axis=1).iloc[0,0])\n return labels", "def labels(self):\n return self._get_labels(self.label_vector)", "def labels(self) -> ndarray:\n return self._labels", "def labels(self):\n return self.label2cc.keys()", "def column(self, label):\n dis = []\n for x in self.rows:\n dis = dis + [x[self.column_labels.index(label)]]\n return dis\n # return self.rows[self.column_labels.index(label)]", "def GetColumnsOption(self, data) :\n indices = [ int(x.replace(self.label, '')) for x in data.columns if self.label in x and x.replace(self.label, '')!='' ]\n return indices", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def getColumnNames(self):\n return self.colnames", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def columns(self):\n return self._names_to_cols.values()", "def column_names(self):\n return self._hndl.column_names()", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def labeled_indices(self):\n return self._labeled_indices", "def columns_names(self):\r\n return self._columns_names", "def labels(self):\n return self._labels", "def columns(self):\n return self._column_names", "def limmag_colnames(self):\n return self.__limmag_colnames", "def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns", "def labels(self) -> list:\n return self._labels", "def matlabels(df, rowlabel_fn):\n return df.index.to_frame().apply(rowlabel_fn, axis=1)", "def column_names(self):\n if self._is_vertex_frame():\n return self.__graph__.__proxy__.get_vertex_fields()\n elif 
self._is_edge_frame():\n return self.__graph__.__proxy__.get_edge_fields()", "def get_index_names(self, axis=0):\n return self.get_axis(axis).names", "def getColumnNames(self):\n return self.columnNames", "def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol", "def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels", "def _get_labels(self, ind):\n pass", "def labels_(self) -> DNDarray:\n return self._labels", "def get_encoded_categorical_feature_indexes(self):\n cols = []\n for col_parent in self.categorical_feature_names:\n temp = [self.encoded_feature_names.index(\n col) for col in self.encoded_feature_names if col.startswith(col_parent) and\n col not in self.continuous_feature_names]\n cols.append(temp)\n return cols", "def graphcols(self):\n columns = []\n table = self.__parent_table\n for col in self.__column_list:\n columns.append(table.table_column(col).title())\n return columns", "def column_names(self) -> D2TXTColumnNameView:\n return D2TXTColumnNameView(self._column_names)", "def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names", "def order_column_indices(self):\n return self._order_column_indices()", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def get_headers(df):\n return df.columns.values", "def labels(cls) -> FrozenSet[str]:\n return cls._meta.labels", "def list_columns(self):\n columns = []\n for icol in range(0, self.ncolumns()):\n columns.append(self.table_column(icol).title())\n return columns", "def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]", "def get_column_names(cls):\n return cls._meta.get_column_names()", "def columns(self):\n return self._columns.keys()", "def keys(self):\n return self.df.keys()", "def _label(self, column):\n # XXX\n return column", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def colnum(self):\n \n colnum = 0\n for table in self.columnlabels:\n table = np.asarray(table)\n if np.ndim(table) <= 1:\n table = np.reshape(table, (1, -1))\n colnum += table.shape[1]\n return colnum", "def get_cora_label_names():\n # type: () -> List[str]\n return _label_names", "def names(self):\n \n return self.column_names.copy()", "def get_labels(self):\n return self.labels", "def column_names(self) -> Tuple[List[str], List[str]]:\n return (\n np.array(self._dataset_columns)[self._input_column_indices].tolist(),\n np.array(self._dataset_columns)[self._output_column_indices].tolist(),\n )", "def get_sorted_columns(data):\n if not isinstance(data, pd.DataFrame):\n raise TypeError('Invalid input type: type(data) = {}'.format(type(data)))\n col_names = pd.Series(index=data.index)\n for idx, row in data.iterrows():\n col_names[idx] = row.sort_values().index.tolist()\n return col_names", "def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns", "def columnTitles(self):\n \n pass", "def columnTitles(self):\n \n pass", "def le_columns(df, 
columns):\n logging.debug('LabelEncoding %s columns', len(columns))\n le = LabelEncoder()\n for col in columns:\n df.loc[:, col] = le.fit_transform(df[col])\n return df", "def get_feature_labels(self):\n return self.feature_labels", "def columns(self):\n return self.frame.columns", "def get_labels(self) -> List[str]:\n return self.labels", "def labels_all(self):\n return self._labels_all", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def columns(self):\n return self._coldefs", "def show_columns(self):\n\n df = self.__df_timings\n return df.columns", "def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df", "def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header", "def header(self):\n\n return [c.name for c in self.columns]", "def label_encode(df):\n\n X = df.copy()\n for colname in X.select_dtypes([\"category\"]):\n X[colname] = X[colname].cat.codes\n return X", "def get_labels(self):\n return [token.label for token in self.tokens]", "def get_labels(self):\n raise NotImplementedError", "def label_index(self):\n return self._label_index", "def label_index(self):\n return self._label_index", "def train_labels(self):\n return self._train_labels", "def labels(self):\n return self.label(self.p_y_given_x)", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def load_label_columns(self):\n with open(self.config.labels_local_path, 'r') as f:\n label_columns = yaml.safe_load(f)\n return label_columns", "def dataset_headers(dataset):\n return list(dataset.columns.values)", "def flabels(self):\n return self._cache.flabels", "def getLabels(self):\n labels = np.empty(self.numSites, dtype=np.intc)\n _cgco.gcoGetLabels(self.handle, labels)\n return labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def get_feature_labels(self):\n\t\tfeature_labels = []\n\t\tfor feature, i in zip(self.feature_names,self.feature_mask):\n\t\t\tif i == True:\n\t\t\t\tfeature_labels.append(feature)\n\t\treturn feature_labels", "def get_columns(self, request, cl):\n columns = []\n for field_name in cl.model_admin.list_display:\n text, _ = label_for_field(field_name, cl.model, model_admin=cl.model_admin, return_attr=True)\n columns.append({field_name: text})\n return columns", "def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]", "def get_group_names(self):\n return [self.frame.columns[i] for i in self.group_cols]", "def get_labels(self):\n\t\traise NotImplementedError()" ]
[ "0.7897917", "0.76980937", "0.7478725", "0.69867", "0.6826641", "0.68240386", "0.6750179", "0.6732042", "0.6705691", "0.66914195", "0.6630234", "0.65252477", "0.6519194", "0.64722145", "0.6469839", "0.64630276", "0.6462272", "0.6409666", "0.6367081", "0.63438576", "0.62805796", "0.62805796", "0.62805796", "0.62805796", "0.62805796", "0.62805796", "0.627658", "0.6254504", "0.624863", "0.62463284", "0.6227721", "0.6216025", "0.62079394", "0.6203463", "0.61781865", "0.61678976", "0.6160951", "0.61347455", "0.6109242", "0.61067545", "0.61034966", "0.608796", "0.6087356", "0.6083893", "0.60766613", "0.60559666", "0.6055583", "0.604733", "0.60282063", "0.59811556", "0.5976434", "0.5970417", "0.5953635", "0.59523225", "0.59515595", "0.59297425", "0.59207904", "0.5915441", "0.5907399", "0.5890471", "0.58816886", "0.58663154", "0.58631456", "0.5854869", "0.5853955", "0.5829466", "0.58245337", "0.58009475", "0.57999164", "0.5797305", "0.5797305", "0.578909", "0.57847327", "0.578192", "0.577899", "0.5771882", "0.5769639", "0.57622576", "0.57587135", "0.57544106", "0.57519937", "0.57425785", "0.5733444", "0.5726732", "0.57234526", "0.57126236", "0.57126236", "0.5703727", "0.56977004", "0.569562", "0.5688411", "0.5681896", "0.5681289", "0.5680837", "0.56700313", "0.5662954", "0.5661491", "0.5661354", "0.56587833", "0.5658219" ]
0.6088458
41
Return an int representing the number of axes / array dimensions. Returns: int. Return 1 if Series, otherwise return 2 if DataFrame.
def ndim(self): return 2
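The query above describes the standard pandas ndim convention. As a quick illustration (not part of the stored record, and assuming only an ordinary pandas install), the stated semantics can be checked directly:

```python
import pandas as pd

# A Series is one-dimensional and a DataFrame is two-dimensional,
# which is exactly what the docstring in the query above promises.
s = pd.Series([1, 2, 3])
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

assert s.ndim == 1   # Series -> 1
assert df.ndim == 2  # DataFrame -> 2
```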
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def count_dims(da):\n return len(da.dims)", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def _get_ndim(self):\n return len(self.level_shapes[0])", "def get_dimension_number(self) -> int:\n return np.squeeze(self._channel_arrays[0]).ndim", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def dimensionality(self):\n return int(self.nDims)", "def n_dims(self):\n return len(self.dimensions)", "def ndim(self) -> int:\n return self[0].ndim", "def get_ndim(self):\n return self.ndim", "def ndim(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return len(self.transformer.get_transformed_shape(self.values))\n else:\n return self.__array__().ndim", "def ndim(self):\n # type: () -> int\n return len(self.shape)", "def ndim(self):\n return self.__value.ndim", "def getNumDimensions(self):\n return len(self.di.keys())", "def ndim(self):\n return self._ndim", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return self.data.ndim", "def ndim(a):\n if isinstance(a, np.ndarray):\n return a.ndim\n else:\n return K.ndim(a)", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def getNumberOfAxes(self):\n return self.numAxes", "def ndim(self):\n return len(self.nvars)", "def ndims(x):\n return len(x.get_shape())", "def ndim(self):\n return self.X.ndim", "def getDimension(self):\n dim = len(self.__axis_labels__)\n if dim == 0:\n # Labels weren't set, so how about the data\n dim = self[0].dim()\n return dim", "def dims(x):\n return len(x.shape)", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def ndim(self):\n return len(self._shape)", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def size(self):\n\t\treturn self.dims", "def ndim(self) -> int:\n\n return 1 + len(self.shape)", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim", "def dimension_count(self):\n return self._dimensionCount", "def ndim(self):\n return self._hist.rank()", "def ndim(self):\n return self.initial_value.ndim", "def ndim(self) -> int:\n ndims = [0]\n for mode_ds_val in self.state.values():\n for _, ds_val in mode_ds_val.items():\n for _, values in ds_val.items():\n if values.ndim in (0, 1):\n # A singular value (this should never happen based on implementation of summary)\n ndims.append(1)\n elif values.ndim == 2:\n if values.shape[0] == 1:\n # Metrics with only 1 time point can be displayed as singular values\n if isinstance(values[0][1], ValWithError):\n # 
ValWithError, however, will always be displayed grapically\n ndims.append(2)\n else:\n ndims.append(1)\n else:\n # A regular time vs metric value plot\n ndims.append(2)\n else:\n # Time vs array value. Not supported yet.\n ndims.append(3)\n return max(ndims)", "def dim(self) -> int:", "def dim(self) -> int:\n return self._n_dim", "def n_dims(self):\n return self.pdm.n_dims", "def ndim(x):\n dims = x.get_shape()._dims\n if dims is not None:\n return len(dims)\n return None", "def ndim(self):\n if self._ndim is None:\n self._ndim = self.get_mesh_dimension()\n\n return self._ndim", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.point)", "def n_dim(self):\n return self._n_dim", "def dim(self) -> int:\n pass", "def dimension(self):\n return self.__N", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def ndarray_size(self) -> int:\n pass", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def ndim(self) -> int:\r\n return len(self.plates)", "def dims(self):\n return self[0].dims", "def ndim(self):\n return len(self.edges)", "def dimensions():", "def dim(self):\n return len(self._n)", "def ndim(self):\n futures = self.client.map(_call_ndim, self.vecDask, pure=False)\n ndims = self.client.gather(futures)\n return ndims", "def dimensions(self):\n return len(self.parameter_names)", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def dim(df: DataFrame) -> DataFrame: \n \n return df.shape", "def n_dims(self):\n return self.model.template_instance.n_dims", "def ndim(self):\n return np.ndim(self.MJD)", "def get_dimension_length(self):\n pass", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? 
here / in varset?", "def ndim(tensor):\n raise NotImplementedError", "def __len__(self):\n return self.xyz.shape[0]", "def __len__(self):\n return self.xyz.shape[0]", "def naxes(self, world=True):\n return _coordsys.coordsys_naxes(self, world)", "def _getdim(x):\n \n if np.ndim(x) > 1:\n \n dim = x.shape[-1]\n \n else:\n \n dim = 0\n \n return dim", "def xdim(self):\n return len(self._x)", "def dims(self):\n raise NotImplementedError('Please use Vector2Array or Vector3Array')", "def getDimensions():", "def getDimension(self):\n return len(self.components)", "def dimension(self):\n return np.prod(np.asarray(self.subsystem_dims))", "def dimension(self):\n return len(self.qubit_values)", "def ncol(arr):\n assert isinstance(arr, ndarray)\n assert len(arr.shape) == 2\n return arr.shape[1]", "def size(self):\n\t\t# Better to have this as a method rather than property, as self._dataframe may change\n\t\treturn self._dataframe.shape[0]", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def __len__(self):\n return self._arr.shape[1]", "def get_in_dim(self) -> int:\n return self.in_dim", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def ydim(self):\n return len(self._y)", "def dim(self):\n raise NotImplementedError", "def size(self):\n return int(misc.intprod(self.shape))", "def array_dimensions(array):\n height = len(array)\n width = len(array[0])\n\n return width, height", "def __len__(self):\n return self.flatten_dim(self.shape[0])", "def col_count(self):\n if isinstance(self.data, pd.DataFrame) is False:\n return None\n else:\n return self.data.shape[1]", "def test_n_dim(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n hist0 = hg.Count()\n\n assert hist0.n_dim == 0\n assert hist1.n_dim == 1\n assert hist2.n_dim == 2\n assert hist3.n_dim == 3", "def dim(self):\n return self.__dim__", "def get_num_channels(x):\n return x.get_shape().as_list()[-1]", "def N(self):\n return self._dimensions" ]
[ "0.788396", "0.7611212", "0.7602772", "0.7502887", "0.74729747", "0.7424244", "0.7378051", "0.7378051", "0.73681766", "0.7364678", "0.7358198", "0.73216796", "0.7290911", "0.7280701", "0.726153", "0.7236144", "0.7176368", "0.71549875", "0.71549875", "0.71549875", "0.71549875", "0.71549875", "0.71549875", "0.7150003", "0.71490216", "0.71419936", "0.71280134", "0.7113884", "0.710205", "0.71008235", "0.7092815", "0.70687014", "0.70482874", "0.7039214", "0.70045674", "0.6970728", "0.69695204", "0.6963025", "0.69548327", "0.69387364", "0.693168", "0.69285125", "0.69283056", "0.6927875", "0.69267035", "0.6915584", "0.69067675", "0.6902503", "0.68828595", "0.68828595", "0.68808794", "0.6874458", "0.68571144", "0.6852767", "0.68331885", "0.6786432", "0.6733382", "0.670752", "0.6692575", "0.6684951", "0.6679728", "0.6677458", "0.66770107", "0.6672964", "0.6659952", "0.6647396", "0.66199386", "0.6564546", "0.65629524", "0.65624285", "0.65620273", "0.6549512", "0.6540235", "0.6527095", "0.6527095", "0.651454", "0.6510746", "0.64903957", "0.64829546", "0.64752245", "0.6451003", "0.64459497", "0.644457", "0.6443739", "0.64349705", "0.64271504", "0.64202935", "0.6411573", "0.6410276", "0.6406981", "0.6389865", "0.6388896", "0.6376851", "0.63767105", "0.63742924", "0.6371665", "0.6367745", "0.6367411", "0.6365258", "0.63580054" ]
0.6780645
56
Implementation of binary operator between DataFrames on different indices. A new DataFrame representing an in-memory DolphinDB table is returned. It is guaranteed that both DataFrames have no where_expr.
def _binary_op_on_different_indices(self, other, func, axis): # TODO: add axis check def merge_columns(self_columns, other_columns): """ Align the input columns, filling the missing columns with None -------- Examples -------- >>> merge_columns( ... ["a", "b", "ba", "d", "f"], ... ["e", "c", "d", "g", "ga", "a"] ... ) (('a','a'),('b',None),('ba',None),(None,c),('d','d'),(None,'e'),('f',None),(None,'g'),(None,'ga')) """ sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns) self_idx = other_idx = 0 self_len, other_len = len(self_columns), len(other_columns) while self_idx < self_len and other_idx < other_len: curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx] if curr_self_column == curr_other_column: yield curr_self_column, curr_other_column self_idx += 1 other_idx += 1 elif curr_self_column < curr_other_column: yield curr_self_column, None self_idx += 1 else: yield None, curr_other_column other_idx += 1 while self_idx < self_len: yield sorted_self_columns[self_idx], None self_idx += 1 while other_idx < other_len: yield None, sorted_other_columns[other_idx] other_idx += 1 assert isinstance(self, _Frame) assert isinstance(other, _Frame) if ((not self._in_memory and len(self._index_columns) == 0) or (not other._in_memory and len(other._index_columns) == 0)): raise ValueError("Frame has no default index if it is not in memory") session = self._session self_var_name, other_var_name = self._var_name, other._var_name if other._is_dataframe_like: self_data_columns = self._data_columns other_data_columns = other._data_columns index_list, from_clause = _generate_joiner( self_var_name, other_var_name, self._index_columns, other._index_columns) if self_data_columns == other_data_columns: select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{c}) as {c}" for c in self_data_columns) data_columns = self_data_columns else: merged_columns = list(merge_columns(self_data_columns, other_data_columns)) select_list = (f"00f as {s if o is None else o}" if s is None or o is None else f"{func}({self_var_name}.{s}, {other_var_name}.{s}) as {s}" for s, o in merged_columns) data_columns = [s if o is None else o for s, o in merged_columns] select_list = itertools.chain(index_list, select_list) script = sql_select(select_list, from_clause) elif other._is_series_like: self_data_columns = self._data_columns other_data_column = other._data_columns[0] index_list, from_clause = _generate_joiner( self._var_name, other._var_name, self._index_columns, other._index_columns) select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{other_data_column}) as {c}" for c in self_data_columns) data_columns = self_data_columns select_list = itertools.chain(index_list, select_list) script = sql_select(select_list, from_clause) return self._get_from_script( session, script, data_columns=data_columns, index_map=self._index_map, index=self._index)
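The document above embeds a column-alignment helper (merge_columns) inside the method. Below is a minimal standalone sketch of that helper, written to mirror the nested generator and its docstring example; the module-level function name, the __main__ guard, and the assert are illustration only and are not part of the record:

```python
def merge_columns(self_columns, other_columns):
    # Walk two sorted column lists in lockstep and yield
    # (self_column, other_column) pairs, filling the side that
    # lacks a column with None.
    sorted_self, sorted_other = sorted(self_columns), sorted(other_columns)
    i = j = 0
    while i < len(sorted_self) and j < len(sorted_other):
        s, o = sorted_self[i], sorted_other[j]
        if s == o:
            yield s, o
            i += 1
            j += 1
        elif s < o:
            yield s, None
            i += 1
        else:
            yield None, o
            j += 1
    # One side is exhausted; emit the leftovers of the other side.
    for s in sorted_self[i:]:
        yield s, None
    for o in sorted_other[j:]:
        yield None, o


if __name__ == "__main__":
    pairs = list(merge_columns(["a", "b", "ba", "d", "f"],
                               ["e", "c", "d", "g", "ga", "a"]))
    # Matches the docstring example quoted in the method above.
    assert pairs == [("a", "a"), ("b", None), ("ba", None), (None, "c"),
                     ("d", "d"), (None, "e"), ("f", None), (None, "g"),
                     (None, "ga")]
    print(pairs)
```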
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def where(self, cond, other, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.where)(\n self, cond=cond, other=other, **kwargs\n )", "def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)", "def compare(self, other, align_axis, keep_shape, keep_equal, result_names):\n return DataFrameDefault.register(pandas.DataFrame.compare)(\n self,\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n result_names=result_names,\n )", "def _copy_experimental_conditions_to_second_df(self, df1, df1_cols, df2, df2_cols):\n _cols_ = np.array([df1_cols, df2_cols])\n has_cols = _cols_ != set([])\n exp_cols = _cols_[has_cols]\n if len(exp_cols) == 1: # only one DataFrame has additional columns\n _dfs_ = [df1, df2]\n exp_cols = list(exp_cols[0])\n df_with_cols, df_without_cols = _dfs_[list(has_cols).index(True)], _dfs_[list(has_cols).index(False)]\n exp_cols_only_df = df_with_cols[exp_cols].drop_duplicates()\n num_unique_exp_rows = len(exp_cols_only_df)\n len_df_without_cols = len(df_without_cols)\n\n try:\n expanded_df_without_cols = pd.concat([df_without_cols] * num_unique_exp_rows, ignore_index=True)\n expanded_df_without_cols[exp_cols] = pd.DataFrame(np.repeat(\n exp_cols_only_df.values, len_df_without_cols, axis=0),\n columns=exp_cols)\n return tuple([(expanded_df_without_cols, df_with_cols)[i] for i in _cols_ != set([])]\n + [set(exp_cols), exp_cols_only_df])\n\n except ValueError: # breaks when df_with_out_columns is of len 0.\n return tuple([(pd.DataFrame(columns=list(set(exp_cols)|set(df_without_cols.columns))), df_with_cols)[i]\n for i in _cols_ != set([])] + [set(exp_cols), exp_cols_only_df])\n else:\n return self._combine_experimental_conditions(df1, df1_cols, df2, df2_cols)", "def intersection(self, other, mode: str = \"outer\"):\n # TODO options for which extra fields to keep\n # by default, keep just the fields in 'table'\n if mode == \"trim\":\n # Slower\n chunks = [\n chunk.data\n for _, chunk in self.by_ranges(other, mode=mode, keep_empty=False)\n ]\n return self.as_dataframe(pd.concat(chunks))\n # Faster\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])", "def __or__(self, other):\n tmp = self.rows[:]\n tmp.extend(other.rows[:]) # use copys of lists !\n return Table(tmp)", "def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)", "def __and__(self, other):\n tmp = [ r for r in self.rows if r in other.rows ]\n return Table(tmp)", "def dataframe_diff(xxa,xxb):\n\n xa=pd.DataFrame(xxa)\n xb=pd.DataFrame(xxb)\n merged = xa.merge(xb, indicator=True, how='outer')\n\n diff=merged[merged['_merge'] != 'both']\n\n return diff", "def df_update(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(\n self, other=other, **kwargs\n )", "def combine(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.combine)(\n self, other=other, **kwargs\n )", "def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)", "def _inherit_binary_operation(self, other, op):\n sdata = self.data\n if isinstance(op, basestring) and hasattr(sdata, op):\n bound_op = getattr(sdata, op)\n else:\n def bound_op(odata):\n return op(sdata, odata)\n\n bset = self.bset\n if isinstance(other, type(self)) or isinstance(self, 
type(other)):\n obset = other.bset\n if not ((bset == obset) or\n bset.shape == () or\n obset.shape == ()):\n raise ValueError(\"instances of {} must be defined over \"\n \"instances of {} that compare equal for \"\n \"binary operations to be defined\"\n .format(self.__class__.__name__,\n bset.__class__.__name__))\n new_data = bound_op(other.data)\n if bset.shape == ():\n bset = obset\n else:\n new_data = bound_op(other)\n\n return type(self)(new_data, bset)", "def mask(self, cond, other, **kwargs): # noqa: PR01\n return DataFrameDefault.register(pandas.DataFrame.mask)(\n self, cond, other, **kwargs\n )", "def outer_join(self, table: Union[str, sa.Table], left_where: Union[str, sa.Column, BinaryExpression], right_where: Union[str, sa.Column] = None, alias: str = None) -> B[B, E]:", "def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n indicator = \"__datar_setdiff__\"\n out = pandas.merge(x, y, how=\"left\", indicator=indicator)\n\n from .distinct import distinct\n\n return distinct(\n out[out[indicator] == \"left_only\"]\n .drop(columns=[indicator])\n .reset_index(drop=True),\n __calling_env=CallingEnvs.REGULAR,\n )", "def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res", "def test_arithmetic_operations() -> None:\n\n # one two\n # 0 1\n # 2 3\n # 4 5\n df = pd.DataFrame(np.arange(6).reshape((3, 2)), columns=[\"one\", \"two\"])\n\n series = df.iloc[0] # first row == (0, 1)\n\n assert series.index.values.tolist() == [\"one\", \"two\"]\n assert series.values.tolist() == [0, 1]\n\n # Arithmetic operations between frames and series match the index of the\n # series (column names) on the columns of the frame, broadcasting over the\n # rows by default.\n\n df2 = df.sub(series) # axis=1\n\n # one two\n # 0 0\n # 2 2\n # 4 4\n assert df2.values.flatten().tolist() == [0, 0, 2, 2, 4, 4]\n\n # If you want to match on rows, use axis=0. 
This will match the index of the\n # series (row indices) on the rows of the frame, broadcasting over the\n # columns by default.\n series = df.loc[:, \"one\"]\n\n df2 = df.sub(series, axis=0)\n # one two\n # 0 1\n # 0 1\n # 0 1\n assert df2.values.flatten().tolist() == [0, 1, 0, 1, 0, 1]", "def join_where(self, table, one, operator, two, type='inner'):\n return self.join(table, one, operator, two, type, True)", "def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res", "def join(self, right, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.join)(self, right, **kwargs)", "def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n return res", "def run_and_compare(\n fn,\n data,\n data2=None,\n force_lazy=True,\n force_hdk_execute=False,\n force_arrow_execute=False,\n allow_subqueries=False,\n comparator=df_equals,\n **kwargs,\n):\n\n def run_modin(\n fn,\n data,\n data2,\n force_lazy,\n force_hdk_execute,\n force_arrow_execute,\n allow_subqueries,\n constructor_kwargs,\n **kwargs,\n ):\n kwargs[\"df1\"] = pd.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pd.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n\n if force_hdk_execute:\n set_execution_mode(kwargs[\"df1\"], \"hdk\")\n set_execution_mode(kwargs[\"df2\"], \"hdk\")\n elif force_arrow_execute:\n set_execution_mode(kwargs[\"df1\"], \"arrow\")\n set_execution_mode(kwargs[\"df2\"], \"arrow\")\n elif force_lazy:\n set_execution_mode(kwargs[\"df1\"], \"lazy\")\n set_execution_mode(kwargs[\"df2\"], \"lazy\")\n\n exp_res = fn(lib=pd, **kwargs)\n\n if force_hdk_execute:\n set_execution_mode(exp_res, \"hdk\", allow_subqueries)\n elif force_arrow_execute:\n set_execution_mode(exp_res, \"arrow\", allow_subqueries)\n elif force_lazy:\n set_execution_mode(exp_res, None, allow_subqueries)\n\n return exp_res\n\n constructor_kwargs = kwargs.pop(\"constructor_kwargs\", {})\n try:\n kwargs[\"df1\"] = pandas.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pandas.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n ref_res = fn(lib=pandas, **kwargs)\n except Exception as err:\n with pytest.raises(type(err)):\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_hdk_execute=force_hdk_execute,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n _ = exp_res.index\n else:\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_hdk_execute=force_hdk_execute,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n comparator(ref_res, exp_res)", "def filter_input(input_df, target_df):\n # input_df = input_df.reindex(target_df.index, copy=False)\n data_df = pd.concat((input_df, target_df), join=\"inner\", copy=False, axis=1)\n return data_df", "def filter_dataframes(dfs, xs, ys, 
table_ys, args_list, valid_keys):\n # Descs: descriptions\n # ys_dict == {string (y): List(Serial Data)}\n xs_dict = {x: [] for x in xs}\n ys_dict = {y: [] for y in ys}\n tables = collections.OrderedDict(\n [(key, []) for key in ['index'] + valid_keys + list(table_ys.keys())])\n for i, args in enumerate(args_list):\n # get df from a result\n tmp = dfs\n for key, val in args.items():\n if val is None:\n tmp = tmp[tmp[key].isnull()]\n else:\n tmp = tmp[tmp[key] == val]\n\n for x in xs:\n xs_dict[x].append(tmp[x].values.tolist())\n for y in ys:\n ys_dict[y].append(tmp[y].values.tolist())\n\n for table_y, value_type in table_ys.items():\n if value_type == 'min':\n tables[table_y].append(tmp[table_y].min())\n elif value_type == 'max':\n tables[table_y].append(tmp[table_y].max())\n else:\n raise ValueError\n for key in valid_keys:\n if key in args:\n tables[key].append(args[key])\n else:\n tables[key].append(None)\n\n tables['index'] = list(range(len(args_list)))\n return xs_dict, ys_dict, tables", "def create_Xy_df(X_df, y_df, on_cols):\n return pd.merge(X_df, y_df, how='inner', on=on_cols)", "def return_subtraction_df(\n df_1: pd.DataFrame,\n df_2: pd.DataFrame,\n index_col=\"yearmon\"\n) -> pd.DataFrame:\n df_1 = df_1.set_index(index_col).copy()\n df_2 = df_2.set_index(index_col).copy()\n\n overlapping_index_values = sorted(list(set(df_1.index.intersection(df_2.index))))\n num_cols = df_1.select_dtypes(include=np.number).columns.to_list()\n\n df_1_num_values = df_1.loc[overlapping_index_values, num_cols].to_numpy()\n df_2_num_values = df_2.loc[overlapping_index_values, num_cols].to_numpy()\n df_diff_values = df_1_num_values - df_2_num_values\n df_diff = pd.DataFrame(\n df_diff_values,\n columns=num_cols,\n index=sorted(overlapping_index_values)\n )\n return df_diff", "def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...", "def df_equal(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> bool:\n pd.testing.assert_frame_equal(left, right, **kwargs)\n return True", "def __ge__(self, other: Any) -> ColumnOperators:\n return self.operate(ge, other)", "def left_join_where(self, table, one, operator, two):\n return self.join_where(table, one, operator, two, 'left')", "def match(self, other: Any, **kwargs: Any) -> ColumnOperators:\n return self.operate(match_op, other, **kwargs)", "def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)", "def __mod__(self, other: Any) -> ColumnOperators:\n return self.operate(mod, other)", "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def merge(self, right, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.merge)(\n self, right=right, **kwargs\n )", "def compute_diffrowset(self, table1, table2):\n\n logging.info(f\"\"\"compute_diffrowset {table1.tableName}\"\"\")\n\n def build_where(table, result_row):\n i = 0\n where_clause = \"\"\n rowslist = table.get_pk()\n rows = rowslist.split(',')\n pk_idxlist = table.get_pk_idx()\n pk_idx = pk_idxlist.split(',')\n for result_col in rows:\n idx = int(pk_idx[i])\n rst_row = result_row[idx - 1]\n i = i + 1\n # remove\n # if rst_row == \"3800497307669\":\n # logging.info(f\"\"\"cdebug\"\"\")\n if rst_row is not None:\n columnname = result_col\n value_utf8 = \"\"\n field_type = table.get_field_datatype(\n table.viewName,columnname)\n if 
(\"char\".upper() in field_type.upper()) or (\n \"text\".upper() in field_type.upper()):\n if type(rst_row) is int:\n value_from_db = str(rst_row)\n else:\n value_from_db = rst_row\n try:\n value_utf8 = value_from_db.encode('utf-8')\n except Exception as e:\n error, = e.args\n logging.error(\n f\"\"\"error executing encode for {value_from_db} : {error}\"\"\")\n\n quote = \"'\"\n where_clause = where_clause + ' AND ' + columnname + \\\n ' = ' + quote + \\\n value_utf8.decode(\n 'utf-8').replace(\"'\",\"''\") + quote\n elif ((\"timestamp\".upper() in field_type.upper()) or (\"date\".upper() in field_type.upper())):\n st1 = \"to_date('\"\n st2 = \"','YYYY-MM-DD HH24:MI:SS')\"\n where_clause = where_clause + ' AND ' + columnname + \\\n ' = ' + st1 + str(rst_row) + st2\n# elif (\"date\".upper() in field_type.upper()):\n# st1 = \"to_date('\"\n# st2 = \"','YYYY-MM-DD HH24:MI:SS')\"\n# if (\".\" in str(rst_row)):\n# st1 = \"to_timestamp('\"\n# st2 = \"','YYYY-MM-DD HH24:MI:SS.FF')\"\n# where_clause = where_clause + ' AND ' + columnname + \\\n# ' = ' + st1 + str(rst_row) + st2\n else:\n where_clause = where_clause + ' AND ' + \\\n columnname + ' = ' + str(rst_row)\n return where_clause\n\n def format_result(result):\n \"\"\"[summary]\n will seek all differences between 2 datasets, and then will query\n server1 and server2, to check if rows does not exists or is really\n different.\n\n When dataset are <> it does not means that the missing row is not\n in an other partition.\n for example, we used to have this problem : \"when there is a text\n field in then primary key, the order by clause with uppercase and\n lowercase in oracle and pgsql are different. Lowercase is first in\n pgsql, but in oracle uppercase comes first.\"\n This problem was solved by Pierre Caron using the order by collate\n \"c\" on character fields.\n\n Oracle :\n select * from foo order by ch;\n CH\n ----------\n 1\n A\n a\n\n PostgreSQL :\n select * from foo order by ch;\n ch\n ----\n 1\n a\n A\n\n select * from foo order by ch collate \"C\";\n ch\n ----\n 1\n A\n a\n\n We keep this control, even if it is not needed, we should face some\n new differences between the 2 db engines.\n\n an other use case :\n\n oracle pgsql\n table1 (c1,c2,c3) table1(c1,c2,c3)\n c1 c2 c3 c1 c2 c3\n line1 1 1 1 1 1 1\n line2 2 2 2 3 3 3\n line3 3 3 3 4 4 4\n line4 4 4 4 6 6 6\n\n partition1 = line1 and line2, we compare oracle and pgsql, we can\n see <> (333 is missing in oracle, but it exists in the 2nd\n partition\n (line3). So format_result see that 333 is missing in partition1 of\n oracle, but it must check that it does not exists in an other\n partition (partition2 line 3). 
As it exists then nb_error = 0\n\n\n Returns:\n [int] -- number of errors\n \"\"\"\n nbdiff = 0\n errloc = 0\n i = 0\n total_diff = len(result)\n for result_row in result:\n i = i + 1\n logging.info(f\"\"\"search row {i}/{len(result)}\"\"\")\n if nbdiff >= int(maxdiff):\n logging.warning(\n f\"\"\"line {id}:reach max diff {maxdiff} for {table1.schema}.{table1.tableName} total diff:{total_diff}\"\"\")\n errloc = nbdiff\n self.total_nbdiff = nbdiff\n break\n list_fields = ''\n fields1 = table1.concatened_fields\n fields2 = table2.concatened_fields\n qry1_fields = f\"\"\"select {fields1} from\n {table1.schema}.{table1.viewName}\n where\n 1 = 1 \"\"\"\n qry2_fields = f\"\"\"select {fields2} from\n {table2.schema}.{table2.viewName}\n where\n 1 = 1 \"\"\"\n\n qry1 = qry1_fields + build_where(table1,result_row)\n qry2 = qry2_fields + build_where(table2,result_row)\n # list_fields = '|'\n # list_fields = list_fields.join(result_row)\n\n for result_col in result_row:\n test = \"{}\"\n list_fields = list_fields + '|' + test.format(result_col)\n # if type(result_col) is str:\n # list_fields = ''.join(list_fields,'|',result_col)\n # else:\n # list_fields = ''.join(list_fields,'|',result_col)\n\n # .encode\n # ('utf-8').strip()\n # list_fields = list_fields + '|' + result_col.encode('utf-8')\n\n list_fields = list_fields.lstrip('|')\n quotedsql = qry1.replace(\"'\",\"''\")\n\n qry_thread_1 = ExecQry(\n table1.getengine() + '_dtsDiff',table1,qry1)\n qry_thread_2 = ExecQry(\n table2.getengine() + '_dtsDiff',table2,qry2)\n\n \"\"\"\n start the threads on server1 and server2\n \"\"\"\n qry_thread_1.start()\n qry_thread_2.start()\n\n \"\"\"\n wait for the 2 thread to terminate\n \"\"\"\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n row1detail = qry_thread_1.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n row2detail = qry_thread_2.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n\n if (row1detail is not None) and (len(row1detail) != 0):\n nbrows1 = 1\n fieldst1 = row1detail[0][0]\n fieldst1 = fieldst1.replace(\"\\x00\",\"\")\n fieldst1 = fieldst1.replace(\"\\r\\n\",\"\\n\")\n fieldst1 = fieldst1.replace(\" \\n\",\"\\n\")\n fieldst1 = fieldst1.replace(\"\\t\",\"\")\n if fieldst1 != row1detail[0][0]:\n self.set_comments(id,'x00 or other found')\n else:\n nbrows1 = 0\n\n if (row2detail is not None) and len(row2detail) != 0:\n nbrows2 = 1\n fieldst2 = row2detail[0][0]\n fieldst2 = fieldst2.replace(\"\\x00\",\"\")\n fieldst2 = fieldst2.replace(\"\\r\\n\",\"\\n\")\n fieldst2 = fieldst2.replace(\"\\t\",\"\")\n fieldst2 = fieldst2.replace(\" \\n\",\"\\n\")\n if fieldst2 != row2detail[0][0]:\n self.set_comments(id,'x00 or other found')\n else:\n nbrows2 = 0\n\n desc = \"ok\"\n if nbrows1 == 1 and nbrows2 == 1:\n if fieldst1 != fieldst2:\n desc = f\"\"\"( <> in server1 {table1.getengine()} and server2 {table2.getengine()})\n server1 {fieldst1}\n server2 {fieldst2}\"\"\"\n # logging.info(f\"\"\"delta in {table1.tableName}:\\n {desc}\n # \"\"\")\n elif nbrows1 == 1 and nbrows2 == 0:\n desc = f\"\"\"(+ in server1 {table1.getengine()}) {fieldst1} ; (- in server2 {table2.getengine()}) \"\"\"\n elif nbrows1 == 0 and nbrows2 == 1:\n desc = f\"\"\"(- in server1 {table1.getengine()}) ; (+ in server2 {table2.getengine()}) {fieldst2}\"\"\"\n\n quoteddesc = desc.replace(\"'\",\"''\")\n quotedlist_fields = 
list_fields.replace(\"'\",\"''\")\n quotedtableName = table1.tableName.replace(\"'\",\"''\")\n sql = f\"\"\"insert into {schemaRepo}.rowdiff (idtable,table_name,\n comments,fields,qry) select '{id}','{quotedtableName}','\n {quoteddesc}','{quotedlist_fields}','{quotedsql}'\n where not exists\n (select 1 from {schemaRepo}.rowdiff where (idtable,lower\n (table_name),\n comments,qry) =\n ({id},lower('{quotedtableName}'),'{quoteddesc}','{quotedsql}'))\"\"\"\n\n if desc != 'ok':\n errloc = errloc + 1\n nbdiff = nbdiff + 1\n logging.info(\n f\"\"\"diffrowset nok {table1.tableName} for id = {id}\"\"\")\n logging.error(f\"\"\"{desc}\"\"\")\n conn = self.connect(cxRepo)\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql}:\n {error.code}\"\"\")\n return errloc\n\n \"\"\"\n list all rows with step = 2 (or step = 1 without step = 2) and result\n = 'nok' and status = 'done'\n \"\"\"\n quotedtableName = table1.tableName.replace(\"'\",\"''\")\n sql = f\"\"\"select id,start,stop from {self.schemaRepo}.tablediff where (step = 2\n or (step = 1 and not exists (select 1 from {self.schemaRepo}.tablediff\n where\n step = 2\n and lower(table_name) = lower('{quotedtableName}')\n and lower(schema1) = lower('{table1.schema}'))))\n and result = 'nok'\n and server1_status = 'done'\n and server2_status = 'done'\n and lower(table_name) = lower('{quotedtableName}')\n and lower(schema1) = lower('{table1.schema}')\n order by id\"\"\"\n\n \"\"\"\n for each of them, execute qry on separate thread to retreive datasets\n \"\"\"\n nberr = 1\n logging.debug(f\"\"\"qry compute_diffrowset : {sql}\"\"\")\n conn = self.connect(self.cxRepo)\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n rows = curs.fetchall()\n if rows is not None:\n for row in rows:\n if self.total_nbdiff != 0:\n break\n id = row[0]\n start = row[1]\n stop = row[2]\n logging.debug(\n f\"\"\"search diff in {table1.tableName} range : {start} -> {stop} retreiving dataset from server1 & 2...\"\"\")\n qry1 = table1.format_qry_last(start,stop)\n qry2 = table2.format_qry_last(start,stop)\n\n qry_thread_1 = ExecQry(\n table1.getengine() + '_dtsFetch',table1,qry1)\n qry_thread_2 = ExecQry(\n table2.getengine() + '_dtsFetch',table2,qry2)\n\n self.set_status(id,'running',1)\n self.set_status(id,'running',2)\n\n qry_thread_1.start()\n qry_thread_2.start()\n\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n rows1 = qry_thread_1.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n rows2 = qry_thread_2.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n\n \"\"\"\"\n 2 datasets are ready to be compared\n \"\"\"\n if rows1 is not None:\n rowsets1 = set(rows1)\n if rows2 is not None:\n rowsets2 = set(rows2)\n result_a_b = rowsets1 - rowsets2\n result_b_a = rowsets2 - rowsets1\n nberr = 0\n nberr2 = 0\n nberr1 = 0\n if len(result_a_b) > 0:\n \"\"\"\n there are some <> inside\n \"\"\"\n logging.info(\n f\"\"\"find {len(result_a_b)} in {table1.getengine()}/{len(rowsets1)}\"\"\")\n nberr1 = format_result(result_a_b)\n if nberr1 < int(maxdiff) and len(result_b_a) > 0:\n \"\"\"\n there are some <> inside\n \"\"\"\n logging.info(\n f\"\"\"find {len(result_b_a)} in {table2.getengine()}/{len(rowsets2)}\"\"\")\n nberr2 = format_result(result_b_a)\n nberr = nberr1 + 
nberr2\n if nberr == 0:\n self.set_result(id,'ok')\n else:\n self.set_result(id,'nok')\n else:\n self.set_result(id,'nok')\n\n self.set_status(id,'done',1)\n self.set_status(id,'done',2)\n return nberr", "def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n from .distinct import distinct\n\n return distinct(\n pandas.merge(x, y, how=\"outer\"), __calling_env=CallingEnvs.REGULAR\n )", "def sdc_join_series_indexes_overload(left, right):\n\n # check that both operands are of types used for representing Pandas indexes\n if not (isinstance(left, sdc_pandas_index_types) and isinstance(right, sdc_pandas_index_types)\n and not isinstance(left, types.NoneType)\n and not isinstance(right, types.NoneType)):\n return None\n\n convert_left = isinstance(left, (RangeIndexType, Int64IndexType))\n convert_right = isinstance(right, (RangeIndexType, Int64IndexType))\n\n def _convert_to_arrays_impl(left, right):\n _left = left.values if convert_left == True else left # noqa\n _right = right.values if convert_right == True else right # noqa\n return sdc_join_series_indexes(_left, _right)\n\n if isinstance(left, RangeIndexType) and isinstance(right, RangeIndexType):\n\n def sdc_join_range_indexes_impl(left, right):\n if (left is right or numpy_like.array_equal(left, right)):\n joined = left.values\n lidx = numpy.arange(len(joined))\n ridx = lidx\n return joined, lidx, ridx\n else:\n return sdc_join_series_indexes(left.values, right.values)\n\n return sdc_join_range_indexes_impl\n\n elif (isinstance(left, (RangeIndexType, Int64IndexType, types.Array))\n and isinstance(right, (RangeIndexType, Int64IndexType, types.Array))\n and not (isinstance(left, types.Array) and isinstance(right, types.Array))):\n return _convert_to_arrays_impl\n\n # TODO: remove code duplication below and merge numeric and StringArray impls into one\n # needs equivalents of numpy.arsort and _hpat_ensure_array_capacity for StringArrays\n elif isinstance(left, types.Array) and isinstance(right, types.Array):\n\n numba_common_dtype = find_common_dtype_from_numpy_dtypes([left.dtype, right.dtype], [])\n if isinstance(numba_common_dtype, types.Number):\n\n def sdc_join_series_indexes_impl(left, right):\n\n # allocate result arrays\n lsize = len(left)\n rsize = len(right)\n est_total_size = int(1.1 * (lsize + rsize))\n\n lidx = numpy.empty(est_total_size, numpy.int64)\n ridx = numpy.empty(est_total_size, numpy.int64)\n joined = numpy.empty(est_total_size, numba_common_dtype)\n\n left_nan = []\n right_nan = []\n for i in range(lsize):\n if numpy.isnan(left[i]):\n left_nan.append(i)\n for i in range(rsize):\n if numpy.isnan(right[i]):\n right_nan.append(i)\n\n # sort arrays saving the old positions\n sorted_left = numpy_like.argsort(left, kind='mergesort')\n sorted_right = numpy_like.argsort(right, kind='mergesort')\n # put the position of the nans in an increasing sequence\n sorted_left[lsize-len(left_nan):] = left_nan\n sorted_right[rsize-len(right_nan):] = right_nan\n\n i, j, k = 0, 0, 0\n while (i < lsize and j < rsize):\n joined = _hpat_ensure_array_capacity(k + 1, joined)\n lidx = _hpat_ensure_array_capacity(k + 1, lidx)\n ridx = _hpat_ensure_array_capacity(k + 1, ridx)\n\n left_index = left[sorted_left[i]]\n right_index = right[sorted_right[j]]\n\n if (left_index < right_index) or numpy.isnan(right_index):\n joined[k] = left_index\n lidx[k] = sorted_left[i]\n ridx[k] = -1\n i += 1\n k += 1\n elif (left_index > right_index) or numpy.isnan(left_index):\n joined[k] = right_index\n lidx[k] = -1\n ridx[k] = sorted_right[j]\n j += 1\n 
k += 1\n else:\n # find ends of sequences of equal index values in left and right\n ni, nj = i, j\n while (ni < lsize and left[sorted_left[ni]] == left_index):\n ni += 1\n while (nj < rsize and right[sorted_right[nj]] == right_index):\n nj += 1\n\n # join the blocks found into results\n for s in numpy.arange(i, ni, 1):\n block_size = nj - j\n to_joined = numpy.repeat(left_index, block_size)\n to_lidx = numpy.repeat(sorted_left[s], block_size)\n to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)\n\n joined = _hpat_ensure_array_capacity(k + block_size, joined)\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = _hpat_ensure_array_capacity(k + block_size, ridx)\n\n joined[k:k + block_size] = to_joined\n lidx[k:k + block_size] = to_lidx\n ridx[k:k + block_size] = to_ridx\n k += block_size\n i = ni\n j = nj\n\n # fill the end of joined with remaining part of left or right\n if i < lsize:\n block_size = lsize - i\n joined = _hpat_ensure_array_capacity(k + block_size, joined)\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = _hpat_ensure_array_capacity(k + block_size, ridx)\n ridx[k: k + block_size] = numpy.repeat(-1, block_size)\n while i < lsize:\n joined[k] = left[sorted_left[i]]\n lidx[k] = sorted_left[i]\n i += 1\n k += 1\n\n elif j < rsize:\n block_size = rsize - j\n joined = _hpat_ensure_array_capacity(k + block_size, joined)\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = _hpat_ensure_array_capacity(k + block_size, ridx)\n lidx[k: k + block_size] = numpy.repeat(-1, block_size)\n while j < rsize:\n joined[k] = right[sorted_right[j]]\n ridx[k] = sorted_right[j]\n j += 1\n k += 1\n\n return joined[:k], lidx[:k], ridx[:k]\n\n return sdc_join_series_indexes_impl\n\n else:\n return None\n\n elif (left == string_array_type and right == string_array_type):\n\n def sdc_join_series_indexes_impl(left, right):\n\n # allocate result arrays\n lsize = len(left)\n rsize = len(right)\n est_total_size = int(1.1 * (lsize + rsize))\n\n lidx = numpy.empty(est_total_size, numpy.int64)\n ridx = numpy.empty(est_total_size, numpy.int64)\n\n # use Series.sort_values since argsort for StringArrays not implemented\n original_left_series = pandas.Series(left)\n original_right_series = pandas.Series(right)\n\n # sort arrays saving the old positions\n left_series = original_left_series.sort_values(kind='mergesort')\n right_series = original_right_series.sort_values(kind='mergesort')\n sorted_left = left_series._index\n sorted_right = right_series._index\n\n i, j, k = 0, 0, 0\n while (i < lsize and j < rsize):\n lidx = _hpat_ensure_array_capacity(k + 1, lidx)\n ridx = _hpat_ensure_array_capacity(k + 1, ridx)\n\n left_index = left[sorted_left[i]]\n right_index = right[sorted_right[j]]\n\n if (left_index < right_index):\n lidx[k] = sorted_left[i]\n ridx[k] = -1\n i += 1\n k += 1\n elif (left_index > right_index):\n lidx[k] = -1\n ridx[k] = sorted_right[j]\n j += 1\n k += 1\n else:\n # find ends of sequences of equal index values in left and right\n ni, nj = i, j\n while (ni < lsize and left[sorted_left[ni]] == left_index):\n ni += 1\n while (nj < rsize and right[sorted_right[nj]] == right_index):\n nj += 1\n\n # join the blocks found into results\n for s in numpy.arange(i, ni, 1):\n block_size = nj - j\n to_lidx = numpy.repeat(sorted_left[s], block_size)\n to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)\n\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = 
_hpat_ensure_array_capacity(k + block_size, ridx)\n\n lidx[k:k + block_size] = to_lidx\n ridx[k:k + block_size] = to_ridx\n k += block_size\n i = ni\n j = nj\n\n # fill the end of joined with remaining part of left or right\n if i < lsize:\n block_size = lsize - i\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = _hpat_ensure_array_capacity(k + block_size, ridx)\n ridx[k: k + block_size] = numpy.repeat(-1, block_size)\n while i < lsize:\n lidx[k] = sorted_left[i]\n i += 1\n k += 1\n\n elif j < rsize:\n block_size = rsize - j\n lidx = _hpat_ensure_array_capacity(k + block_size, lidx)\n ridx = _hpat_ensure_array_capacity(k + block_size, ridx)\n lidx[k: k + block_size] = numpy.repeat(-1, block_size)\n while j < rsize:\n ridx[k] = sorted_right[j]\n j += 1\n k += 1\n\n # count total number of characters and allocate joined array\n total_joined_size = k\n num_chars_in_joined = 0\n for i in numpy.arange(total_joined_size):\n if lidx[i] != -1:\n num_chars_in_joined += len(left[lidx[i]])\n elif ridx[i] != -1:\n num_chars_in_joined += len(right[ridx[i]])\n\n joined = pre_alloc_string_array(total_joined_size, num_chars_in_joined)\n\n # iterate over joined and fill it with indexes using lidx and ridx indexers\n for i in numpy.arange(total_joined_size):\n if lidx[i] != -1:\n joined[i] = left[lidx[i]]\n if (str_arr_is_na(left, lidx[i])):\n str_arr_set_na(joined, i)\n elif ridx[i] != -1:\n joined[i] = right[ridx[i]]\n if (str_arr_is_na(right, ridx[i])):\n str_arr_set_na(joined, i)\n else:\n str_arr_set_na(joined, i)\n\n return joined, lidx, ridx\n\n return sdc_join_series_indexes_impl\n\n return None", "def any(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.any)(self, **kwargs)", "def __call__(self, a, b):\n return _table.DSTable___call__(self, a, b)", "def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)", "def test_join_on_eq_with_pos_dt_outside_window(self):\n dt = 8\n I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, \"pos_dt\")\n self.assertEqual(0, I.size)\n self.assertEqual(0, J.size)\n\n dt = np.int64(8)\n I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, \"pos_dt\")\n self.assertEqual(0, I.size)\n self.assertEqual(0, J.size)\n\n I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, \"pos_dt\", int(0))\n self.assertEqual(0, I.size)\n self.assertEqual(0, J.size)", "def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n from .distinct import distinct\n\n return distinct(\n pandas.merge(x, y, how=\"inner\"), __calling_env=CallingEnvs.REGULAR\n )", "def equal_operator(ds1, ds2):\n ds3 = ds1 == ds2\n ds3.tolist()\n return ds3", "def __contains(self, other):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"contains\",\n operand1=self,\n operand2=other\n )", "def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)", "def _link_index(self, df_a, df_b):\n\n # Select records with names starting with a 'letter'.\n a_startswith_w = df_a[df_a['given_name'].str.startswith(self.letter) == True]\n b_startswith_w = df_b[df_b['given_name'].str.startswith(self.letter) == True]\n\n # Make a product of the two numpy arrays\n return pandas.MultiIndex.from_product(\n [a_startswith_w.index.values, b_startswith_w.index.values],\n names=[df_a.index.name, df_b.index.name]\n )", "def compare_with_old_data_query(self):\n raise NotImplementedError", "def 
test_roundtrip_from_dataframe1(self):\n import pandas as pd\n df = pd.DataFrame(data={\n 'a': np.arange(3),\n 'b': np.arange(3)[::-1]\n })\n assert_array_equal(df, carray(df, dtype=None))", "def diff(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.diff)(self, **kwargs)", "def outer_join(file_one, file_two, attribute_one, attribute_two, res):\r\n # Indexes of attributes\r\n index_one = join_index(file_one, attribute_one)\r\n index_two = join_index(file_two, attribute_two)\r\n # Constructing logical statement\r\n logical_statement = res[2].split()[2]\r\n # Reading tables into temporary lists\r\n with open(file_one + \".txt\") as f:\r\n table_one = f.read().splitlines()\r\n with open(file_two + \".txt\") as r:\r\n table_two = r.read().splitlines()\r\n compiled = [str(table_one[0]) + \" \" + str(table_two[0])]\r\n table_one.pop(0)\r\n table_two.pop(0)\r\n # Check values of tables with indexes\r\n for i in range(len(table_one)):\r\n found = False\r\n for j in range(len(table_two)):\r\n # logical evaluation statement\r\n var = table_one[i].split()[index_one] + \" \" + res[2].split()[2] + \" \" + table_two[j].split()[index_two]\r\n if res[2].split()[2] == '=':\r\n if table_one[i].split()[index_one] == table_two[j].split()[index_two]:\r\n compiled.append(str(table_one[i]) + \" \" + str(table_two[j]))\r\n found = True\r\n # All other string logical expressions can be evaluated with eval()\r\n else:\r\n if eval(var):\r\n found = True\r\n compiled.append(str(table_one[i]) + \" \" + str(table_two[j]))\r\n if not found:\r\n # append the length of table 2 with NULL\r\n empty = [\"NULL\" for i in range(len(table_two[j].split()))]\r\n compiled.append(table_one[i] + \" \" + \" \".join(empty))\r\n print_data(compiled)", "def standart_filter(left_df, right_df):\n return left_df.join(right_df, left_df.tconst == right_df.tconst) \\\n .drop(right_df.tconst) \\\n .filter((left_df.titleType == 'movie')\n & (right_df.numVotes >= 100000))", "def in_(self, other: Any) -> ColumnOperators:\n return self.operate(in_op, other)", "def cross_join(left, right, suffixes=(\"_left\", \"_right\")):\n left[\"_TMP_KEY\"] = 1\n right[\"_TMP_KEY\"] = 1\n res = pd.merge(left, right, on=\"_TMP_KEY\", suffixes=suffixes).drop(\"_TMP_KEY\", axis=1)\n left.drop(\"_TMP_KEY\", axis=1, inplace=True)\n right.drop(\"_TMP_KEY\", axis=1, inplace=True)\n return res", "def df_merge(left, right, on=[\"lat\", \"lon\", \"start_date\"], how=\"outer\"):\r\n if left is None:\r\n return right\r\n else:\r\n return pd.merge(left, right, on=on, how=how)", "def test_join_on_eq_with_abs_dt_outside_window(self):\n dt = 8\n I, J = ak.join_on_eq_with_dt(self.a1, self.a1, self.t1, self.t1 * 10, dt, \"abs_dt\")\n self.assertEqual(0, I.size)\n self.assertEqual(0, J.size)\n\n I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, \"abs_dt\")\n self.assertEqual(0, I.size)\n self.assertEqual(0, J.size)", "def _adjust_indices(left_df, right_df):\n index_diff = left_df.shape[0] - right_df.shape[0]\n if index_diff > 0:\n # right_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), right_df.shape[1]), np.nan),\n columns=right_df.columns,\n )\n right_df = pd.concat((empty_df, right_df), axis=0).reset_index(drop=True)\n elif index_diff < 0:\n # left_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), left_df.shape[1]), np.nan),\n columns=left_df.columns,\n )\n left_df = pd.concat((empty_df, left_df), axis=0).reset_index(drop=True)\n\n return left_df, right_df", "def 
right_join_where(self, table, one, operator, two):\n return self.join_where(table, one, operator, two, 'right')", "def selection(self, clause):\n result = DBTable()\n result.columnNames = self.columnNames\n if clause.operator == '=':\n for rec in self.records:\n if rec[clause.operand1] == clause.operand2:\n result.records.append(rec)\n return result", "def _combine_experimental_conditions(df1, df1_cols, df2, df2_cols):\n if df1_cols == df2_cols:\n exp_cols = list(df1_cols)\n df1_exp_idx = df1[exp_cols].drop_duplicates()\n df2_exp_idx = df2[exp_cols].drop_duplicates()\n combined_exp_idx = pd.concat([df1_exp_idx, df2_exp_idx], ignore_index=True).drop_duplicates()\n return df1, df2, set(exp_cols), combined_exp_idx\n else:\n raise AttributeError(\"Means and Covariances use the same columns to index experiments\")", "def eval(self, expr, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.eval)(\n self, expr=expr, **kwargs\n )", "def merge (*a_data) :\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df", "def _(x: DataFrame, y: DataFrame) -> bool:\n _check_xy(x, y)\n\n x = x.sort_values(by=x.columns.to_list()).reset_index(drop=True)\n y = y.sort_values(by=y.columns.to_list()).reset_index(drop=True)\n return x.equals(y)", "def get_subdataframe(Acol,Bdf):\n\n cAdf=pd.DataFrame(index=Bdf.index)\n for i in Acol:\n cAdf[i]=Bdf[i] if i in Bdf.columns else np.nan\n return cAdf", "def join(self, table: Union[str, sa.Table], left_where: Union[str, sa.Column, BinaryExpression], right_where: Union[str, sa.Column] = None, alias: str = None, method: str = 'join') -> B[B, E]:", "def is_(self, other: Any) -> ColumnOperators:\n return self.operate(is_, other)", "def dataframe_difference(df1, df2, which=None):\r\n comparison_df = df1.merge(\r\n df2,\r\n indicator=True,\r\n how='outer'\r\n )\r\n if which is None:\r\n diff_df = comparison_df[comparison_df['_merge'] != 'both']\r\n else:\r\n diff_df = comparison_df[comparison_df['_merge'] == which]\r\n diff_df.drop(\"_merge\",axis = 1, inplace = True)\r\n return diff_df.drop_duplicates().reset_index(drop = True)", "def __truediv__(self, other: Any) -> ColumnOperators:\n return self.operate(truediv, other)", "def get_pandas_dataframe(self, datasize, summary=None):\n\n # If left and right filters are identical, do not combine bins\n if self.left_filter == self.right_filter:\n df = self.left_filter.get_pandas_dataframe(datasize, summary)\n\n # If left and right filters are different, combine their bins\n else:\n left_df = self.left_filter.get_pandas_dataframe(datasize, summary)\n right_df = self.right_filter.get_pandas_dataframe(datasize, summary)\n left_df = left_df.astype(str)\n right_df = right_df.astype(str)\n df = '(' + left_df + ' ' + self.binary_op + ' ' + right_df + ')'\n\n return df", "def applyFilter(df: pandas.DataFrame, attribute:str, op: str, val: object) -> pandas.DataFrame: \n if (op == '='):\n return df[df[attribute] == val]\n elif (op == '<'):\n return df[df[attribute] < val]\n elif (op == '>'):\n return df[df[attribute] > val]\n elif (op == '<='):\n return df[df[attribute] <= val]\n elif (op == '>='):\n return df[df[attribute] >= val]\n elif (op == '!='):\n return df[df[attribute] != val]\n return df", "def test_stream_to_data_frame():\n # -- Setup - Create archive in main memory --------------------------------\n archive = Archive()\n for df in [DF1, DF2, DF3]:\n doc = DataFrameDocument(df=df)\n 
archive.commit(doc)\n # -- Read dataframes for first two snapshots ------------------------------\n #\n # The snapshots are only identical if the data frames where sorted by the\n # data frame index. Thus, the third snapshot will return a data frame in\n # different order.\n pd.testing.assert_frame_equal(archive.open(version=0).to_df(), DF1)\n pd.testing.assert_frame_equal(archive.open(version=1).to_df(), DF2)", "def test_compute_container_hash__df(self):\n column_1 = [1, 2, 3]\n column_2 = [4, 5, 6]\n\n df_1 = QFDataFrame(data={\"A\": column_1, \"B\": column_2}, columns=[\"A\", \"B\"])\n df_2 = QFDataFrame(data={\"A\": column_1, \"B\": column_2}, columns=[\"B\", \"A\"])\n\n self.assertNotEqual(compute_container_hash(df_1), compute_container_hash(df_2))\n self.assertEqual(compute_container_hash(df_1.sort_index(axis=1)),\n compute_container_hash(df_2.sort_index(axis=1)))", "def df_diff(self, df1, df2, which=None):\n comparison_df = df1.merge(df2, indicator=True, on=self.columns, how=\"outer\")\n if which is None:\n diff_df = comparison_df[comparison_df[\"_merge\"] != \"both\"].reset_index(\n drop=True\n )\n else:\n diff_df = comparison_df[comparison_df[\"_merge\"] == which].reset_index(\n drop=True\n )\n\n return diff_df", "def __ne__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(ne, other)", "def dst(df):\n pass", "def merge_arrays(df_left,df_right,method = 'left', join_column = 'datetime'):\n df = df_left.merge(df_right,how = method,left_on = join_column,right_on = join_column)\n return df", "def test_dataframe_indexes() -> None:\n # By default, an int64-like RangeIndex (0-n) is applied.\n df = pd.DataFrame(data={\"one\": [1, 2, 3], \"two\": [4, 5, 6]})\n\n # By default, a 0 based RangeIndex is created for each DF\n assert isinstance(df.index, pd.RangeIndex)\n assert np.array_equal(df.index.values, [0, 1, 2])\n assert df.index.is_monotonic_increasing\n assert df.index.is_unique\n\n # Set axis 0's (row) index to a custom index using `.index``\n df.index = [\"first\", \"second\", \"third\"]\n\n # Set axis 1's (col) index to new values\n df.columns = [\"one_updated\", \"two_updated\"]\n\n # one_updated two_updated\n # first 1 4\n # second 2 5\n # third 3 6\n\n assert isinstance(df.index, pd.Index)\n assert df.loc[\"first\", \"one_updated\"] == 1\n assert np.array_equal(df.loc[\"first\"].values, np.array([1, 4]))", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n # 4\n self.assertEqual(dataframe['Total'][0], 8198.79)\n # 5\n self.assertDataframeEqual(dataframe, get_equal_dataframe())\n alt_dataframe = get_alter_dataframe(self.database_connection)\n # 6\n 
self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])\n # 7\n with self.assertRaises(AssertionError):\n self.assertDataframeEqual(alt_dataframe, dataframe)\n # 8\n self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])", "def __ixor__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__isub__', other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def cross_time_index(df1, df2):\n series = pd.core.series.Series\n crossed_index = df1.index.intersection(df2.index)\n\n if type(df1) == series and type(df2) == series:\n df1 = df1[crossed_index]\n df2 = df2[crossed_index]\n elif type(df1) == series and type(df2) != series:\n df1 = df1[crossed_index]\n df2 = df2.loc[crossed_index, :]\n elif type(df1) != series and type(df2) == series:\n df1 = df1.loc[crossed_index, :]\n df2 = df2[crossed_index]\n else:\n df1 = df1.loc[crossed_index, :]\n df2 = df2.loc[crossed_index, :]\n return df1, df2", "def load(cls):\n df = Operator_Table.df\n df.operator = df.operator.apply(sp.normalize)\n df.operator_alias = df.operator_alias.apply(sp.normalize)\n df = df.rename(columns={\"operator_alias\": \"alias\"})\n return SQLIndex(data=df).set_index(\"operator\")", "def all(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.all)(self, **kwargs)", "def isEqual(self,other,epsilon=None):\n memberShip(other,DataTable)\n Ok=0\n \n nbc1=len(self.getColumnNames())\n nbc2=len(other.getColumnNames())\n \n if nbc1!=nbc2: return 0\n nbu1 = len(self.getColumnUnits())\n nbu2 = len(other.getColumnUnits())\n if nbu1!=nbu2: return 0\n for i in range(nbc1):\n if self.getColumnNames()[i].lower().strip()!=\\\n other.getColumnNames()[i].lower().strip():\n return 0\n pass\n for i in range(nbu1):\n if self.getColumnUnits()[i].lower().strip()!=\\\n other.getColumnUnits()[i].lower().strip():\n return 0\n pass\n \n nbc1=self.getNbColumns()\n nbc2=other.getNbColumns()\n if nbc1!=nbc2: return 0\n nbl1=self.getNbColumns()\n nbl2=other.getNbColumns()\n if nbl1!=nbl2: return 0\n for i in range(nbl1):\n for j in range(nbc1):\n v1=self.getItem(j,i)\n v2=other.getItem(j,i)\n if not epsilon:\n if v1!=v2:\n return 0\n pass\n elif not areClose(float(v1),float(v2),epsilon,'rel'):\n return 0\n pass\n pass\n return 1", "def align(self, other, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.align)(\n self, other=other, **kwargs\n )", "def __call__(self,\n other: 'PlainFrame',\n assert_column_order: bool = False,\n assert_row_order: bool = False):\n\n self._assert_shape(other)\n self._assert_column_names(other, assert_column_order)\n self._assert_dtypes(other)\n\n if not assert_row_order:\n order_left = self._get_row_order(self.parent)\n order_right = self._get_row_order(other)\n\n for column in self.parent.columns:\n left = self.parent.get_column(column).values\n right = other.get_column(column).values\n\n if not assert_row_order:\n left = [left[idx] for idx in order_left]\n right = [right[idx] for idx in order_right]\n\n msg = \"\\nDifference for column: {} \\n\\n\".format(column)\n msg += tabulate.tabulate(zip(*[left, right]),\n headers=[\"ACTUAL\", \"DESIRED\"], )\n msg += \"\\n\"\n assert_equal(left, right, err_msg=msg)", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def __rmod__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mod, other)", "def tab_unsorted(table1, table2, where_conditions, dw_rep):\n sql = \\\n \" SELECT * \" + \\\n \" FROM \" + table1 + \\\n \" AS 
table1 \" + \\\n \" WHERE NOT EXISTS\" \\\n \" ( \" + \\\n \" SELECT NULL \" + \\\n \" FROM \" + table2 + \\\n \" AS table2 \" + \\\n \" WHERE \" + \" AND \".join(where_conditions) + \\\n \" ) \"\n\n cursor = dw_rep.connection.cursor()\n cursor.execute(sql)\n return cursor.fetchall()", "def test_self_union():\n gdf = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (2, 2), (2, 0)]),\n Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),\n Polygon([(1, 1), (1, 2), (2, 2), (2, 1)]),\n ]\n ),\n \"x\": [0, 1, 2],\n \"y\": [4.0, 8.0, 1.0],\n }\n )\n\n result_one = self_union(gdf)\n expected_one = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [4.0, 4.0, 8.0, 1.0, 8.0],\n }\n )\n assert_geodataframe_equal(result_one, expected_one)\n\n result_two = self_union(gdf, ratios=[\"y\"])\n expected_two = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [3.0, 1.0, 2.0, 1.0, 6.0],\n }\n )\n assert_geodataframe_equal(result_two, expected_two)", "def compare_tables(t1, t2):\n assert type(t1)==type(t2)\n assert isinstance(t1,(np.ndarray,DataTable,pd.DataFrame))\n assert np.shape(t1)==np.shape(t2)\n if isinstance(t1,DataTable):\n assert all([np.all(t1.c[i]==t2.c[i]) for i in range(np.shape(t1)[1])])\n else:\n assert np.all(t1==t2)\n if isinstance(t1,DataTable):\n assert t1.get_column_names()==t2.get_column_names()\n if isinstance(t1,pd.DataFrame):\n assert np.all(t1.columns==t2.columns)", "def assert_correct_and_equal(self, other: Union[pd.DataFrame, dict]):\n if isinstance(other, dict):\n other = pd.DataFrame.from_records(other)\n if not isinstance(other, pd.DataFrame):\n raise TypeError(\"other must be a dataframe or a dict!\")\n # Sort cols\n cols = list(self._data.columns) + [c for c in other.columns if c not in self._data.columns]\n other = other[cols]\n SampleDataSchema.to_schema().select_columns(self._data.columns).validate(other)\n assert_frame_equal(\n self._data.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n other.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n )", "def __handle_filters(self, df) -> DataFrame:\n if not len(df):\n return df\n starting_df = df.copy()\n running_df = df\n for filter_ in self.filters:\n filter_value = filter_.value\n if filter_value is None:\n continue\n filter_condition = filter_.condition\n if filter_condition == FilterCondition.OR:\n df = starting_df\n else:\n df = running_df\n\n column_name = filter_.columnName\n operation = filter_.operation\n if operation == FilterOperation.TOP:\n df = df.sort_values(by=column_name, ascending=False, na_position='last').head(filter_value)\n elif operation == FilterOperation.BOTTOM:\n df = df.sort_values(by=column_name, ascending=True, na_position='last').head(filter_value)\n elif operation == FilterOperation.ABSOLUTE_TOP:\n df = 
df.reindex(df[column_name].abs().sort_values(ascending=False, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.ABSOLUTE_BOTTOM:\n df = df.reindex(df[column_name].abs().sort_values(ascending=True, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n # Special case to handle different types of floats\n if isinstance(filter_value[0], str):\n df = df.loc[df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of floats\n df = df[np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.NOT_EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n if isinstance(filter_value[0], str):\n df = df.loc[~df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of float\n df = df[~np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.GREATER_THAN:\n df = df[df[column_name] > filter_value]\n elif operation == FilterOperation.LESS_THAN:\n df = df[df[column_name] < filter_value]\n elif operation == FilterOperation.LESS_THAN_EQUALS:\n df = df[df[column_name] <= filter_value]\n elif operation == FilterOperation.GREATER_THAN_EQUALS:\n df = df[df[column_name] >= filter_value]\n else:\n raise MqValueError(f'Invalid Filter operation Type: {operation}')\n\n if filter_.condition == FilterCondition.OR:\n # Need to merge the results\n running_df = running_df.merge(df, how='outer')\n else:\n running_df = df\n\n return running_df", "def subtract(self, other):\n return self.as_dataframe(subtract(self.data, other.data))", "def bitwise_xor(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_xor_op, other)", "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_reset_index(self):\n\n # reminder on multi index in columns\n df1 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]]).T\n df1.index = pd.Series([1, 2], name=\"idx1\")\n df1.columns = pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2'])\n\n # same data frame in single command\n df2 = pd.DataFrame([[1, 2, 11, 22], [3, 4, 33, 44]],\n index=pd.Series([1, 2], name=\"idx1\"),\n columns=pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2']))\n\n df2.loc[:, pd.IndexSlice[:, 'aa']] # getting all info using the second level of the column index out of it\n\n df2.T.reset_index().set_index(['idx_c', 'idx2']) # all together a nop\n self.assertTrue(df2.T.equals(df2.T.reset_index().set_index(['idx_c', 'idx2'])))\n df2.T.reset_index(0) # pull out first index level (idx_c)\n df2.T.reset_index(1) # pull out second index level (idx2)", "def intersection(self, other: Union[DataFrame, Series, Index, List]) -> \"MultiIndex\":\n if isinstance(other, Series) or not is_list_like(other):\n raise TypeError(\"other must be a MultiIndex or a list of tuples\")\n elif isinstance(other, DataFrame):\n raise ValueError(\"Index data must be 1-dimensional\")\n elif isinstance(other, MultiIndex):\n 
spark_frame_other = other.to_frame()._to_spark()\n keep_name = self.names == other.names\n elif isinstance(other, Index):\n # Always returns an empty MultiIndex if `other` is Index.\n return cast(MultiIndex, self.to_frame().head(0).index)\n elif not all(isinstance(item, tuple) for item in other):\n raise TypeError(\"other must be a MultiIndex or a list of tuples\")\n else:\n other = MultiIndex.from_tuples(list(other))\n spark_frame_other = cast(MultiIndex, other).to_frame()._to_spark()\n keep_name = True\n\n index_fields = self._index_fields_for_union_like(other, func_name=\"intersection\")\n\n default_name: List[Name] = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self.nlevels)]\n spark_frame_self = self.to_frame(name=default_name)._to_spark()\n spark_frame_intersected = spark_frame_self.intersect(spark_frame_other)\n if keep_name:\n index_names = self._internal.index_names\n else:\n index_names = None\n\n internal = InternalFrame(\n spark_frame=spark_frame_intersected,\n index_spark_columns=[\n scol_for(spark_frame_intersected, cast(str, col)) for col in default_name\n ],\n index_names=index_names,\n index_fields=index_fields,\n )\n return cast(MultiIndex, DataFrame(internal).index)" ]
[ "0.5993737", "0.59794873", "0.5897485", "0.5851102", "0.5847667", "0.58340734", "0.57924235", "0.5676979", "0.564678", "0.56334287", "0.54969245", "0.5460089", "0.5447747", "0.544339", "0.5425218", "0.5419818", "0.5405718", "0.5401967", "0.53926265", "0.5366445", "0.5351063", "0.53500974", "0.534686", "0.5345861", "0.5337002", "0.53364885", "0.5335789", "0.5335751", "0.5326944", "0.5318533", "0.5316001", "0.52960986", "0.5293134", "0.52865165", "0.5276003", "0.52568406", "0.52462626", "0.52323365", "0.5225857", "0.52167684", "0.52084255", "0.5194301", "0.519184", "0.5182236", "0.51480824", "0.51450497", "0.5136876", "0.51272637", "0.51178116", "0.5100906", "0.50889957", "0.5088411", "0.5070379", "0.5061832", "0.50561666", "0.50447875", "0.50437504", "0.5043106", "0.50335264", "0.50330836", "0.5025612", "0.50092006", "0.50024694", "0.5002139", "0.49815658", "0.49794456", "0.49768728", "0.4963746", "0.49633008", "0.49554422", "0.49542764", "0.49541003", "0.49497956", "0.49464715", "0.49449584", "0.49413234", "0.4934992", "0.49258384", "0.4921541", "0.49127287", "0.49074075", "0.49052453", "0.49007338", "0.48979032", "0.4893507", "0.48917785", "0.48885408", "0.4882246", "0.48693347", "0.48583585", "0.48516083", "0.4851417", "0.48477596", "0.4839788", "0.4834177", "0.48330757", "0.48329118", "0.48298007", "0.48174468", "0.48149022" ]
0.6160066
0
Align the input columns, filling the missing columns with None.

Examples
>>> merge_columns(
...     ["a", "b", "ba", "d", "f"],
...     ["e", "c", "d", "g", "ga", "a"]
... )
(('a', 'a'), ('b', None), ('ba', None), (None, 'c'), ('d', 'd'), (None, 'e'), ('f', None), (None, 'g'), (None, 'ga'))
def merge_columns(self_columns, other_columns):
    sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns)
    self_idx = other_idx = 0
    self_len, other_len = len(self_columns), len(other_columns)
    while self_idx < self_len and other_idx < other_len:
        curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx]
        if curr_self_column == curr_other_column:
            yield curr_self_column, curr_other_column
            self_idx += 1
            other_idx += 1
        elif curr_self_column < curr_other_column:
            yield curr_self_column, None
            self_idx += 1
        else:
            yield None, curr_other_column
            other_idx += 1
    while self_idx < self_len:
        yield sorted_self_columns[self_idx], None
        self_idx += 1
    while other_idx < other_len:
        yield None, sorted_other_columns[other_idx]
        other_idx += 1
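A minimal usage sketch, assuming plain Python 3 and the merge_columns generator defined above: because the function yields pairs lazily, a caller typically materializes it with list().

# Consuming the generator yields aligned (self, other) pairs in sorted order,
# with None filling the side that lacks a given column name.
pairs = list(merge_columns(["a", "b", "ba", "d", "f"],
                           ["e", "c", "d", "g", "ga", "a"]))
# pairs == [('a', 'a'), ('b', None), ('ba', None), (None, 'c'), ('d', 'd'),
#           (None, 'e'), ('f', None), (None, 'g'), (None, 'ga')]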
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_columns(allowed_columns):\n\n v_columns = [v for v in allowed_columns if v in df.columns]\n v_columns.sort()\n for i in range(1, len(v_columns)):\n df[v_columns[0]] = df[v_columns[0]].fillna(df[v_columns[i]])\n df.drop(v_columns[i], 1, inplace=True)\n return v_columns[0]", "def merge_columns(ingredients, rows, merge=None):\n if merge is None or len(merge) == 0:\n return ingredients, rows\n merge = Merge(merge, ingredients)\n new_rows = list(merge.merge_rows(rows))\n new_ingredients = merge.merge_ingredients(ingredients)\n return new_ingredients, new_rows", "def merge_columns(df, column_names, output):\n df = df.withColumn(output, f.concat_ws(\" \", *column_names))\n df = normalize_nulls(df, output)\n return df.drop(*column_names)", "def concatenate_columns(df, summary_column: str, merge_columns: list, sep=', ', drop_merge_columns=True):\n \n # create summary column if not exist\n if summary_column not in df.columns:\n df[summary_column] = np.nan\n\n df['separator_symbol'] = sep\n merge_columns = [column for column in merge_columns if column in df.columns]\n \n if not merge_columns:\n return df\n\n for column in merge_columns:\n # value in summary column is empty\n mask_summary_note_empty = df[summary_column].isna()\n # value in current column is empty\n mask_current_note_empty = df[column].isna()\n \"\"\"if value in summary column is empty take value from column to add (if it's not nan)\n if value in column to add is empty take value from summary note column (if it's not nan)\n if both values are empty use nan value\n if both values in summary and current columns exist then cancatenate them\"\"\"\n df[summary_column] = np.select(\n [mask_summary_note_empty, mask_current_note_empty, mask_summary_note_empty & mask_current_note_empty],\n [df[column], df[summary_column], np.nan],\n default=df[summary_column] + df['separator_symbol'] + df[column])\n # drop merge_columns\n if drop_merge_columns:\n df.drop(columns=merge_columns, inplace=True)\n df.drop(columns='separator_symbol', inplace=True)\n return df", "def dataframe_fillna(left_df, right_df, join_lst, filled_lst, remove_duplicates=True, drop_na=True):\n\n # add missing columns to left_df from filled_lst if required\n left_df_columns_lst = left_df.columns.to_list()\n add_columns_lst = [column for column in filled_lst if column not in left_df_columns_lst]\n if add_columns_lst:\n left_df = left_df.reindex(columns = [*left_df_columns_lst, *add_columns_lst])\n\n # cut off unnecessary columns from right DataFrame\n right_join_df = right_df.loc[:, join_lst + filled_lst].copy()\n # drop rows with null values in columns to join on\n if drop_na:\n right_join_df.dropna(subset = join_lst, inplace = True)\n # if required (deafult) drop duplicates values from join columns \n # to avoid rows duplication in left DataDrame\n if remove_duplicates:\n right_join_df.drop_duplicates(subset=join_lst, inplace = True)\n # rename columns with filled values for right DataFrame\n filled_join_lst = [name+'_join' for name in filled_lst]\n right_join_df.rename(columns = dict(zip(filled_lst, filled_join_lst)), inplace = True)\n # left join left and right DataFrames on join_lst columns\n left_df = left_df.merge(right_join_df, how = 'left', on = join_lst)\n # for each columns pair (w/o (null values) and w _join prefix (filled values)\n for filled_name, filled_join_name in zip(filled_lst, filled_join_lst):\n # copy values from right DataFrame column to left DataFrame if left value ios null \n left_df[filled_name].fillna(left_df[filled_join_name], inplace = 
True)\n # drop column with _join prefix\n left_df.drop(columns = [filled_join_name], inplace = True)\n return left_df", "def _resolve_join_columns(left_table, right_table, indices):\n return [ merge_columns(left_table.columns[li], right_table.columns[ri]) for li, ri in indices ]", "def merge_columns(df, summary_column: str, merge_columns: list, sep=', ', drop_merge_columns=True, sort_summary=False):\n \n df.reset_index(drop=True, inplace=True)\n merge_columns = [column for column in merge_columns if column in df.columns]\n if not merge_columns:\n return df\n df[summary_column] = df[merge_columns].stack().groupby(level=0).agg(sep.join)\n # drop merge_columns\n if drop_merge_columns:\n drop_columns = [column for column in merge_columns if column != summary_column]\n df.drop(columns=drop_columns, inplace=True)\n\n if sort_summary:\n sort_cell_values(df, summary_column, sep=sep)\n return df", "def merge_tables(tables):\n base = tables[0]\n for table in tables[1:]:\n for row_index, row in enumerate(table):\n # Chop off duplicate leftmost column\n base[row_index] += row[1:]\n return base", "def merge_columns(column_specs, kwargs):\n for spec in column_specs:\n column = column_specs[spec]\n try:\n if column not in kwargs:\n kwargs[column] = column\n except TypeError:\n if not any(item in kwargs for item in column):\n for item in column:\n kwargs[item] = item\n return kwargs", "def map_column_indexes(self, merge_specification, ingredients):\n last_column = len(ingredients) - 1\n accumulating = {}\n remove = set()\n # default behavior, no column merge\n for column_index in range(0, last_column + 1):\n self.column_index_to_columns[column_index] \\\n = [(column_index, 1.0)]\n \n for columns in merge_specification:\n accumulating_column = columns[0][0]\n if accumulating_column > last_column or accumulating_column < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % accumulating_column)\n # specifies which columns should be merged into this one\n accumulating[accumulating_column] = columns\n for column_index, _ in columns[1:]:\n column_index = column_index\n if column_index > last_column or column_index < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % column_index) \n # drop this column; it will be merged into another\n remove.add(column_index)\n \n # drop columns first so that any columns both specified as\n # accumulating *and* merged columns do not get dropped\n for column_index in remove:\n self.column_index_to_columns[column_index] = None\n \n for column_index, columns in accumulating.items():\n self.column_index_to_columns[column_index] = columns", "def _join_on_col_with_na(left, right, col_name):\n tmp_col_name = \"tmp.\" + col_name\n left[tmp_col_name] = left[col_name].astype(\"O\")\n merged = pd.merge(\n left, right, left_on=tmp_col_name, right_index=True, how=\"left\"\n )\n del merged[tmp_col_name]\n return merged", "def merge_df_columns(dlist):\n df = concat(dlist, axis=1, join='outer', sort=False)\n df = droprows(df)\n\n return df", "def FE_concatenate_multiple_columns(df, cols, filler=\" \", drop=True):\r\n df = df.copy(deep=True)\r\n df['combined'] = df[cols].apply(lambda row: filler.join(row.values.astype(str)), axis=1)\r\n if drop:\r\n df.drop(cols, axis=1, inplace=True)\r\n return df", "def add_missing_columns(df, columns):\r\n df_columns = list(df.columns)\r\n table_columns = columns\r\n col_not_in_df = set(table_columns) - set(df_columns)\r\n # print(f' missing columns from df : {col_not_in_df}')\r\n for col in col_not_in_df:\r\n 
df[col] = ''\r\n df = df[table_columns]\r\n # print(f' added missing columns to df')\r\n # print(f' final df col length : {len(df.columns)}')\r\n return df", "def dim_col_imputation(self, columns):\n for column in columns:\n if column in self.col_with_nulls:\n if not self._pandas_flag:\n mode = self.data_frame.select(column).toPandas().mode().values[0][0]\n self.data_frame = self.data_frame.fillna({ column:mode })\n else:\n self.data_frame[column] = self.mode_impute(self.data_frame[column])\n self.data_change_dict['ModeImputeCols'].append(column)", "def _perform_non_overlapping_column_merge(\n dst_df: pd.DataFrame,\n src_df_no_overlap: pd.DataFrame,\n merge_info: Dict,\n dst_key: str,\n src_key: str,\n) -> pd.DataFrame:\n # Step A. Perform the merge of non-overlapping columns\n new_df = dst_df\n if len(src_df_no_overlap.columns) > 1:\n new_df = pd.merge(\n new_df,\n src_df_no_overlap,\n how=merge_info['how_merge'],\n left_on=dst_key,\n right_on=src_key)\n\n # VERY special case: The key used for the merge in src_df can have an\n # identical column in dst_df, but it is not the one used for the\n # merge. For example: DST has columns C1(key), C2, C3, SRC has\n # columns C2(key) and C4. The merge is done matching C1 in DST with\n # C2 in SRC, but this will produce two columns C2_x and C2_y. In this\n # case we drop C2_y because C2_x has been properly updated with the\n # values from C2_y in the previous step (Step A).\n if src_key != dst_key and src_key in dst_df.columns:\n # Drop column_y\n new_df.drop([src_key + '_y'], axis=1, inplace=True)\n # Rename column_x\n new_df = new_df.rename(columns={src_key + '_x': src_key})\n\n return new_df", "def add_data_from_columns_into_rows(columns: list, fixed_rows: list):\n for column in range(len(max(columns))):\n for row in range(len(columns)):\n try:\n fixed_rows[column].append(columns[row][column])\n except IndexError:\n fixed_rows[column].append('')\n return fixed_rows", "def _align(self, columns, widths, alignments):\n aligned_columns = []\n\n for column, width, alignment in zip(columns, widths, alignments):\n aligned_column = []\n\n for item in column:\n # add padding to the actual column width\n total_width = width + self.padding\n\n # build formatstring depending on alignment\n if alignment == 'l':\n format_str = '{{:<{}}}'.format(total_width)\n\n elif alignment == 'r':\n format_str = '{{:>{}}}'.format(total_width)\n\n elif alignment == 'c':\n format_str = '{{:^{}}}'.format(total_width)\n\n else:\n raise RuntimeError('Wrong alignment string')\n\n aligned_item = format_str.format(item)\n aligned_column.append(aligned_item)\n\n aligned_columns.append(aligned_column)\n\n return aligned_columns", "def _binary_op_on_different_indices(self, other, func, axis): # TODO: add axis check\n\n def merge_columns(self_columns, other_columns):\n \"\"\"\n Align the input columns, filling the missing columns with None\n --------\n \n Examples\n --------\n >>> merge_columns(\n ... [\"a\", \"b\", \"ba\", \"d\", \"f\"],\n ... [\"e\", \"c\", \"d\", \"g\", \"ga\", \"a\"]\n ... 
)\n (('a','a'),('b',None),('ba',None),(None,c),('d','d'),(None,'e'),('f',None),(None,'g'),(None,'ga'))\n \"\"\"\n sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns)\n self_idx = other_idx = 0\n self_len, other_len = len(self_columns), len(other_columns)\n while self_idx < self_len and other_idx < other_len:\n curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx]\n if curr_self_column == curr_other_column:\n yield curr_self_column, curr_other_column\n self_idx += 1\n other_idx += 1\n elif curr_self_column < curr_other_column:\n yield curr_self_column, None\n self_idx += 1\n else:\n yield None, curr_other_column\n other_idx += 1\n while self_idx < self_len:\n yield sorted_self_columns[self_idx], None\n self_idx += 1\n while other_idx < other_len:\n yield None, sorted_other_columns[other_idx]\n other_idx += 1\n\n assert isinstance(self, _Frame)\n assert isinstance(other, _Frame)\n if ((not self._in_memory and len(self._index_columns) == 0)\n or (not other._in_memory and len(other._index_columns) == 0)):\n raise ValueError(\"Frame has no default index if it is not in memory\")\n session = self._session\n self_var_name, other_var_name = self._var_name, other._var_name\n if other._is_dataframe_like:\n self_data_columns = self._data_columns\n other_data_columns = other._data_columns\n index_list, from_clause = _generate_joiner(\n self_var_name, other_var_name, self._index_columns, other._index_columns)\n if self_data_columns == other_data_columns:\n select_list = (f\"{func}({self_var_name}.{c}, {other_var_name}.{c}) as {c}\"\n for c in self_data_columns)\n data_columns = self_data_columns\n else:\n merged_columns = list(merge_columns(self_data_columns, other_data_columns))\n select_list = (f\"00f as {s if o is None else o}\" if s is None or o is None\n else f\"{func}({self_var_name}.{s}, {other_var_name}.{s}) as {s}\"\n for s, o in merged_columns)\n data_columns = [s if o is None else o for s, o in merged_columns]\n select_list = itertools.chain(index_list, select_list)\n script = sql_select(select_list, from_clause)\n elif other._is_series_like:\n self_data_columns = self._data_columns\n other_data_column = other._data_columns[0]\n index_list, from_clause = _generate_joiner(\n self._var_name, other._var_name, self._index_columns, other._index_columns)\n select_list = (f\"{func}({self_var_name}.{c}, {other_var_name}.{other_data_column}) as {c}\"\n for c in self_data_columns)\n data_columns = self_data_columns\n select_list = itertools.chain(index_list, select_list)\n script = sql_select(select_list, from_clause)\n return self._get_from_script(\n session, script, data_columns=data_columns, index_map=self._index_map, index=self._index)", "def setOptionalColumns(self, colnames):\n # Make sure all column names are lower case so comparisons in _TableRow\n # are not case sensitive. 
From a modularity standpoint, this should be\n # done in _TableRow, but it is more efficient to do it here, since the\n # conversion need be done only once.\n if colnames == [0]:\n self.optional_cols = colnames\n else:\n self.optional_cols = [colname.lower() for colname in colnames]", "def _add_missing_cols(user_list, fields=None):\n new_list = []\n required_cols = ['type', 'id', 'view_href', 'login']\n\n # Add any defined fields to the list of required columns\n if fields and fields != '*':\n parsed_fields = fields.split(',')\n for field in parsed_fields:\n if field not in required_cols:\n required_cols.append(field)\n\n # Loop through the messages and add any missing columns\n for user in user_list:\n for col in required_cols:\n if col not in user:\n user[col] = ''\n new_list.append(user)\n return new_list", "def rmerge(*colls):\n if isinstance(colls, tuple) and len(colls) == 1:\n # A squeeze operation since merge_with generates tuple(list_of_objs,)\n colls = colls[0]\n if all(is_mapping, colls):\n # Merges all the collections, recursively applies merging to the combined values\n return merge_with(rmerge, *colls)\n else:\n # If colls does not contain mappings, simply pick the last one\n return last(colls)", "def _validate_merge_col_exists(self):\n msg = (\"Cannot hybridize: merge column {!r} missing from the \"\n \"{} meta data! ({!r})\")\n\n mc = ColNameFormatter.fmt(MERGE_COLUMN)\n for cols, fp, res in zip([self.__solar_cols, self.__wind_cols],\n [self.solar_fpath, self.wind_fpath],\n ['solar', 'wind']):\n if mc not in cols:\n e = msg.format(MERGE_COLUMN, res, fp)\n logger.error(e)\n raise FileInputError(e)", "def pad_table(table, min_width=0, extra_pad=0):\n longest = []\n most_cols = 0\n for row in table:\n # naively assumes we're always passing in collections and not a string\n most_cols = max(len(row), most_cols)\n num = 0\n for row in table:\n if len(row) != most_cols:\n continue\n col_length = []\n for col in row:\n col_length.append(len(col))\n if not longest:\n longest = col_length\n num = len(col_length)\n else:\n for i in range(num):\n a = longest[i]\n b = col_length[i]\n if b > a:\n longest[i] = b\n # pad step\n for ri, row in enumerate(table):\n last_col = find_last_valid_col(row)\n for i, col in enumerate(row):\n # do not pad last column in each row as it makes reports format funny\n if i > last_col:\n continue\n if i == last_col:\n # trim off any space\n row[i] = col.strip()\n continue\n pad = longest[i]\n row[i] = \"%-*s\" % (max(pad + extra_pad, min_width), col)\n table[ri] = row", "def columnize(columns, max_lens, widths, sep=4*' '):\n \n padded_columns = []\n \n for _str, max_len, width in zip(columns, max_lens, widths):\n length_diff = max_len - len(str(_str))\n\n padded_column = ' ' * length_diff\n padded_column += str(_str)\n padded_column = padded_column.center(width)\n\n padded_columns.append(padded_column)\n \n return sep.join(padded_columns)", "def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def coalesce_tables(tables):\n ## For 
each table, we can:\n ## Process the names in the first row, as column names\n ## If we have a \"quantity\" column, convert this from euro style to float\n\n ## If the column names are the same, we can append one to the other.\n \n proc_tables = OrderedDict()\n most_recent_key = None\n for tn,t in enumerate(tables):\n for i, r in enumerate(t):\n ##print(f\"Table {tn}, Row number {i}\")\n col_accessors = [str(x) for x in range(len(r))]\n ## Get the processed row names\n if i == 0: \n cnames = {}\n for c in col_accessors:\n cnames[c] = r[c].lower().strip().replace(\" \", \"\")\n continue\n ## Now, cnames was defined from iteration i==0\n rec = {}\n for c in col_accessors:\n rec[cnames[c]] = r[c]\n\n fixweight = lambda x: float(x.replace(\",\", \".\"))\n \n \n if 'netweight' in rec.keys():\n if rec['netweight'] is not None:\n rec['netweight'] = fixweight(rec['netweight'])\n\n if rec['no.'] is not None:\n ## new record\n most_recent_key = rec['no.']\n proc_tables[most_recent_key] = rec\n else:\n ## append the description to previous\n if rec['description'] is not None:\n proc_tables[most_recent_key]['description'] = proc_tables[most_recent_key]['description'] + \" \" + rec['description']\n\n\n return(list(proc_tables.values()))", "def fillna_mode(data, columns, verbose=True):\n for col in columns:\n fill_val = data[col].mode()[0]\n if verbose: print('Filling ' + col + ' with: ' + fill_val)\n data[col].fillna(fill_val, inplace=True)", "def test_columns_set_to_all_columns_when_none(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=None)\n\n x.columns_set_or_check(X=df)\n\n h.assert_equal_dispatch(\n expected=list(df.columns.values),\n actual=x.columns,\n msg=\"x.columns set when None\",\n )", "def _join_columns(left_table, right_table, join_indices):\n\n join_columns = _resolve_join_columns(left_table, right_table, join_indices)\n nonjoin_columns = _resolve_nonjoin_columns(left_table, right_table, join_indices)\n\n join_result_columns = join_columns + nonjoin_columns\n LOG.debug('Resolved join result column names as [{0}]'.format(\n ', '.join([repr(c) for c in join_result_columns])))\n\n return join_result_columns", "def _add_necessary_columns(args, custom_columns):\n # we need to add the variant's chrom, start and gene if \n # not already there.\n if custom_columns.find(\"gene\") < 0:\n custom_columns += \", gene\"\n if custom_columns.find(\"start\") < 0:\n custom_columns += \", start\"\n \n return custom_columns", "def _fillna_meta_cols(self):\n for col_name, fill_value in self._fillna.items():\n if col_name in self._hybrid_meta.columns:\n self._hybrid_meta[col_name].fillna(fill_value, inplace=True)\n else:\n self.__warn_missing_col(col_name, action='fill')\n\n self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)\n self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)", "def date_col_imputation(self, columns):\n for column in columns:\n if column in self.col_with_nulls:\n if not self._pandas_flag:\n mode = self.data_frame.select(column).toPandas().mode().values[0][0]\n self.data_frame = self.data_frame.fillna({ column:mode })\n else:\n self.data_frame[column] = self.mode_impute(self.data_frame[column])\n self.data_change_dict['ModeImputeCols'].append(column)", "def test_merge_columns1(self, a=a, b=b, columns=columns):\t\n\n\t\t# Solution: Right Dominant #\n\n\t\t# - Settings - #\n\t\ton \t\t\t\t\t= list(a.columns[0:3])\n\t\tcollapse_columns \t= ('value_x', 'value_y', 'value')\n\t\tdominant \t\t\t= 'right'\t\n\n\t\t# - Diagnostic Return - #\n\n\t\tsol1_columns = 
['iso3c', 'sitc4', 'year', 'value_x', 'quantity_x', 'value_y', 'quantity_y', 'value']\n\n\t\tsol1 = \t[\n\t\t\t\t\t['AFG', '0011', 1970, np.nan, \tnp.nan, \t300, \t\tnp.nan, \t300], \t\\\n\t\t\t\t\t['ZWE', '0012', 1970, 300, \t\tnp.nan, \tnp.nan,\t\t1, \t\t\t300],\t\\\n\t\t\t\t\t['USA', '0011', 1970, 500, \t\t5,\t\t\t750, \t\t2, \t\t\t750], \t\\\n\t\t\t\t\t['USA', '0012', 1970, 1000, \t10, \t\tnp.nan, \tnp.nan, \t1000],\t\\\n\t\t\t\t\t['USA', '0013', 1970, np.nan, \tnp.nan, \t5000, \t\tnp.nan, \t5000],\t\\\n\t\t\t\t\t]\n\t\tR1 = pd.DataFrame(sol1, columns=sol1_columns)\n\n\t\t# Usual Return #\n\n\t\tsol2 = \t[\n\t\t\t\t\t['AFG', '0011', 1970, 300, np.nan], \t\\\n\t\t\t\t\t['ZWE', '0012', 1970, 300, np.nan],\t\t\\\n\t\t\t\t\t['USA', '0011', 1970, 750, 5], \t\t\t\\\n\t\t\t\t\t['USA', '0012', 1970, 1000, 10],\t\t\\\n\t\t\t\t\t['USA', '0013', 1970, 5000, np.nan]\t\t\\\n\t\t\t\t\t]\n\n\t\tR2 = pd.DataFrame(sol2, columns=columns)\n\n\t\tcomputed = merge_columns(a,b, on=on, collapse_columns=collapse_columns, dominant=dominant, verbose=False)\n\n\t\tassert_series_equal(computed['value'], R1['value'], check_dtype=False)", "def _combine_omnipage_cell_list(table, inds, row_flag):\n if row_flag:\n row_or_col_list = [table[i, :] for i in inds]\n else:\n row_or_col_list = [table[:, i] for i in inds]\n return [' '.join(_unique_sorted([str(k) for k in j])).strip()\n for j in zip(*row_or_col_list)]", "def attach_columns(self, left_col, right_col):\r\n assert len(left_col) == len(right_col)\r\n for (left, right) in zip(left_col, right_col):\r\n left.set_right(right)\r\n right.set_left(left)", "def MergeValues(self, join_source, num_columns=1):\n assert len(self.rows) == len(join_source.rows)\n\n for r, row in enumerate(self.rows):\n self.rows[r] = row + join_source.rows[r][0:num_columns]\n\n return self", "def _column_fields_to_columns(fields, organization):\n if fields is None:\n return None\n\n col_fields = [] # Container for the strings of the column_names\n if isinstance(fields, list):\n col_fields.extend(fields)\n else:\n col_fields = [fields]\n\n cols = [] # Container for our Column instances.\n\n # It'd be nice if we could do this in a batch.\n for col_name in col_fields:\n if not col_name:\n continue\n\n col = None\n\n is_extra_data = col_name not in get_mappable_columns()\n org_col = Column.objects.filter(\n organization=organization,\n column_name=col_name,\n is_extra_data=is_extra_data\n ).first()\n\n if org_col is not None:\n col = org_col\n\n else:\n # Try for \"global\" column definitions, e.g. 
BEDES.\n global_col = Column.objects.filter(\n organization=None,\n column_name=col_name\n ).first()\n\n if global_col is not None:\n # create organization mapped column\n global_col.pk = None\n global_col.id = None\n global_col.organization = organization\n global_col.save()\n\n col = global_col\n\n else:\n col, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=col_name,\n is_extra_data=is_extra_data,\n )\n\n cols.append(col)\n\n return cols", "def izip_fill(*iterables, **kw):\n iterables = map(iter, iterables)\n default = kw.pop('default', None)\n if kw:\n raise TypeError(\"unrecognized keyword arguments\")\n columns = len(iterables)\n columns_range = range(columns)\n while True:\n found_data = False\n row = [None] * columns\n for i in columns_range:\n try:\n row[i] = iterables[i].next()\n found_data = True\n except StopIteration:\n row[i] = default\n if not found_data:\n break\n yield tuple(row)", "def anti_join_all_cols(x, y):\r\n assert set(x.columns.values) == set(y.columns.values)\r\n return anti_join(x, y, x.columns.tolist())", "def up_merge(data1, data2):\r\n data1['TABLE_up'] = data1['TABLE'].str.upper()\r\n data1['NAME_up'] = data1['COLUMN'].str.upper()\r\n data2['TABLE_up'] = data2['TABLE'].str.upper()\r\n data2['NAME_up'] = data2['COLUMN'].str.upper()\r\n strp_columns = list(data2)\r\n strp_columns.remove('TABLE')\r\n strp_columns.remove('COLUMN')\r\n data2 = data2[strp_columns]\r\n fin_data = pd.merge(data1, data2, how='left',\r\n left_on=['TABLE_up', 'NAME_up'],\r\n right_on=['TABLE_up', 'NAME_up'])\r\n fin_list = list(fin_data)\r\n fin_list.remove('TABLE_up')\r\n fin_list.remove('NAME_up')\r\n fin_data = fin_data[fin_list]\r\n return fin_data", "def _merge_fields(a: Field, b: Field) -> Optional[Field]:\n\n # Merge the types:\n merged_type: Optional[FieldType] = None\n\n # Constant fields can be merged with any other type. 
To make type merging easier, swap a and b if b is\n # constant.\n if b.type is FieldType.CONST:\n a, b = b, a\n\n # Constant fields can be merged with any other type without losing semantics.\n if a.type is FieldType.CONST:\n merged_type = b.type\n\n # Two fields of type multiplexer or value can be merged, but semantics are potentially lost, thus the type\n # is reduced to unknown.\n if a.type is b.type and a.type in [ FieldType.MULTIPLEXER, FieldType.VALUE ]:\n merged_type = FieldType.UNKNOWN\n\n # If a merged type was not found at this point, abort.\n if merged_type is None:\n return None\n\n # Merge the size:\n merged_size = a.size + b.size\n\n # Merge anchors and endianness:\n merged_lsb_anchor = None\n merged_msb_anchor = None\n merged_endianness = None\n\n # Check which bytes are affected by the fields\n affected_bytes_a = _get_affected_bytes(a)\n affected_bytes_b = _get_affected_bytes(b)\n affected_bytes_both = affected_bytes_a & affected_bytes_b\n affected_bytes_any = affected_bytes_a | affected_bytes_b\n\n # Fields may have at most one affected byte in common, otherwise they are guaranteed to overlap.\n if len(affected_bytes_both) > 1:\n return None\n\n # If no common byte is affected by both fields, the LSB of one must be the byte after the MSB of the\n # other.\n if len(affected_bytes_both) == 0:\n b_after_a = max(affected_bytes_a) + 1 == min(affected_bytes_b)\n a_after_b = max(affected_bytes_b) + 1 == min(affected_bytes_a)\n\n # If a common byte is affected by both fields, it must be the MSB of one and the LSB of the other.\n if len(affected_bytes_both) == 1:\n b_after_a = max(affected_bytes_a) == min(affected_bytes_b)\n a_after_b = max(affected_bytes_b) == min(affected_bytes_a)\n\n # Check whether the affected bytes follow the above rules, to rule out a byte-level overlap.\n if not (b_after_a or a_after_b):\n return None\n\n # Swap the variables so that b follows a.\n if a_after_b:\n affected_bytes_a, affected_bytes_b = affected_bytes_b, affected_bytes_a\n a, b = b, a\n\n # Not used after this point but better safe than sorry\n b_after_a, a_after_b = a_after_b, b_after_a\n\n # The next step is to rule out a bit-level overlap and to make sure that the fields are adjacent on the\n # bit-level too:\n # Check which bits are affected by a and b at the (potential) border between them\n affected_border_bits_a = _get_affected_bits(a, max(affected_bytes_a))\n affected_border_bits_b = _get_affected_bits(b, min(affected_bytes_b))\n\n # This is where endianness comes into play: unknown endianness can be merged with any other endianness,\n # while big can not be merged with little.\n current_endianness = { a.endianness, b.endianness }\n\n # Check whether a merged field with unknown endianness can be created:\n # - Both fields must be of unknown endianness\n # - Both fields must affect the same byte\n # - No other bytes must be affected (theoretically implied by being unknown in the first place)\n # - The affected bits must not overlap\n # - The affected bits must be adjacent\n if (\n current_endianness == { FieldEndianness.UNKNOWN } and\n len(affected_bytes_both) == 1 and\n len(affected_bytes_any) == 1 and\n len(affected_border_bits_a & affected_border_bits_b) == 0\n ):\n if max(affected_border_bits_a) + 1 == min(affected_border_bits_b):\n # The fields are adjacent and of unknown endianness; b follows a\n merged_lsb_anchor = a.lsb_anchor\n merged_msb_anchor = b.msb_anchor\n merged_endianness = FieldEndianness.UNKNOWN\n\n if max(affected_border_bits_b) + 1 == min(affected_border_bits_a):\n 
# The fields are adjacent and of unknown endianness; a follows b\n merged_lsb_anchor = b.lsb_anchor\n merged_msb_anchor = a.msb_anchor\n merged_endianness = FieldEndianness.UNKNOWN\n\n # Check whether a merged field with little endianness can be created:\n # - Both fields must be of unknown or little endianness\n # - Multiple bytes must be affected\n # - In case there is no commonly affected byte:\n # - Bit 7 of the MSB of a must be affected\n # - Bit 0 of the LSB of b must be affected\n # - In case there is a commonly affected byte:\n # - The affected bits must not overlap\n # - The most significant bit affected by a must be adjacent to the least significant bit affected by b\n if (\n current_endianness <= { FieldEndianness.LITTLE, FieldEndianness.UNKNOWN } and\n len(affected_bytes_any) > 1 and\n (\n (\n len(affected_bytes_both) == 0 and\n 7 in affected_border_bits_a and\n 0 in affected_border_bits_b\n ) or\n (\n len(affected_bytes_both) == 1 and\n len(affected_border_bits_a & affected_border_bits_b) == 0 and\n max(affected_border_bits_a) + 1 == min(affected_border_bits_b)\n )\n )\n ):\n merged_lsb_anchor = a.lsb_anchor\n merged_msb_anchor = b.msb_anchor\n merged_endianness = FieldEndianness.LITTLE\n\n # Check whether a merged field with big endianness can be created:\n # - Both fields must be of unknown or big endianness\n # - Multiple bytes must be affected\n # - In case there is no commonly affected byte:\n # - Bit 0 of the MSB of a must be affected\n # - Bit 7 of the LSB of b must be affected\n # - In case there is a commonly affected byte:\n # - The affected bits must not overlap\n # - The most significant bit affected by b must be adjacent to the least significant bit affected by a\n if (\n current_endianness <= { FieldEndianness.BIG, FieldEndianness.UNKNOWN } and\n len(affected_bytes_any) > 1 and\n (\n (\n len(affected_bytes_both) == 0 and\n 0 in affected_border_bits_a and\n 7 in affected_border_bits_b\n ) or\n (\n len(affected_bytes_both) == 1 and\n len(affected_border_bits_a & affected_border_bits_b) == 0 and\n max(affected_border_bits_b) + 1 == min(affected_border_bits_a)\n )\n )\n ):\n merged_lsb_anchor = b.lsb_anchor\n merged_msb_anchor = a.msb_anchor\n merged_endianness = FieldEndianness.BIG\n\n # Make sure that all properties could be merged.\n if (\n merged_lsb_anchor is None or\n merged_msb_anchor is None or\n merged_size is None or\n merged_endianness is None or\n merged_type is None\n ):\n return None\n\n return Field(\n lsb_anchor=merged_lsb_anchor,\n msb_anchor=merged_msb_anchor,\n size=merged_size,\n endianness=merged_endianness,\n type=merged_type\n )", "def _update_columns(self):\n self.columns, self.new_columns = self.new_columns, self.columns\n self.num_columns = self.num_new_columns\n self.num_new_columns = 0\n\n # Now update new_columns and mapping with the information for the commit\n # after this one.\n #\n # First, make sure we have enough room. At most, there will be\n # self.num_columns + self.num_parents columns for the next commit.\n max_new_columns = self.num_columns + self.num_parents\n\n # Clear out self.mapping\n self.mapping_size = 2 * max_new_columns\n for i in range(self.mapping_size):\n self.mapping[i] = -1\n\n # Populate self.new_columns and self.mapping\n #\n # Some of the parents of this commit may already be in self.columns. If\n # so, self.new_columns should only contain a single entry for each such\n # commit. 
self.mapping should contain information about where each\n # current branch line is supposed to end up after the collapsing is\n # performed.\n seen_this = False\n mapping_idx = 0\n is_commit_in_columns = True\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n is_commit_in_columns = False\n col_commit = self.commit\n else:\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n old_mapping_idx = mapping_idx\n seen_this = True\n self.commit_index = i\n for parent in self._interesting_parents():\n # If this is a merge, or the start of a new childless\n # column, increment the current color.\n if self.num_parents > 1 or not is_commit_in_columns:\n self._increment_column_color()\n mapping_idx = self._insert_into_new_columns(\n parent,\n mapping_idx)\n # We always need to increment mapping_idx by at least 2, even if\n # it has no interesting parents. The current commit always takes\n # up at least 2 spaces.\n if mapping_idx == old_mapping_idx:\n mapping_idx += 2\n else:\n mapping_idx = self._insert_into_new_columns(col_commit,\n mapping_idx)\n\n # Shrink mapping_size to be the minimum necessary\n while (self.mapping_size > 1 and\n self.mapping[self.mapping_size - 1] < 0):\n self.mapping_size -= 1\n\n # Compute self.width for this commit\n self._update_width(is_commit_in_columns)", "def complete_columns(training_df, valid_df):\n for c in valid_df.columns:\n if c not in training_df.columns:\n training_df[c] = 0\n for c in training_df.columns:\n if c not in valid_df.columns:\n valid_df[c] = 0\n return training_df, valid_df", "def remove_empty_columns(aln, enforce_codon=False):\n\n ind = []\n seqs = aln.values()\n alnlen = aln.alignlen()\n\n if not enforce_codon:\n for i in range(alnlen):\n for seq in seqs:\n if seq[i] != \"-\":\n ind.append(i)\n break\n else:\n if alnlen % 3 != 0:\n raise Exception(\n \"cannot set enforce_codon if alignment length \"\n \"is not a multiple of three\")\n\n for i in range(0, alnlen, 3):\n for seq in seqs:\n if seq[i:i+3] != \"---\":\n ind.extend([i, i+1, i+2])\n break\n\n return subalign(aln, ind)", "def add_cdm_missing_columns(self, all_merged_obs):\n #cdm_keys = self.obs_table_columns \n nan_array = np.empty( all_merged_obs['observed_variable'].shape )\n nan_array[:] = np.nan\n for k in self.obs_table_columns:\n if k not in list(all_merged_obs.columns ):\n logging.debug('Adding missing cdm colum with empty values: %s' , k )\n all_merged_obs[k] = ( nan_array )\n \n return all_merged_obs", "def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)", "def merge_up(lsts):\r\n lsts1 = transpose(lsts)\r\n lsts2 = merge_AllLeft(lsts1)\r\n lsts3 = shift(lsts2)\r\n lsts4 = transpose(lsts3)\r\n lsts = lsts4\r\n\r\n return lsts", "def _propagate_duplicate_cols(self, duplicate_cols):\n for duplicate in duplicate_cols:\n no_suffix = \"_\".join(duplicate.split(\"_\")[:-1])\n null_idx = self._hybrid_meta[no_suffix].isnull()\n non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values\n self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals", "def concat(column_based_table_1: dict[str, list[str]], column_based_table_2: dict[str, list[str]]) -> dict[str, list[str]]:\n combined_data_table: dict[str, list[str]] = {}\n for column in column_based_table_1:\n 
combined_data_table[column] = column_based_table_1[column]\n keys_list = list(combined_data_table.keys())\n for column in column_based_table_2:\n if column in keys_list:\n column_data = combined_data_table[column]\n column_data_2 = column_based_table_2[column]\n # append to list\n for item in column_data_2:\n column_data.append(item)\n combined_data_table[column] = column_data\n else:\n combined_data_table[column] = column_based_table_2[column]\n return combined_data_table", "def anonymize_cols(_pddf=None, columns=None):\n if not isinstance(_pddf, pd.DataFrame):\n print ('_pddf debe ser una instancia de Pandas.DataFrame')\n return None\n if not isinstance(columns, list):\n print ('columns debe ser una instancia de LIST.')\n return None\n headers_count = len(columns)\n for col in columns:\n try:\n _pddf[col] = _pddf[col].apply(lambda x: generate_unique_id(x))\n headers_count -= 1\n except Exception as e:\n print (e)\n print ('Fallo el procesamiento de la columna:\\\"{}\\\", err: NOT-FOUND.'.format(col))\n if headers_count > 0:\n print ('No fue posible procesar todas las columnas')\n return _pddf", "def impute(self, columns, method='median', all_null='raise'):\n # Ensure all_null is one of the valid choices.\n allowed = {'drop', 'raise', 'ignore'}\n if all_null not in allowed:\n raise ValueError(\n 'all_null must be one of: %s' % ', '.join(allowed))\n\n self.verify_columns_in_dataset(columns)\n\n # If all_null='raise', check all columns first to avoid side effects.\n if all_null == 'raise':\n for col in columns:\n if self.train_column_is_all_null(col):\n raise ValueError(\"all null column '%s'\" % col)\n\n for col in columns:\n if self.train_column_is_all_null(col):\n if all_null == 'drop':\n self.remove_feature(col)\n logging.info(\"all null column '%s' was dropped\" % col)\n continue\n # Already checked all_null == 'raise'\n else:\n logging.info(\"all null column '%s' ignored\" % col)\n\n # Compute fill value and fill all NaN values.\n train_column = self.train[col]\n fill_value = getattr(train_column, method)()\n self.train.loc[:, col] = train_column.fillna(fill_value)\n self.test.loc[:, col] = self.test[col].fillna(fill_value)\n\n # Store fill_value imputed.\n self.imputations[col] = fill_value", "def merge(*iterables):\n return map(None, _IMerge(iterables))", "def nullable(self):\n _columns = []\n if not isinstance(self._last_column, list):\n _columns = [self._last_column]\n\n for column in _columns:\n column.nullable()\n return self", "def flatmap(\n self,\n arg: Union[Dict, Callable],\n na_action: Literal[\"ignore\", None] = None,\n dtype: Optional[dt.DType] = None,\n columns: Optional[List[str]] = None,\n ):\n if columns is None:\n return super().flatmap(arg, na_action, dtype)\n self._check_columns(columns)\n\n if len(columns) == 1:\n return self._field_data[columns[0]].flatmap(\n arg,\n na_action,\n dtype,\n )\n else:\n\n def func(x):\n return arg.get(x, None) if isinstance(arg, dict) else arg(x)\n\n dtype_ = dtype if dtype is not None else self._dtype\n cols = [self._field_data[n] for n in columns]\n res = Scope._EmptyColumn(dtype_)\n for i in range(len(self)):\n if self.valid(i):\n res._extend(func(*[col[i] for col in cols]))\n elif na_action is None:\n res._extend(func(None))\n else:\n res._append([])\n return res._finalize()", "def _rewrap_columns(self, unwrapped_columns, rev_dict=None):\n if not unwrapped_columns:\n return None # <- EXIT!\n\n if rev_dict:\n interface_dict = dict((old, new) for new, old in rev_dict.items())\n else:\n interface_dict = dict(self._interface)\n\n if 
isinstance(unwrapped_columns, str):\n return interface_dict[unwrapped_columns]\n return tuple(interface_dict[k] for k in unwrapped_columns)", "def padnone(iterable):\r\n return chain(iterable, repeat(None))", "def measure_col_imputation(self, columns):\n mean_impute_cols = []\n median_impute_cols = []\n for column in columns:\n if column in self.col_with_nulls:\n if not self._pandas_flag:\n mean = self.data_frame.agg({column: \"mean\"}).head()[0]\n self.data_frame = self.data_frame.fillna({ column:mean })\n else:\n self.data_frame[column] = self.mean_impute(self.data_frame[column])\n self.data_change_dict['MeanImputeCols'].append(column)", "def not_null(table_rows, col_name_list=[], col_num_list=[]):\n keys = col_name_list\n rst = True\n lst = []\n if(not keys): #key == [] or key == None\n keys = [table_rows[0].keys[x] for x in col_num_list]\n\n row_num = 0\n for row in table_rows:\n for key in keys:\n if(row.kv[key].strip() == \"\"):\n rst = False\n lst.append(\"(col:{0},row:{1})\".format(\n key, row_num\n ))\n row_num += 1\n return rst,\",\".join(lst)", "def build_column_mapping(raw_columns, dest_columns, previous_mapping=None, map_args=None,\n default_mappings=None, thresh=0):\n\n return MappingColumns(raw_columns, dest_columns, previous_mapping=previous_mapping,\n map_args=map_args, default_mappings=default_mappings,\n threshold=thresh).final_mappings", "def df_combine(array_df):\n import pandas as pd\n cols = []\n for i in range(len(a)):\n #print(i)\n if array_df[i].columns[0] in cols:\n array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]\n cols.append(array_df[i].columns[0])\n return pd.concat(a, axis=1, sort=True)", "def _unwrap_columns(self, columns, interface_dict=None):\n if not columns:\n return None # <- EXIT!\n\n if not interface_dict:\n interface_dict = dict((new, old) for old, new in self._interface)\n\n if isinstance(columns, str):\n return interface_dict[columns] # <- EXIT!\n\n unwrapped = (interface_dict[k] for k in columns)\n return tuple(x for x in unwrapped if x != None)", "def impute(self, columns, method='median', all_null='raise'):\n # Ensure all_null is one of the valid choices.\n allowed = {'drop', 'raise', 'ignore'}\n if all_null not in allowed:\n raise ValueError(\n 'all_null must be one of: %s' % ', '.join(allowed))\n\n self.verify_columns_in_dataset(columns)\n\n # If all_null='raise', check all columns first to avoid side effects.\n if all_null == 'raise':\n for col in columns:\n if self.column_is_all_null(col):\n raise ValueError(\"all null column '%s'\" % col)\n\n for col in columns:\n if self.column_is_all_null(col):\n if all_null == 'drop':\n self.remove_feature(col)\n logging.info(\"all null column '%s' was dropped\" % col)\n continue\n # Already checked all_null == 'raise'\n else:\n logging.info(\"all null column '%s' ignored\" % col)\n\n # Compute fill value and fill all NaN values.\n column = self.dataset[col]\n fill_value = getattr(column, method)()\n self.dataset[col] = column.fillna(fill_value)\n\n # Store fill_value imputed.\n self.imputations[col] = fill_value", "def fill_blanks_randomly(grid):\n for row in grid:\n for i in range(len(row)):\n if row[i] is None:\n row[i] = get_random_char()", "def _fill_missing_entries(df, combi_cols, static_cols, site_id_col):\n if df.empty:\n return df\n\n # Create a DataFrame with rows for all possible combinations of combi_cols.\n # This results in rows with NaNs being created in the DataFrame.\n unique_vals_from_combi_cols = [df[c].unique() for c in combi_cols]\n new_index = 
pd.MultiIndex.from_product(\n unique_vals_from_combi_cols, names=combi_cols\n )\n df = df.set_index(combi_cols).reindex(new_index).reset_index(level=combi_cols)\n\n # Fill the NaNs within the static columns for each wmo_id.\n filled_df = (\n df.groupby(site_id_col)[combi_cols + static_cols]\n .fillna(method=\"ffill\")\n .fillna(method=\"bfill\")\n )\n df = df.drop(columns=static_cols)\n df = df.merge(filled_df, on=combi_cols)\n\n # Fill the blend_time and forecast_reference_time columns.\n if \"forecast_period\" in df.columns:\n for col in [\"blend_time\", \"forecast_reference_time\"]:\n df[col] = df[\"time\"] - df[\"forecast_period\"]\n return df", "def _validate_optional_columns(data, optional_columns: Iterable[str]) -> List[str]:\n return [col for col in optional_columns if col in data.columns]", "def combine_ingredients(ingredients, columns_to_combine):\n return ingredients[columns_to_combine[0][0]]", "def merge_in(self, other, convert_to_string=True):\n assert isinstance(other, ExtendedAlignment)\n #_LOG.debug(\"Merging started ...\")\n if other.is_empty():\n return\n me = 0\n she = 0 # Assumption: alignments are female!\n me_len = self.get_length() if not self.is_empty() else 0\n she_len = other.get_length()\n insertion = -1\n\n merged_insertion_columns = 0\n\n ''' Add sequences from her to my alignment '''\n for f in other.fragments:\n self.fragments.add(f)\n if convert_to_string:\n self.from_string_to_bytearray()\n\n selfother = {}\n for k, v in other.items():\n # assert(k not in self,\n # \"Merging overlapping alignments not implemented\")\n if k not in self:\n selfother[k] = bytearray(v, encoding=\"utf8\")\n while True:\n ''' Check exit conditions'''\n if me == me_len and she == she_len:\n break\n\n ''' Check the 5 possible statuses between she and I '''\n if she != she_len and other.is_insertion_column(she):\n if me != me_len and self.is_insertion_column(me):\n ''' We both have a series of insertion columns'''\n start = me\n while(me != me_len and self.is_insertion_column(me) and\n she != she_len and other.is_insertion_column(she)):\n me += 1\n she += 1\n merged_insertion_columns += 1\n run = me - start\n self.col_labels[start:me] = list(range(\n insertion, insertion-run, -1))\n else:\n ''' Hers is a series of insertion columns'''\n start = she\n while she != she_len and other.is_insertion_column(she):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = list(range(\n insertion, insertion - run, -1))\n insertion -= run\n me += run\n me_len += run\n elif me != me_len and self.is_insertion_column(me):\n ''' Mine is a series of insertion column'''\n start = me\n while me != me_len and self.is_insertion_column(me):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n self.col_labels[start:me] = list(\n range(insertion, insertion-run, -1))\n insertion -= run\n elif(she == she_len or (me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n ''' My column is not present (i.e. was allgap) in the\n \"other\"'''\n start = me\n while(me < me_len and (she == she_len or me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n elif(me == me_len or (she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n ''' Her column is not present (i.e. 
was allgap) in \"me\"'''\n start = she\n while(she < she_len and (me == me_len or she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = other.col_labels[start:she]\n me += run\n me_len += run\n elif self.col_labels[me] == other.col_labels[she]:\n ''' A shared column'''\n while(me < me_len and she < she_len and\n self.col_labels[me] == other.col_labels[she]):\n she += 1\n me += 1\n else:\n raise \"hmmm, we thought this should be impossible? %d %d\" % (\n me, she)\n\n self.update(selfother)\n\n if convert_to_string:\n self.from_bytearray_to_string()\n #_LOG.debug(\"Merging finished ...\")\n\n return merged_insertion_columns", "def merge_extras(extras1, extras2):\n if not extras1:\n return extras2\n if not extras2:\n return extras1\n return tuple(sorted(set(extras1) | set(extras2)))", "def coalesce(*args):\n for arg in args:\n if arg is not None:\n return arg", "def combine_columns(nd_arrays):\n # Assertions\n assert isinstance(nd_arrays, list), \\\n 'Input must be a list'\n for i in nd_arrays:\n assert isinstance(i, np.ndarray), \\\n 'Each element in list must be a numpy ndarray'\n\n l_prev = len(nd_arrays[0])\n for i in range(1, len(nd_arrays)):\n l = len(nd_arrays[i])\n assert l == l_prev, \\\n 'Array at index ' + str(i) + ' is the wrong size. ' \\\n 'All arrays in input array must be the same length'\n l_prev = l\n # Functionality\n columns_combined = np.column_stack((nd_arrays))\n\n return columns_combined", "def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df", "def extend_dummy_columns(data, final_columns, category_features):\n # Columns that has integer values.\n category_int = set(['channels', 'delivery_method', 'fb_published',\n 'has_analytics', 'has_header', 'has_logo',\n 'show_map', 'user_type'])\n new_data = {}\n for final_col in final_columns:\n idx_cat_orig = [i for i in xrange(len(category_features))\n if category_features[i] == final_col[:len(category_features[i])]]\n if len(idx_cat_orig):\n # original feature name\n cat_orig = category_features[idx_cat_orig[0]]\n # value (in string) of the feature for this column\n val = final_col.replace(cat_orig, '')[1:]\n if cat_orig in category_int:\n if val.isdigit() and int(data[cat_orig]) == int(val):\n new_data[final_col] = 1\n else:\n new_data[final_col] = 0\n else:\n if str(data[cat_orig]) == str(val):\n new_data[final_col] = 1\n else:\n new_data[final_col] = 0\n else:\n new_data[final_col] = convert_to_float(data[final_col])\n return new_data # dictionary containing all values for our final columns.", "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def reorder_columns(data: pd.DataFrame):\n return data.reindex(all_columns, axis=1)", "def merge_rowdicts(\n list_of_rowdicts, 
psm_colnames_to_merge_multiple_values, joinchar=\"<|>\"\n):\n merged_d = {}\n fieldnames = []\n for rowdict in list_of_rowdicts:\n for k in rowdict.keys():\n if k not in fieldnames:\n fieldnames.append(k)\n for fieldname in fieldnames:\n values = []\n for d in list_of_rowdicts:\n if fieldname in d.keys():\n values.append(d[fieldname])\n if fieldname in psm_colnames_to_merge_multiple_values.keys():\n no_empty_values = [v for v in values if v != \"\"]\n values_as_floats = [float(value) for value in no_empty_values]\n\n if psm_colnames_to_merge_multiple_values[fieldname] == \"max_value\":\n merged_d[fieldname] = max(values_as_floats)\n\n elif psm_colnames_to_merge_multiple_values[fieldname] == \"min_value\":\n merged_d[fieldname] = min(values_as_floats)\n\n elif psm_colnames_to_merge_multiple_values[fieldname] == \"avg_value\":\n merged_d[fieldname] = sum(values_as_floats) / len(values_as_floats)\n\n elif psm_colnames_to_merge_multiple_values[fieldname] == \"most_frequent\":\n value_occurences = Counter(no_empty_values)\n most_common_value, most_occurences = value_occurences.most_common(1)[0]\n value_occurences_dict = dict(value_occurences)\n final_values = []\n for value in no_empty_values:\n if value in final_values:\n continue\n if value_occurences_dict[value] == most_occurences:\n final_values.append(value)\n merged_d[fieldname] = joinchar.join(final_values)\n\n else:\n if len(set(values)) == 1:\n merged_d[fieldname] = values[0]\n else:\n no_empty_values = [v for v in values if v != \"\"]\n if len(set(no_empty_values)) == 1:\n merged_d[fieldname] = no_empty_values[0]\n else:\n merged_d[fieldname] = joinchar.join(values)\n return merged_d", "def fill_col(col, x):\n col.append(x)\n return col", "def fillna_negtive1(df, target=None):\n if not target:\n target = ['price', 'image_top_1']\n for col in target:\n df[col] = df[col].fillna(-1)\n return None", "def _cols_if_none(X, self_cols):\n return X.columns.tolist() if not self_cols else self_cols", "def concatenate_columns(params: List[str]) -> str:\n convert_columns_to_string = [f'string({col})' for col in params]\n\n return f\"concat({','.join(convert_columns_to_string)})\"", "def distinct_cols_multi_table(table_list=[], remove_columns=['PNR_LOC','PNR_CRT_DTE'], output_format='sql'):\n alfa = \"abcdefghijklmnopqrstuvwxyz\"\n \n all_cols=set()\n all_cols_str=''\n \n for i, table in enumerate(table_list):\n cols=table.columns\n\n #find new cols (cols in B but not in A)\n different_cols = set(table.columns)-all_cols\n\n #remove additional removal cols\n different_cols = different_cols-set(remove_columns)\n\n #append table references\n if output_format == 'sql':\n ref_cols = ',{0}.'.format(alfa[i])+', {0}.'.join(different_cols).format(alfa[i])\n #append to full column list & str\n all_cols = all_cols.union(different_cols)\n all_cols_str = all_cols_str +\"\"+ ref_cols\n \n elif output_format=='pyspark':\n ref_cols = ','.join('{1}.{0}'.format(w,alfa[i]) for w in different_cols)\n #append to full column list & str\n all_cols = all_cols.union(different_cols)\n all_cols_str = all_cols_str +\",\"+ ref_cols\n else:\n print(\"please select output_format = ['pyspark','sql']\")\n \n\n # remove first comma\n if output_format == 'sql':\n return all_cols_str[1:]\n elif output_format=='pyspark':\n return all_cols_str[1:].split(',')", "def flag_epoch_null_cols(self, col_list: list):\n\n for wearable in self.wearables.values():\n for col in col_list:\n if col not in wearable.data.keys():\n raise KeyError(\"Col %s is not available for PID %s\" % (col, 
wearable.get_pid()))\n\n wearable.data.loc[wearable.data[col].isnull(), self.invalid_col] |= InvCode.FLAG_EPOCH_NULL_VALUE", "def justify_column(column):\n formatted_strings = ['{:,.3f}'.format(v) for v in column]\n justify = len(max(formatted_strings, key=len))\n column_strings = [s.rjust(justify) for s in formatted_strings]\n return (column_strings, justify)", "def _justify(self):\n minLengths = [max([max(map(len, row[i].split() + [''])) for row in self._rows if len(row) > 0])\n for i in range(self._colsNum)]\n shifts = [w - mw for mw, w in zip(minLengths, self._widthes)]\n # length = len(shifts)\n borrow = zip(self._colsRange, shifts)\n borrow.sort(lambda a, b: cmp(a[1], b[1]))\n delta = [0] * self._colsNum\n\n donorIdx = self._colsNum - 1\n recIdx = 0\n while True:\n\n curDonation = borrow[donorIdx][1]\n curRec = borrow[recIdx][1]\n\n if curRec >= 0 or curDonation <= 0:\n break\n\n curDelta = min(curDonation, -curRec)\n curDonation -= curDelta\n curRec += curDelta\n delta[borrow[donorIdx][0]] -= curDelta\n delta[borrow[recIdx][0]] += curDelta\n\n if curDonation == 0:\n donorIdx -= 1\n\n if curRec == 0:\n recIdx += 1\n\n for i in self._colsRange:\n self._widthes[i] += delta[i]", "def assemble_col(c1, c2):\n c1.extend(c2)\n return c1", "def _merge(x, y):\n for key in x:\n if key in y:\n x[key] = _merge(x[key], y[key])\n y[key] = None\n for key in y:\n if y[key] is not None:\n x[key] = y[key]\n return x", "def order_cols_with_meta(df, cols, meta_cols, col_name, meta_name):\n col1 = df.columns[cols[0]]\n meta_col1 = df.columns[meta_cols[0]]\n col2 = df.columns[cols[1]]\n meta_col2 = df.columns[meta_cols[1]]\n four_col_df = df[[col1, col2, meta_col1, meta_col2]].drop_duplicates()\n four_col_df[col_name + '_a'] = four_col_df.apply(lambda row: (row[col1] if row[col1] <= row[col2]\n else row[col2]), axis=1)\n four_col_df[col_name + '_a_' + meta_name] = four_col_df.apply(lambda row: (row[meta_col1] if row[col1] <= row[col2]\n else row[meta_col2]), axis=1)\n four_col_df[col_name + '_b'] = four_col_df.apply(lambda row: (row[col2] if row[col1] <= row[col2]\n else row[col1]), axis=1)\n four_col_df[col_name + '_b_' + meta_name] = four_col_df.apply(lambda row: (row[meta_col2] if row[col1] <= row[col2]\n else row[meta_col1]), axis=1)\n ordered_df = df.merge(four_col_df, how='inner', on=[col1, meta_col1, col2, meta_col2])\n return ordered_df", "def tuple_merge(tuples):\n\n\t# Add your code here\n\treturn", "def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)", "def _merge_by_append(self, tables: Tuple[Table, ...]):\n columns = uniq(column for table in tables for column in table.columns)\n\n merged = Table(columns=columns)\n for table in tables:\n merged.append_rows(table)\n\n return merged", "def regular_collate_fn(data):\n\timg, box, q, a = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), torch.stack(box), q, torch.stack(a).long()", "def merge_down(lists):\r\n lst1 = transpose(lists)\r\n lst2 = merge_AllRight(lst1)\r\n lst3 = transpose(lst2)\r\n\r\n lists = lst3\r\n\r\n return lists", "def padAlignment(align, applyPadding=True):\n if type(align) in [dict, np.ndarray, list]:\n align = pd.Series(align)\n\n \"\"\"Replace * and # with - and - \"\"\"\n for ind in align.index:\n if '*' in align[ind]:\n align[ind] = align[ind].replace('*', '-')\n if '#' in align[ind]:\n align[ind] = align[ind].replace('#', '-')\n \"\"\"Pad with gaps if the lengths are all the same\"\"\"\n if applyPadding:\n L = 
align.map(len).unique()\n if len(L) > 1:\n #print 'Sequences have different lengths (pading with gaps): %s' % L\n L = L.max()\n for ind in align.index:\n if len(align[ind]) < L:\n align[ind] = align[ind].ljust(L, '-')\n else:\n L = L.max()\n return align", "def concat_columns(self, frame_list):\n frame_list = list(frame_list)\n if len(frame_list) <= 0:\n return None\n if len(frame_list) == 1:\n return frame_list[0]\n res = pl.concat(frame_list, how=\"horizontal\")\n return res", "def check_column_values(self, values):\n none_keys = sorted(list(self._necessary_input_columns.intersection(set([elem for elem in self._columns if values[self.column_id[elem]] in [None, 'None']]))))\n if len(none_keys) > 0:\n raise Exception('missing_keys in ForcingOnMesh_DBManager add function parameter file_info:\\n%s\\n'%('\\n'.join([' - %s'%elem for elem in none_keys])))", "def _merge_inplace(self, other):\n if other is None:\n yield\n else:\n # don't include indexes in priority_vars, because we didn't align\n # first\n priority_vars = OrderedDict(\n (k, v) for k, v in self.variables.items() if k not in self.dims)\n variables = merge_coords_without_align(\n [self.variables, other.variables], priority_vars=priority_vars)\n yield\n self._update_coords(variables)", "def DealWithMissingValues(data_set: pd.DataFrame):\n data_set.fillna(method=\"pad\", inplace=True)", "def reduce_join(df, columns,sep='_'):\n assert len(columns) > 1\n slist = [df[x].astype(str) for x in columns]\n return reduce(lambda x, y: x + sep + y, slist[1:], slist[0])", "def setAllColumns(self, newAllColumns):\n \n pass" ]
[ "0.6452826", "0.59999216", "0.5639353", "0.53557676", "0.5288406", "0.51761496", "0.50632006", "0.5041874", "0.5031121", "0.49720797", "0.49664128", "0.49384537", "0.49003986", "0.4899783", "0.48729792", "0.4778065", "0.47356743", "0.47148484", "0.46847248", "0.4650927", "0.46477723", "0.4626838", "0.4625752", "0.4614495", "0.45995593", "0.45970088", "0.45873684", "0.4534471", "0.45314267", "0.45283726", "0.45234036", "0.4491404", "0.44800186", "0.44751546", "0.4467175", "0.4456549", "0.44467208", "0.44456047", "0.4440865", "0.44341236", "0.44229987", "0.44169798", "0.44154832", "0.44117546", "0.4400445", "0.4386784", "0.43850115", "0.4373342", "0.43608087", "0.43488556", "0.43389934", "0.4318023", "0.431307", "0.42973253", "0.42963046", "0.4294479", "0.42904717", "0.42690066", "0.42687234", "0.42568418", "0.42543676", "0.4250642", "0.4249141", "0.42481214", "0.42377114", "0.4234131", "0.42294496", "0.4224684", "0.42075235", "0.42046", "0.42031807", "0.41986674", "0.41877386", "0.41829038", "0.41826537", "0.4171254", "0.41419947", "0.41375065", "0.4135395", "0.41228348", "0.41207528", "0.4117133", "0.41155246", "0.41151938", "0.41140202", "0.41107413", "0.41087428", "0.40881327", "0.40852088", "0.40849894", "0.4080315", "0.4074826", "0.40661514", "0.40527362", "0.40522915", "0.40497288", "0.4047827", "0.40444845", "0.40439507", "0.40395012" ]
0.5532568
3
r""" Write object to a commaseparated values (csv) file.
def to_csv( self, path_or_buf=None, sep=",", na_rep="", float_format=None, columns=None, header=True, index=True, index_label=None, mode="w", encoding=None, compression="infer", quoting=None, quotechar='"', line_terminator=None, chunksize=None, date_format=None, doublequote=True, escapechar=None, decimal=".", engine="dolphindb", append=False, ): if engine == "pandas": df = self.to_pandas() from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter( df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal, ) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() elif engine == "dolphindb": append = 'true' if append else 'false' self_script = self._to_script(ignore_index=True) script = f"saveText({self_script},'{path_or_buf}', '{sep}', {append})" self._session.run(script) else: raise ValueError("Unsupport type engine " + engine) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to_csv(self):\r\n # Save the read values to a csv file\r\n with open(self.fname, \"a\") as f:\r\n wr = csv.writer(f, dialect='excel')\r\n wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,\r\n self.soc0, self.set_val, self.P_ac, self.P_bat])", "def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')", "def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")", "def write(self, values, file_obj, format=None):\n pass", "def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def write_into_csv(self, loc_details=[], itype='atm', mode='w'): \n \n if itype==\"brc\":\n csvfile_name = self.branch_file\n headers = self.branch_headers\n else:\n csvfile_name = self.atm_file\n headers = self.atm_headers\n\n with open(csvfile_name, mode, newline='') as csvfile:\n locwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n if mode=='w':\n locwriter.writerow(headers) \n\n for loc in loc_details:\n locwriter.writerow(loc)", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + 
name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = ['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n 
writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())", "def write_csv(self, out_file_name, header):\n\n with open(out_file_name, 'wb') as outf:\n writer = csv.writer(outf, quoting=csv.QUOTE_ALL)\n writer.writerow(header)\n writer.writerows(self.records)", "def write(self, file: IO) -> None:\n serializer = self.serializer_class(self.get_queryset(), many=True)\n\n writer = csv.DictWriter(file, self.serializer_class.Meta.fields)\n writer.writeheader()\n\n # Write serializer data and replace None/'' with 'NA'\n writer.writerows(\n OrderedDict(\n (\n field_name,\n \"NA\" if (field_value is None or field_value == \"\") else field_value,\n )\n for field_name, field_value in row.items()\n )\n for row in serializer.data\n )\n\n file.seek(0)", "def write_csv(self, file):\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def store_csv(self):\n\n with open(self.filepath.with_suffix(\".csv\"), 'w',\n newline='') as csvfile:\n fieldnames = ['counter', 'timestamp', 'acceleration']\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerows(self.values)", "def export_csv(self, csvfileobject):\n for index, track in enumerate(self._tracks):\n csvfileobject.writerow(track.properties)\n for delta in track.periods: \n csvfileobject.writerow(delta.properties)", "def _csvWriter(self):\r\n # Initialize Header\r\n table = []\r\n voltageRow = []\r\n for i in range(len(self._voltages)):\r\n voltageRow.append(self._voltages[i][0])\r\n voltageRow.append(\" \")\r\n if self._vna.isTwoComponents():\r\n voltageRow.append(\" \")\r\n table.append(voltageRow)\r\n \r\n # Fill table with data\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._frequency[0])):\r\n # row = []\r\n # for j in range(len(self._frequency)):\r\n # row.append(self._frequency[j][i])\r\n # row.append(self._intensity[j][2*i])\r\n # row.append(self._intensity[j][2*i + 1])\r\n # table.append(row)\r\n # else: \r\n for i in range(len(self._frequency[0])):\r\n row = []\r\n for j in range(len(self._frequency)):\r\n row.append(self._frequency[j][i])\r\n row.append(self._intensity[j][i])\r\n table.append(row)\r\n\r\n # Write to CSV\r\n filename = 'CSVs/' + self._vna.getDateFormatted() + '.csv'\r\n with open(filename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n for i in range(len(table)):\r\n dataWriter.writerow(table[i])", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def __open_csv(self):\n self.__csv_file = open(self.__csv_file_name, 'w', encoding='utf-8')\n self.__csv_writer = csv.writer(self.__csv_file, delimiter=',', )", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 
'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)", "def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())", "def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return", "def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)", "def write_to_file(data, method, delimiter):\r\n output_file = 'data.csv'\r\n with open(output_file, method, newline='', encoding='utf-8') as file:\r\n writer = csv.writer(file, delimiter=delimiter)\r\n writer.writerows([data])", "def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,\n format_name='csv')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def save_csv(vals: Vals):\n logging.info('Writing data to csv file')\n with open(PureWindowsPath(os.path.realpath(__file__)).parent / 'results.csv', 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(('X', 'Y'))\n\n for x, y in dict(zip(vals.x, vals.y)).items():\n csvwriter.writerow((x, y))\n\n logging.info('Finished writing')\n messagebox.showinfo('Save to CSV', 'Successfully saved!')", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def _write(self):\n # Reload\n with portalocker.Lock(self.filename, 'w') as fh:\n self.data.to_csv(fh, index=False)\n fh.flush()\n os.fsync(fh.fileno())", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != 
len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def csvWrite(self, data, csvFileName):\n\twith open(csvFileName, 'w') as csv_file:\n\t\twriter = csv.writer(csv_file)\n\t\t\tfor key, value in data.items():\n\t\t\t\twriter.writerow([key,value])", "def writerow(self, data):\n self.get_csv_writer().writerow(data)", "def write_csv(settings, row, mode):\n with open(settings.output_file_path, mode=mode) as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)", "def csv_writer(data, path):\n\twith open(path, \"wb\") as csv_file:\n\t\twriter= csv.writer(csv_file, delimiter=',')\n\t\twriter.writerows(data)", "def save_to_file_csv(cls, list_objs):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n result = []\n\n if list_objs:\n for objs in list_objs:\n # First recollect the info of the object with a dict\n dictionary = objs.to_dictionary()\n middle_result = []\n # Second obtein the values in a ordered class list\n if cls.__name__ == \"Rectangle\":\n for item in list_rectangle:\n middle_result.append(dictionary[item])\n if cls.__name__ == \"Square\":\n for item in list_square:\n middle_result.append(dictionary[item])\n # append the list to result list\n result.append(middle_result)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerows(result)", "def csv_writer(data, path):\n\n with open(path, \"a\") as csv_file:\n\n writer = csv.writer(csv_file,delimiter=',')\n\n \n\n writer.writerow(data)", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))", "def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"challenge execution ID\",self.ID])\n dump_list.append([\"challenge execution name\",self.name])\n\n dump_list.append([\"challenge definition ID\",self.challenge_def_ID])\n challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)\n dump_list.append([\"challenge definition name\",challenge_def_name])\n\n if self.start_time != None:\n dump_list.append([\"challenge start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.stop_time != None:\n dump_list.append([\"challenge stop time\",self.stop_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: challDefExec + ID + start time + .csv\n file_name = \"challDefExec\" + \"{0:0=3d}\".format(self.challenge_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + 
\".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()", "def to_csv(self, file_path: str) -> None:\n with open(file_path, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"parameter\", \"value\"])\n writer.writerow([\"pool_maxsize\", str(self.pool_maxsize)])\n writer.writerow([\"pool_increment_period\", str(self.pool_increment_period)])\n writer.writerow([\"head_to_head_period\", str(self.head_to_head_period)])\n writer.writerow([\"quality_scores\", str(self.quality_scores)])\n writer.writerow([\"quality_score_eta\", str(self.quality_score_eta)])\n writer.writerow([\"pool_prob\", str(self.pool_prob)])\n writer.writerow([\"initial_quality\", str(self.initial_quality)])", "def write_csv(fname, olist):\n ofile = open(fname, \"wb\")\n writer = csv.writer(ofile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_ALL)\n writer.writerows(olist)", "def save_to_file_csv(cls, list_objs):\n r_fields = ['id', 'width', 'height', 'x', 'y']\n s_fields = ['id', 'size', 'x', 'y']\n filename = cls.__name__ + \".csv\"\n new_list = []\n with open(filename, \"w\") as fp:\n if cls.__name__ == \"Rectangle\":\n dict_writer = csv.DictWriter(fp, fieldnames=r_fields)\n elif cls.__name__ == \"Square\":\n dict_writer = csv.DictWriter(fp, fieldnames=s_fields)\n dict_writer.writeheader()\n for objs in list_objs:\n dict_writer.writerow(objs.to_dictionary())", "def saveCsv(self, filePath = None, rowDeliminator = None, columnDeliminator = None, sheetDeliminator = None, yieldForNone = \"\", skipLineCondition = None):\n\n\t\t\tdef yieldValues(row):\n\t\t\t\tfor cell in row:\n\t\t\t\t\tvalue = cell.value\n\t\t\t\t\tif (value is None):\n\t\t\t\t\t\tyield yieldForNone\n\t\t\t\t\telse:\n\t\t\t\t\t\tyield value\n\n\t\t\tif (skipLineCondition is None):\n\t\t\t\tdef yieldRows(mySheet):\n\t\t\t\t\tfor row in mySheet.thing.rows:\n\t\t\t\t\t\tyield rowDeliminator.join(yieldValues(row))\n\t\t\telse:\n\t\t\t\tdef yieldRows(mySheet):\n\t\t\t\t\tfor row in mySheet.thing.rows:\n\t\t\t\t\t\tif (not skipLineCondition(row)):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tyield rowDeliminator.join(yieldValues(row))\n\n\t\t\tdef yieldSheet():\n\t\t\t\tnonlocal self\n\n\t\t\t\tfor mySheet in self:\n\t\t\t\t\tyield columnDeliminator.join(yieldRows(mySheet))\n\n\t\t\t############################\n\n\t\t\trowDeliminator = self.ensure_default(rowDeliminator, \",\")\n\t\t\tsheetDeliminator = self.ensure_default(sheetDeliminator, \"\\n\")\n\t\t\tcolumnDeliminator = self.ensure_default(columnDeliminator, \"\\n\")\n\n\t\t\t_filePath = self.ensure_filePath(self.ensure_default(filePath, default = self.filePath), ending = (\".csv\"), checkExists = False)\n\t\t\twith open(_filePath, \"w+\") as fileHandle:\n\t\t\t\tfileHandle.write(sheetDeliminator.join(yieldSheet()))", "def write_data_csv(file,data,im_id,lock,num_validators=1):\n lock.acquire()\n with open(file, mode = 'a') as f:\n for row in data:\n f.write((str(im_id) + \",\" + str(num_validators)))\n for val in row:\n f.write(\",\")\n f.write(str(val))\n f.write(\"\\n\") \n lock.release()", "def write_to_csv(self, data_points):\n keys = data_points[0].keys()\n with open(self.report_path, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data_points)", "def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n 
for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()", "def csv_writer(data, path):\r\n with open(path, \"w\") as csv_file:\r\n writer = csv.writer(csv_file, delimiter=',')\r\n for line in data:\r\n writer.writerow(line)", "def save(self, close=True):\n rows = []\n # find out how many rows we're going to need to write\n max_rows = 0\n for _, cont in self.data:\n if len(cont) > max_rows:\n max_rows = len(cont)\n max_rows += 1 # add the header row\n\n for i in range(0, max_rows):\n row = []\n for (col_name, col_contents) in self.data:\n col_data = [col_name] + col_contents\n if len(col_data) > i:\n row.append(col_data[i])\n else:\n row.append(\"\")\n rows.insert(i, row)\n\n # Remove current contents of file\n self.file_object.seek(0)\n self.file_object.truncate()\n\n # Write new CSV data\n writer = UnicodeWriter(self.file_object, encoding=self.output_encoding)\n writer.writerows(rows)\n\n if close:\n self.file_object.close()", "def write_csv(reviewer_data, file_obj):\n writer = csv.writer(file_obj)\n writer.writerow(\n ('Reviewer', 'Reviews', '-2', '-1', '+1', '+2', '+A', '+/- %',\n 'Disagreements', 'Disagreement%'))\n for (name, r_data, d_data) in reviewer_data:\n row = (name,) + r_data + d_data\n writer.writerow(row)", "def csv_writer(data, path):\n with open(path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)", "def csv_writer(data, path):\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def csv_writer(file_path, data):\n with open(file_path, \"a+\") as f:\n #writer = csv.writer(f, delimiter=',')\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerows(data)\n f.close()", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()", "def write_csv(data, arg2, arg3):\n\n with open(\"data.csv\", newline='') as csvfile:\n # csv writer object\n writer = csv.writer(csvfile)\n\n # to write to file: writer.writerow([thing1, thing2, ...])", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def _export_csv(x, y, 
export_to):\r\n\r\n with open(export_to, 'w', newline='') as e:\r\n writer = csv.writer(e, delimiter=',')\r\n for i in range (0, len(x)):\r\n writer.writerow([x[i], y[i]])", "def csvWriter(asin, price, name):\n # NOT USED\n date = arrow.now().format('YYYY/MM/DD')\n headers = ['Date', 'ASIN', 'Price', 'Name']\n with open('CSVs/' + asin + '.csv', 'w') as newWrite:\n writer = csv.writer(newWrite)", "def write_csv(self, file: str, table: str, libref: str =\"\", nosub: bool =False, dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':\n dsopts = dsopts if dsopts is not None else {}\n opts = opts if opts is not None else {}\n\n code = \"filename x \\\"\"+file+\"\\\";\\n\"\n code += \"options nosource;\\n\"\n code += \"proc export data=\"\n\n if len(libref):\n code += libref+\".\"\n\n code += \"'\"+table.strip()+\"'n \"+self._sb._dsopts(dsopts)+\" outfile=x dbms=csv replace; \"\n code += self._sb._expopts(opts)+\" run\\n;\"\n code += \"options source;\\n\"\n\n if nosub:\n print(code)\n else:\n ll = self.submit(code, \"text\")\n return ll['LOG']", "def toCsv(self, csv_path):\n ser = pd.Series(self)\n ser.to_csv(csv_path)", "def to_csv(self, file_path: str) -> None:\n with open(file_path, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"parameter\", \"value\"])\n writer.writerow([\"render\", str(self.render)])\n writer.writerow([\"sleep\", str(self.sleep)])\n writer.writerow([\"log_frequency\", str(self.log_frequency)])\n writer.writerow([\"video\", str(self.video)])\n writer.writerow([\"video_fps\", str(self.video_fps)])\n writer.writerow([\"video_dir\", str(self.video_dir)])\n writer.writerow([\"num_episodes\", str(self.num_episodes)])\n writer.writerow([\"gifs\", str(self.gifs)])\n writer.writerow([\"gifdir\", str(self.gif_dir)])\n writer.writerow([\"video_frequency\", str(self.video_frequency)])", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def write_csv(self, stock_list):\n\n with open(self.outfile, 'w') as outfile:\n writer = csv.writer(outfile, delimiter=',',\n quoting=csv.QUOTE_MINIMAL)\n for symbol, values in stock_list.items():\n # Need to find a better way to handle this...\n writer.writerow([values['symbol'], values['name']])", "def csv_writer(data, path):\n with open(path, \"wb\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def write_csv(self, filename, cutoff=2):\n f = csv.writer(open(filename, 'wb'))\n for row in self.rows(cutoff=cutoff):\n f.writerow(row)", "def write_csv(filename, **values):\n writeheader = not os.path.isfile(filename)\n fieldnames = sorted(values.keys())\n\n with open(filename, 'a') as f:\n writer = csv.DictWriter(f, fieldnames, dialect='excel-tab')\n if writeheader:\n writer.writeheader()\n writer.writerow(values)", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename 
is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def ToCsv(self):\n\n def csv_helper(the_dict, the_field):\n if the_field not in the_dict:\n return \"\"\n value = the_dict[the_field]\n if value is None:\n return \"\"\n if isinstance(value, set):\n value = \"{}\".format(value)\n #yes, I want to fallback to the previous case\n\n\n if isinstance(value, str):\n value = value.replace(\"\\\"\",\"\\\"\\\"\")\n value = value.replace(\"\\r\",\"\")\n #value = value.replace(\"\\n\",\"\\\\n\")\n return \"\\\"{}\\\"\".format(value)\n return value\n\n output = \"\"\n first = True\n for one_field in self.CSV_FIELDS:\n if first:\n first = False\n template = \"{}{}\"\n else:\n template = \"{},{}\"\n output = template.format(output, csv_helper(self.__dict__, one_field))\n return output", "def write(self, data, filename=None):\n if not filename:\n filename = self.output_csv\n\n with open(filename, \"w\") as _file:\n writer = csv.writer(_file)\n\n writer.writerow(list(_ for _ in self.header()))\n writer.writerows(data)", "def save_values(self):\n f_name = self.img_path.split('.')[0] + '_{}_'.\\\n format(self.data_type_name) + '.csv'\n dir_name = os.path.join(self.base_dir, f_name)\n if not os.path.exists(dir_name):\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)\n else:\n os.remove(f_name)\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)", "def write_to_file(self):\n name = datetime.today().date()\n with open(f'{name}.csv', 'w', newline='') as file_create:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_create, fieldnames=fieldnames)\n writer.writeheader()\n while datetime.today() < self.track_to:\n value_of_currency = PriceTracker.track_price()\n with open(f'{file_create.name}', 'a', newline='') as file_append:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_append, fieldnames=fieldnames)\n writer.writerow({'date': datetime.today().strftime(\"%H:%M:%S\"), 'value_in_pln': value_of_currency})\n\n self.check_min_value(tracked_price=value_of_currency)\n sleep(1)\n\n return self.generate_report(file_create.name)", "def object_export_save(simulation, object_name, dir):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/' + object_name + 's.tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n filename = dir + '/zones.tsv'\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n filename = dir + '/Intersections.tsv'\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n filename = dir + '/links.tsv'\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n filename = dir + '/functions.tsv'\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 
'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n if values:\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n\n return filename", "def csvWriter(data, out_file):\n print '[+] Writing CSV output.'\n logging.info('Writing CSV to ' + out_file + '.')\n headers = ['ID', 'Name', 'Path', 'Session ID', 'Count', 'Last Used Date (UTC)', 'Focus Time (ms)', 'Focus Count']\n\n with open(out_file, 'wb') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=headers, extrasaction='ignore')\n # Writes the header from list supplied to fieldnames keyword argument\n writer.writeheader()\n\n for i, dictionary in enumerate(data):\n # Insert the 'ID' value to each dictionary in the list. Add 1 to start ID at 1 instead of 0.\n dictionary['ID'] = i + 1\n # Convert the FILETIME object in the fourth index to human readable value\n dictionary['Last Used Date (UTC)'] = fileTime(dictionary['Last Used Date (UTC)'])\n writer.writerow(dictionary)\n\n csvfile.flush()\n csvfile.close()\n msg = 'Completed writing CSV file. 
Program exiting successfully.'\n print '[*]', msg\n logging.info(msg)", "def delimited_write(self, obj):\n try:\n self.obj.write(\",\" + json.dumps(obj))\n except:\n self.bad_obj(obj)", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def write_as_csv(self,destination=sys.stdout):\n # write sorted\n the_destination=None\n if isinstance(destination,types.FileType):\n the_destination=destination\n elif isinstance(destination,types.StringTypes):\n the_destination=file(destination,\"w\")\n else:\n raise Exception(\"sorry destination %s is not valid\"%(repr(destination)))\n\n the_destination.write(\"# quantity:\"+str(self.quantity_name))\n the_destination.write(\"# x y ysigma n\\n\")\n for x in self.get_xdata():\n y=UserDict.UserDict.__getitem__(self,x)\n if type(y) is types.FloatType:\n the_destination.write(\"%g %g 0 1\\n\"%(x,y)) \n else:\n the_destination.write(\"%g %g %g %d\\n\"%(x,y.mean(),y.mean_sigma(),y.n))\n\n the_destination=None", "def write_to_file(self, file):\n f = open(file, \"w+\")\n for row in self.value:\n line = \" \".join(str(el) for el in row) + \"\\n\"\n f.write(line)\n\n f.close()", "def save(self, path, separator=\",\", encoder=lambda j,v: v):\n # csv.writer will handle str, int, float and bool:\n s = StringIO()\n w = csv.writer(s, delimiter=separator)\n w.writerows([[encode_utf8(encoder(j,v)) for j,v in enumerate(row)] for row in self])\n f = open(path, \"wb\")\n f.write(BOM_UTF8)\n f.write(s.getvalue())\n f.close()", "def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)", "def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')", "def CSVWriter (iterable, outLoc, header=\"\", ):\n if not iterable:\n print (\"nothing to write\")\n return 0\n\n out = open(outLoc, 'w')\n\n if header:\n out.write(header+'\\n')\n\n #Only works if iterable is a nested list\n for member in iterable:\n for item in member:\n out.write(str(item)+',')\n out.write('\\n')\n\n print(\"write to \"+outLoc+\" successful.\")\n return 1", "def output_to(self, writer):\n\n record = [\n self.address, # Property address\n self.license_type, # License type\n self.street_num, # House\n self.street, # Street\n self.license_number, # License / Folio\n self.address, # Civic address\n self.business_trade_name, # Business name 2\n 
self.business_name, # Business name 1\n self.mail_address_1, # Mailing address 1\n self.other_mail_address(), # Mailing address 2\n '', # Total Assess\n '', # Included Assess\n '', # Annual Charge\n self.unit # Unit\n ]\n\n writer.writerow(record)", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)", "def save(self, data, outpath):\n data.to_csv(outpath)", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def writeCSV(self, outfilename, delimiter=';'):\n\n input = open(self.filename)\n dialect = csv.Sniffer().sniff(input.read(1024))\n input.seek(0)\n reader = csv.reader(input, dialect)\n out = open(outfilename, \"w\")\n writer = csv.writer(out, delimiter=delimiter, quotechar='\"', quoting=csv.QUOTE_ALL)\n\n for row in reader:\n writer.writerow(row)\n\n input.close()\n out.close()", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def _write_csv(self, file_name, metadata, dates, data, disclaimer,\n float_fmt):\n\n version = '# file_format: pysonde csv format version 1.0\\n'\n header = [version]\n #prepend parameter list and units with single #\n param_header = '# datetime, '\n unit_header = '# yyyy/mm/dd HH:MM:SS, '\n dtype_fmts = ['|S19']\n fmt = '%s, '\n for param in np.sort(data.keys()):\n param_header += param + ', '\n try:\n unit_header += data[param].dimensionality.keys()[0].symbol + \\\n ', '\n except:\n unit_header += 'nd, '\n fill_value = float(metadata['fill_value']) * data[param].units\n data[param][np.isnan(data[param])] = fill_value\n dtype_fmts.append('f8')\n fmt += float_fmt + ', '\n\n #prepend disclaimer and metadata with ##\n for line in disclaimer.splitlines():\n header.append('# disclaimer: ' + line + '\\n')\n\n #for key,val in metadata.items():\n # if not isinstance(val, np.ndarray):\n # header.append('# ' + str(key) + ': ' + str(val) + '\\n')\n # else:\n # param_header += key + ', '\n # unit_header += 'n/a, '\n # dtype_fmts.append(val.dtype)\n # fmt += '%s, '\n for key in np.sort(metadata.keys()):\n if not isinstance(metadata[key], np.ndarray):\n header.append('# %s: %s\\n' % (str(key), str(metadata[key])))\n\n else:\n param_header += key + ', '\n unit_header += 'n/a, '\n dtype_fmts.append(metadata[key].dtype)\n fmt += '%s, '\n\n #remove trailing commas\n param_header = param_header[:-2] + '\\n'\n unit_header = unit_header[:-2] + '\\n'\n fmt = fmt[:-2]\n\n 
header.append('# timezone: ' + str(self.default_tzinfo) + '\\n')\n header.append(param_header)\n header.append(unit_header)\n\n dtype = np.dtype({\n 'names': param_header.replace(' ', '').strip('#\\n').split(','),\n 'formats': dtype_fmts})\n\n write_data = np.zeros(dates.size, dtype=dtype)\n write_data['datetime'] = np.array(\n [datetime.datetime.strftime(dt, '%Y/%m/%d %H:%M:%S')\n for dt in dates])\n\n for key, val in metadata.items():\n if isinstance(val, np.ndarray):\n write_data[key] = val\n\n for param in data.keys():\n write_data[param] = data[param]\n\n #start writing file\n fid = open(file_name, 'w')\n fid.writelines(header)\n np.savetxt(fid, write_data, fmt=fmt)\n fid.close()", "def save_data_csv(self, filename):\n #add masked entry as last column\n fields = numpy.r_[self.colLabels, ['masked']]\n\n #add dynamic expression to column headers\n for k, col in enumerate(self.dynamic_cols):\n fields[col] += \" [%s]\"%self.dynamic_expressions[k] if self.dynamic_expressions[k] else ''\n\n #add custom labels to field names \n for col, fieldname in enumerate(fields):\n custom_label = self.column_labels_custom.get(col)\n fields[col] += \" (%s)\"%custom_label if custom_label else ''\n\n fields[col] += \" {*}\" if (col in self.colsel and (fieldname.find('user')==0 or col in self.dynamic_cols)) else ''\n \n #add options\n \n \n #don't save last two lines\n data = numpy.c_[self.data[:-2], self.rowmask[:-2]]\n\n with open(filename, 'wb') as f:\n import csv\n writer = csv.writer(f)\n writer.writerow(fields)\n #writer.writerows(data)\n for row in data:\n r = [entry.encode('latin_1') if type(entry) is types.UnicodeType else entry for entry in row]\n writer.writerow(r)\n self.modified = False", "def write_csv(fhandle, outages, fields):\n writer = csv.DictWriter(fhandle, fields)\n writer.writeheader()\n writer.writerows([o.for_json() for o in outages])", "def save_to_csv(today, task, description, hours, start_time, end_time):\n fee = '$5'\n with open('timeTracker.csv', 'a', newline='') as file:\n fieldnames = ['Date', 'Task Name', 'Description', 'Start Time',\n 'End Time', 'Number of hours', 'Price per hour', 'Fee Charged']\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow({'Date': today, 'Task Name': task, 'Description': description, 'Start Time': start_time, 'End Time': end_time,\n 'Number of hours': hours, 'Price per hour': fee, 'Fee Charged': price})" ]
[ "0.7310468", "0.7034704", "0.6893769", "0.6833797", "0.68261456", "0.6813147", "0.67906994", "0.6789112", "0.6752092", "0.67305475", "0.6667265", "0.6658432", "0.66504896", "0.66496783", "0.6628684", "0.66130286", "0.66057646", "0.65871006", "0.6575944", "0.65313613", "0.65304416", "0.6529059", "0.6497789", "0.64924675", "0.6475277", "0.6423388", "0.64187706", "0.6401992", "0.6374082", "0.63578624", "0.6349491", "0.6330308", "0.6325193", "0.63232595", "0.63199544", "0.62987363", "0.6297186", "0.62824434", "0.6277843", "0.62682986", "0.62638825", "0.6247451", "0.62403464", "0.6234767", "0.62289894", "0.62251747", "0.6221804", "0.6220339", "0.6219771", "0.6203463", "0.6179239", "0.6168123", "0.6162681", "0.61606425", "0.6154427", "0.61504066", "0.6150083", "0.6148979", "0.614292", "0.6141095", "0.61407846", "0.61265004", "0.612606", "0.61241186", "0.61135674", "0.61124855", "0.6110541", "0.6105038", "0.60929567", "0.60800964", "0.60789514", "0.6076052", "0.6074818", "0.60705", "0.6068526", "0.6057733", "0.6042883", "0.6041102", "0.6036466", "0.6030556", "0.60272384", "0.6020161", "0.60100585", "0.6009024", "0.60065955", "0.60045105", "0.6000335", "0.5997793", "0.59897614", "0.59839255", "0.59687835", "0.5964115", "0.5960916", "0.596024", "0.5941325", "0.59353703", "0.5934185", "0.59264576", "0.59199226", "0.5911078", "0.5910789" ]
0.0
-1
Find the stack frame of the caller so that we can note the source file name, line number and function name.
def file_descriptor(self):
    f = frame()
    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename).capitalize()
        if filename == _srcfile:
            f = f.f_back
            continue
        rv = (co.co_filename, f.f_lineno, co.co_name)
        break
    self.line = "{0}:{2}[{1}]".format(*rv)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _findCaller(stack_info=False):\n f = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == logging._srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv", "def findCaller(cls):\n f = currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (co.co_filename, f.f_lineno, co.co_name)\n break\n return rv", "def findCallerPatch():\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n return rv", "def findCaller(self, stack_info=False, stacklevel=2):\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv", "def findCaller(self, stack_info=False):\n \n _frame_object = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X: Frames.\n if (_frame_object is not None):\n _frame_object = _frame_object.f_back\n \n rv = (\"(unknown file)\", 0, \"(unknown function)\", None)\n while hasattr(_frame_object, 'f_code'):\n _code_object = _frame_object.f_code\n filename = os.path.normcase(_code_object.co_filename)\n \n _next = _frame_object.f_back\n # noinspection PyProtectedMember,PyUnresolvedReferences\n if (filename == logging._srcfile):\n _frame_object = _next\n continue\n \n if (_next and hasattr(_next, 'f_code')):\n _parent_code = _next.f_code\n if (_parent_code.co_name == LOGGING_WRAPPER_NAME):\n _frame_object = _next.f_back\n continue\n \n _stack_info = None\n if (stack_info):\n _str_io = StringIO()\n _str_io.write('Stack (most recent call last):\\n')\n traceback.print_stack(_frame_object, file=_str_io)\n _stack_info = _str_io.getvalue()\n if (_stack_info[-1] == '\\n'):\n _stack_info = _stack_info[:-1]\n _str_io.close()\n \n rv 
= (_code_object.co_filename, _frame_object.f_lineno, _code_object.co_name, _stack_info)\n break\n return rv", "def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller", "def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def get_caller_frame() -> FrameType:\n return cast(FrameType, cast(FrameType, inspect.currentframe()).f_back)", "def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil", "def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv", "def _find_the_caller(i=0):\n import inspect\n\n # the first 2 elements in the stack are the current line and the line\n # of caller of `_find_the_caller`\n i = i + 2\n caller = inspect.stack()[i]\n return caller[1], caller[2], caller[4][0].rstrip(\"\\n\").strip()", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in 
values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def get_cur_info():\n try:\n raise Exception\n except:\n f = sys.exc_info()[2].tb_frame.f_back\n # return (f.f_code.co_name, f.f_lineno)\n return f.f_code.co_name", "def callersName():\r\n import sys\r\n return sys._getframe(2).f_code.co_name", "def GetCallerName(num_frame=1):\n frame = sys._getframe(num_frame + 1) # pylint: disable=protected-access\n return inspect.getframeinfo(frame, 1)[2]", "def getStackPosition(self):\r\n return self.callstack.getStack()", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0] \n \n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def get_caller(delta=0):\n if delta < 0:\n raise RuntimeError(\"Delta must be positive!\")\n for i, frame in enumerate(inspect.stack()):\n if i == 2 + delta:\n return os.path.abspath(frame.filename)", "def __get_caller_name(caller_frame):\n\n caller_name = caller_frame.f_code.co_name\n if 'self' in caller_frame.f_locals:\n caller_name = \"%s.%s\" % (\n caller_frame.f_locals['self'].__class__.__name__, caller_name\n )\n module = inspect.getmodule(caller_frame)\n if module:\n caller_name = \"%s.%s\" % (module.__name__, caller_name)\n return caller_name", "def findCaller(self):\n frames = inspect.stack()\n thisfile = os.path.normcase(frames[0][1])\n for frame in frames:\n filename = os.path.normcase(frame[1])\n if filename != thisfile and filename != logging._srcfile:\n major, minor, micro, _, _ = sys.version_info\n if (major, minor, micro) >= (2, 4, 2):\n return filename, frame[2], frame[3]\n else:\n return filename, frame[2]", "def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def caller_name(skip=2):\n stack = 
inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def who_is_calling():\n return sys._getframe(2).f_code.co_name", "def getCallerParams(self,frameLevel=1):\n # frameLevel=0 is always getCallerParams. Caller should be level 1, but sometimes level 1 is still in Debug. This causes many dirty hacks.\n levelsToAdd=frameLevel-1\n #debugDir=dir(self)\n #debugDir.remove('__init__') # without removing __init__ was debug unusable in any __init__. Following line is temporary unslashed only\n debugDir=['allowed', 'allowedLevels', 'caller', 'callerLocals', 'callerName', 'dprint', 'getCallerName', 'getCallerParams', 'printHeader', 'restricted', 'settings']\n while sys._getframe(frameLevel).f_code.co_name in debugDir: # restrict returning functions from Debug instance -- dirty hack\n # but causes trouble for init which is in every class. property debugDir hacks this issue.\n if frameLevel>1: print '%i: %s'%(frameLevel,sys._getframe(frameLevel).f_code.co_name)\n frameLevel+=1\n frameLevel+=levelsToAdd # another hack to get another frame\n self.caller=sys._getframe(frameLevel)\n self.callerLocals=self.caller.f_locals\n try:\n if self.callerLocals.has_key('self'):\n #debug.dprint(print str(self.callerLocals['self'].__class__).split(' ')[1],4)\n self.callerName=(\n str(self.callerLocals['self']).split(' ')[0].replace('<__main__.','')+\n '.'+self.caller.f_code.co_name)\n # 026 #if self.callerLocals.has_key('self'): del self.callerLocals['self'] # 025 Fix - caused errors in multithreadng.\n else: self.callerName=self.caller.f_code.co_name\n except KeyError, errorInstance:\n #026 #self.headerLogger.error(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.exception(\"Caught KeyError. 
Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.debug(\"callerLocals is %s\"%(str(self.callerLocals)))\n return (self.callerName,self.callerLocals)", "def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))", "def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n return \".\".join(name)", "def GetFunctionName():\n return traceback.extract_stack(None, 2)[0][2]", "def _get_vispy_caller():\n records = inspect.stack()\n # first few records are vispy-based logging calls\n for record in records[5:]:\n module = record[0].f_globals['__name__']\n if module.startswith('vispy'):\n line = str(record[0].f_lineno)\n func = record[3]\n cls = record[0].f_locals.get('self', None)\n clsname = \"\" if cls is None else cls.__class__.__name__ + '.'\n caller = \"{0}:{1}{2}({3}): \".format(module, clsname, func, line)\n return caller\n return 'unknown'", "def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror", "def _get_caller_path(frames_above=0):\n assert len(inspect.stack()) > 2, 'Call _get_caller_path from a public function in the classypy.util.dirs package.'\n\n # frame[0] is this function, frame[1] is the internal function that\n # was called to get here, frame[2] is the caller,\n # frame[2 + frames_above] is for times when things are deeper.\n frame = inspect.stack()[2 + frames_above] # 0 = this function, 1 = intermediate caller, 2 = actual caller\n caller_module_src_file = frame[1] # frame is a tuple, 2nd value is the file\n caller_module_dir = op.dirname(op.abspath(caller_module_src_file))\n\n return caller_module_dir", "def currentframe():\n try:\n raise Exception\n except:\n return sys.exc_info()[2].tb_frame.f_back", "def currentframe(_no_of_go_up_level):\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[_no_of_go_up_level - 1].tb_frame.f_back", "def currentframe():\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[2].tb_frame.f_back", "def getCallerName(self,frameLevel=1):\n self.getCallerParams(frameLevel)\n result=self.callerName\n return result", "def frame_location_info(self):\n\n return str(self.active_frame.f_code.co_filename) + \":\" + str(self.active_frame.f_lineno)", "def 
record_python_call_stack(self, frames_to_skip: int) -> infra.Stack:\n frames_to_skip += 1 # Skip this function.\n stack = utils.python_call_stack(frames_to_skip=frames_to_skip)\n self.with_stack(stack)\n if len(stack.frames) > 0:\n self.with_location(stack.frames[0].location)\n return stack", "def currentframe():\n return sys._getframe(3)", "def _FindTransactionFrameInStack():\n frame = sys._getframe()\n filename = frame.f_code.co_filename\n\n frame = frame.f_back.f_back\n while frame:\n if (frame.f_code.co_filename == filename and\n frame.f_code.co_name == 'RunInTransactionCustomRetries'):\n return frame\n frame = frame.f_back\n\n return None", "def GetCurrentFuncName():\n return sys._getframe(1).f_code.co_name", "def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.\n frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split(\"\\n\")\n frame_messages = []\n for frame in frames:\n segments = frame.split(\":\", 1)\n if len(segments) == 2:\n frame_messages.append(segments[1].strip())\n else:\n frame_messages.append(\"<unknown frame>\")\n return infra.Stack(\n frames=[\n infra.StackFrame(location=infra.Location(message=message))\n for message in frame_messages\n ]\n )", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def _getframe(depth=None): # real signature unknown; restored from __doc__\n pass", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def mock_frame(stack):\n return inspect.stack()[0]", "def mock_frame(stack):\n return inspect.stack()[0]", "def getMyCaller(self):\n stack = Throwable().getStackTrace()\n return stack[2].getClassName() + \".\" + stack[2].getMethodName()", "def get_frame_info(tb, context_lines=7):\n # line numbers / function / variables\n lineno = tb.tb_lineno\n function = tb.tb_frame.f_code.co_name\n variables = tb.tb_frame.f_locals\n\n # get filename\n fn = tb.tb_frame.f_globals.get('__file__')\n if not fn:\n fn = _os.path.realpath(\n _inspect.getsourcefile(tb) or _inspect.getfile(tb)\n )\n if fn[-4:] in ('.pyc', '.pyo'):\n fn = fn[:-1]\n\n # module name\n modname = tb.tb_frame.f_globals.get('__name__')\n\n # get loader\n loader = tb.tb_frame.f_globals.get('__loader__')\n\n # sourcecode\n try:\n if not loader is None:\n source = loader.get_source(modname)\n else:\n source = file(fn).read()\n except (SystemExit, KeyboardInterrupt):\n raise\n except:\n source = ''\n pre_context, post_context = [], []\n context_line, context_lineno = None, None\n else:\n parser = PythonParser(source)\n parser.parse()\n parsed_source = parser.get_html_output()\n lbound = max(0, lineno - context_lines - 1)\n ubound = lineno + context_lines\n try:\n context_line = parsed_source[lineno - 1]\n pre_context = parsed_source[lbound:lineno - 1]\n post_context = parsed_source[lineno:ubound]\n except IndexError:\n context_line = None\n pre_context = post_context = [], []\n context_lineno = lbound\n\n return {\n 'tb': tb,\n 'filename': fn,\n 'loader': loader,\n 'function': function,\n 'lineno': lineno,\n 'vars': variables,\n 'pre_context': pre_context,\n 'context_line': context_line,\n 'post_context': post_context,\n 'context_lineno': context_lineno,\n 'source': source\n }", "def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno", "def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"", "def trace():\n import traceback\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, __file__, synerror", "def extract_function_name():\n tb = sys.exc_info()[-1]\n stk = traceback.extract_tb(tb, 1)\n fname = stk[0][3]\n return fname", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name", "def 
_exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def get_caller_module(depth=2):\n frame = sys._getframe(depth)\n module = inspect.getmodule(frame)\n if module is None:\n return get_caller_module(depth=depth)\n return module", "def call_chain_to_next_log_calls_fn(cls):\n curr_frame = sys._getframe(2) # caller-of-caller's frame\n\n call_list = []\n prev_indent_level = -1\n\n found = False\n found_enabled = False\n hit_bottom = False # break both loops: reached <module>\n while not found_enabled and not hit_bottom:\n while 1: # until found a deco'd fn or <module> reached\n curr_funcname = curr_frame.f_code.co_name\n if curr_funcname == '_deco_base_f_wrapper_':\n # Previous was decorated inner fn, fixup; overwrite '_deco_base_f_wrapper_'\n # with name of wrapped function\n inner_fn = curr_frame.f_locals['f']\n call_list[-1] = inner_fn.__name__ # ~ placeholder\n\n wrapper_frame = curr_frame\n found = True\n break # inner loop\n\n call_list.append(curr_funcname)\n\n if curr_funcname == '<module>':\n hit_bottom = True\n break # inner loop\n\n globs = curr_frame.f_back.f_globals\n curr_fn = None\n if curr_funcname in globs:\n wrapper_frame = curr_frame.f_back\n curr_fn = globs[curr_funcname]\n # If curr_funcname is a decorated inner function,\n # then it's not in globs. 
If it's called from outside\n # its enclosing function, its caller is '_deco_base_f_wrapper_'\n # so we'll see that on next iteration.\n else:\n try:\n # if it's a decorated inner function that's called\n # by its enclosing function, detect that:\n locls = curr_frame.f_back.f_back.f_locals\n except AttributeError: # \"never happens\"\n # print(\"**** %s not found (inner fn?)\" % curr_funcname) # <<<DEBUG>>>\n pass\n else:\n wrapper_frame = curr_frame.f_back\n if curr_funcname in locls:\n curr_fn = locls[curr_funcname]\n # print(\"**** %s found in locls = curr_frame.f_back.f_back.f_locals, \"\n # \"curr_frame.f_back.f_back.f_code.co_name = %s\"\n # % (curr_funcname, curr_frame.f_back.f_back.f_locals)) # <<<DEBUG>>>\n if hasattr(curr_fn, cls._sentinels['SENTINEL_ATTR']):\n found = True\n break # inner loop\n\n curr_frame = curr_frame.f_back\n\n # If found, then call_list[-1] is log_calls-wrapped\n if found:\n # Look in stack frame (!) for (0.2.4) STACKFRAME_HACK_DICT_NAME\n # and use its values\n # _enabled, _log_call_numbers, _active_call_number, _extra_indent_level, _prefixed_fname\n if wrapper_frame.f_locals.get(STACKFRAME_HACK_DICT_NAME):\n active_call_items = wrapper_frame.f_locals[STACKFRAME_HACK_DICT_NAME]\n enabled = active_call_items['_enabled'] # it's >= 0\n log_call_numbers = active_call_items['_log_call_numbers']\n active_call_number = active_call_items['_active_call_number']\n call_list[-1] = active_call_items['_prefixed_fname'] # Hack alert (Pt 3)\n\n # only change prev_indent_level once, for nearest deco'd fn\n if prev_indent_level < 0:\n prev_indent_level = active_call_items['_extra_indent_level']\n\n if enabled and log_call_numbers:\n call_list[-1] += (' [%d]' % active_call_number)\n found_enabled = enabled # done with outer loop too if enabled\n else: # bypassed\n enabled = False\n\n if not enabled:\n curr_frame = curr_frame.f_back\n else: # not found\n # if not found, truncate call_list to first element.\n hit_bottom = True\n\n if hit_bottom:\n call_list = call_list[:1]\n return call_list, prev_indent_level", "def find_traceback_start(self):\n ### FILL IN ###", "def get_test_frame(self):\n\n # get function from end of unittest id()\n target = self.id().split('.')[-1]\n\n # traverse frames until function name is found\n for frame in inspect.stack():\n if frame[3] == target:\n return frame\n return None", "def get_stack(self, f, t):\n stack = []\n if t and t.tb_frame is f:\n t = t.tb_next\n while f is not None:\n stack.append((f, f.f_lineno))\n if f is self.botframe:\n break\n f = f.f_back\n stack.reverse()\n i = max(0, len(stack) - 1)\n while t is not None:\n stack.append((t.tb_frame, t.tb_lineno))\n t = t.tb_next\n if f is None:\n i = max(0, len(stack) - 1)\n return stack, i", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def get_err_source_info(original_traceback=None) -> dict:\n try: # carefully try to get the actual place where the error happened\n if not original_traceback:\n original_traceback = sys.exc_info()[2] # class, exc, traceback\n first_call = traceback.extract_tb(original_traceback)[-1]\n return dict(\n src_module=first_call[0],\n src_linenr=first_call[1],\n src_func=first_call[2],\n src_code=first_call[3],\n )\n except Exception as e:\n current_app.warning(\n \"I was unable to retrieve error source information: %s.\" % str(e)\n )\n return dict(module=\"\", linenr=0, 
method=\"\", src_code=\"\")", "def extractStack(frame, context=10, exceptionsFrameSymbol=EXCEPTIONS_FRAME_SYMBOL):\n\n\tstack = []\n\n\tfor frame, fileName, lineNumber, name, context, index in inspect.getouterframes(frame, context):\n\t\tif frame.f_locals.get(exceptionsFrameSymbol):\n\t\t\tcontinue\n\n\t\tstack.append((frame,\n\t\t\t\t\tfileName,\n\t\t\t\t\tlineNumber,\n\t\t\t\t\tname, context\n\t\t\t\t\tif context is not None else [],\n\t\t\t\t\tindex if index is not None else -1))\n\n\treturn list(reversed(stack))", "def lineno():\n return str(' - IpAddr - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def callstack_now():\n return checkpoints[-1]", "def lineno():\n return str(' - RDSInstanceMasterUserPasswordRule- caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def get_caller_name(depth=2, mod=True, cls=False, mth=False):\n stack = inspect.stack()\n start = 0 + depth\n if len(stack) < start + 1:\n return ''\n parent_frame = stack[start][0]\n name = []\n module = inspect.getmodule(parent_frame)\n if module and mod:\n name.append(module.__name__)\n if cls and 'self' in parent_frame.f_locals:\n name.append(parent_frame.f_locals['self'].__class__.__name__)\n if mth:\n codename = parent_frame.f_code.co_name\n if codename != '<module>':\n name.append(codename)\n del parent_frame, stack\n return '.'.join(name)", "def GetLineno():\n return inspect.currentframe().f_back.f_lineno", "def extract_function_name(maybe_function_str: str) -> Optional[str]:\n match = STACK_TRACE_LINE_RE.search(maybe_function_str)\n if match is not None:\n return match.group(2)\n return None", "def _get_debug_text(self, text):\n\n func = inspect.currentframe().f_back.f_back.f_code\n return \"{}: Function {} in {}:{}\".format(text, func.co_name, os.path.basename(func.co_filename), func.co_firstlineno)", "def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. 
It somehow erases the cpp stack frame info.\n # No need to skip this function because python frame is not recorded\n # in cpp call stack.\n stack = _cpp_call_stack(frames_to_skip=frames_to_skip)\n stack.message = \"C++ call stack\"\n self.with_stack(stack)\n return stack", "def _location_from_fx_stack_trace(\n node_stack_trace: str,\n) -> Optional[diagnostics.infra.Location]:\n if \"File\" not in node_stack_trace:\n return None\n\n lines = node_stack_trace.strip().split(\"\\n\")\n idx = 0\n while idx < len(lines) and \"File\" not in lines[idx]:\n idx += 1\n if idx + 1 >= len(lines):\n return None\n\n pattern = re.compile(r\"^File \\\"(.+)\\\", line (\\d+), in (.+)$\")\n matches = pattern.match(lines[idx].strip())\n if matches:\n uri = matches.group(1)\n line_number = int(matches.group(2))\n snippet = lines[idx + 1].strip()\n return diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet)\n return None", "def enclosing_frame(frame=None, level=3):\n frame = frame or sys._getframe(level)\n while frame.f_globals.get('__name__') == __name__:\n frame = frame.f_back\n return frame", "def curframe(self):\n return self._stack[self._curframe_index][0]", "def lineno():\n return str(' - SecurityGroupIngressPortRangeRule - caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def get_frame(self, i):\n return self.get_traceback(i).tb_frame", "def currentLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_lineno", "def DumpStackTracebacks():\n results = []\n id_name_map = {}\n for thread in threading.enumerate():\n id_name_map[thread.ident] = thread.name\n\n results.append(\n '*****\\n'\n '*\\n'\n '* Dumping debug information.\\n'\n '*\\n'\n '*****\\n')\n # pylint: disable=protected-access\n for thread_id, stack in sys._current_frames().items():\n results.append('Thread %s (id=%d):\\n' %\n (id_name_map.get(thread_id, 'unnamed-%d' % thread_id),\n thread_id))\n for filename, line_no, function_name, text in (\n traceback.extract_stack(stack)):\n # Same format as the usual Python stack trace, but indented\n # twice\n results.append(' File: \"%s\", line %d, in %s\\n' % (\n filename, line_no, function_name))\n if text:\n results.append(' %s\\n' % text.strip())\n\n results.append('***** End of debug information.\\n')\n\n return ''.join(results)", "def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def outerLineno2():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_back.f_lineno", "def lineno():\r\n\treturn inspect.currentframe().f_back.f_lineno", "def stack():\n return currentframe().f_back.f_locals.setdefault(SN, [])", "def stacktrace(self):\n stacktrace = self.StacktraceParser().Parse(\n self._raw_stacktrace,\n self._dependency_analyzer.regression_version_deps,\n signature=self.signature, top_n_frames=self._top_n_frames)\n if not stacktrace:\n logging.warning('Failed to parse the stacktrace %s',\n self._raw_stacktrace)\n return stacktrace", "def lineno():\n\n return 
inspect.currentframe().f_back.f_lineno", "def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]", "def outerLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_lineno", "def lineno():\n\treturn inspect.currentframe().f_back.f_lineno", "def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )", "def linenum(self):\n return self.source_frame_stack.linenum()", "def get_last_frame(tb):\n frames = traceback.extract_tb(tb)\n if not frames:\n return None\n\n target_frame = frames[-1]\n for frame in frames[::-1]:\n # ignore stack from installed and std packages\n if 'site-packages' in frame.filename or '/synchrolog_flask' in frame.filename:\n continue\n target_frame = frame\n break\n return target_frame", "def get_origin() -> dict:\n\n stackback = [\n y for x in [x.split(\"\\n\") for x in traceback.format_stack()] for y in x if y\n ]\n interest = stackback[-6].split(\",\")\n\n complete_file = interest[0].strip()[6:-1].split(os.sep)\n\n try:\n if complete_file[-2] != PyFunceble.storage.PROJECT_NAME:\n file = \"/\".join(complete_file)\n else:\n file = \"/\".join(complete_file[-2:])\n except IndexError:\n file = \"/\".join(complete_file)\n\n line = interest[1].strip().split()[-1].strip()\n func_name = interest[2].strip()[3:]\n\n if PyFunceble.storage.PROJECT_NAME in file:\n file = os.path.relpath(file)\n\n return {\"origin_path\": file, \"origin_line\": line, \"origin_func\": func_name}", "def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:\n trace_stack = trace_stack_var.get()\n return trace_stack[-1] if trace_stack else None", "def caller_reference(self) -> str:\n return pulumi.get(self, \"caller_reference\")" ]
[ "0.79303205", "0.7862949", "0.77929926", "0.77833045", "0.7717962", "0.7654662", "0.75871235", "0.75725806", "0.749834", "0.7496779", "0.74145174", "0.73589015", "0.7355899", "0.7281678", "0.72635937", "0.72341293", "0.7191653", "0.71636987", "0.7156922", "0.7154463", "0.7148924", "0.7134913", "0.709367", "0.70866424", "0.70718426", "0.70693713", "0.7061816", "0.7059859", "0.7006686", "0.7006464", "0.69265646", "0.6875568", "0.6867173", "0.68641394", "0.6862911", "0.6835373", "0.6800128", "0.6774365", "0.67614704", "0.6748096", "0.673945", "0.6699377", "0.6666132", "0.6651053", "0.6642731", "0.6596833", "0.6586085", "0.6586085", "0.6551674", "0.6527924", "0.6520252", "0.64902526", "0.64705974", "0.64511013", "0.64391756", "0.643235", "0.6425343", "0.64129436", "0.639743", "0.63927984", "0.6379619", "0.63747525", "0.637075", "0.637024", "0.6342814", "0.6314876", "0.6305245", "0.6294456", "0.6292676", "0.6277702", "0.6263024", "0.62573993", "0.6242831", "0.622622", "0.6188235", "0.6177631", "0.61727977", "0.6157795", "0.6130317", "0.6125566", "0.6115813", "0.6111933", "0.6111933", "0.6111933", "0.6111933", "0.6111933", "0.6111933", "0.6097538", "0.60816264", "0.6081137", "0.60767126", "0.605066", "0.60419005", "0.60301405", "0.60207707", "0.59985435", "0.5993028", "0.59681517", "0.59512234", "0.5950952", "0.59046537" ]
0.0
-1
Open a window to compose an email, with the EDI invoice DIAN template message loaded by default
def action_invoice_dian_resend(self):
    self.ensure_one()
    template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)
    compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
    ctx = dict(
        default_model='account.invoice',
        default_res_id=self.id,
        default_use_template=bool(template),
        default_template_id=template and template.id or False,
        default_composition_mode='comment',
        mark_invoice_as_sent=True,
    )
    return {
        'name': _('Compose Email'),
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'mail.compose.message',
        'views': [(compose_form.id, 'form')],
        'view_id': compose_form.id,
        'target': 'new',
        'context': ctx,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_by_email(self):\r\n ir_model_data = self.env['ir.model.data']\r\n try:\r\n template_id = ir_model_data.get_object_reference(\r\n 'ng_church', 'email_template_church_pledge_report')[1]\r\n except ValueError:\r\n template_id = False\r\n try:\r\n compose_form_id = ir_model_data.get_object_reference(\r\n 'mail', 'email_compose_message_wizard_form')[1]\r\n except ValueError:\r\n compose_form_id = False\r\n ctx = dict(self._context)\r\n ctx.update({\r\n 'default_model': 'church.pledge',\r\n 'default_res_id': self._ids[0],\r\n 'default_use_template': bool(template_id),\r\n 'default_template_id': template_id,\r\n 'default_composition_mode': 'comment',\r\n })\r\n return {\r\n 'name': _('Compose Email'),\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'mail.compose.message',\r\n 'views': [(compose_form_id, 'form')],\r\n 'view_id': compose_form_id,\r\n 'target': 'new',\r\n 'context': ctx,\r\n }", "def __init__(self):\r\n self.window = 'dag_emailWindow'\r\n self.title = 'dagRenderMail'\r\n self.size= (195, 290);\r\n \r\n #Sets some defaults\r\n self.subject='Render Complete on '+str(dag_compName());\r\n self.login= 'sender@email.com'\r\n self.password='Password'\r\n self.to='destination@email.com'\r\n self.time='10'\r\n self.smtp='smtp.gmail.com:587'\r\n self.render = ''\r\n \r\n #Default message body\r\n self.body='Your render on '+str(dag_compName())+' is now complete.' + \"this message is automatically generated by dagMail. \\n dagmail script by Dhruv Govil www.dgovil.com \\n\\n\\n\"\r\n \r\n \r\n #default name for settings file. Can be anything. \r\n self.config='dagmail.settings'\r\n \r\n #Default MEL scripts. Don't change.\r\n self.preScr = 'python \"import dagMail\";python \"dagMail.dagMail.preScript()\"'\r\n self.postScr = 'python \"import dagMail\";python \"dagMail.dagMail.postScript()\"'", "def send_payslip(self):\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('send_email_payslips', 'email_template_hr_payslip')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n\n print 'user', self.employee_id.user_id\n\n user = self.env['res.users'].browse(self.employee_id.user_id.id)\n print 'partner_id', user.partner_id.id\n ctx = dict()\n ctx.update({\n 'default_model': 'hr.payslip',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'default_partner_id': user.partner_id.id,\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }", "def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()", "def open_client(introducing, msg):\n subject = urllib.quote(\"Introduction from %s\" % settings.name)\n body = urllib.quote(msg)\n s = \"mailto:?subject=%s&body=%s\" % (subject, 
body)\n if \"linux\" in sys.platform:\n proc_args = [\"xdg-open\", s]\n elif \"darwin\" in sys.platform:\n proc_args = [\"open\", s]\n # TODO: os.startfile works in Windows?\n p = subprocess.Popen(proc_args)", "def open_email(self):\n self.driver.execute_script(\"window.scrollTo(0, 700)\")\n self.click_on_element_by_css(tep.OPEN_EMAIL_BUTTON)", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def createSendMailFrame(self, empireDict):\n self.destroyTempFrames()\n self.sendMailInfo = anwp.gui.sendmailinfo.SendMailInfoFrame(self, self.game.app, empireDict)\n self.tempFrames.append(self.sendMailInfo)", "def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['adrian.borowski.tattoo@gmail.com']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"setnow@tuta.io\",\n recipients=[data[\"email\"]],\n )\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def pcorMacVerification(window,refrenceid,objectidentifier,texttoenter):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[9])\n childwindow = refrenceid.windowsR()\n protectMoreDevicestitle = getApplicatontitle(childwindow[0])\n entertext(protectMoreDevicestitle,objectidentifier,texttoenter)\n except Exception as er:\n return False\n print \"Not able to able to send mail\"", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", 'no-reply@email.com', [\n self.customer.email], headers={'Reply-To': 'no-reply@email.com'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def sendsms(window,refrenceid,image,email):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[10])\n childwindow = refrenceid.windowsR()\n protectMoreDevicesbuttons = getAppButtons(childwindow[0])\n protectMoreDevicestitle = childwindow[0].getApplicatontitle()\n ldtp.enterstring(protectMoreDevicestitle,image,email)\n 
#Need to write after click\n except Exception as er:\n return False\n print \"Not able to send SMS\"", "def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]", "def __init__(self,template_file, **kwargs):\r\n \r\n env = Environment(\r\n loader=PackageLoader('email_generator', 'templates'),\r\n autoescape=select_autoescape(['html', 'xml'])\r\n )\r\n template = env.get_template(template_file)\r\n self.body = template.render(**kwargs)", "def action_invite(self):\n self.ensure_one()\n\n if not self.env.user.email:\n raise UserError(_(\"Unable to post message, please configure the sender's email address.\"))\n\n mail_values = []\n for partner_id in self.partner_ids:\n slide_channel_partner = self.channel_id._action_add_members(partner_id)\n if slide_channel_partner:\n mail_values.append(self._prepare_mail_values(slide_channel_partner))\n\n # TODO awa: change me to create multi when mail.mail supports it\n for mail_value in mail_values:\n self.env['mail.mail'].sudo().create(mail_value)\n\n return {'type': 'ir.actions.act_window_close'}", "def send_object(self):\n for object_ in self.objects:\n strCC = '; '.join([object_.ter_dir_email, object_.successor_email])\n strCC += \"; ekb.inkas.net@maxus.ru; schugunov@svyaznoy.ru\"\n strSubject = \"Инкассация и вывоз POS-терминала при закрытии ТТ\"\n outMail = self.outlook.Application.CreateItemFromTemplate(\n CLOSING_MAIL_TEMPLATE\n )\n fixture = {\n 'дата+1': self.event_date.strftime('%d.%m.%Y'),\n 'преемник': object_.successor_full_name,\n 'имяТТ': f'ЦМС {object_.object_code[-4:]} {object_.object_name}'\n }\n HTML_body_without_signature = outMail.HTMLBody\n outMail.Display()\n for k, v in fixture.items():\n HTML_body_without_signature = HTML_body_without_signature.replace('{' + k + '}', v)\n\n outMail.HTMLBody = HTML_body_without_signature\n outMail.To = object_.object_SAP_code\n outMail.CC = strCC\n outMail.Subject = strSubject\n outMail.importance = 2\n if datetime.now().date() + timedelta(days=1) < self.event_date:\n outMail.DeferredDeliveryTime = \\\n (self.event_date - timedelta(days=1)).strftime('%d.%m.%Y') + \" 17:00\"", "def issue_book():\n issue_book_tk = IssueBookDialog()\n entries_args = [\n (\"Book ID : \", 0.2),\n (\"Issued To : \", 0.4)\n ]\n issue_book_tk.create_components(entries_args)\n issue_book_tk.mainloop()", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" 
valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" 
valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, 
email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def email_body_appointment_confirmation_for_buyer(meeting, buyer_profile, sellr_profile, msg_url=\"https://127.0.0.1:5000/message?profile=xxxx\"):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Ain\\'t life grand? Meeting\\'s on! <a href=\"https://127.0.0.1:5000/profile?'+ sellr_profile.prof_id + ' style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" accepted your proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details: <br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"'+msg_url+'\" style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" a message.</a><br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_contact_us_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\", \"contact-us\",\n \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_text = msg_text.format(**data)\n msg_html = render_template(\"main/contact-us/content.html\", **data)\n msg = Message(data[\"subject\"],\n sender=\"setnow@tuta.io\",\n recipients=[\"setnow@tuta.io\"])\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def compose(self, msg, recipient):\n email = Email(msg, self, recipient)\n self.mailman.send(email)", "def message_new(\n self, cr, uid, msg_dict, custom_values=None, context=None): \n if context is None:\n context = {}\n # prevent changes in context from \"bubbling up\" to calling methods\n local_context = dict(context)\n\n users_pool = self.pool.get('res.users')\n base_model = self.pool.get('ir.model.data')\n partner_model = self.pool.get('res.partner') \n\n # As the scheduler is run without language,\n # set the administrator's language\n if not local_context.get('lang'):\n user = users_pool.browse(cr, uid, uid, context=local_context)\n local_context['lang'] = user.partner_id.lang\n\n if custom_values is None:\n custom_values = {}\n email_from = msg_dict.get('from', False)\n if email_from:\n custom_values['name'] = _(\"Received by email from %s\") % email_from\n email_date = msg_dict.get('date', False)\n if email_date:\n custom_values['date_invoice'] = email_date\n\n company_id = (\n ('force_company' in local_context\n and local_context['force_company']) or False)\n\n # Retrieve partner_id from message dictionary.\n # Partner might be:\n # 1. Supplier sending email (author_id in msg dict.)\n # 2. Partner receiving message (special partner setup to receive\n # email). Should be linked to the appropiate company in multi-\n # company databases.\n # 3. Dummy invoice partner.\n # Partner MUST be a supplier.\n\n # 1. Try author:\n supplier_partner_id = False\n author_id = (\n 'author_id' in msg_dict and msg_dict['author_id'] or False)\n if (author_id\n and self._is_partner_supplier(\n cr, uid, author_id, context=local_context)):\n supplier_partner_id = author_id\n\n # 2. 
Try recipients:\n # Unfortunately we have to do a new lookup on partner, because\n # the method message_process in mail_thread removes the partner_ids\n # already found, from the message dictionary:\n if not supplier_partner_id:\n s = ', '.join(\n [msg_dict.get(h)\n for h in ['to', 'cc'] if msg_dict.get(h)])\n for email_address in tools.email_split(s):\n partner_ids = self.get_partner_from_mail(\n cr, uid, email_address, company_id, force_supplier=True,\n context=local_context)\n if partner_ids:\n supplier_partner_id = partner_ids[0]\n break\n\n # 3. Try default partner for company (company might be False):\n if not supplier_partner_id:\n args = [('fetchmail_invoice_default', '=', True),]\n if company_id:\n args.append(('company_id', '=', company_id))\n default_ids = partner_model.search(\n cr, uid, args, context=local_context)\n if default_ids: # can be only one\n supplier_partner_id = default_ids[0]\n\n # We should have a supplier/partner by now....\n assert supplier_partner_id, _('No partner found to link invoice to')\n\n # Get company for supplier, if any. If present, should be the same\n # as company for fetchmail config, if present. If still no\n # company is found, use main company.\n supplier_record = partner_model.read(\n cr, uid, supplier_partner_id, ['company_id', 'supplier'],\n context=local_context)\n supplier_company_id = (\n supplier_record['company_id'] and supplier_record['company_id'][0]\n or False)\n if supplier_company_id:\n if company_id:\n assert company_id == supplier_company_id, (_(\n 'Supplier found not valid for company %d.') %\n company_id)\n else:\n company_id = supplier_company_id\n if not company_id:\n # Last resort, use main company\n company_id = base_model.get_object_reference( \n cr, uid, 'base', 'main_company')[1]\n \n # Now we should have a company, and we should use it for everything\n assert company_id, (_(\n 'All attempts to determine company for invoice failed'))\n local_context['force_company'] = company_id\n \n # Paranoid check\n assert supplier_record['supplier'], (_(\n 'Partner %d is not a supplier') % supplier_partner_id)\n\n # And we should have an account property\n # (read again, as company might have changed)\n supplier_record = partner_model.read(\n cr, uid, supplier_partner_id, ['property_account_payable_id'],\n context=local_context)\n assert supplier_record['property_account_payable_id'], (\n _('No account payable on partner %d.') % supplier_partner_id)\n\n # And we need some information in context as well\n local_context.update({\n 'company_id': company_id,\n 'type': 'in_invoice',\n })\n\n supplier = partner_model.browse(cr, uid, supplier_partner_id, context=local_context)\n\n journal_id = self.pool.get('account.invoice').default_get(cr, uid, ['journal_id'], context=local_context)['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sale journal for this company.'))\n\n custom_values.update({\n 'company_id': company_id,\n 'partner_id': supplier_partner_id,\n 'type': 'in_invoice',\n\n 'account_id': supplier.property_account_payable_id.id,\n 'journal_id': journal_id,\n })\n\n\n # custom_values.update(\n # self.onchange_partner_id(\n # cr, uid, [], 'in_invoice', supplier_partner_id,\n # company_id=company_id)['value'])\n\n # Create the resource\n res_id = super(account_invoice, self).message_new(\n cr, uid, msg_dict, custom_values=custom_values,\n context=local_context)\n return res_id", "def open_create_partner(self, cr, uid, ids, context=None):\n view_obj = self.pool.get('ir.ui.view')\n view_id = 
view_obj.search(cr, uid, [('model', '=', self._name), \\\n ('name', '=', self._name+'.view')])\n return {\n 'view_mode': 'form',\n 'view_type': 'form',\n 'view_id': view_id or False,\n 'res_model': self._name,\n 'context': context,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! 
You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def show_template_dialogue(self):\n # TODO verifier l'import de classe pour pyinstaller\n\n dialog = QDialog()\n dialog.ui = template_dialogue.Ui_Dialog()\n dialog.ui.setupUi(dialog)\n self.combo = dialog.ui.comboBox\n self.btn_box = dialog.ui.buttonBox\n\n # self.proxy_pilot_name = self.proxyModel2.index(1, 1).data(Qt.DisplayRole)\n # templates_list = [\"template_try.docx\", \"template_essai.docx\"]\n path = 
pathlib.Path.cwd()\n templates_list2 = [x for x in os.listdir(path) if x.startswith(\"template\") and x.endswith(\".docx\")]\n dialog.ui.comboBox.addItems(\n templates_list2) # templates_list2 used: if problem with combo template revert to template_list1\n # self.combo_ac.addItems(self.retrieve_aircraft_var())\n # self.combo_pil.addItems((self.retrieve_pilot_var()))\n\n self.combo.activated.connect(self.select_template)\n self.btn_box.accepted.connect(self.create_document)\n # self.btn_box.accepted.connect(self.write_csv)\n # self.btn_box.accepted.connect(self.read_stored_pilot)\n\n dialog.setAttribute(Qt.WA_DeleteOnClose)\n dialog.exec_()", "def initM(self, num):\n prefix = C_Messaging.PREFIX\n if not wait_el_xpath_click(self.driver, C_Messaging.PATH_BTN_CREATE):\n logging.info('{0}: Create new message unsucceed.'.format(prefix))\n self.fail('{0}: Create new message unsucceed.'.format(prefix))\n recipients = wait_el_xpath(self.driver, C_Messaging.PATH_RECIPIENTS)\n action(recipients, Commands.CLEAR)\n action(recipients, Commands.CLICK)\n\n # phone number: 147 8230 5348\n for s in num:\n self.driver.press_keycode(Keycode.get(self, s))\n\n self.driver.press_keycode(Keycode.ENTER)\n\n text_editor = wait_el_xpath(self.driver, C_Messaging.PATH_TEXT_EDITOR)\n return text_editor", "def email_body_cancellation_from_seller_to_buyer():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\">{Insert user - seller}</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Check out <a href=\"#\" style=\"color:#1488CC\">{Insert seller}</a>\\'s availability, and send a new proposal. 
(Sometimes, a little reshuffling can really make things happen!)</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def show_popup(cls, content, level):\n\n current_view = sublime.active_window().active_view()\n message = cls.get_message_template(content, level)\n\n current_view.show_popup(content=message, max_width=400)", "def _make_message(request, issue, message, comments=None, send_mail=False,\n draft=None, in_reply_to=None):\n attach_patch = request.POST.get(\"attach_patch\") == \"yes\"\n template, context = _get_mail_template(request, issue, full_diff=attach_patch)\n # Decide who should receive mail\n my_email = db.Email(request.user.email())\n to = ([db.Email(issue.owner.email())] +\n issue.reviewers +\n [db.Email(email) for email in issue.collaborator_emails()])\n cc = issue.cc[:]\n if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:\n cc.append(db.Email(django_settings.RIETVELD_INCOMING_MAIL_ADDRESS))\n reply_to = to + cc\n if my_email in to and len(to) > 1: # send_mail() wants a non-empty to list\n to.remove(my_email)\n if my_email in cc:\n cc.remove(my_email)\n issue_id = issue.key.id()\n subject = issue.mail_subject()\n patch = None\n if attach_patch:\n subject = 'PATCH: ' + subject\n if 'patch' in context:\n patch = context['patch']\n del context['patch']\n if issue.num_messages:\n subject = 'Re: ' + subject\n if comments:\n details = _get_draft_details(request, comments)\n else:\n details = ''\n message = message.replace('\\r\\n', '\\n')\n text = ((message.strip() + '\\n\\n' + details.strip())).strip()\n if draft is None:\n msg = models.Message(issue_key=issue.key,\n subject=subject,\n sender=my_email,\n recipients=reply_to,\n text=text,\n parent=issue.key,\n issue_was_closed=issue.closed)\n else:\n msg = draft\n msg.subject = subject\n msg.recipients = reply_to\n msg.text = text\n msg.draft = False\n msg.date = datetime.datetime.now()\n msg.issue_was_closed = issue.closed\n issue.calculate_updates_for(msg)\n\n if in_reply_to:\n try:\n replied_msg_id = int(in_reply_to)\n replied_msg = models.Message.get_by_id(replied_msg_id, parent=issue.key)\n msg.in_reply_to_key = replied_msg.key\n replied_issue_id = replied_msg.issue_key.id()\n if replied_issue_id != issue_id:\n logging.warn('In-reply-to Message is for a different issue: '\n '%s instead of %s', replied_issue_id, issue_id)\n msg.in_reply_to_key = None\n except 
(db.KindError, db.BadKeyError, ValueError):\n logging.warn('Invalid in-reply-to Message or key given: %s', in_reply_to)\n\n if send_mail:\n # Limit the list of files in the email to approximately 200\n if 'files' in context and len(context['files']) > 210:\n num_trimmed = len(context['files']) - 200\n del context['files'][200:]\n context['files'].append('[[ %d additional files ]]' % num_trimmed)\n url = request.build_absolute_uri(reverse(show, args=[issue.key.id()]))\n reviewer_nicknames = ', '.join(library.get_nickname(rev_temp, True,\n request)\n for rev_temp in issue.reviewers)\n cc_nicknames = ', '.join(library.get_nickname(cc_temp, True, request)\n for cc_temp in cc)\n my_nickname = library.get_nickname(request.user, True, request)\n reply_to = ', '.join(reply_to)\n description = (issue.description or '').replace('\\r\\n', '\\n')\n home = request.build_absolute_uri(reverse(index))\n modified_added_count, modified_removed_count = _get_modified_counts(issue)\n context.update({'reviewer_nicknames': reviewer_nicknames,\n 'cc_nicknames': cc_nicknames,\n 'my_nickname': my_nickname, 'url': url,\n 'message': message, 'details': details,\n 'description': description, 'home': home,\n 'added_lines' : modified_added_count,\n 'removed_lines': modified_removed_count,\n })\n for key, value in context.iteritems():\n if isinstance(value, str):\n try:\n encoding.force_unicode(value)\n except UnicodeDecodeError:\n logging.error('Key %s is not valid unicode. value: %r' % (key, value))\n # The content failed to be decoded as utf-8. Enforce it as ASCII.\n context[key] = value.decode('ascii', 'replace')\n body = django.template.loader.render_to_string(\n template, context, context_instance=RequestContext(request))\n logging.warn('Mail: to=%s; cc=%s', ', '.join(to), ', '.join(cc))\n send_args = {'sender': my_email,\n 'to': [_encode_safely(address) for address in to],\n 'subject': _encode_safely(subject),\n 'body': _encode_safely(body),\n 'reply_to': _encode_safely(reply_to)}\n if cc:\n send_args['cc'] = [_encode_safely(address) for address in cc]\n if patch:\n send_args['attachments'] = [('issue_%s_patch.diff' % issue.key.id(),\n patch)]\n\n attempts = 0\n while True:\n try:\n mail.send_mail(**send_args)\n break\n except mail.InvalidSenderError:\n if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:\n previous_sender = send_args['sender']\n if previous_sender not in send_args['to']:\n send_args['to'].append(previous_sender)\n send_args['sender'] = django_settings.RIETVELD_INCOMING_MAIL_ADDRESS\n else:\n raise\n except apiproxy_errors.DeadlineExceededError:\n # apiproxy_errors.DeadlineExceededError is raised when the\n # deadline of an API call is reached (e.g. for mail it's\n # something about 5 seconds). 
It's not the same as the lethal\n # runtime.DeadlineExeededError.\n attempts += 1\n if attempts >= 3:\n raise\n if attempts:\n logging.warning(\"Retried sending email %s times\", attempts)\n\n return msg", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def MessageWindow(screen, title, text, width=40, help=None, timer_ms=None, \n run_type=RT_EXECUTEANDPOP):\n \n g = GridFormHelp(screen, title, help, 1, 3)\n\n t = TextboxReflowed(width, text)\n g.add(t, 0, 0)\n\n if timer_ms:\n g.form.w.settimer(timer_ms)\n\n (button, is_esc) = ActivateWindow(g, run_type)\n\n return {'is_esc': is_esc, \n 'grid': g,\n }", "def open_invoice(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n mod_obj = self.pool.get('ir.model.data')\n for advance_pay in self.browse(cr, uid, ids, context=context):\n form_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')\n form_id = form_res and form_res[1] or False\n tree_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_tree')\n tree_id = tree_res and tree_res[1] or False\n\n return {\n 'name': _('Advance Invoice'),\n 'view_type': 'form',\n 'view_mode': 'form,tree',\n 'res_model': 'account.invoice',\n 'res_id': int(context['invoice_id'][0]),\n 'view_id': False,\n 'views': [(form_id, 'form'), (tree_id, 'tree')],\n 'context': context,\n 'type': 'ir.actions.act_window',\n }", "def opm_popup(opmvers, text, nrow):\n\n layout1 = [[sg.Multiline(text, size=(80, nrow), background_color='white', text_color='darkgreen')],\n [sg.CloseButton('OK')]]\n window1 = sg.Window('OPMRUN - Flow Job Scheduler ' + opmvers, layout=layout1)\n window1.Read()\n return ()", "def notify(template_name, context):\n to_address = context['to_address']\n template_name = 'emails/' + template_name\n subject_template = get_template(\n template_name + '_subject.html')\n body_template = get_template(template_name + '_body.html')\n context = Context(context)\n # Strip, otherwise we get header errors.\n subject = subject_template.render(context).strip()\n body = body_template.render(context)\n try:\n sent = send_mail(subject, body, FROM_ADDRESS, [to_address])\n except Exception:\n pass\n\n status = 's' if sent else 'e'\n Notification.objects.create(\n from_address=FROM_ADDRESS,\n to_address=to_address,\n subject=subject,\n body=body,\n status=status,\n )", "def send_sber(self):\n if len(self.objects) == 1:\n object_ = self.objects[0]\n subject = 'Закрытие ТТ и вывоз терминала ' \\\n f'{object_.object_SAP_code} {object_.object_name}'\n else:\n subject = 'Закрытие ТТ и вывоз терминалов'\n with open(SBER_TEMPLATE, encoding='utf-8') as f:\n template_text = f.read()\n template = Template(template_text)\n body = template.render(objects=self.objects, date=self.event_date.strftime(\"%d.%m.%Y\"))\n # body = \\\n # '<p>Добрый день!</p>' \\\n # '<p>В связи с закрытием ТТ '\\\n # f'{self.object_SAP_code} {self.object_name}, ' \\\n # 'прошу организовать вывоз терминала в первой половине дня '\\\n # f'{self.event_date.strftime(\"%d.%m.%Y\")}, адрес ТТ - {self.object_address}' \\\n # '<br>Заранее спасибо!</p>'\n\n self.sendmail(\n self.outlook,\n ['VASaparkina@sberbank.ru', 'RAMinazhetdinov@sberbank.ru'],\n 'schugunov@svyaznoy.ru',\n subject,\n body,\n '',\n 1\n )", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = 
date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n # thisName = (\"%s %s\" % (result['Name'], result['Surname']))\n thisName = (\"%s\" % (result['Name']))\n thisAddress = (\"%s</br>%s</br>%s %s\" % (result['Address1'], result['Address2'], result['Town'], result['Postcode']))\n thisAddress = thisAddress.replace(\"None </br>\", \"\")\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n thisAddress = thisAddress.replace(\"None</br>\", \"\")\n participantCount = (\"%s\" % mdb.getParticipantCount(str(householdID)))\n # prepare the custom email\n\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n # DOESN'T happen yet - de is excluded from query for now\n # emailPath = os.path.join(thisPath, \"emails/email_confirm_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_automated_date.html\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[address]\", thisAddress)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n templateText = templateText.replace(\"[participantCount]\", participantCount)\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. 
remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. 
remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def msg_open(self,msg):\r\n filepaths = msg.get_data()\r\n if filepaths is ():\r\n #Create the file open dialog.\r\n filepaths,index = DoFileDialog(self.frame, wildcard = \"Python source (*.py,*.pyw)|*.py;*.pyw|All files (*,*.*)|*.*;*\")\r\n if filepaths==None:\r\n return\r\n\r\n if (filepaths is not None) and (filepaths!=[]):\r\n #open the file requested\r\n for path in filepaths:\r\n self.frame.notebook.OpenFile(path)\r\n self.frame.Show()\r\n self.frame.Raise()", "def get_email():\n return Email(\n subject='[Messages] Integration Test',\n body='Conducting Integration Testing',\n attachments=str(TESTDIR.joinpath('file2.png')))", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def invit_form(request):\n global invit_form\n ret = {'invit_form': invit_form}\n invit_form = EmailInvitationForm()\n return ret", "def __init__(self):\n self.outlook = win32.Dispatch('outlook.application')\n locale.setlocale(locale.LC_ALL, '')", "def send_email(to, subject, body, attachment=None):\n outlook = win32.Dispatch('outlook.application')\n new_mail = outlook.CreateItem(0)\n new_mail.Subject = subject\n new_mail.HTMLBody = body\n new_mail.To = to\n\n if attachment:\n new_mail.Attachments.Add(attachment)\n\n new_mail.Send()", "def openingMessage():\n print(\"=\" * 55)\n print(\"\\nNote: \")\n print(\"Input of an RSA Encrypted message is required.\")\n print(\"Public parameters of the system are n=31313 and e=4913\\n\")\n print(\"=\" * 55)", "def _send(to, context, subject, from_email, template):\n body = render_to_string(template, context)\n msg = EmailMultiAlternatives(subject, body, from_email, to)\n msg.attach_alternative(body, \"text/html\")\n msg.send()", "def messageHtml(self,fileName,doc=None):\n if doc:\n 
fileName=os.path.join(doc,'doc',fileName)\n if fileName[0]=='/': fileName = 'file://'+fileName\n WebBrowser=self.get('WebBrowser')\n if WebBrowser==DEFAULT:\n if os.path.isfile(FIREFOX):\n os.system(\"%s %s &\"%(FIREFOX,fileName))\n else:\n webbrowser.open(fileName, 1)\n else:\n os.system(\"%s %s &\"%(WebBrowser,fileName))", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def navigate_to_email_view(self):\r\n # Pull up email view on instructor dashboard\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n email_section = '<div class=\"vert-left send-email\" id=\"section-send-email\">'\r\n # If this fails, it is likely because ENABLE_INSTRUCTOR_EMAIL is set to False\r\n self.assertTrue(email_section in response.content)", "def notify(self, builder):\n\n # Build the <files> section for the template...\n commit = builder.commit\n files = E.files()\n\n commit_msg = commit.message.strip()\n commit_msg = re.sub(r'[\\x00-\\x09\\x0B-\\x1f\\x7f-\\xff]', '', commit_msg)\n\n for filename in commit.files_changed:\n safe_filename = re.sub(r'[\\x00-\\x09\\x0B-\\x1f\\x7f-\\xff]', '', filename)\n file_element = E.file(safe_filename)\n files.append(file_element)\n\n # Build the message\n cia_message = self.MESSAGE()\n cia_message.append(self._generator)\n\n source = self.SOURCE(E.project(\"KDE\"))\n source.append(E.module(self.repository.path))\n source.append(E.branch(self.repository.ref_name))\n\n cia_message.append(source)\n cia_message.append(self.TIMESTAMP(commit.date))\n\n body = self.BODY()\n\n commit_data = self.COMMIT()\n commit_data.append(E.author(commit.author_name))\n commit_data.append(E.revision(commit.description))\n commit_data.append(files)\n commit_data.append(E.log(commit_msg))\n commit_data.append(E.url(commit.url))\n\n body.append(commit_data)\n cia_message.append(body)\n\n # Convert to a string\n commit_xml = etree.tostring(cia_message)\n\n # Craft the email....\n message = MIMEText( commit_xml, 'xml', 'utf-8' )\n message['Subject'] = \"DeliverXML\"\n message['From'] = \"sysadmin@kde.org\"\n message['To'] = \"commits@platna.kde.org\"\n\n # Send email...\n self.smtp.sendmail(\"sysadmin@kde.org\", [\"commits@platna.kde.org\"],\n message.as_string())", "def reopen_copyedit_notify(request, project):\n subject = 'Project reopened for copyediting: {0}'.format(project.title)\n\n for email, name in project.author_contact_info():\n body = loader.render_to_string(\n 'notification/email/reopen_copyedit_notify.html', {\n 'name': name,\n 'project': project,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(),\n 'SITE_NAME': settings.SITE_NAME,\n })\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [email], fail_silently=False)", "def open_invoices(self):\n return {\n 'domain': \"[('id', 'in', \" + str(self.invoice_ids.ids) + \" )]\",\n 'name': 'Invoices',\n 'view_mode': 'tree,form',\n 'res_model': 'account.move',\n 'type': 'ir.actions.act_window',\n }", "def activation_email_template(cls, user_id):\n user = get_user_model().objects.get(id=user_id)\n email = 
user.e_mail\n activation_key = user.activation_key\n\n htmly = get_template('activation.html')\n \n context_kw = Context({'user': {'email': email, 'activation_key': activation_key}})\n \n email_subject = 'Account confirmation - NoTes'\n from_email = 'testntsystems@gmail.com'\n html_content = htmly.render(context_kw)\n msg = EmailMultiAlternatives(email_subject, html_content, \n from_email, [email])\n msg.content_subtype = \"html\"\n msg.send()", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else: \n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def draft_message(self, text=None, template_path=None, template_args=None):\n self.message['From'] = self.sender\n self.message['To'] = '; '.join(self.destinations)\n self.message['BCC'] = '; '.join(self.bcc)\n self.message['CC'] = '; '.join(self.cc)\n self.message['Subject'] = self.subject\n\n # check if email template is used\n if template_path:\n text = self.body_template(template_path)\n text = text.format(**template_args)\n\n # attach text part of message\n self.message.attach(MIMEText(text))\n\n # return self to encourage method chaining\n return 
self", "def email_body_email_address_changed_confirmation(url, new_email):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We\\'re just sending you a reminder: You changed your email.<br><br>'\n\tmsg = msg + '\\t\\t\\t We want to keep your information safe and secure, so if you didn\\'t change it yourself <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">give us a holler ASAP</a> and we\\'ll get on it.<br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" 
cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def _message(self, recipient, connection, context=None):\n base_subject = '{{ event.calendar.course.name }} {{ event.title }}'\n if not self.event.get_documents(True):\n template_name = self.REQUEST_TEMPLATE\n subject = 'Got a {} study guide?'.format(base_subject)\n else:\n template_name = self.PUBLISH_TEMPLATE\n subject = '{} study guide'.format(base_subject)\n\n subject = Template(subject).render(context)\n body = get_template(template_name).render(context)\n\n return make_email_message(subject, body,\n make_display_email(\n self.sender_address,\n self.sender_name),\n recipient, connection)", "def create_mail_content(daily: bool = False):\n if not daily:\n order = STATE['order'] if STATE['order'] else get_closed_order()\n trade_part = create_report_part_trade(order)\n performance_part = create_report_part_performance(daily)\n advice_part = create_report_part_advice()\n settings_part = create_report_part_settings()\n general_part = create_mail_part_general()\n\n if not daily:\n trade = [\"Last trade\", \"----------\", '\\n'.join(trade_part['mail']), '\\n\\n']\n performance = [\"Performance\", \"-----------\",\n '\\n'.join(performance_part['mail']) + '\\n* (change within 24 hours)', '\\n\\n']\n advice = [\"Assessment / advice\", \"-------------------\", '\\n'.join(advice_part['mail']), '\\n\\n']\n settings = [\"Your settings\", \"-------------\", '\\n'.join(settings_part['mail']), '\\n\\n']\n general = [\"General\", \"-------\", '\\n'.join(general_part), '\\n\\n']\n\n bcs_url = 'https://bitcoin-schweiz.ch/bot/'\n text = '' if daily else '\\n'.join(trade)\n\n if not CONF.info:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + bcs_url + '\\n'\n else:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + CONF.info \\\n + '\\n\\n' + bcs_url + '\\n'\n\n csv = None if not daily else INSTANCE + ';' + str(datetime.datetime.utcnow().replace(microsecond=0)) + ' UTC;' + \\\n (';'.join(performance_part['csv']) + ';' + ';'.join(advice_part['csv']) + ';' +\n ';'.join(settings_part['csv']) + ';' + CONF.info + '\\n')\n\n return {'text': text, 'csv': csv}", "def show_popup(self, data):\r\n store = get_store()\r\n self.ids.inlayout.rows = 1 + store.get('Nbtimecompound')[\"value\"]\r\n #the first µEOF\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"µEOF :\",\"FFFFFF\")))\r\n value = \"{:.2E}\".format(store.get('MicroEOF')[\"value\"])\r\n value = value+\" 
\"+store.get('MicroEOF')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value,\"FFFFFF\")))\r\n #add all the µEP\r\n for i in range(1, store.get('Nbtimecompound')[\"value\"]+1):\r\n if i%2 != 0:\r\n color = \"BFBFBF\"\r\n else:\r\n color = \"FFFFFF\"\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"µEP\"+str(i)+\" :\", color)))\r\n value = \"{:.2E}\".format(store.get('MicroEP'+str(i))[\"value\"])\r\n value = value +\" \"+store.get('MicroEP'+str(i))[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value,color)))\r\n #open the popup\r\n self.open()", "def test_send_email_mailhog(self):\n\n pk = self.saved_pks[0]\n publisher = RssNotificationEmailPublisher(self.saved_pks, self.feedsource.pk)\n\n notification = RssNotification.objects.get(pk=pk)\n rendered_content = publisher.render_notification(notification)\n publisher.send_email(rendered_content, notification)", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):\n html = \"\"\"\n <ul>\n <li>\n <strong>Case {case_name}</strong>: <a href=\"{url}\">{display_name}</a>\n </li>\n <li><strong>Variant type</strong>: {category} ({subcategory})\n <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>\n <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>\n <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>\n <li><strong>Gene panels</strong>: {panels}</li>\n <li><strong>GT call</strong></li>\n {gtcalls}\n <li><strong>Amino acid changes</strong></li>\n {tx_changes}\n <li><strong>Comment</strong>: {comment}</li>\n <li><strong>Ordered by</strong>: {name}</li>\n </ul>\n \"\"\".format(\n case_name=case_name,\n url=url,\n display_name=display_name,\n category=category,\n subcategory=subcategory,\n breakpoint_1=breakpoint_1,\n breakpoint_2=breakpoint_2,\n hgnc_symbol=hgnc_symbol,\n panels=panels,\n gtcalls=gtcalls,\n tx_changes=tx_changes,\n name=name,\n comment=comment)\n\n return html", "def open_create_obj_modal(obj_type):\n selenium_utils.open_url(url.dashboard())\n obj_modal = dashboard.Dashboard().open_create_obj_modal(obj_type=obj_type)\n return obj_modal", "def msg_new(self,msg):\r\n self.frame.notebook.New()\r\n self.frame.Show()\r\n self.frame.Raise()", "def send_popup_message(self, title, header, message):\n data = self.device_id_str + \"\\tMSSG\\t{}\\t{}\\t{}\\n\".format(title, header, message)\n self.tx_zmq_pub.send_multipart([b\"ALL\", b'0', data.encode('utf-8')])", "def send_confirmation_email(data, key, text_template=\"inviteme/confirmation_email.txt\", html_template=\"inviteme/confirmation_email.html\"):\n site = Site.objects.get_current()\n subject = \"[%s] %s\" % (site.name, _(\"confirm invitation request\"))\n confirmation_url = reverse(\"inviteme-confirm-mail\", args=[key])\n message_context = Context({ 'data': data,\n 'confirmation_url': confirmation_url,\n 'support_email': DEFAULT_FROM_EMAIL,\n 'site': site })\n\n # prepare text message\n text_message_template = loader.get_template(text_template)\n text_message = text_message_template.render(message_context)\n # prepare html message\n html_message_template = loader.get_template(html_template)\n html_message = html_message_template.render(message_context)\n\n send_mail(subject, text_message, DEFAULT_FROM_EMAIL, [data['email'],], html=html_message)", "def show_popup(self, 
data):\r\n store = get_store()\r\n if data[\"errcode\"] == 2:\r\n self.ids.inlayout.rows = 12\r\n else:\r\n self.ids.inlayout.rows = 11\r\n #if there is an error to print\r\n if data[\"errcode\"] == 2:\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Warning :\", \"FF0000\")))\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(data[\"errtext\"], \"FF0000\")))\r\n #Hydrodynamic injection\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Hydrodynamic injection :\", \"FFFFFF\")))\r\n value = round(store.get('Hydrodynamicinjection')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Hydrodynamicinjection')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Capillary volume\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Capillary volume :\", \"BFBFBF\")))\r\n value = round(store.get('Capillaryvolume')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Capillaryvolume')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Capillary volume to window\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Capillary volume to window :\", \"FFFFFF\")))\r\n value = round(store.get('Capillaryvolumetowin')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Capillaryvolumetowin')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Injection plug length\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injection plug length :\", \"BFBFBF\")))\r\n value = round(store.get('Injectionpluglen')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectionpluglen')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Plug (% of total length)\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Plug (% of total length) :\", \"FFFFFF\")))\r\n value = round(store.get('Pluglenpertotallen')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Pluglenpertotallen')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Plug (% of length to window)\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Plug (% of length to window) :\", \"BFBFBF\")))\r\n value = round(store.get('Pluglenperlentowin')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Pluglenperlentowin')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Injected analyte \r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injected analyte :\", \"FFFFFF\")))\r\n value = round(store.get('Injectedanalyteng')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectedanalyteng')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=\"\"))\r\n value = round(store.get('Injectedanalytepmol')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectedanalytepmol')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Injection pressure\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injection pressure :\", \"BFBFBF\")))\r\n value = round(store.get('Injectionpressure')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectionpressure')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Flow rate\r\n 
self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Flow rate :\", \"FFFFFF\")))\r\n value = round(store.get('Flowrate')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Flowrate')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Field strength\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Field strength :\", \"BFBFBF\")))\r\n value = round(store.get('Fieldstrength')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Fieldstrength')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #open the popup\r\n self.open()", "def render_mail(request, template, params, insert_template_vars=True, method='html'):\n result = render(template, params, request=request)\n return unicode(result, 'utf8')", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def test_notification_cp_email(self):\n # publish the item\n api.content.transition(obj=self.event, transition='publish')\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 2)\n msg = message_from_string(mailhost.messages[1])\n\n self.assertEqual(msg['To'], CP_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <noreply@eestec.net>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BCP=5D_=5BEVENTS=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been published', msg.get_payload())\n self.assertIn('http://nohost/plone/lc/test-event', msg.get_payload())", "def notification_interface():\n return render_template(\"notifications.html\")", "def email_body_to_user_sending_msg(profile, message):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Way to get the conversation started! 
You messaged <a href=\\\"https://127.0.0.1:5000/profile?hero=' + profile.prof_id + '\\\" style=\"color:#1488CC\">' + profile.prof_name.encode('utf8', 'ignore') + '</a> and should get a response soon.<br><br>'\n\tmsg = msg + 'Until then, stand tight. <br><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send(self, recipient, template_path, context, subject, bcc_email=[]):\n\n body = self.email_render(template_path, context)\n self.send_email(recipient, subject, body, bcc_email)", "def send_mail():\n msg = MIMEMultipart()\n msg[\"From\"] = \"SIRP-Reminders@company.com\"\n msg[\"To\"] = SENT_TO\n msg[\"Subject\"] = \"The Hive Case Metrics\"\n msg.attach(MIMEText(\"Attached are the requested case metrics in .XLSX format.\"))\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(\"Hive Metrics.xlsx\", \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", 'attachment; filename=\"Hive Metrics.xlsx\"')\n msg.attach(part)\n smtp = smtplib.SMTP(SMTP_SERVER)\n smtp.starttls()\n smtp.sendmail(msg[\"From\"], msg[\"To\"].split(\",\"), msg.as_string())\n smtp.quit()", "def email_body_beta_email(url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" 
valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Thanks for signing up for Insprite! We are excited that you\\'re interested in what we are doing over here. We are creating Insprite to be a vibrant, friendly community where you can both learn from creative people in your area, and teach your passions to others. We sincerely hope that you will be a part of it!'\n\tmsg = msg + '<br><br>We\\'re currently in the process of finishing up Insprite... and we\\'re nearly there. We\\'re just adding some bells and whistles so it\\'ll be the best possible experience.<br><br>'\n\tmsg = msg + 'We will be in touch when we\\'re ready to launch&mdash;tentatively in late 2014. We can\\'t wait to show you what we\\'ve been working on. You\\'re going to love it.<br><br>'\n\tmsg = msg + 'In the meantime, feel free to drop us a line, or follow us on our <a href=\"#\" style=\"color:#1488CC\">Blog</a>, where we will post lots of cool bloggy things (no, really, we\\'re gonna try and keep it interesting).<br><br>'\n\tmsg = msg + '<br>Spritely yours,<br>'\n\tmsg = msg + 'The Insprite Gang </font>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'", "def _get_message_body(self, template_file, message_data):\r\n return \"Test email message from bookie\"\r\n # lookup = config['pylons.app_globals'].mako_lookup\r\n # template = lookup.get_template(template_file)\r\n\r\n # # template vars are a combo of the obj dict and the extra dict\r\n # template_vars = {'data': message_data}\r\n # return template.render(**template_vars)\r", "def send(target: Owner, template: str, context: dict=None, session: SQLA_SESSION=None):\n wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER\n subject = wrapper.render(template, Layout.SUBJECT, target, context)\n body = wrapper.render(template, Layout.BODY, target, context)\n recipient = (owner_desc(target, True), target.email)\n send_mail(recipient, subject, body, copy_sysadmins=False, session=session)", "def message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n root.withdraw()\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass", "def 
safe_message_dialog(self, markup, msgtype=gtk.MESSAGE_ERROR):\n gtk.gdk.threads_enter()\n mbox = gtk.MessageDialog(type=msgtype, buttons=gtk.BUTTONS_OK)\n mbox.set_markup(markup)\n mbox.run()\n mbox.destroy()\n gtk.gdk.threads_leave()", "def email_invite(request):\n if request.method == 'POST':\n form = EmailInviteForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n name = f'{cd[\"name\"]}'\n subject = f'{cd[\"name\"]} has sent you an invitation'\n from_email = settings.DEFAULT_FROM_EMAIL\n comment = f'{cd[\"comment\"]}'\n html_template = get_template(\n 'profiles/email/email_invite_message.html'\n ).render()\n msg = EmailMultiAlternatives(\n subject, comment, from_email, [cd['to']]\n )\n msg.attach_alternative(html_template, 'text/html')\n msg.send(fail_silently=False)\n messages.success(request, 'Your email has been sent')\n return HttpResponseRedirect(reverse('profiles:find_friends'))\n messages.error(request, \"We are sorry but we could not send your email this time.\")\n return redirect('home')\n else:\n form = EmailInviteForm()\n\n template = 'profiles/email_invite.html'\n context = {\n 'form': form,\n }\n return render(request, template, context)", "def sendMsg(pC,principalInvestigator,email,trackFileName,start=0) :\n\n startlst=lst()\n lstsym=str(int(startlst))+\":\"+str(int(60*(startlst-int(startlst))))\n date=str(yearMonthDay())\n msg = 'To: '+email+'\\n'\n msg += 'From: obs@mmarray.org\\n'\n\n if start == 0:\n msg += 'Subject: Your track has been run.\\n\\n'\n msg += 'Attn: '+principalInvestigator+'\\n\\n'\n msg += 'Project '+pC+' ended at '+lstsym+'LST('\n msg += getMiriadUTStamp() + \"UT).\\n\"\n msg += 'Date: '+date+'\\n\\n'\n msg += 'The edited file that the observers ran is: '+trackFileName+' .\\n'\n msg += 'Please go to http://carma-server.ncsa.uiuc.edu:8181\\n'\n msg += ' to retrieve your data, including quality script '\n msg += 'output and plots.\\n\\n'\n msg += 'Send observers feedback at obs@mmarray.org\\n\\n'\n msg += '*****This email was automatically generated and sent '\n msg += 'at the completion of your track.*****\\n'\n else:\n msg += 'Subject: Your track has just started running.\\n\\n'\n msg += 'Attn: '+principalInvestigator+'\\n\\n'\n msg += 'Project '+pC+' started at '+lstsym+'LST('\n msg += getMiriadUTStamp() + \"UT).\\n\"\n msg += 'Date: '+date+'\\n\\n'\n msg += 'The edited file that the observers ran is: '+trackFileName+'\\n\\n'\n msg += 'Send observers feedback at obs@mmarray.org\\n\\n'\n msg += '*****This email was automatically generated and sent '\n msg += 'at the beginning of your track.*****\\n'\n MAIL = \"/usr/sbin/sendmail\"\n if (False) : # for debugging\n f = open(\"test.txt\",'w')\n f.write(msg)\n f.close()\n return\n p = os.popen(\"%s -t\" %MAIL, 'w')\n p.write(msg)\n exitcode = p.close()\n if exitcode: print \"Exit code from sendmail: %s\" %exitcode", "def build_hello_email():\n from_email = Email(\"test@example.com\")\n subject = \"Hello World from the SendGrid Python Library\"\n to_email = Email(\"test@example.com\")\n content = Content(\"text/plain\", \"some text here\")\n mail = Mail(from_email, subject, to_email, content)\n mail.personalizations[0].add_to(Email(\"test2@example.com\"))\n\n return mail.get()", "def render_mail(self, template_prefix, email, context):\n subject = render_to_string('{0}_subject.txt'.format(template_prefix),\n context)\n # remove superfluous line breaks\n subject = \" \".join(subject.splitlines()).strip()\n subject = self.format_email_subject(subject)\n\n bodies = {}\n for ext in ['html', 
'txt']:\n try:\n template_name = '{0}_message.{1}'.format(template_prefix, ext)\n bodies[ext] = render_to_string(template_name,\n context).strip()\n except TemplateDoesNotExist:\n if ext == 'txt' and not bodies:\n # We need at least one body\n raise\n if 'txt' in bodies:\n msg = EmailMultiAlternatives(subject,\n bodies['txt'],\n settings.DEFAULT_FROM_EMAIL,\n [email])\n if 'html' in bodies:\n msg.attach_alternative(bodies['html'], 'text/html')\n else:\n msg = EmailMessage(subject,\n bodies['html'],\n settings.DEFAULT_FROM_EMAIL,\n [email])\n msg.content_subtype = 'html' # Main content is now text/html\n return msg", "def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['helpdesk@mscsoftware.com']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = 'DONOTREPLY@mscsoftware.com'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n else:\n msg[\"Cc\"] = \"msc-itsupport@mscsoftware.com,\\\n DL-ENG-BUILD@mscsoftware.com,\\\n raj.behera@mscsoftware.com\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. 
\" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def open_generatorWindow(self):\n self.window = generatorWindow(self)\n self.hide()", "def _get_message_body(self, template_file, message_data):\r\n return \"\"\"\r\nPlease click the link below to activate your account.\r\n\r\n{0}\r\n\r\nWe currently support importing from Google Bookmarks and Delicious exports.\r\nImporting from a Chrome or Firefox export does work, however it reads the\r\nfolder names in as tags. So be aware of that.\r\n\r\nGet the Chrome extension from the Chrome web store:\r\nhttps://chrome.google.com/webstore/detail/knnbmilfpmbmlglpeemajjkelcbaaega\r\n\r\nIf you have any issues feel free to join #bookie on freenode.net or report\r\nthe issue or idea on https://github.com/bookieio/Bookie/issues.\r\n\r\nWe also encourage you to sign up for our mailing list at:\r\nhttps://groups.google.com/forum/#!forum/bookie_bookmarks\r\n\r\nand our Twitter account:\r\nhttp://twitter.com/BookieBmarks\r\n\r\nBookie is open source. 
Check out the source at:\r\nhttps://github.com/bookieio/Bookie\r\n\r\n---\r\nThe Bookie Team\"\"\".format(message_data)", "def ui(self):\r\n \r\n # delete the window if its handle exists\r\n if cmds.window(self.window, exists=True):\r\n cmds.deleteUI(self.window, window=True)\r\n \r\n #reads settings\r\n self.read()\r\n \r\n # initialize the window\r\n self.window = cmds.window(\r\n self.window,\r\n title=self.title,\r\n width=200,\r\n sizeable=False,\r\n mnb=False,\r\n mxb=False\r\n )\r\n self.mainCol = cmds.columnLayout( adjustableColumn=True )\r\n cmds.text( label='', align='center',height=5)\r\n cmds.text( label='email (From)', align='left' , width=150)\r\n self.userFld = cmds.textField(text=self.login)\r\n \r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='Password', align='left' )\r\n self.pswdFld = cmds.textField(text=self.password)\r\n \r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='email (To)', align='left' )\r\n self.toFld = cmds.textField(text=self.to)\r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='RenderTime in Minutes', align='left' )\r\n self.timeFld = cmds.textField(text=self.time, )\r\n\r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='Smtp Server:port', align='left' )\r\n self.smtpFld = cmds.textField(text=self.smtp, )\r\n\r\n\r\n cmds.text( label='', align='center',height=10)\r\n\r\n self.saveBtn = cmds.button( label='Save Settings',command=self.save)\r\n self.installBtn = cmds.button( label='Install Scripts',command=self.install)\r\n self.installBtn = cmds.button( label='Send Test Email',command=self.test)\r\n \r\n cmds.separator( height=40, style='doubleDash' )\r\n cmds.text( label='Script by Dhruv Govil', align='center' )\r\n cmds.text( label='www.dgovil.com', align='center',hyperlink=True )\r\n cmds.text( label='', align='center',height=10)\r\n cmds.showWindow( self.window )", "def test_notification_creation_email(self):\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 1)\n msg = message_from_string(mailhost.messages[0])\n\n self.assertEqual(msg['To'], BOARD_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <noreply@eestec.net>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BEVENTS=5D=5BCreated=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been created', msg.get_payload())\n self.assertIn('T=C3=A9st event', msg.get_payload())", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)" ]
[ "0.73784035", "0.647709", "0.6436048", "0.6249949", "0.6230297", "0.61297333", "0.60587174", "0.59738946", "0.5970479", "0.59629035", "0.57741827", "0.5760959", "0.5647457", "0.5546402", "0.55030817", "0.5446904", "0.53914595", "0.536303", "0.5356848", "0.5346636", "0.5345804", "0.53432846", "0.53340894", "0.5279925", "0.5275254", "0.5260225", "0.52559614", "0.5248557", "0.52465105", "0.5232883", "0.5230798", "0.5230233", "0.5221684", "0.52207214", "0.52049124", "0.52033", "0.5195692", "0.51739943", "0.51667434", "0.51648426", "0.51585317", "0.5150935", "0.5147506", "0.5141418", "0.5139252", "0.5131695", "0.5122708", "0.51221377", "0.5121297", "0.51139843", "0.5110785", "0.5098427", "0.50975907", "0.5097536", "0.5087654", "0.5086622", "0.5070753", "0.507044", "0.506586", "0.5064297", "0.5059151", "0.5041274", "0.5026315", "0.5023534", "0.50232035", "0.50163275", "0.50142443", "0.49962407", "0.4993144", "0.49915692", "0.49912333", "0.4988688", "0.49850065", "0.49843764", "0.49738282", "0.49730754", "0.49686083", "0.49679804", "0.49675912", "0.49670428", "0.49622208", "0.49554554", "0.4955418", "0.49524835", "0.49488872", "0.49472514", "0.49429706", "0.49417037", "0.49356613", "0.4934552", "0.49295795", "0.4927887", "0.49277171", "0.49255544", "0.49243143", "0.49193382", "0.49182683", "0.49141103", "0.49053094", "0.49015862" ]
0.6925168
1
spark up an instance
def __init__(self):
    self.stats = {}
    self.stats['hits'] = 0
    self.stats['operations'] = {}
    self.stats['operations']['GetCapabilities'] = {}
    self.stats['operations']['GetCapabilities']['hits'] = 0
    self.stats['operations']['POST'] = {}
    self.stats['operations']['POST']['hits'] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n spark_it_up()", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up(self, connection):\n raise NotImplementedError", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))", "def up_cmd(ctx):\n pass", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the 
image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n 
print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. 
\\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def up(image):\n ovpn_file_queue = vpn_file_queue('./VPN')\n ovpn_file_count = len(list(ovpn_file_queue.queue))\n port_range = range(START_PORT, START_PORT + ovpn_file_count)\n write_haproxy_conf(port_range)\n write_proxychains_conf(port_range)\n start_containers(image, ovpn_file_queue, port_range)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n 
cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def setup_kubernetes_version(skuba, kubernetes_version=None):\n\n skuba.cluster_init(kubernetes_version)\n skuba.node_bootstrap()\n skuba.node_join(role=\"worker\", nr=0)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. 
This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def handle_hup(self):\n pass", "def startUp(self):\n pass", "def Instance():\n if not Spark._active_instance:\n Spark._active_instance = Spark()\n return Spark._active_instance", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def setup(self, stage: Optional[str] = None) -> None:", "def create_spark_session():\n \n print(\"Create Spark Session\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n print(\"Spark Session Created\")", "def create_spark_session():\n \n spark = SparkSession.builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .getOrCreate()\n \n return spark", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n 
logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def spinUp(self,\n btstrap_loc='s3://ddapi.data/ddapp_emr_bootstrap.sh',\n mstr_cnt=1,\n mstr_mkt='ON_DEMAND',\n slave_cnt=2,\n slave_mkt='ON_DEMAND',\n ):\n\n self.btstrap_loc = btstrap_loc\n logging.info(f'starting to spin up emr cluster from emr client: {self.emr_client}')\n response = self.emr_client.run_job_flow(\n Name=self.clustername,\n LogUri=self.logpath,\n ReleaseLabel='emr-5.23.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': \"Master nodes\",\n 'Market': mstr_mkt,\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm4.large',\n 'InstanceCount': mstr_cnt,\n },\n {\n 'Name': \"Slave nodes\",\n 'Market': slave_mkt,\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm4.large',\n 'InstanceCount': slave_cnt,\n }\n ],\n 'Ec2KeyName': self.key,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2SubnetId': 'string',\n },\n Applications=[\n {'Name': 'Hadoop'},\n {'Name': 'Spark'}\n ],\n BootstrapActions=[\n {\n 'Name': 'bootstrap requirements',\n 'ScriptBootstrapAction': {\n 'Path': btstrap_loc,\n }\n },\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Configurations=[\n {\n 'Classification': 'spark-env',\n 'Configurations': [\n {\n 'Classification': 'export',\n 'Properties': {\n 'PYSPARK_PYTHON': '/usr/bin/python3',\n 'PYSPARK_DRIVER_PYTHON': '/usr/bin/python3'\n }\n }\n ]\n },\n {\n 'Classification': 'spark-defaults',\n 'Properties': {\n 'spark.sql.execution.arrow.enabled': 'true'\n }\n },\n {\n 'Classification': 'spark',\n 'Properties': {\n 'maximizeResourceAllocation': 'true'\n }\n }\n ],\n )\n logging.info(f'spinning up emr cluster from emr client: {self.emr_client}')\n self.job_flow_id = response['JobFlowId']\n logging.info(f'job flow id {self.emr_client} logged')\n\n # get cluster id\n resp = self.emr_client.list_clusters()\n clus = resp['Clusters'][0]\n self.clusID = clus['Id']\n\n # don't forget to tip the waiter\n logging.info(f'start waiter')\n create_waiter = self.emr_client.get_waiter('cluster_running')\n try:\n create_waiter.wait(ClusterId=self.clusID,\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Cluster did not finish spinning up in two hours')\n else:\n print(e.message)", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def whenup(sourcename) :\n return s.whenUp(sourcename)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def _work(self):\n command = [ os.path.join(self.spark_home,'bin/spark-submit') ] + self.cluster_options + self.other_options + \\\n self.resources + self.app_config\n\n sideput.sideput( \"spark submit command is %s\" % ' '.join(command) )\n\n with sideput.Timing(\"spark job completed in %d seconds\"):\n result, stdout, stderr = os_util.execute_command(command, do_sideput=True)\n\n sideput.sideput(\"[%s] stderr:\\n%s\" % (self.name(), stderr), level=\"INFO\")\n sideput.sideput(\"[%s] stdout:\\n%s\" % (self.name(), stdout), level=\"INFO\")\n if result != 0:\n raise Exception(\"spark job failed with code %d\" % result)\n else:\n try:\n result_hash = yaml_util.load(stdout) if self._emits() else {}\n sideput.sideput(\"parsed stdout is %s\\n\" % result_hash, level=\"INFO\")\n except Exception as e:\n result_hash = {}\n sideput.sideput(\"parsing stdout as json failed with message %s \\n\" % e.message , level= \"ERROR\")\n sideput.sideput(\"stdout is \\n %s \\n\" % stdout, level=\"ERROR\")\n raise e\n sideput.sideput(\"[%s] spark job completed successfully\"\n % self.name(), level = \"INFO\")\n return result_hash", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! 
please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def start_up(self, velocity=VELOCITY):\n action = StartUp(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def create_spark_session():\n spark = SparkSession\\\n .builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\")\\\n .getOrCreate()\n # print (spark.sparkContext.getConf().getAll)\n return spark", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def setUpClass(cls):\n GlusterBaseClass.setUpClass.im_func(cls)\n # Create and start volume\n g.log.info(\"Starting volume setup process %s\", cls.volname)\n ret = cls.setup_volume()\n if not ret:\n raise ExecutionError(\"Failed to setup \"\n \"and start volume %s\" % cls.volname)\n g.log.info(\"Successfully created and started the volume: %s\",\n cls.volname)", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def instantiate(cls, spark):\n logger = ProcessLog().getLogger()\n return cls(spark, logger)", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def setup(self, cluster):\n raise NotImplementedError()", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone 
https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_instance():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'packageName', 'OS',\n 'sgPorts')\n )\n job = jobs.deploy.apply_async(args=(data,))\n current_user.add_job(job.id)\n return make_response(job_id=job.id)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def make_test_instance(branchname, instance_name=\"schedule\"):\n if not instance_name:\n instance_name = branchname\n instance_dir = env.site_root + instance_name\n if not exists(instance_dir):\n with cd(env.site_root):\n run('git clone %s %s' % (env.repo_url, instance_name))\n with cd(instance_dir):\n run('git checkout %s' % branchname)\n else:\n with cd(instance_dir):\n run(\"git pull\")\n\n bootstrap(instance_name, 'test')\n\n upstart_conf_templ = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf.template')\n upstart_conf = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf')\n if not exists(upstart_conf):\n run('cp %s %s' % (upstart_conf_templ, upstart_conf))\n sed(upstart_conf, '\\\\{branchname\\\\}', instance_name)\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if not exists(upstart_link):\n sudo('ln -s %s %s' % (upstart_conf, upstart_link))\n sudo('initctl reload-configuration')\n sudo('start %s' % instance_name)\n\n apache_config_templ = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf.template')\n apache_config = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf')\n if not exists(apache_config):\n run('cp %s %s' % (apache_config_templ, apache_config))\n sed(apache_config, '\\\\{branchname\\\\}', instance_name)\n apache_name = '/etc/nginx/sites-available/%s' % instance_name\n if not exists(apache_name):\n sudo('ln -s %s %s' % (apache_config, apache_name))\n sudo('nxensite %s' % instance_name)\n sudo('mkdir -p %s%s/media/static' % (env.site_root, instance_name))\n sudo('chgrp -R www-data %s%s/media/static' % (env.site_root, instance_name))\n sudo('chmod -R g+w %s%s/media/static' % (env.site_root, instance_name))\n sudo('/etc/init.d/nginx reload')", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' 
# Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def start_instance(InstanceId=None):\n pass", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n 
self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def __init__(self, sparkContext, minPartitions=None):\n from thunder.utils.aws import AWSCredentials\n self.sc = sparkContext\n self.minPartitions = minPartitions\n self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)", "def create_spark_session() -> SparkSession:\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark" ]
[ "0.7255347", "0.58859587", "0.578518", "0.57325435", "0.57292104", "0.5720284", "0.56871897", "0.5679635", "0.5546256", "0.54448164", "0.5438893", "0.5424962", "0.541132", "0.54103553", "0.53980225", "0.5390525", "0.53686017", "0.5367046", "0.53380203", "0.5331934", "0.5286234", "0.5270096", "0.5235454", "0.5228658", "0.5206304", "0.52032954", "0.5199506", "0.5195851", "0.5146053", "0.5127637", "0.511699", "0.509955", "0.509666", "0.5089193", "0.50717664", "0.50587887", "0.50536716", "0.5049728", "0.5049377", "0.50272524", "0.5018328", "0.50131476", "0.5002543", "0.50008047", "0.49945518", "0.4964589", "0.4961737", "0.4939378", "0.49381736", "0.49367008", "0.49316472", "0.49294335", "0.49267942", "0.4923488", "0.4917696", "0.49133232", "0.4905058", "0.4902218", "0.48974627", "0.48919496", "0.48836", "0.48762673", "0.4876181", "0.4876181", "0.4876181", "0.4876181", "0.48691157", "0.4865171", "0.4856438", "0.4856258", "0.48519975", "0.48513728", "0.48335415", "0.48328194", "0.48238355", "0.48237792", "0.48226494", "0.48050871", "0.48024312", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.47979367", "0.47955394" ]
0.0
-1
spark up an instance
def __init__(self):\n        OWSReport.__init__(self)\n        self.stats['type'] = 'OGC:WMS'\n        self.stats['operations']['GetMap'] = {}\n        self.stats['operations']['GetMap']['hits'] = 0\n        self.stats['operations']['GetMap']['resource'] = {}\n        self.stats['operations']['GetMap']['resource']['param'] = 'layers'\n        self.stats['operations']['GetMap']['resource']['list'] = {}\n        self.stats['operations']['GetFeatureInfo'] = {}\n        self.stats['operations']['GetFeatureInfo']['hits'] = 0\n        self.stats['operations']['GetLegendGraphic'] = {}\n        self.stats['operations']['GetLegendGraphic']['hits'] = 0\n        self.stats['operations']['GetStyles'] = {}\n        self.stats['operations']['GetStyles']['hits'] = 0\n        self.stats['operations']['DescribeLayer'] = {}\n        self.stats['operations']['DescribeLayer']['hits'] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n spark_it_up()", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up(self, connection):\n raise NotImplementedError", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))", "def up_cmd(ctx):\n pass", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the 
image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n 
print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. 
\\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def up(image):\n ovpn_file_queue = vpn_file_queue('./VPN')\n ovpn_file_count = len(list(ovpn_file_queue.queue))\n port_range = range(START_PORT, START_PORT + ovpn_file_count)\n write_haproxy_conf(port_range)\n write_proxychains_conf(port_range)\n start_containers(image, ovpn_file_queue, port_range)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n 
cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def setup_kubernetes_version(skuba, kubernetes_version=None):\n\n skuba.cluster_init(kubernetes_version)\n skuba.node_bootstrap()\n skuba.node_join(role=\"worker\", nr=0)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. 
This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def handle_hup(self):\n pass", "def startUp(self):\n pass", "def Instance():\n if not Spark._active_instance:\n Spark._active_instance = Spark()\n return Spark._active_instance", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def setup(self, stage: Optional[str] = None) -> None:", "def create_spark_session():\n \n print(\"Create Spark Session\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n print(\"Spark Session Created\")", "def create_spark_session():\n \n spark = SparkSession.builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .getOrCreate()\n \n return spark", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n 
logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def spinUp(self,\n btstrap_loc='s3://ddapi.data/ddapp_emr_bootstrap.sh',\n mstr_cnt=1,\n mstr_mkt='ON_DEMAND',\n slave_cnt=2,\n slave_mkt='ON_DEMAND',\n ):\n\n self.btstrap_loc = btstrap_loc\n logging.info(f'starting to spin up emr cluster from emr client: {self.emr_client}')\n response = self.emr_client.run_job_flow(\n Name=self.clustername,\n LogUri=self.logpath,\n ReleaseLabel='emr-5.23.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': \"Master nodes\",\n 'Market': mstr_mkt,\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm4.large',\n 'InstanceCount': mstr_cnt,\n },\n {\n 'Name': \"Slave nodes\",\n 'Market': slave_mkt,\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm4.large',\n 'InstanceCount': slave_cnt,\n }\n ],\n 'Ec2KeyName': self.key,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2SubnetId': 'string',\n },\n Applications=[\n {'Name': 'Hadoop'},\n {'Name': 'Spark'}\n ],\n BootstrapActions=[\n {\n 'Name': 'bootstrap requirements',\n 'ScriptBootstrapAction': {\n 'Path': btstrap_loc,\n }\n },\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Configurations=[\n {\n 'Classification': 'spark-env',\n 'Configurations': [\n {\n 'Classification': 'export',\n 'Properties': {\n 'PYSPARK_PYTHON': '/usr/bin/python3',\n 'PYSPARK_DRIVER_PYTHON': '/usr/bin/python3'\n }\n }\n ]\n },\n {\n 'Classification': 'spark-defaults',\n 'Properties': {\n 'spark.sql.execution.arrow.enabled': 'true'\n }\n },\n {\n 'Classification': 'spark',\n 'Properties': {\n 'maximizeResourceAllocation': 'true'\n }\n }\n ],\n )\n logging.info(f'spinning up emr cluster from emr client: {self.emr_client}')\n self.job_flow_id = response['JobFlowId']\n logging.info(f'job flow id {self.emr_client} logged')\n\n # get cluster id\n resp = self.emr_client.list_clusters()\n clus = resp['Clusters'][0]\n self.clusID = clus['Id']\n\n # don't forget to tip the waiter\n logging.info(f'start waiter')\n create_waiter = self.emr_client.get_waiter('cluster_running')\n try:\n create_waiter.wait(ClusterId=self.clusID,\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Cluster did not finish spinning up in two hours')\n else:\n print(e.message)", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def whenup(sourcename) :\n return s.whenUp(sourcename)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def _work(self):\n command = [ os.path.join(self.spark_home,'bin/spark-submit') ] + self.cluster_options + self.other_options + \\\n self.resources + self.app_config\n\n sideput.sideput( \"spark submit command is %s\" % ' '.join(command) )\n\n with sideput.Timing(\"spark job completed in %d seconds\"):\n result, stdout, stderr = os_util.execute_command(command, do_sideput=True)\n\n sideput.sideput(\"[%s] stderr:\\n%s\" % (self.name(), stderr), level=\"INFO\")\n sideput.sideput(\"[%s] stdout:\\n%s\" % (self.name(), stdout), level=\"INFO\")\n if result != 0:\n raise Exception(\"spark job failed with code %d\" % result)\n else:\n try:\n result_hash = yaml_util.load(stdout) if self._emits() else {}\n sideput.sideput(\"parsed stdout is %s\\n\" % result_hash, level=\"INFO\")\n except Exception as e:\n result_hash = {}\n sideput.sideput(\"parsing stdout as json failed with message %s \\n\" % e.message , level= \"ERROR\")\n sideput.sideput(\"stdout is \\n %s \\n\" % stdout, level=\"ERROR\")\n raise e\n sideput.sideput(\"[%s] spark job completed successfully\"\n % self.name(), level = \"INFO\")\n return result_hash", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! 
please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def start_up(self, velocity=VELOCITY):\n action = StartUp(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def create_spark_session():\n spark = SparkSession\\\n .builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\")\\\n .getOrCreate()\n # print (spark.sparkContext.getConf().getAll)\n return spark", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def setUpClass(cls):\n GlusterBaseClass.setUpClass.im_func(cls)\n # Create and start volume\n g.log.info(\"Starting volume setup process %s\", cls.volname)\n ret = cls.setup_volume()\n if not ret:\n raise ExecutionError(\"Failed to setup \"\n \"and start volume %s\" % cls.volname)\n g.log.info(\"Successfully created and started the volume: %s\",\n cls.volname)", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def instantiate(cls, spark):\n logger = ProcessLog().getLogger()\n return cls(spark, logger)", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def setup(self, cluster):\n raise NotImplementedError()", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone 
https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_instance():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'packageName', 'OS',\n 'sgPorts')\n )\n job = jobs.deploy.apply_async(args=(data,))\n current_user.add_job(job.id)\n return make_response(job_id=job.id)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def make_test_instance(branchname, instance_name=\"schedule\"):\n if not instance_name:\n instance_name = branchname\n instance_dir = env.site_root + instance_name\n if not exists(instance_dir):\n with cd(env.site_root):\n run('git clone %s %s' % (env.repo_url, instance_name))\n with cd(instance_dir):\n run('git checkout %s' % branchname)\n else:\n with cd(instance_dir):\n run(\"git pull\")\n\n bootstrap(instance_name, 'test')\n\n upstart_conf_templ = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf.template')\n upstart_conf = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf')\n if not exists(upstart_conf):\n run('cp %s %s' % (upstart_conf_templ, upstart_conf))\n sed(upstart_conf, '\\\\{branchname\\\\}', instance_name)\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if not exists(upstart_link):\n sudo('ln -s %s %s' % (upstart_conf, upstart_link))\n sudo('initctl reload-configuration')\n sudo('start %s' % instance_name)\n\n apache_config_templ = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf.template')\n apache_config = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf')\n if not exists(apache_config):\n run('cp %s %s' % (apache_config_templ, apache_config))\n sed(apache_config, '\\\\{branchname\\\\}', instance_name)\n apache_name = '/etc/nginx/sites-available/%s' % instance_name\n if not exists(apache_name):\n sudo('ln -s %s %s' % (apache_config, apache_name))\n sudo('nxensite %s' % instance_name)\n sudo('mkdir -p 
%s%s/media/static' % (env.site_root, instance_name))\n sudo('chgrp -R www-data %s%s/media/static' % (env.site_root, instance_name))\n sudo('chmod -R g+w %s%s/media/static' % (env.site_root, instance_name))\n sudo('/etc/init.d/nginx reload')", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def start_instance(InstanceId=None):\n pass", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n 
self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def __init__(self, sparkContext, minPartitions=None):\n from thunder.utils.aws import AWSCredentials\n self.sc = sparkContext\n self.minPartitions = minPartitions\n self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)", "def create_spark_session() -> SparkSession:\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark" ]
[ "0.7255299", "0.5885112", "0.57854474", "0.5732422", "0.5728568", "0.57188094", "0.5686993", "0.56795335", "0.55460984", "0.54442745", "0.54389185", "0.54236734", "0.5411601", "0.5410782", "0.539766", "0.53900146", "0.5368531", "0.53666943", "0.53366995", "0.53314143", "0.52853173", "0.52692574", "0.5235823", "0.5229087", "0.52060443", "0.5203687", "0.51997", "0.5195361", "0.51463693", "0.5127799", "0.51164204", "0.5099871", "0.5095755", "0.50892156", "0.5072066", "0.5059149", "0.50531006", "0.5049627", "0.5048304", "0.5027425", "0.5017536", "0.5012346", "0.5001642", "0.4999611", "0.49954787", "0.4963975", "0.49606976", "0.49390978", "0.4937356", "0.49355713", "0.493246", "0.4929713", "0.49250352", "0.4923733", "0.4916192", "0.49137157", "0.49037406", "0.4901357", "0.4896235", "0.48905668", "0.48833445", "0.48766047", "0.48766047", "0.48766047", "0.48766047", "0.48752612", "0.48684382", "0.48640826", "0.4856041", "0.48556623", "0.48512846", "0.48511493", "0.48325047", "0.48319644", "0.48235676", "0.48233774", "0.48213205", "0.4805509", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.4802022", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.47972357", "0.47959027" ]
0.0
-1
spark up an instance
def __init__(self):
        OWSReport.__init__(self)
        self.stats['type'] = 'OGC:WFS'
        self.stats['operations']['GetFeature'] = {}
        self.stats['operations']['GetFeature']['hits'] = 0
        self.stats['operations']['GetFeature']['resource'] = {}
        self.stats['operations']['GetFeature']['resource']['param'] = 'typename'
        self.stats['operations']['GetFeature']['resource']['list'] = {}
        self.stats['operations']['DescribeFeatureType'] = {}
        self.stats['operations']['DescribeFeatureType']['hits'] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n spark_it_up()", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up(self, connection):\n raise NotImplementedError", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))", "def up_cmd(ctx):\n pass", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the 
image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n 
print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. 
\\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def up(image):\n ovpn_file_queue = vpn_file_queue('./VPN')\n ovpn_file_count = len(list(ovpn_file_queue.queue))\n port_range = range(START_PORT, START_PORT + ovpn_file_count)\n write_haproxy_conf(port_range)\n write_proxychains_conf(port_range)\n start_containers(image, ovpn_file_queue, port_range)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n 
cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def setup_kubernetes_version(skuba, kubernetes_version=None):\n\n skuba.cluster_init(kubernetes_version)\n skuba.node_bootstrap()\n skuba.node_join(role=\"worker\", nr=0)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. 
This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def handle_hup(self):\n pass", "def startUp(self):\n pass", "def Instance():\n if not Spark._active_instance:\n Spark._active_instance = Spark()\n return Spark._active_instance", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def setup(self, stage: Optional[str] = None) -> None:", "def create_spark_session():\n \n print(\"Create Spark Session\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n print(\"Spark Session Created\")", "def create_spark_session():\n \n spark = SparkSession.builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .getOrCreate()\n \n return spark", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n 
logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def spinUp(self,\n btstrap_loc='s3://ddapi.data/ddapp_emr_bootstrap.sh',\n mstr_cnt=1,\n mstr_mkt='ON_DEMAND',\n slave_cnt=2,\n slave_mkt='ON_DEMAND',\n ):\n\n self.btstrap_loc = btstrap_loc\n logging.info(f'starting to spin up emr cluster from emr client: {self.emr_client}')\n response = self.emr_client.run_job_flow(\n Name=self.clustername,\n LogUri=self.logpath,\n ReleaseLabel='emr-5.23.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': \"Master nodes\",\n 'Market': mstr_mkt,\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm4.large',\n 'InstanceCount': mstr_cnt,\n },\n {\n 'Name': \"Slave nodes\",\n 'Market': slave_mkt,\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm4.large',\n 'InstanceCount': slave_cnt,\n }\n ],\n 'Ec2KeyName': self.key,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2SubnetId': 'string',\n },\n Applications=[\n {'Name': 'Hadoop'},\n {'Name': 'Spark'}\n ],\n BootstrapActions=[\n {\n 'Name': 'bootstrap requirements',\n 'ScriptBootstrapAction': {\n 'Path': btstrap_loc,\n }\n },\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Configurations=[\n {\n 'Classification': 'spark-env',\n 'Configurations': [\n {\n 'Classification': 'export',\n 'Properties': {\n 'PYSPARK_PYTHON': '/usr/bin/python3',\n 'PYSPARK_DRIVER_PYTHON': '/usr/bin/python3'\n }\n }\n ]\n },\n {\n 'Classification': 'spark-defaults',\n 'Properties': {\n 'spark.sql.execution.arrow.enabled': 'true'\n }\n },\n {\n 'Classification': 'spark',\n 'Properties': {\n 'maximizeResourceAllocation': 'true'\n }\n }\n ],\n )\n logging.info(f'spinning up emr cluster from emr client: {self.emr_client}')\n self.job_flow_id = response['JobFlowId']\n logging.info(f'job flow id {self.emr_client} logged')\n\n # get cluster id\n resp = self.emr_client.list_clusters()\n clus = resp['Clusters'][0]\n self.clusID = clus['Id']\n\n # don't forget to tip the waiter\n logging.info(f'start waiter')\n create_waiter = self.emr_client.get_waiter('cluster_running')\n try:\n create_waiter.wait(ClusterId=self.clusID,\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Cluster did not finish spinning up in two hours')\n else:\n print(e.message)", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def whenup(sourcename) :\n return s.whenUp(sourcename)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def _work(self):\n command = [ os.path.join(self.spark_home,'bin/spark-submit') ] + self.cluster_options + self.other_options + \\\n self.resources + self.app_config\n\n sideput.sideput( \"spark submit command is %s\" % ' '.join(command) )\n\n with sideput.Timing(\"spark job completed in %d seconds\"):\n result, stdout, stderr = os_util.execute_command(command, do_sideput=True)\n\n sideput.sideput(\"[%s] stderr:\\n%s\" % (self.name(), stderr), level=\"INFO\")\n sideput.sideput(\"[%s] stdout:\\n%s\" % (self.name(), stdout), level=\"INFO\")\n if result != 0:\n raise Exception(\"spark job failed with code %d\" % result)\n else:\n try:\n result_hash = yaml_util.load(stdout) if self._emits() else {}\n sideput.sideput(\"parsed stdout is %s\\n\" % result_hash, level=\"INFO\")\n except Exception as e:\n result_hash = {}\n sideput.sideput(\"parsing stdout as json failed with message %s \\n\" % e.message , level= \"ERROR\")\n sideput.sideput(\"stdout is \\n %s \\n\" % stdout, level=\"ERROR\")\n raise e\n sideput.sideput(\"[%s] spark job completed successfully\"\n % self.name(), level = \"INFO\")\n return result_hash", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! 
please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def start_up(self, velocity=VELOCITY):\n action = StartUp(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def create_spark_session():\n spark = SparkSession\\\n .builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\")\\\n .getOrCreate()\n # print (spark.sparkContext.getConf().getAll)\n return spark", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def setUpClass(cls):\n GlusterBaseClass.setUpClass.im_func(cls)\n # Create and start volume\n g.log.info(\"Starting volume setup process %s\", cls.volname)\n ret = cls.setup_volume()\n if not ret:\n raise ExecutionError(\"Failed to setup \"\n \"and start volume %s\" % cls.volname)\n g.log.info(\"Successfully created and started the volume: %s\",\n cls.volname)", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def instantiate(cls, spark):\n logger = ProcessLog().getLogger()\n return cls(spark, logger)", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def setup(self, cluster):\n raise NotImplementedError()", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone 
https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_instance():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'packageName', 'OS',\n 'sgPorts')\n )\n job = jobs.deploy.apply_async(args=(data,))\n current_user.add_job(job.id)\n return make_response(job_id=job.id)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def make_test_instance(branchname, instance_name=\"schedule\"):\n if not instance_name:\n instance_name = branchname\n instance_dir = env.site_root + instance_name\n if not exists(instance_dir):\n with cd(env.site_root):\n run('git clone %s %s' % (env.repo_url, instance_name))\n with cd(instance_dir):\n run('git checkout %s' % branchname)\n else:\n with cd(instance_dir):\n run(\"git pull\")\n\n bootstrap(instance_name, 'test')\n\n upstart_conf_templ = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf.template')\n upstart_conf = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf')\n if not exists(upstart_conf):\n run('cp %s %s' % (upstart_conf_templ, upstart_conf))\n sed(upstart_conf, '\\\\{branchname\\\\}', instance_name)\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if not exists(upstart_link):\n sudo('ln -s %s %s' % (upstart_conf, upstart_link))\n sudo('initctl reload-configuration')\n sudo('start %s' % instance_name)\n\n apache_config_templ = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf.template')\n apache_config = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf')\n if not exists(apache_config):\n run('cp %s %s' % (apache_config_templ, apache_config))\n sed(apache_config, '\\\\{branchname\\\\}', instance_name)\n apache_name = '/etc/nginx/sites-available/%s' % instance_name\n if not exists(apache_name):\n sudo('ln -s %s %s' % (apache_config, apache_name))\n sudo('nxensite %s' % instance_name)\n sudo('mkdir -p 
%s%s/media/static' % (env.site_root, instance_name))\n sudo('chgrp -R www-data %s%s/media/static' % (env.site_root, instance_name))\n sudo('chmod -R g+w %s%s/media/static' % (env.site_root, instance_name))\n sudo('/etc/init.d/nginx reload')", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def start_instance(InstanceId=None):\n pass", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n 
self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def __init__(self, sparkContext, minPartitions=None):\n from thunder.utils.aws import AWSCredentials\n self.sc = sparkContext\n self.minPartitions = minPartitions\n self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)", "def create_spark_session() -> SparkSession:\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark" ]
[ "0.7255299", "0.5885112", "0.57854474", "0.5732422", "0.5728568", "0.57188094", "0.5686993", "0.56795335", "0.55460984", "0.54442745", "0.54389185", "0.54236734", "0.5411601", "0.5410782", "0.539766", "0.53900146", "0.5368531", "0.53666943", "0.53366995", "0.53314143", "0.52853173", "0.52692574", "0.5235823", "0.5229087", "0.52060443", "0.5203687", "0.51997", "0.5195361", "0.51463693", "0.5127799", "0.51164204", "0.5099871", "0.5095755", "0.50892156", "0.5072066", "0.5059149", "0.50531006", "0.5049627", "0.5048304", "0.5027425", "0.5017536", "0.5012346", "0.5001642", "0.4999611", "0.49954787", "0.4963975", "0.49606976", "0.49390978", "0.4937356", "0.49355713", "0.493246", "0.4929713", "0.49250352", "0.4923733", "0.4916192", "0.49137157", "0.49037406", "0.4901357", "0.4896235", "0.48905668", "0.48833445", "0.48766047", "0.48766047", "0.48766047", "0.48766047", "0.48752612", "0.48684382", "0.48640826", "0.4856041", "0.48556623", "0.48512846", "0.48511493", "0.48325047", "0.48319644", "0.48235676", "0.48233774", "0.48213205", "0.4805509", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.4802022", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.47972357", "0.47959027" ]
0.0
-1
spark up an instance
def __init__(self):
    OWSReport.__init__(self)
    self.stats['type'] = 'OGC:WCS'
    self.stats['operations']['GetCoverage'] = {}
    self.stats['operations']['GetCoverage']['hits'] = 0
    self.stats['operations']['GetCoverage']['resource'] = {}
    self.stats['operations']['GetCoverage']['resource']['param'] = 'coverage'
    self.stats['operations']['GetCoverage']['resource']['list'] = {}
    self.stats['operations']['DescribeCoverage'] = {}
    self.stats['operations']['DescribeCoverage']['hits'] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n spark_it_up()", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up(self, connection):\n raise NotImplementedError", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))", "def up_cmd(ctx):\n pass", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the 
image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n 
print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. 
\\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def up(image):\n ovpn_file_queue = vpn_file_queue('./VPN')\n ovpn_file_count = len(list(ovpn_file_queue.queue))\n port_range = range(START_PORT, START_PORT + ovpn_file_count)\n write_haproxy_conf(port_range)\n write_proxychains_conf(port_range)\n start_containers(image, ovpn_file_queue, port_range)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n 
cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def setup_kubernetes_version(skuba, kubernetes_version=None):\n\n skuba.cluster_init(kubernetes_version)\n skuba.node_bootstrap()\n skuba.node_join(role=\"worker\", nr=0)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. 
This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def handle_hup(self):\n pass", "def startUp(self):\n pass", "def Instance():\n if not Spark._active_instance:\n Spark._active_instance = Spark()\n return Spark._active_instance", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def setup(self, stage: Optional[str] = None) -> None:", "def create_spark_session():\n \n print(\"Create Spark Session\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n print(\"Spark Session Created\")", "def create_spark_session():\n \n spark = SparkSession.builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .getOrCreate()\n \n return spark", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n 
logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def spinUp(self,\n btstrap_loc='s3://ddapi.data/ddapp_emr_bootstrap.sh',\n mstr_cnt=1,\n mstr_mkt='ON_DEMAND',\n slave_cnt=2,\n slave_mkt='ON_DEMAND',\n ):\n\n self.btstrap_loc = btstrap_loc\n logging.info(f'starting to spin up emr cluster from emr client: {self.emr_client}')\n response = self.emr_client.run_job_flow(\n Name=self.clustername,\n LogUri=self.logpath,\n ReleaseLabel='emr-5.23.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': \"Master nodes\",\n 'Market': mstr_mkt,\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm4.large',\n 'InstanceCount': mstr_cnt,\n },\n {\n 'Name': \"Slave nodes\",\n 'Market': slave_mkt,\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm4.large',\n 'InstanceCount': slave_cnt,\n }\n ],\n 'Ec2KeyName': self.key,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2SubnetId': 'string',\n },\n Applications=[\n {'Name': 'Hadoop'},\n {'Name': 'Spark'}\n ],\n BootstrapActions=[\n {\n 'Name': 'bootstrap requirements',\n 'ScriptBootstrapAction': {\n 'Path': btstrap_loc,\n }\n },\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Configurations=[\n {\n 'Classification': 'spark-env',\n 'Configurations': [\n {\n 'Classification': 'export',\n 'Properties': {\n 'PYSPARK_PYTHON': '/usr/bin/python3',\n 'PYSPARK_DRIVER_PYTHON': '/usr/bin/python3'\n }\n }\n ]\n },\n {\n 'Classification': 'spark-defaults',\n 'Properties': {\n 'spark.sql.execution.arrow.enabled': 'true'\n }\n },\n {\n 'Classification': 'spark',\n 'Properties': {\n 'maximizeResourceAllocation': 'true'\n }\n }\n ],\n )\n logging.info(f'spinning up emr cluster from emr client: {self.emr_client}')\n self.job_flow_id = response['JobFlowId']\n logging.info(f'job flow id {self.emr_client} logged')\n\n # get cluster id\n resp = self.emr_client.list_clusters()\n clus = resp['Clusters'][0]\n self.clusID = clus['Id']\n\n # don't forget to tip the waiter\n logging.info(f'start waiter')\n create_waiter = self.emr_client.get_waiter('cluster_running')\n try:\n create_waiter.wait(ClusterId=self.clusID,\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Cluster did not finish spinning up in two hours')\n else:\n print(e.message)", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def whenup(sourcename) :\n return s.whenUp(sourcename)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def _work(self):\n command = [ os.path.join(self.spark_home,'bin/spark-submit') ] + self.cluster_options + self.other_options + \\\n self.resources + self.app_config\n\n sideput.sideput( \"spark submit command is %s\" % ' '.join(command) )\n\n with sideput.Timing(\"spark job completed in %d seconds\"):\n result, stdout, stderr = os_util.execute_command(command, do_sideput=True)\n\n sideput.sideput(\"[%s] stderr:\\n%s\" % (self.name(), stderr), level=\"INFO\")\n sideput.sideput(\"[%s] stdout:\\n%s\" % (self.name(), stdout), level=\"INFO\")\n if result != 0:\n raise Exception(\"spark job failed with code %d\" % result)\n else:\n try:\n result_hash = yaml_util.load(stdout) if self._emits() else {}\n sideput.sideput(\"parsed stdout is %s\\n\" % result_hash, level=\"INFO\")\n except Exception as e:\n result_hash = {}\n sideput.sideput(\"parsing stdout as json failed with message %s \\n\" % e.message , level= \"ERROR\")\n sideput.sideput(\"stdout is \\n %s \\n\" % stdout, level=\"ERROR\")\n raise e\n sideput.sideput(\"[%s] spark job completed successfully\"\n % self.name(), level = \"INFO\")\n return result_hash", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! 
please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def start_up(self, velocity=VELOCITY):\n action = StartUp(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def create_spark_session():\n spark = SparkSession\\\n .builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\")\\\n .getOrCreate()\n # print (spark.sparkContext.getConf().getAll)\n return spark", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def setUpClass(cls):\n GlusterBaseClass.setUpClass.im_func(cls)\n # Create and start volume\n g.log.info(\"Starting volume setup process %s\", cls.volname)\n ret = cls.setup_volume()\n if not ret:\n raise ExecutionError(\"Failed to setup \"\n \"and start volume %s\" % cls.volname)\n g.log.info(\"Successfully created and started the volume: %s\",\n cls.volname)", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def instantiate(cls, spark):\n logger = ProcessLog().getLogger()\n return cls(spark, logger)", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def setup(self, cluster):\n raise NotImplementedError()", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone 
https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_instance():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'packageName', 'OS',\n 'sgPorts')\n )\n job = jobs.deploy.apply_async(args=(data,))\n current_user.add_job(job.id)\n return make_response(job_id=job.id)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def make_test_instance(branchname, instance_name=\"schedule\"):\n if not instance_name:\n instance_name = branchname\n instance_dir = env.site_root + instance_name\n if not exists(instance_dir):\n with cd(env.site_root):\n run('git clone %s %s' % (env.repo_url, instance_name))\n with cd(instance_dir):\n run('git checkout %s' % branchname)\n else:\n with cd(instance_dir):\n run(\"git pull\")\n\n bootstrap(instance_name, 'test')\n\n upstart_conf_templ = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf.template')\n upstart_conf = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf')\n if not exists(upstart_conf):\n run('cp %s %s' % (upstart_conf_templ, upstart_conf))\n sed(upstart_conf, '\\\\{branchname\\\\}', instance_name)\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if not exists(upstart_link):\n sudo('ln -s %s %s' % (upstart_conf, upstart_link))\n sudo('initctl reload-configuration')\n sudo('start %s' % instance_name)\n\n apache_config_templ = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf.template')\n apache_config = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf')\n if not exists(apache_config):\n run('cp %s %s' % (apache_config_templ, apache_config))\n sed(apache_config, '\\\\{branchname\\\\}', instance_name)\n apache_name = '/etc/nginx/sites-available/%s' % instance_name\n if not exists(apache_name):\n sudo('ln -s %s %s' % (apache_config, apache_name))\n sudo('nxensite %s' % instance_name)\n sudo('mkdir -p 
%s%s/media/static' % (env.site_root, instance_name))\n sudo('chgrp -R www-data %s%s/media/static' % (env.site_root, instance_name))\n sudo('chmod -R g+w %s%s/media/static' % (env.site_root, instance_name))\n sudo('/etc/init.d/nginx reload')", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def start_instance(InstanceId=None):\n pass", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n 
self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def __init__(self, sparkContext, minPartitions=None):\n from thunder.utils.aws import AWSCredentials\n self.sc = sparkContext\n self.minPartitions = minPartitions\n self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)", "def create_spark_session() -> SparkSession:\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark" ]
[ "0.7255299", "0.5885112", "0.57854474", "0.5732422", "0.5728568", "0.57188094", "0.5686993", "0.56795335", "0.55460984", "0.54442745", "0.54389185", "0.54236734", "0.5411601", "0.5410782", "0.539766", "0.53900146", "0.5368531", "0.53666943", "0.53366995", "0.53314143", "0.52853173", "0.52692574", "0.5235823", "0.5229087", "0.52060443", "0.5203687", "0.51997", "0.5195361", "0.51463693", "0.5127799", "0.51164204", "0.5099871", "0.5095755", "0.50892156", "0.5072066", "0.5059149", "0.50531006", "0.5049627", "0.5048304", "0.5027425", "0.5017536", "0.5012346", "0.5001642", "0.4999611", "0.49954787", "0.4963975", "0.49606976", "0.49390978", "0.4937356", "0.49355713", "0.493246", "0.4929713", "0.49250352", "0.4923733", "0.4916192", "0.49137157", "0.49037406", "0.4901357", "0.4896235", "0.48905668", "0.48833445", "0.48766047", "0.48766047", "0.48766047", "0.48766047", "0.48752612", "0.48684382", "0.48640826", "0.4856041", "0.48556623", "0.48512846", "0.48511493", "0.48325047", "0.48319644", "0.48235676", "0.48233774", "0.48213205", "0.4805509", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.48021105", "0.4802022", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.4798475", "0.47972357", "0.47959027" ]
0.0
-1
spark up an instance
def __init__(self): OWSReport.__init__(self) self.stats['type'] = 'OGC:SOS' self.stats['operations']['GetObservation'] = {} self.stats['operations']['GetObservation']['hits'] = 0 self.stats['operations']['GetObservation']['resource'] = {} self.stats['operations']['GetObservation']['resource']['param'] = 'observedproperty' self.stats['operations']['GetObservation']['resource']['list'] = {} self.stats['operations']['DescribeSensor'] = {} self.stats['operations']['DescribeSensor']['hits'] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n spark_it_up()", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up(self, connection):\n raise NotImplementedError", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))", "def up_cmd(ctx):\n pass", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the 
image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n 
print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def spark():\n return SparkSession.builder.master(\"local\").appName(\"tests\").getOrCreate()", "def spark_session(request):\n def fin():\n \"\"\"Clean up.\n \"\"\"\n spark.stop()\n request.addfinalizer(fin)\n\n spark = ps.SparkSession.builder.master('local')\\\n .appName('Spark Tute PyTest')\\\n .config('spark.executor.memory', '2g')\\\n .config('spark.executor.cores', '2')\\\n .config('spark.cores.max', '10')\\\n .config('spark.ui.port', '4050')\\\n .config('spark.logConf', True)\\\n .config('spark.debug.maxToStringFields', 100)\\\n .getOrCreate()\n\n return spark", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. 
\\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)", "def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def up(image):\n ovpn_file_queue = vpn_file_queue('./VPN')\n ovpn_file_count = len(list(ovpn_file_queue.queue))\n port_range = range(START_PORT, START_PORT + ovpn_file_count)\n write_haproxy_conf(port_range)\n write_proxychains_conf(port_range)\n start_containers(image, ovpn_file_queue, port_range)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setUpClass(cls):\n \n logging.info(\"Logging from within setup\")\n cls.spark=SparkSession \\\n .builder \\\n .appName(\"sampleTest\") \\\n .master(\"local\") \\\n .getOrCreate()\n 
cls.spark.sparkContext.setLogLevel(\"ERROR\")", "def setup_kubernetes_version(skuba, kubernetes_version=None):\n\n skuba.cluster_init(kubernetes_version)\n skuba.node_bootstrap()\n skuba.node_join(role=\"worker\", nr=0)", "def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()", "def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()", "def add_spark(self,node):\n import os\n import json\n from urllib.request import urlopen\n import ssl\n if \"SPARK_ENV_LOADED\" not in os.environ:\n return # no Spark\n\n spark = ET.SubElement(node, 'spark')\n try:\n import requests\n import urllib3\n urllib3.disable_warnings()\n except ImportError:\n ET.SubElement(spark,'error').text = \"SPARK_ENV_LOADED present but requests module not available\"\n return \n\n host = 'localhost'\n p1 = 4040\n p2 = 4050\n import urllib.error\n for port in range(p1,p2+1):\n try:\n url = 'http://{}:{}/api/v1/applications/'.format(host,port)\n resp = urlopen(url, context=ssl._create_unverified_context())\n spark_data = resp.read()\n break\n except (ConnectionError, ConnectionRefusedError, urllib.error.URLError) as e:\n continue\n if port>=p2:\n ET.SubElement(spark,'error').text = f\"SPARK_ENV_LOADED present but no listener on {host} ports {p1}-{p2}\"\n return\n\n # Looks like we have Spark!\n for app in json.loads(spark_data):\n app_id = app['id']\n app_name = app['name']\n e = ET.SubElement(spark,'application',{'id':app_id,'name':app_name})\n\n attempt_count = 1\n for attempt in app['attempts']:\n e = ET.SubElement(spark,'attempt')\n json_to_xml(e,attempt)\n for param in ['jobs','allexecutors','storage/rdd']:\n url = f'http://{host}:{port}/api/v1/applications/{app_id}/{param}'\n resp = urlopen(url, context=ssl._create_unverified_context())\n data = resp.read()\n e = ET.SubElement(spark,param.replace(\"/\",\"_\"))\n json_to_xml(e,json.loads(data))", "def up(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh up ' + str(vm) + ' ' + str(env) )", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def spark():\n\n quiet_log4j()\n\n builder = (\n SparkSession.builder\n .master(\"local[2]\")\n .appName(\"pytest-pyspark-local-testing\")\n # By default spark will shuffle to 200 partitions, which is\n # way too many for our small test cases. 
This cuts execution\n # time of the tests in half.\n .config('spark.sql.shuffle.partitions', 4)\n )\n if 'XDG_CACHE_HOME' in os.environ:\n builder.config('spark.jars.ivy', os.path.join(os.environ['XDG_CACHE_HOME'], 'ivy2'))\n\n with builder.getOrCreate() as spark:\n yield spark", "def handle_hup(self):\n pass", "def startUp(self):\n pass", "def Instance():\n if not Spark._active_instance:\n Spark._active_instance = Spark()\n return Spark._active_instance", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def setup(self, stage: Optional[str] = None) -> None:", "def create_spark_session():\n \n print(\"Create Spark Session\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n print(\"Spark Session Created\")", "def create_spark_session():\n \n spark = SparkSession.builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .getOrCreate()\n \n return spark", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n 
logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def spinUp(self,\n btstrap_loc='s3://ddapi.data/ddapp_emr_bootstrap.sh',\n mstr_cnt=1,\n mstr_mkt='ON_DEMAND',\n slave_cnt=2,\n slave_mkt='ON_DEMAND',\n ):\n\n self.btstrap_loc = btstrap_loc\n logging.info(f'starting to spin up emr cluster from emr client: {self.emr_client}')\n response = self.emr_client.run_job_flow(\n Name=self.clustername,\n LogUri=self.logpath,\n ReleaseLabel='emr-5.23.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': \"Master nodes\",\n 'Market': mstr_mkt,\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm4.large',\n 'InstanceCount': mstr_cnt,\n },\n {\n 'Name': \"Slave nodes\",\n 'Market': slave_mkt,\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm4.large',\n 'InstanceCount': slave_cnt,\n }\n ],\n 'Ec2KeyName': self.key,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n # 'Ec2SubnetId': 'string',\n },\n Applications=[\n {'Name': 'Hadoop'},\n {'Name': 'Spark'}\n ],\n BootstrapActions=[\n {\n 'Name': 'bootstrap requirements',\n 'ScriptBootstrapAction': {\n 'Path': btstrap_loc,\n }\n },\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Configurations=[\n {\n 'Classification': 'spark-env',\n 'Configurations': [\n {\n 'Classification': 'export',\n 'Properties': {\n 'PYSPARK_PYTHON': '/usr/bin/python3',\n 'PYSPARK_DRIVER_PYTHON': '/usr/bin/python3'\n }\n }\n ]\n },\n {\n 'Classification': 'spark-defaults',\n 'Properties': {\n 'spark.sql.execution.arrow.enabled': 'true'\n }\n },\n {\n 'Classification': 'spark',\n 'Properties': {\n 'maximizeResourceAllocation': 'true'\n }\n }\n ],\n )\n logging.info(f'spinning up emr cluster from emr client: {self.emr_client}')\n self.job_flow_id = response['JobFlowId']\n logging.info(f'job flow id {self.emr_client} logged')\n\n # get cluster id\n resp = self.emr_client.list_clusters()\n clus = resp['Clusters'][0]\n self.clusID = clus['Id']\n\n # don't forget to tip the waiter\n logging.info(f'start waiter')\n create_waiter = self.emr_client.get_waiter('cluster_running')\n try:\n create_waiter.wait(ClusterId=self.clusID,\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 480\n })\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Cluster did not finish spinning up in two hours')\n else:\n print(e.message)", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def whenup(sourcename) :\n return s.whenUp(sourcename)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def test_slurm_xsede_supermic_spark(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_xsede_supermic_spark\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 20)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/var/scratch/\")\n self.assertEqual(self.component.lfs_per_node['size'], 200496)\n self.assertEqual(self.component.lm_info['cores_per_node'], 20)\n\n return", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def _work(self):\n command = [ os.path.join(self.spark_home,'bin/spark-submit') ] + self.cluster_options + self.other_options + \\\n self.resources + self.app_config\n\n sideput.sideput( \"spark submit command is %s\" % ' '.join(command) )\n\n with sideput.Timing(\"spark job completed in %d seconds\"):\n result, stdout, stderr = os_util.execute_command(command, do_sideput=True)\n\n sideput.sideput(\"[%s] stderr:\\n%s\" % (self.name(), stderr), level=\"INFO\")\n sideput.sideput(\"[%s] stdout:\\n%s\" % (self.name(), stdout), level=\"INFO\")\n if result != 0:\n raise Exception(\"spark job failed with code %d\" % result)\n else:\n try:\n result_hash = yaml_util.load(stdout) if self._emits() else {}\n sideput.sideput(\"parsed stdout is %s\\n\" % result_hash, level=\"INFO\")\n except Exception as e:\n result_hash = {}\n sideput.sideput(\"parsing stdout as json failed with message %s \\n\" % e.message , level= \"ERROR\")\n sideput.sideput(\"stdout is \\n %s \\n\" % stdout, level=\"ERROR\")\n raise e\n sideput.sideput(\"[%s] spark job completed successfully\"\n % self.name(), level = \"INFO\")\n return result_hash", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! 
please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! 
You may use \"help start\" see more help')", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def start_up(self, velocity=VELOCITY):\n action = StartUp(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def create_spark_session():\n spark = SparkSession\\\n .builder\\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\")\\\n .getOrCreate()\n # print (spark.sparkContext.getConf().getAll)\n return spark", "def __init__(self,env,config_file):\n #load all the properties\n self.properties = util.load_application_properties(env, config_file)\n self.cassandra_server = self.properties[\"cassandra.host.name\"]\n self.cassandra_trip_table = self.properties[\"cassandra.trip_data_table\"]\n self.cassandra_stats_table = self.properties[\"cassandra.trip_stats_table\"]\n self.cassandra_keyspace = self.properties[\"cassandra.trip.keyspace\"]\n self.spark_master = self.properties[\"spark.master\"]\n self.s3_url=self.properties[\"batch_s3_url\"]\n\n #initialize SparkConf and SparkContext along with cassandra settings\n self.conf = SparkConf().setAppName(\"trip\").set(\"spark.cassandra.connection.host\",self.cassandra_server)\n self.sc = SparkContext(conf=self.conf)\n self.sqlContext = SQLContext(self.sc)", "def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark", "def setUpClass(cls):\n GlusterBaseClass.setUpClass.im_func(cls)\n # Create and start volume\n g.log.info(\"Starting volume setup process %s\", cls.volname)\n ret = cls.setup_volume()\n if not ret:\n raise ExecutionError(\"Failed to setup \"\n \"and start volume %s\" % cls.volname)\n g.log.info(\"Successfully created and started the volume: %s\",\n cls.volname)", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def instantiate(cls, spark):\n logger = ProcessLog().getLogger()\n return cls(spark, logger)", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def setup(self, cluster):\n raise NotImplementedError()", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone 
https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_instance():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'packageName', 'OS',\n 'sgPorts')\n )\n job = jobs.deploy.apply_async(args=(data,))\n current_user.add_job(job.id)\n return make_response(job_id=job.id)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def make_test_instance(branchname, instance_name=\"schedule\"):\n if not instance_name:\n instance_name = branchname\n instance_dir = env.site_root + instance_name\n if not exists(instance_dir):\n with cd(env.site_root):\n run('git clone %s %s' % (env.repo_url, instance_name))\n with cd(instance_dir):\n run('git checkout %s' % branchname)\n else:\n with cd(instance_dir):\n run(\"git pull\")\n\n bootstrap(instance_name, 'test')\n\n upstart_conf_templ = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf.template')\n upstart_conf = os.path.join(instance_dir, 'example', 'conf', 'upstart-test.conf')\n if not exists(upstart_conf):\n run('cp %s %s' % (upstart_conf_templ, upstart_conf))\n sed(upstart_conf, '\\\\{branchname\\\\}', instance_name)\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if not exists(upstart_link):\n sudo('ln -s %s %s' % (upstart_conf, upstart_link))\n sudo('initctl reload-configuration')\n sudo('start %s' % instance_name)\n\n apache_config_templ = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf.template')\n apache_config = os.path.join(instance_dir, 'example', 'conf', 'nginx-test.conf')\n if not exists(apache_config):\n run('cp %s %s' % (apache_config_templ, apache_config))\n sed(apache_config, '\\\\{branchname\\\\}', instance_name)\n apache_name = '/etc/nginx/sites-available/%s' % instance_name\n if not exists(apache_name):\n sudo('ln -s %s %s' % (apache_config, apache_name))\n sudo('nxensite %s' % instance_name)\n sudo('mkdir -p %s%s/media/static' % (env.site_root, instance_name))\n sudo('chgrp -R www-data %s%s/media/static' % (env.site_root, instance_name))\n sudo('chmod -R g+w %s%s/media/static' % (env.site_root, instance_name))\n sudo('/etc/init.d/nginx reload')", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' 
# Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def start_instance(InstanceId=None):\n pass", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n 
self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "def __init__(self, sparkContext, minPartitions=None):\n from thunder.utils.aws import AWSCredentials\n self.sc = sparkContext\n self.minPartitions = minPartitions\n self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)", "def create_spark_session() -> SparkSession:\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark" ]
[ "0.7255347", "0.58859587", "0.578518", "0.57325435", "0.57292104", "0.5720284", "0.56871897", "0.5679635", "0.5546256", "0.54448164", "0.5438893", "0.5424962", "0.541132", "0.54103553", "0.53980225", "0.5390525", "0.53686017", "0.5367046", "0.53380203", "0.5331934", "0.5286234", "0.5270096", "0.5235454", "0.5228658", "0.5206304", "0.52032954", "0.5199506", "0.5195851", "0.5146053", "0.5127637", "0.511699", "0.509955", "0.509666", "0.5089193", "0.50717664", "0.50587887", "0.50536716", "0.5049728", "0.5049377", "0.50272524", "0.5018328", "0.50131476", "0.5002543", "0.50008047", "0.49945518", "0.4964589", "0.4961737", "0.4939378", "0.49381736", "0.49367008", "0.49316472", "0.49294335", "0.49267942", "0.4923488", "0.4917696", "0.49133232", "0.4905058", "0.4902218", "0.48974627", "0.48919496", "0.48836", "0.48762673", "0.4876181", "0.4876181", "0.4876181", "0.4876181", "0.48691157", "0.4865171", "0.4856438", "0.4856258", "0.48519975", "0.48513728", "0.48335415", "0.48328194", "0.48238355", "0.48237792", "0.48226494", "0.48050871", "0.48024312", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.48018536", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.4798061", "0.47979367", "0.47955394" ]
0.0
-1
Estimate covariance matrix with POET algorithm
def poet_known(self, K, tau):
    # check K and tau
    if K < 0 or K >= self.N:
        raise RuntimeError("Invalid value for K, number of factors.")
    if tau < 0 or tau > 1:
        raise RuntimeError("Invalid value for tau, which should be between 0 and 1.")
    # mean centering and calculate SVD for pca
    Rc = self.R.T - np.mean(self.R, axis=1)
    u, s, vt = np.linalg.svd(Rc/np.sqrt(self.T-1))
    eigvecs = vt.T
    eigvals = s**2
    # decomposition of covariance matrix
    cov_pca = eigvecs[:,:K] @ np.diag(eigvals[:K]) @ eigvecs[:,:K].T
    Rk = self.cov - cov_pca
    # thresholding the complement matrix
    rii = np.diag(Rk)
    tauij = np.sqrt(np.outer(rii, rii))*tau
    RkT = Rk*(Rk > tauij)
    # combine the two terms
    result = cov_pca + RkT
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx", "def _getCovMat(self, cov_expr):\n # store the expression\n self.expr = cov_expr\n # create a PETSC matrix for cov_mat\n cov_mat = PETSc.Mat().create()\n cov_mat.setType('aij')\n cov_mat.setSizes(self.domain.getNodes(), self.domain.getNodes())\n cov_mat.setUp()\n\n # scalar valued function is evaluated in this variable\n cov_ij = np.empty((1), dtype=float)\n # the points to evalute the expression\n xycor = np.empty((4), dtype=float)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n # Loop through global nodes and build the matrix for i < j because of\n # symmetric nature.\n for node_i in range(0, self.domain.getNodes()):\n # global node node_i\n for node_j in range(node_i, self.domain.getNodes()):\n # global node node_j\n temp_cov_ij = 0\n for elem_i in self.node_to_elem[node_i]:\n # elem_i : element attached to node_i\n # x1 : x co-ordinate of the centroid of element elem_i\n x1 = self.c_centroid_array[elem_i].x()\n # y1 : x co-ordinate of the centroid of element elem_i\n y1 = self.c_centroid_array[elem_i].y()\n for elem_j in self.node_to_elem[node_j]:\n # elem_j : element attached to node_j\n # x2 : x co-ordinate for the centroid of element elem_j\n x2 = self.c_centroid_array[elem_j].x()\n # y2 : y co-ordinate for the centroid of element elem_j\n y2 = self.c_centroid_array[elem_j].y()\n xycor[0] = x1\n xycor[1] = x2\n xycor[2] = y1\n xycor[3] = y2\n # evaluate the expression\n cov_expr.eval(cov_ij, xycor)\n if cov_ij[0] > 0:\n temp_cov_ij += (1.0 / 3) * (1.0 / 3) * \\\n cov_ij[0] * \\\n self.c_volume_array[elem_i] * \\\n self.c_volume_array[elem_j]\n\n cov_mat.setValue(node_i, node_j, temp_cov_ij)\n cov_mat.setValue(node_j, node_i, temp_cov_ij)\n cov_mat.assemblyBegin()\n cov_mat.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n\n return cov_mat", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def getCovarianceMatrix(self):\n #ypost = np.dot ( self.getA().T, self.priorX )\n\n theta = np.mat ( self.getA() )\n Xm = np.mat ( self.priorX )\n\n ypost = Xm * theta\n yprior = self.priorY\n error = ypost - yprior\n #error = error - np.mean ( error, axis = 0 )\n return np.dot ( error.T, error )", "def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in 
covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def covariance(mtrx):\r\n\r\n # Average column of matrix\r\n T = np.transpose(mtrx)\r\n ave = np.zeros(len(mtrx))\r\n mtrx = np.asarray(mtrx)\r\n\r\n if isinstance(mtrx, np.ndarray):\r\n ave = average(T)\r\n\r\n for col in T:\r\n if type(mtrx) == list:\r\n # If data isn't standardized\r\n ave += np.asarray(col)\r\n\r\n\r\n if len(mtrx[0]) > len(mtrx):\r\n for moreRows in range(len(mtrx[0]), len(mtrx)):\r\n mtrx[moreRows] = np.asarray(mtrx[moreRows])\r\n\r\n ave /= len(mtrx[0])\r\n\r\n\r\n phi = T - ave\r\n # Covariance matrix\r\n return np.dot(np.transpose(phi), phi)", "def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data", "def cov_matrix(gx, gy, winsize, alpha):\n\n gx = edge_mirror(gx, winsize)\n gy = edge_mirror(gy, winsize)\n radius_filter = gen_gaussian(winsize)\n radius_filter = numpy.rot90(radius_filter, 2)\n\n lenth = sum(sum(radius_filter))\n\n gx = signal.convolve2d(gx, radius_filter, mode='valid')\n gy = signal.convolve2d(gy, radius_filter, mode='valid')\n\n c11 = numpy.multiply(gx, gx)\n c22 = numpy.multiply(gy, gy)\n c12 = numpy.multiply(gx, gy)\n\n\n # SVD closed form\n lambda1 = (c11 + c22 + numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n lambda2 = (c11 + c22 - numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n numer = c11 + c12 - lambda1\n denom = c22 + c12 - lambda2\n\n ev1 = numpy.zeros_like(numer)\n ev2 = numpy.zeros_like(ev1)\n\n rows, cols = numer.shape\n for r in range(rows):\n for c in range(cols):\n if abs(denom[r, c]) < _opzero:\n if abs(numer[r, c]) < _opzero:\n if abs(denom[r, c]) > abs(numer[r, c]):\n ev1[r, c] = 0\n ev2[r, c] = 1\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n theta = math.atan(-numer[r, c]/denom[r, c])\n ev1 = math.sin(theta)\n ev2 = math.cos(theta)\n\n sv1 = math.sqrt(abs(lambda1[r, c]))\n sv2 = math.sqrt(abs(lambda2[r, c]))\n p = ((sv1 * sv2 + _epsa) / lenth)**alpha\n s1 = 
(sv1 + 1) / (sv2 + 1)\n s2 = 1. / s1\n c11[r, c] = p * (s1 * ev2 ** 2 + s2 * ev1 ** 2)\n c22[r, c] = p * (s1 * ev1 ** 2 + s2 * ev2 ** 2)\n c12[r, c] = p * (s1 - s2) * ev1 * ev2\n\n c11 = edge_mirror(c11, winsize)\n c12 = edge_mirror(c12, winsize)\n c22 = edge_mirror(c22, winsize)\n\n return c11, c12, c22", "def solutionCovariance(self):\n return self.standardError2()*self.AtAinv", "def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )", "def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix", "def covariance(self,pt0,pt1):\n #raise Exception()\n cov = self.nugget\n for vario in self.variograms:\n cov += vario.covariance(pt0,pt1)\n return cov", "def cov(self):\n return self.cond_proba.cov", "def cov(self):\n cov_ = np.dot(self.weights * self.demeaned.T, self.demeaned)\n cov_ /= self.sum_weights - self.ddof\n return cov_", "def train_pca(self, cov):\n d, v = np.linalg.eigh(cov)\n eps = d.max() * 1e-5\n n_0 = (d < eps).sum()\n if n_0 > 0:\n d[d < eps] = eps\n\n # total energy\n totenergy = d.sum()\n\n # sort eigenvectors with eigenvalues order\n idx = np.argsort(d)[::-1][:self.dim]\n d = d[idx]\n v = v[:, idx]\n\n print(\"keeping %.2f %% of the energy\" % (d.sum() / totenergy * 100.0))\n\n # for the whitening\n d = np.diag(1. / d**self.whit)\n\n # principal components\n self.dvt = np.dot(d, v.T)", "def mpi_cov(data):\n m = mpi_mean(data)\n data_centered = data - m\n cov_local = dot(data_centered.T, data_centered)\n covmat = np.empty_like(cov_local)\n mpi.COMM.Allreduce(cov_local, covmat)\n num_data = mpi.COMM.allreduce(data.shape[0])\n covmat /= float(num_data)\n return covmat", "def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()", "def _covariance_matrix_prob_v1(self, merged_df, prob_vector):\n total_cov = merged_df.groupby(CYCLE_LABEL, as_index=True).cov()\n cov_matrix = 0\n for i in range(5):\n cov_matrix += total_cov.loc[i, :] * prob_vector[:, i]\n return cov_matrix", "def set_cov(self):\n v_mpart = self.d_vars['MPart']\n n_mpart = len(v_mpart)\n for p in combinations_with_replacement(range(n_mpart), 2):\n self.add_parameter('Cov', p[0], p[1])\n\n m_cov = np.zeros((n_mpart, n_mpart))\n return m_cov", "def get_process_covariance_matrix(dt):\n # a = np.array([\n # [0.25 * dt ** 4, 0.5 * dt ** 3, 0.5 * dt ** 2],\n # [0.5 * dt ** 3, dt ** 2, dt],\n # [0.5 * dt ** 2, dt, 1]\n # ])\n\n a = np.array([\n [dt ** 6 / 36., dt ** 5 / 24., dt ** 4 / 6.],\n [dt ** 5 / 24., 0.25 * dt ** 4, 0.5 * dt ** 3],\n [dt ** 4 / 6., 0.5 * dt ** 3, dt ** 2]\n ])\n return a", "def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def covariance(self, point_one, point_two):\n raise NotImplementedError(\"C++ wrapper currently does not support computing covariance quantities.\")", "def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)", "def covariance(G, variables = [], conditionants = []):\n return parameters(G, variables = variables, \n 
conditionants = conditionants )[\"cov\"]", "def get_covariance(self):\n ...", "def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma", "def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat),mat)/(nPts-1)", "def get_cov(self, npar=None, **args):\n return get_par(self, dummy='cov_mat', npar=npar, **args)", "def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat), mat) / (nPts - 1)", "def pca(data):\n mean = data.sum(axis=0) / data.shape[0]\n # show_image(mean)\n cv_matrix = np.cov(data.T)\n e_values, e_vectors = la.eig(cv_matrix)\n return e_values, e_vectors.T, mean", "def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n - 1)", "def get_cov_matrix_state_pars(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables() + self.get_num_parameters()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def test_cov_q(self, ndlys=13):\n for d in self.d:\n d.flag_array[:] = False #ensure that there are no flags!\n d.select(times=np.unique(d.time_array)[:10], frequencies=d.freq_array[:16])\n for d_std in self.d_std:\n d_std.flag_array[:] = False\n d_std.select(times=np.unique(d_std.time_array)[:10], frequencies=d_std.freq_array[:16])\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n Ntime = self.ds.Ntimes\n self.ds.set_Ndlys(ndlys)\n # Here is the analytic covariance matrix...\n chan_x, chan_y = np.meshgrid(range(self.ds.Nfreqs), range(self.ds.Nfreqs))\n cov_analytic = np.zeros((self.ds.spw_Ndlys, self.ds.spw_Ndlys), dtype=np.complex128)\n for alpha in range(self.ds.spw_Ndlys):\n for beta in range(self.ds.spw_Ndlys):\n cov_analytic[alpha, beta] = np.exp(-2j*np.pi*(alpha-beta)*(chan_x-chan_y)/self.ds.spw_Ndlys).sum()\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n #print(cov_analytic)\n\n for input_data_weight in ['identity','iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n #check error raised\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n for taper in taper_selection:\n qc = self.ds.cov_q_hat(key1,key2,model='dsets')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n qc = self.ds.cov_q_hat(key1,key2,model='empirical')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n\n \"\"\"\n Now test that analytic Error calculation gives Nchan^2\n \"\"\"\n self.ds.set_weighting('identity')\n qc = self.ds.cov_q_hat(key1, key2, model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), 
atol=1e-6))\n \"\"\"\n Test lists of keys\n \"\"\"\n self.ds.set_weighting('identity')\n qc=self.ds.cov_q_hat([key1], [key2], time_indices=[0], model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=200)\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=\"watch out!\")", "def getPCA(data):\n #covM = np.cov(data.T) #note that np.cov define row as variables, col as observations\n #corM = np.corrcoef(data.T) # we will use correlation matrix instead of cov.\n covM = np.cov(data.T)\n eigvalue,eigvector = np.linalg.eig(covM) # each col of the eigvector matrix corresponds to one eigenvalue. So, each col is the coeff of one component\n pca = np.dot(data,eigvector) # each col is one pca, each row is one obs in that pca. \n return eigvalue,eigvector,pca", "def _compute_total_covariance_matrix(self) -> tf.Tensor:\n total_covariance_matrix = self.total_c_phi\\\n + tf.matmul(self.s_matrix_inv,\n tf.matmul(self.t_matrix, self.s_matrix_inv))\n return total_covariance_matrix", "def covariance (x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y))/(n-1)", "def postfit_covariance(self) -> NONEARRAY:\n return self._calc_covariance()", "def calculate_covariance(self, x):\n # tx = self.reshape_tensor2d(x)\n # Calcualte the covariance\n # tx_mean = K.mean(tx, axis=0)\n # return tx_mean\n # tx_normal = tx - tx_mean\n # return tx_normal\n # tx_cov = K.dot(tx_normal.T, tx_normal) / (self.cols * self.rows - 1)\n # return tx_cov\n raise DeprecationWarning(\"deprecated, should use calculate_pre_cov to do 4D direct computation\")", "def my_pca(data_matrix, k):\n cov_matrix = np.cov(data_matrix.transpose())\n \n eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)\n eigenvalues.sort()\n # sorts the eigenvalues in ascending order\n decending_eigenvalues = eigenvalues[-k:][::-1]\n # choose the highest k values and change the order to decending\n \n evalues, evectors = np.linalg.eig(cov_matrix)\n \n index_list = []\n for i in decending_eigenvalues:\n indexes = np.where(i == evalues)[0][0]\n index_list.append(indexes)\n \n \n evector_list = []\n for i in index_list:\n evector_list.append(evectors[i])\n \n evector_array = np.array(evector_list)\n \n reduced_matrix = np.dot(data_matrix, evector_array.transpose())\n \n return pd.DataFrame(reduced_matrix)", "def matcov(\n nWfs, nSubaps, nxSubaps, subapDiam, subapPos, gsAlt, gsPos, nLayers,\n layerHeights, cn2, L0, data, covMatPart=0 , pupilOffset=None,\n gsMag=None, wfsRot=None):\n subapLayerPos = subap_position(\n nWfs, nSubaps, nxSubaps, gsAlt, gsPos, subapPos, nLayers,\n layerHeights, pupilOffset, gsMag,\n wfsRot\n )\n\n # Rescale the projected suze of all subapertures at the different altitudes\n subapSizes = numpy.zeros((nWfs, nLayers))\n for n in range(nWfs):\n for l in range(nLayers):\n subapSizes[n, l] = subapDiam[n] * (1. - gsAlt[l]*layerHeights[l])\n\n # Computation of the covariance matrix\n #####################################\n\n # lambda2 = pow(206265.*0.5e-6/2./3.1415926535,2);\n lambda2 = 0.00026942094446267851\n\n # Truth Sensor no\n ts = nWfs - 1\n\n ioff = joff = 0\n units = numpy.zeros(nLayers)\n\n # Find the total number of slopes\n totSlopes = 2 * nSubaps.sum()\n\n # Loop over WFS 1\n for m in range(0, nWfs):\n Ni = nSubaps[m] + ioff\n\n # Loop over WFS 2. 
Dont loop over all WFSs, use symmetry\n for n in range(0, m+1):\n\n off_XY = nSubaps[n]\n off_YX = nSubaps[m] * totSlopes\n off_YY = off_XY + off_YX\n\n Nj = nSubaps[n] + joff\n\n kk = 1./( subapDiam[m] * subapDiam[n])\n\n\n for l in range(0, nLayers):\n units[l] = kk * lambda2 * cn2[l]\n\n # Loop through subap i on WFS 1\n for i in range(ioff, Ni):\n # Loop through subap j on WFS 2\n for j in range(joff, Nj):\n\n caa_xx = 0\n caa_yy = 0\n caa_xy = 0\n\n # Loop through altitude layers\n for l in range(0, nLayers):\n # Check layer is not above LGS\n if subapSizes[m, l]>0 and subapSizes[n,l]>0:\n # Distances in x and y between the subapertures i and j\n du = (subapLayerPos[0, m, i-ioff, l]\n - subapLayerPos[0, n, j-joff, l])\n dv = (subapLayerPos[1, m, i-ioff, l]\n - subapLayerPos[1,n, j-joff, l])\n\n s1 = subapSizes[m, l] * 0.5\n s2 = subapSizes[n, l] * 0.5\n\n ac = s1 - s2\n ad = s1 + s2\n bc = -ad\n bd = -ac\n\n cov = compute_cov(\n du, dv, ac, ad, bc, bd, s1, s2, L0[l],\n units[l])\n\n caa_xx += cov[0]\n caa_yy += cov[1]\n caa_xy += cov[2]\n\n # print(\"i: {}, j: {}, i0: {}, NL: {}\".format(i, j,i0, NL))\n # print(\"off_XY: {}, off_YX: {}, off_YY:{}\".format(\n # off_XY, off_YX, off_YY))\n # print(\"du: {:.4f}, dv: {:4f}\".format(du,dv))\n # print(\"caa_xx: {:.3f}, caa_yy: {:.3f}, caa_xy: {:.3f}\\n\".format(\n # caa_xx, caa_yy, caa_xy ))\n\n\n data[i, j] = caa_xx\n data[i+nSubaps[n], j] = caa_xy\n data[i, j+nSubaps[m]] = caa_xy\n data[i+nSubaps[n], j+nSubaps[m]] = caa_yy\n\n\n joff = joff + 2*nSubaps[n]\n ioff += 2*nSubaps[m]\n joff = 0\n\n data = mirrorCovMat(data, nSubaps)\n\n return data", "def calc_cov(self, array_x, array_y):\n cov = np.empty([len(array_x),len(array_x)], dtype = float) # initialize an empty 16*16 matrix (16 pressure levels)\n for x in range(len(array_x)):\n for y in range(len(array_y)):\n entry = array_x[x] * array_y[y]\n cov[x,y] = entry\n return cov", "def jackknifed_coh_variance(tx, ty, eigvals, adaptive=True):\r\n\r\n K = tx.shape[0]\r\n\r\n # calculate leave-one-out estimates of MSC (magnitude squared coherence)\r\n jk_coh = []\r\n # coherence is symmetric (right??)\r\n sides = 'onesided'\r\n all_orders = set(range(K))\r\n\r\n import nitime.algorithms as alg\r\n\r\n # get the leave-one-out estimates\r\n for i in range(K):\r\n items = list(all_orders.difference([i]))\r\n tx_i = np.take(tx, items, axis=0)\r\n ty_i = np.take(ty, items, axis=0)\r\n eigs_i = np.take(eigvals, items)\r\n if adaptive:\r\n wx, _ = adaptive_weights(tx_i, eigs_i, sides=sides)\r\n wy, _ = adaptive_weights(ty_i, eigs_i, sides=sides)\r\n else:\r\n wx = wy = eigs_i[:, None]\r\n # The CSD\r\n sxy_i = alg.mtm_cross_spectrum(tx_i, ty_i, (wx, wy), sides=sides)\r\n # The PSDs\r\n sxx_i = alg.mtm_cross_spectrum(tx_i, tx_i, wx, sides=sides)\r\n syy_i = alg.mtm_cross_spectrum(ty_i, ty_i, wy, sides=sides)\r\n # these are the | c_i | samples\r\n msc = np.abs(sxy_i)\r\n msc /= np.sqrt(sxx_i * syy_i)\r\n jk_coh.append(msc)\r\n\r\n jk_coh = np.array(jk_coh)\r\n # now normalize the coherence estimates and take the mean\r\n normalize_coherence(jk_coh, 2 * K - 2, copy=False) # inplace\r\n jk_avg = np.mean(jk_coh, axis=0)\r\n\r\n jk_var = (jk_coh - jk_avg)\r\n np.power(jk_var, 2, jk_var)\r\n jk_var = jk_var.sum(axis=0)\r\n\r\n # Do/Don't use the alternative scaling here??\r\n f = float(K - 1) / K\r\n\r\n jk_var *= f\r\n\r\n return jk_var", "def covariance(x, mu_x, y, mu_y, pdf):\n if pdf.shape[0] != x.shape[0] or pdf.shape[1] != y.shape[0]:\n print(\"Error, mesh size does not match x and y\")\n n_x = 
x.shape[0]\n n_y = y.shape[0]\n cov_int = 0\n p_of_x = np.zeros(n_x)\n for i in range(0, n_x):\n for j in range(1, n_y):\n delta_y = y[j] - y[j - 1]\n p_of_x[i] += (\n delta_y\n / 2.0\n * ((y[j] - mu_y) * pdf[i, j] + (y[j - 1] - mu_y) * pdf[i, j - 1])\n )\n if i > 0:\n delta_x = x[i] - x[i - 1]\n cov_int += (\n delta_x\n / 2.0\n * ((x[i] - mu_x) * p_of_x[i] + (x[i - 1] - mu_x) * p_of_x[i - 1])\n )\n return cov_int", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def build_covariance_matrix (numpy_cloud, reduce_by_center_of_mass=True ):\r\n\r\n # build a sum over all points\r\n sum_xyz = np.sum (numpy_cloud, axis=0 )\r\n\r\n # and normalize it to get center of mass\r\n mass_center = sum_xyz / numpy_cloud.shape[0]\r\n\r\n # reduce point cloud by center of mass\r\n if (reduce_by_center_of_mass ):\r\n numpy_cloud_reduced = np.subtract (numpy_cloud[:, 0:3], mass_center )\r\n else:\r\n numpy_cloud_reduced = numpy_cloud.copy ()\r\n\r\n # build ATA matrix\r\n a_transposed_a = np.zeros ((3, 3 ))\r\n\r\n for point in numpy_cloud_reduced:\r\n a_transposed_a[0, 0] = a_transposed_a[0, 0] + np.float_power(point[0], 2 )\r\n a_transposed_a[0, 1] = a_transposed_a[0, 1] + point[0] * point[1]\r\n a_transposed_a[0, 2] = a_transposed_a[0, 2] + point[0] * point[2]\r\n\r\n a_transposed_a[1, 0] = a_transposed_a[1, 0] + point[0] * point[1]\r\n a_transposed_a[1, 1] = a_transposed_a[1, 1] + np.float_power(point[1], 2 )\r\n a_transposed_a[1, 2] = a_transposed_a[1, 2] + point[1] * point[2]\r\n\r\n a_transposed_a[2, 0] = a_transposed_a[2, 0] + point[0] * point[2]\r\n a_transposed_a[2, 1] = a_transposed_a[2, 1] + point[2] * point[1]\r\n a_transposed_a[2, 2] = a_transposed_a[2, 2] + np.float_power(point[2], 2 )\r\n\r\n return a_transposed_a, mass_center", "def fit_evd(self):\n\n # EVD only work on square matrices as we need to compute the eigenvalues and eigenvectors\n # For this we compute the covariance matrix K\n # K should be n x n matrix (pixels x pixels)\n\n # The covariance matrix is nxn\n self.cov_matrix = np.zeros(shape=[self.n_features, self.n_features], dtype='uint8')\n\n self.cov_matrix = np.cov(self.norm_matrix, rowvar=False)\n # C is a symmetric matrix and so it can be diagonalized:\n eig_val, eig_vec = linalg.eig(self.cov_matrix)\n\n # Sorting the eigenvectors by decreasing eigenvalues\n # [Start : stop : stepcount] stepcount is reversed\n idx = eig_val.argsort()[::-1]\n eig_val, eig_vec = eig_val[idx], eig_vec[:, idx]\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = eig_vec[:n_components]\n print(\"The principal components have been calculated using eigendecomposition\", self.components.shape)\n\n return self.components", "def covariance2d(self, P_x, P_y, P_z, P_x_dot, P_y_dot, P_z_dot, P_x_ddot, P_y_ddot, 
P_z_ddot):\n cov_matrix = numpy.array([[P_x, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, P_y, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, P_z, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, P_x_dot, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, P_y_dot, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, P_z_dot, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, P_x_ddot, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, P_y_ddot, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, P_z_ddot]])\n return cov_matrix", "def P(self):\n self.eigenmatrix()", "def make_covariance_matrix(points, kernel):\n\n dim = len(points)\n p1 = np.reshape(points, (dim, 1, -1))\n p2 = np.reshape(points, (dim, -1, 1))\n\n return kernel(p1, p2)", "def postfit_covariance(self) -> NONEARRAY:\n pass", "def build_covariance(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_covariance method so it knows how to \"\n \"load the data covariance (or set constant_covariance=False and \"\n \"over-ride the extract_covariance method)\")\n\n #using info in self.options,\n #like filenames etc,\n #build covariance", "def _calc_covariance(self):\n\n # if the fit was unsuccessful return None\n if not self.successful:\n return None\n\n # if the covariance is cached return it\n if self._postfit_covariance is not None:\n return self._postfit_covariance\n\n # otherwise compute it\n weight_matrix = self._compute_weight_matrix(len(self.model.state_vector), self.measurements.size)\n\n if not np.isscalar(weight_matrix):\n orthogonal_project_mat = np.linalg.inv(self._jacobian.T @ self._jacobian) @ self._jacobian.T\n self._postfit_covariance = np.linalg.inv(orthogonal_project_mat @\n weight_matrix @\n orthogonal_project_mat.T)\n else:\n self._postfit_covariance = np.linalg.inv(self._jacobian.T @\n self._jacobian * weight_matrix)\n\n return self._postfit_covariance", "def test_covar_on_vectors(self):\n input_file = self.get_file(\"vector.csv\")\n vect_schema = [(\"items\", sparktk.dtypes.vector(400))]\n\n # create a frame and covariance matrix\n cov_frame = self.context.frame.import_csv(input_file,\n schema=vect_schema)\n cov_matrix = cov_frame.covariance_matrix(['items'])\n\n # call numpy to get numpy result\n numpy_result = list(numpy.cov(list(cov_frame.take(cov_frame.count()).data),\n rowvar=False))\n\n # convert the frame rows into lists for ease of comparison\n sparktk_flat = list(numpy.array(cov_matrix.take(cov_matrix.count())).flat)\n numpy_flat = list(numpy.array(numpy_result).flat)\n\n # finally compare the expected results with those resturned by sparktk\n numpy.testing.assert_almost_equal(sparktk_flat, numpy_flat)", "def _posterior_covariance(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n cov = self.kernel.make_K(X, hypers=self.hypers)\n k_off = np.matrix(self._k_star(X))\n v = np.linalg.lstsq(self._L, k_off.T)[0]\n return cov - v.T * v", "def compute_covariance_matrix1d(Xs):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.abs(t1 - t2)\n K1 = np.reshape(K1, (m, m))\n coeff = 1.0\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def build_covariance(self):\n raise RuntimeError(\"Internal cosmosis error in SingleValueGaussianLikelihood\")", "def test__get_covariance(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert 
np.isclose(covariance, expected_covariance).all().all()", "def pca(X, k):\n n, dim = X.shape\n\n # Center the data\n X_mean = np.mean(X, axis = 0)\n X = X - X_mean\n # Get the covariance matrix\n covariance_matrix = np.dot(X.T, X) / (n - 1)\n eigval, eigvec = eigs(covariance_matrix, k)\n return np.array(eigvec), np.array(eigval)", "def _get_covar(minuit):\n n = len(minuit.parameters)\n m = np.zeros((n, n))\n for i1, k1 in enumerate(minuit.parameters):\n for i2, k2 in enumerate(minuit.parameters):\n if set([k1, k2]).issubset(minuit.list_of_vary_param()):\n m[i1, i2] = minuit.covariance[(k1, k2)]\n return m", "def _get_variance_covariance_table(self):\n\n # variance-covariance matrix\n res = self._model.fit()\n X = self._model.exog\n x_prime_x_inverse = np.linalg.inv(np.matmul(X.transpose(), X))\n var_cov_matrix = res.mse_resid * x_prime_x_inverse\n var_cov_table = SimpleTable(data=var_cov_matrix,\n headers=self._model.exog_names,\n stubs=self._model.exog_names,\n title='Variance-covariance matrix')\n\n return var_cov_table", "def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)", "def _ntk_cov(op, ntk_td, nngp_dd, nngp_td, nngp_tt):\n # op(vec) here should compute \\Theta^{-1} @ (I - e^{-\\Theta dt}) @ vec\n # for the time dependent case and\n # op(vec) = \\Theta^{-1} @ vec for the infinite time case.\n # below implements Equation 15 from https://arxiv.org/abs/1902.06720\n term_1 = op(np.transpose(ntk_td))\n cov = np.dot(nngp_dd, term_1)\n cov = np.dot(np.transpose(term_1), cov)\n term_2 = np.dot(ntk_td, op(np.transpose(nngp_td)))\n term_2 += np.transpose(term_2)\n cov += (-term_2 + nngp_tt)\n return cov", "def adaptCovarianceMatrix(self, evalcount):\n\n cc, cs, c_1, c_mu, n = self.c_c, self.c_sigma, self.c_1, self.c_mu, self.n\n wcm, wcm_old, mueff, invsqrt_C = self.wcm, self.wcm_old, self.mu_eff, self.sqrt_C\n lambda_ = self.lambda_\n\n self.p_sigma = (1-cs) * self.p_sigma + \\\n sqrt(cs*(2-cs)*mueff) * dot(invsqrt_C, (wcm - wcm_old) / self.sigma)\n power = (2*evalcount/lambda_)\n if power < 1000: #TODO: Solve more neatly\n hsig = sum(self.p_sigma**2)/(1-(1-cs)**power)/n < 2 + 4/(n+1)\n else:\n #Prevent underflow error,\n hsig = sum(self.p_sigma**2)/n < 2 + 4/(n+1)\n self.p_c = (1-cc) * self.p_c + hsig * sqrt(cc*(2-cc)*mueff) * (wcm - wcm_old) / self.sigma\n offset = self.offset[:, :self.mu_int]\n\n # Regular update of C\n self.C = (1 - c_1 - c_mu) * self.C \\\n + c_1 * (outer(self.p_c, self.p_c) + (1-hsig) * cc * (2-cc) * self.C) \\\n + c_mu * dot(offset, self.weights*offset.T)\n if self.active and len(self.all_offspring) >= 2*self.mu_int: # Active update of C\n offset_bad = self.offset[:, -self.mu_int:]\n self.C -= c_mu * dot(offset_bad, self.weights*offset_bad.T)\n\n # Adapt step size sigma\n if self.tpa:\n alpha_act = self.tpa_result * self.alpha\n alpha_act += self.beta_tpa if self.tpa_result > 1 else 0\n self.alpha_s += self.c_alpha * (alpha_act - self.alpha_s)\n self.sigma *= exp(self.alpha_s)\n else:\n exponent = (norm(self.p_sigma) / self.chiN - 1) * self.c_sigma / self.damps\n if exponent < 1000: #TODO: Solve more neatly\n self.sigma = self.sigma * exp(exponent)\n else:\n self.sigma = self.sigma_mean\n self.sigma_mean = self.sigma\n\n ### Update BD ###\n C = self.C # lastest setting for\n C = triu(C) + triu(C, 1).T # eigen decomposition\n\n degenerated = False\n if any(isinf(C)) > 1: # interval\n degenerated = True\n # raise Exception(\"Values in C are infinite\")\n elif not 
1e-16 < self.sigma_mean < 1e6:\n degenerated = True\n else:\n try:\n w, e_vector = eigh(C)\n e_value = sqrt(list(map(complex, w))).reshape(-1, 1)\n if any(~isreal(e_value)):\n degenerated = True\n # raise Exception(\"Eigenvalues of C are not real\")\n elif any(isinf(e_value)):\n degenerated = True\n # raise Exception(\"Eigenvalues of C are infinite\")\n else:\n self.D = real(e_value)\n self.B = e_vector\n self.sqrt_C = dot(e_vector, e_value**-1 * e_vector.T)\n except LinAlgError as e:\n # raise Exception(e)\n print(\"Restarting, degeneration detected: {}\".format(e))\n degenerated = True\n\n if degenerated:\n self.restart()", "def information_matrix(self):\n return self._cov.inv()", "def conditional_component_covs(self):\n return np.array([d.conditional_cov() for d in self.conditionalMVNs])", "def get_traj_cov(self):\n return np.dot(self._Phi.T, np.dot(self._sigma_W, self._Phi))", "def getCovMatrix(self, caliStep, weights):\n\n Sigma = np.zeros([self.numObs, self.numObs])\n # scale observation data with normalized variance parameter to get covariance matrix\n for i in range(self.numObs):\n # use smaller weights for higher precision\n if self.scaleCovWithMax:\n Sigma[i, i] = self.sigma * weights[i] * max(self.obsData[:, i]) ** 2\n else:\n Sigma[i, i] = self.sigma * weights[i] * self.obsData[caliStep, i] ** 2\n return Sigma", "def cov(self):\n E_x = Sample.mean(self)\n Std_x = Sample.std(self)\n cov = Std_x/E_x\n return(cov)", "def princomp(A):\n # computing eigenvalues and eigenvectors of covariance matrix\n M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)\n [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted\n score = np.dot(coeff.T,M) # projection of the data in the new space\n return coeff,score,latent", "def precompute(self):\n self.cov_inv = np.linalg.inv(self.cov)\n self.root_2pi_d_det = math.sqrt((2.0*math.pi)**self.raw_data.shape[1] *\n np.linalg.det(self.cov))", "def cov(self, decomposed=False):\n if decomposed:\n return self._R, self._S\n else:\n return np.copy(self._C)", "def calculate_covariance_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\treturn np.array(covariance_matrix, dtype=float)", "def _mn_cor_ ( self , size = -1 , root = False ) :\n #\n cov = self.cov ( size , root )\n #\n from math import sqrt\n #\n if isinstance ( cov , ROOT.TMatrix ) :\n\n size = cov.GetNrows()\n root = True\n \n else : size = cov.kRows\n\n ## use ROOT matrices \n if root : cor = ROOT.TMatrix ( size , size )\n else : cor = cov.__class__ () \n\n for i in range(0, size ) :\n \n d_i = cov ( i , i )\n cor [ i , i ] = 1 if 0 < d_i else 0\n \n for j in range ( i + 1 , size ) :\n \n d_j = cov ( j , j )\n \n if 0 != cov ( i , j ) and 0 < d_i and 0 < d_j :\n \n if root and _rv < 6 : cor [ i ] [ j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n else : cor [ i , j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n \n else :\n \n if root and _rv < 6 : cor [ i ] [ j ] = 0 \n else : cor [ i , j ] = 0\n\n return cor", "def test_calculate_variance_covariance(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], 0.1351777)\n self.assertAlmostEqual(_var_covar[0][1], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][0], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][1], 0.01710296)\n self.assertEqual(_var_covar[0][1], _var_covar[1][0])", "def cov(m, y=None, rowvar=1, bias=0):\n\n X = array(m, 
ndmin=2, dtype=float)\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n axis = 0\n tup = (slice(None),newaxis)\n else:\n axis = 1\n tup = (newaxis, slice(None))\n\n\n if y is not None:\n y = array(y, copy=False, ndmin=2, dtype=float)\n X = concatenate((X,y),axis)\n\n X -= X.mean(axis=1-axis)[tup]\n if rowvar:\n N = X.shape[1]\n else:\n N = X.shape[0]\n\n if bias:\n fact = N*1.0\n else:\n fact = N-1.0\n\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()", "def get_cov(self):\n\n if self._cov is not None:\n return self._cov\n\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n\n C = np.zeros((6,6))\n\n # pre-load the diagonal\n for i,name in enumerate(names):\n full_name = \"{}_error\".format(name)\n C[i,i] = self._data[full_name]**2\n\n for i,name1 in enumerate(names):\n for j,name2 in enumerate(names):\n if j <= i:\n continue\n full_name = \"{}_{}_corr\".format(name1, name2)\n C[i,j] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n C[j,i] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n\n if self._rv_err is not None:\n C[5,5] = self._rv_err**2\n\n self._cov = C\n return self._cov", "def part2(x_train, x_test, lmbd, sigma):\n _, x = x_train.shape\n\n x_test_index = list(range(x_test.shape[0]))\n\n indexes = []\n\n # Calculate covariance\n covariance = np.linalg.inv(lmbd * np.eye(x) + 1 / sigma * (x_train.T.dot(x_train)))\n print(\"Cov matrix\", covariance.shape)\n\n for _ in range(10):\n temp_cov = [sigma + x_test[i].dot(covariance).dot(x_test[i].T) for i in x_test_index]\n print(\"preds\", temp_cov[:5])\n print(\"argmax\", np.argmax(temp_cov[:5]))\n\n pos_max_variance = np.argmax(temp_cov)\n el = x_test_index.pop(pos_max_variance)\n indexes.append(el)\n\n # update covariance with test values\n covariance = np.linalg.inv(covariance +\n sigma * x_test[pos_max_variance].T.dot(x_test[pos_max_variance]))\n\n last_index = indexes[len(indexes) - 1]\n\n print(\"x_test shape:\", x_test[last_index].shape)\n print(\"x_train shape:\", x_train.shape)\n x_train = np.concatenate((x_train, x_test[last_index].reshape(1, x)), axis=0)\n\n return (np.array(indexes) + 1).reshape(1, len(indexes))", "def compute_measurement_covariance(jacobian, oldCovariance, sigmaObservation): \n\n return None", "def covariance(self):\n return self._covariance", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def covariance(x, mean_x, y, mean_y):\r\n \r\n covar = 0.0\r\n for i in range(len(x)):\r\n covar += (x[i] - mean_x) * (y[i] - mean_y)\r\n return covar", "def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples - 1)) * (\n X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "def pcov(xdata, ydata, mx=None, my=None):\n n, s = _SP(xdata, mx, ydata, my)\n if n > 0:\n return s/n\n else:\n raise ValueError('population covariance requires 
at least one point')", "def compute_initial_covariance(jacobian, sigmaObservation):\n\n return None", "def empirical_covariance(system, excitation, m):\n observations = [system() @ excitation() for _ in range(m)]\n return np.cov(np.array(observations).T)", "def calc_covariance(xy_sum,x_sum,y_sum,n):\n\t\treturn ( xy_sum - np.matmul(x_sum,y_sum.T)/n )/(n-1)", "def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC", "def __init__(self, N0, N1):\n #self.w = np.zeros(N);\n self.p0 = N0/(N0+N1) \n self.p1 = N1/(N0+N1)\n self.mu0 = np.zeros(N0+N1)\n self.mu1 = np.zeros(N0+N1)\n self.covariance = 0", "def covariance(x, y):\n n = len(x)\n return dot(deviations_from_mean(x), deviations_from_mean(y))/ (n - 1)", "def cov(self):\n return self._cov", "def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n #Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset 
through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers", "def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov", "def test_calculate_variance_covariance_zero_division_shape(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.0)\n self.assertAlmostEqual(_var_covar[0][0], 0.006105992)\n self.assertAlmostEqual(_var_covar[0][1], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][0], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][1], -0.7475704)", "def covarmat_s(Xint, Xmeas, gsm):\n\n m, dim = np.shape(Xmeas)\n n, _ = np.shape(Xint)\n\n # scaled distance between all points\n deltaXnorm = (addem(Xint[:, 0]) * np.ones((1, m)) - np.ones((n, 1)) * addem(Xmeas[:, 0]).T) / gsm.lx\n\n if dim > 1:\n deltaYnorm = (addem(Xint[:, 1]) * np.ones((1, m)) - np.ones((n, 1)) * addem(Xmeas[:, 1]).T) / gsm.ly\n if dim == 3:\n deltaZnorm = (addem(Xint[:, 2]) * np.ones((1, m)) - np.ones((n, 1)) * addem(Xmeas[:, 2]).T) / gsm.lz\n H = np.sqrt(deltaXnorm ** 2 + deltaYnorm **2 + deltaZnorm ** 2)\n else:\n H = np.sqrt(deltaXnorm ** 2 + deltaYnorm ** 2)\n else:\n H = abs(deltaXnorm)\n\n if gsm.cmodel is 'Exp':\n Q_ssm = gsm.sig2 * np.exp(-H)\n elif gsm.cmodel is 'Gau':\n Q_ssm = gsm.sig2 * np.exp(-H ** 2)\n elif gsm.cmodel is 'Sph':\n Q_ssm = gsm.sig2 * (1 - 1.5 * H + 0.5 * H ** 3)\n Q_ssm[H > 1] = 0\n\n return Q_ssm", "def test_box_cos():\n b_time = datetime.datetime.now()\n print('Begining reading data')\n DATA_TRAIN_PATH = get_filepath('train')\n y, tX, ids = load_csv_data(DATA_TRAIN_PATH)\n print(\"Finish loading in {s} seconds\".\n format(s=(datetime.datetime.now() - b_time).total_seconds()))\n tX = fill_missing(tX)\n tX = standardize(tX, intercept=False)\n header = get_csv_header(DATA_TRAIN_PATH)\n data = tX[0]\n (N, degree) = data.shape\n result = []\n best = []\n func_space = [lambda x: x, lambda x: x ** 2, lambda x: 1 / x,\n lambda x: np.exp(x), lambda x: np.sqrt(x), lambda x: np.log(x)]\n for index in range(degree):\n # For each variable, test the\n group = []\n print(\"Box-cox analysis for variable {} \".format(header[index + 2]))\n _data = np.reshape(data[:, index], (N,))\n for i, func in enumerate(func_space):\n _data_trans = func(_data)\n # _result =\n # group.append(normaltest(_data_trans).__getattribute__('pvalue'))\n print(\"p-values {}\".format(group))\n print(\"Best transform {} with p-value {}\".format(np.argmax(group), np.max(group)))\n result.append(group)\n _best = np.argmax(group)\n best.append(func_space[_best](_data))\n\n best = np.array(best)\n best_data = np.reshape(best, (N, degree))\n save_data_as_original_format(y, ids, best_data, header,\n os.path.join(get_dataset_dir(), 'trans_' + train_filename))\n print(result)", "def portfolio_vol(weights, covmat):\n return (weights.T @ covmat @ weights)**0.5", "def Nmatrix(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], 
obs[:, 1], obs[:, 2]\n\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v**2.)*(parallax/_A)**2., (sigma_v**2.)*(parallax/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t\n\n\t\n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. \n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See formula A.5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See formula A.7\n\thess = np.zeros((N+4, N+4))\n\n\thess_diag_pi, hess_diag_pi_1, hess_diag_pi_2 = np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_pi_1[:] = invD[0, 0, :]*cprime_pi[0, :]*cprime_pi[0, :] + invD[0, 1, :]*cprime_pi[0, :]*cprime_pi[1, :] + invD[0, 2, :]*cprime_pi[0, :]*cprime_pi[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_pi[1, :]*cprime_pi[0, :] + invD[1, 1, :]*cprime_pi[1, :]*cprime_pi[1, :] + invD[1, 2, :]*cprime_pi[1, :]*cprime_pi[2, :] + \\\n\t\t \t invD[2, 0, :]*cprime_pi[2, :]*cprime_pi[0, :] + invD[2, 1, :]*cprime_pi[2, :]*cprime_pi[1, :] + invD[2, 2, :]*cprime_pi[2, :]*cprime_pi[2, :]\t\n\t\n\t\n\t#hess_diag_pi_2[:] = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:]) ### Check if it's with or without sum: without!\n\t# So correct formula is below.\n\thess_diag_pi_2[:] = (0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. 
+ invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:])\n\thess_diag_pi[:] = hess_diag_pi_1[:] + hess_diag_pi_2[:]\t\n\n\t\n\thess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_pi_vx, hess_pi_vy, hess_pi_vz, hess_pi_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_vxi, hess_diag_vyi, hess_diag_vzi = np.zeros(N), np.zeros(N), np.zeros(N)\n\t\n\thess_diag_vxi[:] = invD[0, 0, :]*cprime_vx[0, :]*cprime_vx[0, :] + invD[0, 1, :]*cprime_vx[0, :]*cprime_vx[1, :] + invD[0, 2, :]*cprime_vx[0, :]*cprime_vx[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_vx[1, :]*cprime_vx[0, :] + invD[1, 1, :]*cprime_vx[1, :]*cprime_vx[1, :] + invD[1, 2, :]*cprime_vx[1, :]*cprime_vx[2, :] + \\\n\t\t\t invD[2, 0, :]*cprime_vx[2, :]*cprime_vx[0, :] + invD[2, 1, :]*cprime_vx[2, :]*cprime_vx[1, :] + invD[2, 2, :]*cprime_vx[2, :]*cprime_vx[2, :] \t\t\n\t\n\thess_diag_vyi[:] = invD[0, 0, :]*cprime_vy[0, :]*cprime_vy[0, :] + invD[0, 1, :]*cprime_vy[0, :]*cprime_vy[1, :] + invD[0, 2, :]*cprime_vy[0, :]*cprime_vy[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vy[1, :]*cprime_vy[0, :] + invD[1, 1, :]*cprime_vy[1, :]*cprime_vy[1, :] + invD[1, 2, :]*cprime_vy[1, :]*cprime_vy[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vy[2, :]*cprime_vy[0, :] + invD[2, 1, :]*cprime_vy[2, :]*cprime_vy[1, :] + invD[2, 2, :]*cprime_vy[2, :]*cprime_vy[2, :] \t\n\n\n\thess_diag_vzi[:] = invD[0, 0, :]*cprime_vz[0, :]*cprime_vz[0, :] + invD[0, 1, :]*cprime_vz[0, :]*cprime_vz[1, :] + invD[0, 2, :]*cprime_vz[0, :]*cprime_vz[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vz[1, :]*cprime_vz[0, :] + invD[1, 1, :]*cprime_vz[1, :]*cprime_vz[1, :] + invD[1, 2, :]*cprime_vz[1, :]*cprime_vz[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vz[2, :]*cprime_vz[0, :] + invD[2, 1, :]*cprime_vz[2, :]*cprime_vz[1, :] + invD[2, 2, :]*cprime_vz[2, :]*cprime_vz[2, :] \t\t\n\t\n\n\thess_pi_vx[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vx[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vx[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vx[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vx[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vx[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vx[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vx[2, :] \n\n\thess_pi_vy[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vy[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vy[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vy[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vy[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vy[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vy[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vy[2, :] \n\n\thess_pi_vz[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vz[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vz[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vz[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vz[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vz[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vz[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vz[2, :] \n\n\t\t\t\t\t\t\n\thess_diag_vx = np.sum(hess_diag_vxi)\n\thess_diag_vy = np.sum(hess_diag_vyi)\n\thess_diag_vz = np.sum(hess_diag_vzi)\t\n\t\n\thess_diag_sigmav = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. 
+ invD[2, 2, :]**2.)*de_dsigmav[:]*de_dsigmav[:])\n\thess_pi_sigmav[:] = 0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dsigmav[:] \n\n\thess_diag = np.concatenate((hess_diag_pi, np.array([hess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav])))\n\t\n\tfor i in range(N+4):\n\t\thess[i, i] = hess_diag[i]\n\t\t\n\t\n\tfor j in range(N):\n\t\t\thess[j, -4] = hess_pi_vx[j]\n\t\t\thess[j, -3] = hess_pi_vy[j]\n\t\t\thess[j, -2] = hess_pi_vz[j]\n\t\t\thess[j, -1] = hess_pi_sigmav[j]\n\t\t\thess[-4, j] = hess_pi_vx[j]\n\t\t\thess[-3, j] = hess_pi_vy[j] \n\t\t\thess[-2, j] = hess_pi_vz[j]\n\t\t\thess[-1, j] = hess_pi_sigmav[j]\n\t\t\t\n\n\t\n\t\n\tpart_12, part_13, part_23 = np.zeros(N),np.zeros(N),np.zeros(N)\n\tfor ia in range(3):\n\t\tfor ib in range(3):\n\t\t\tpart_12[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vy[ib, :] \n\t\t\tpart_13[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vz[ib, :] \n\t\t\tpart_23[:] += invD[ia, ib, :]*cprime_vy[ia, :]*cprime_vz[ib, :] \t\t\t\t\n\n\thess[-4, -3] = np.sum(part_12)\n\thess[-3, -4] = hess[-4, -3]\n\t\n\thess[-4, -2] = np.sum(part_13)\n\thess[-2, -4] = hess[-4, -2]\n\n\thess[-3, -2] = np.sum(part_23)\n\thess[-2, -3] = hess[-3, -2]\n\n\t#### I am returning here the matrix Njk, which is defined as -E(H),\n\t#### where H is the hessian of the likelihood: therefore to obtain the real hessian, one\n\t#### should multiply this by '-1' (see function below.)\n\treturn hess ### See eq. 18", "def calc_cnv_cov(cnvbed, hpo_data, cnv, frac=0.5, max_search_dist=20000000):\n\n cnv_cov = {}\n cnvbt_orig = pbt.BedTool(cnvbed)\n contigs = set([x.chrom for x in cnvbt_orig])\n\n # Iterate over each HPO\n for hpo, hdat in hpo_data.items():\n print('Computing covariance matrixes for {}...'.format(hpo))\n\n # Make single bedtool of all windows per contig\n wbt_dict = {contig : {'all_wids' : set()} for contig in contigs}\n for wid in hdat['all_windows'].keys():\n contig = wid.split('_')[0]\n wbt_dict[contig]['all_wids'].add(wid)\n for contig in contigs:\n wbt_str = ''\n for wid in wbt_dict[contig]['all_wids']:\n wbt_str += '\\t'.join(wid.split('_') + [wid]) + '\\n'\n wbt_dict[contig]['wbt'] = pbt.BedTool(wbt_str, from_string=True)\n\n # Filter CNVs by HPO and CNV type\n cnvbt = cnvbt_orig.filter(lambda x: hpo in x[5])\n if cnv != 'NS':\n cnvbt = cnvbt.filter(lambda x: x[4] == cnv).saveas()\n\n # Make covariance matrix of all by all windows per chromosome\n cov_dfs = {}\n for contig in contigs:\n \n # Filter CNVs and windows to contig of interest\n cnvbt_contig = cnvbt.filter(lambda x: x.chrom == contig)\n wbt_contig = wbt_dict[contig]['wbt']\n all_contig_wids = wbt_dict[contig]['all_wids']\n\n # Make dict mapping window ID to dict of set(CNV ids)\n cnvs_per_window = {wid : set() for wid in all_contig_wids}\n for hit in cnvbt_contig.intersect(wbt_contig, wa=True, wb=True, F=frac):\n cnvid = hit[3]\n wid = hit[-1]\n cnvs_per_window[wid].add(cnvid) \n \n # Compute covarance for all pairs of windows\n cov_dfs[contig] = pd.DataFrame(columns=all_contig_wids)\n for wid_a in all_contig_wids:\n jac_l = []\n \n # If first window has no CNVs, Jaccard index = 0 for all mates\n cnvs_a = cnvs_per_window[wid_a]\n if len(cnvs_a) == 0:\n cov_dfs[contig].loc[wid_a] = [0.0] * len(all_contig_wids)\n continue\n\n for wid_b in all_contig_wids:\n # If the Jaccard index has already been computed, \n # can copy value across matrix diagonal\n if wid_b in cov_dfs[contig].index:\n jac_l.append(cov_dfs[contig].loc[wid_b, wid_a])\n continue\n\n # If second window has 
no CNVs, Jaccard index = 0\n cnvs_b = cnvs_per_window[wid_b]\n if len(cnvs_b) == 0:\n jac_l.append(0.0)\n continue\n\n # Otherwise, compute Jaccard index as long as windows are\n # closer than max_search_dist apart\n mid_a = np.mean([int(x) for x in wid_a.split('_')[1:]])\n mid_b = np.mean([int(x) for x in wid_b.split('_')[1:]])\n if np.abs(mid_b - mid_a) > max_search_dist:\n jac_l.append(0.0)\n else:\n jac_l.append(len(cnvs_a.intersection(cnvs_b)) / len(cnvs_a.union(cnvs_b)))\n\n cov_dfs[contig].loc[wid_a] = jac_l\n\n cnv_cov[hpo] = cov_dfs\n\n return cnv_cov", "def portfolio_vol(weights, covmat):\n\n return (weights.T @ covmat @ weights) ** 0.5" ]
[ "0.7217029", "0.7027655", "0.6832015", "0.67822325", "0.66628355", "0.6503905", "0.6372194", "0.6335945", "0.63327277", "0.6318015", "0.6316783", "0.6251822", "0.62356293", "0.6194071", "0.6187514", "0.6178875", "0.615046", "0.6149075", "0.6138903", "0.61331916", "0.61278504", "0.61136746", "0.60881984", "0.6062804", "0.6058832", "0.6057094", "0.60515517", "0.60489917", "0.60485387", "0.604427", "0.60408574", "0.60237944", "0.6009408", "0.5993064", "0.5991853", "0.598911", "0.5976716", "0.5973448", "0.59727293", "0.59709966", "0.59657437", "0.5952865", "0.59501237", "0.5933594", "0.5932749", "0.5932119", "0.5925289", "0.5924498", "0.59192985", "0.5916946", "0.59058917", "0.58997333", "0.58979726", "0.5891298", "0.5883331", "0.5878747", "0.5862674", "0.5848954", "0.58415115", "0.58404696", "0.58307856", "0.5825145", "0.58206135", "0.5805489", "0.5785216", "0.5779175", "0.57700133", "0.57676077", "0.57659274", "0.57634187", "0.5761762", "0.5752867", "0.57469076", "0.5745317", "0.57422763", "0.57379", "0.5727406", "0.572737", "0.57234424", "0.5719533", "0.5709907", "0.5692051", "0.5687922", "0.5687773", "0.5687031", "0.56858563", "0.56811476", "0.5679363", "0.56769323", "0.5662422", "0.5661097", "0.56597966", "0.56530076", "0.56465435", "0.5643263", "0.56418353", "0.5638515", "0.5634853", "0.5633887", "0.56336266", "0.5614144" ]
0.0
-1
greedily select boxes with high confidence and overlap with current maximum <= thresh
def nms_mxnet(self, boxes, scores, thresh): x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] #scores = dets[:, 4] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[order[1:]] - inter) inds = np.where(ovr <= thresh)[0] order = order[inds + 1] return keep
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_non_max_suppression(boxes, scores, iou_thresh=.45, top_k=200):\n\n selected_indices = np.zeros(shape=len(scores))\n if boxes is None or len(boxes) == 0:\n return selected_indices\n x_min = boxes[:, 0]\n y_min = boxes[:, 1]\n x_max = boxes[:, 2]\n y_max = boxes[:, 3]\n areas = (x_max - x_min) * (y_max - y_min)\n remaining_sorted_box_indices = np.argsort(scores)\n remaining_sorted_box_indices = remaining_sorted_box_indices[-top_k:]\n\n num_selected_boxes = 0\n while len(remaining_sorted_box_indices) > 0:\n best_score_args = remaining_sorted_box_indices[-1]\n selected_indices[num_selected_boxes] = best_score_args\n num_selected_boxes = num_selected_boxes + 1\n if len(remaining_sorted_box_indices) == 1:\n break\n\n remaining_sorted_box_indices = remaining_sorted_box_indices[:-1]\n\n best_x_min = x_min[best_score_args]\n best_y_min = y_min[best_score_args]\n best_x_max = x_max[best_score_args]\n best_y_max = y_max[best_score_args]\n\n remaining_x_min = x_min[remaining_sorted_box_indices]\n remaining_y_min = y_min[remaining_sorted_box_indices]\n remaining_x_max = x_max[remaining_sorted_box_indices]\n remaining_y_max = y_max[remaining_sorted_box_indices]\n\n inner_x_min = np.maximum(remaining_x_min, best_x_min)\n inner_y_min = np.maximum(remaining_y_min, best_y_min)\n inner_x_max = np.minimum(remaining_x_max, best_x_max)\n inner_y_max = np.minimum(remaining_y_max, best_y_max)\n\n inner_box_widths = inner_x_max - inner_x_min\n inner_box_heights = inner_y_max - inner_y_min\n\n inner_box_widths = np.maximum(inner_box_widths, 0.0)\n inner_box_heights = np.maximum(inner_box_heights, 0.0)\n\n intersections = inner_box_widths * inner_box_heights\n remaining_box_areas = areas[remaining_sorted_box_indices]\n best_area = areas[best_score_args]\n unions = remaining_box_areas + best_area - intersections\n intersec_over_union = intersections / unions\n intersec_over_union_mask = intersec_over_union <= iou_thresh\n remaining_sorted_box_indices = remaining_sorted_box_indices[\n intersec_over_union_mask]\n\n return selected_indices.astype(int), num_selected_boxes", "def non_max_suppression_fast(boxes, overlapThresh=0.2):\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n # keep looping while some indexes still remain in the indexes list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * 
h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick].astype(\"int\"), pick", "def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)", "def non_max_suppression_fast(boxes, probabilities=None, overlap_threshold=0.3):\n # if there are no boxes, return an empty list\n if boxes.shape[1] == 0:\n return []\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n # initialize the list of picked indexes\n pick = []\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0] - (boxes[:, 2] / [2]) # center x - width/2\n y1 = boxes[:, 1] - (boxes[:, 3] / [2]) # center y - height/2\n x2 = boxes[:, 0] + (boxes[:, 2] / [2]) # center x + width/2\n y2 = boxes[:, 1] + (boxes[:, 3] / [2]) # center y + height/2\n\n # compute the area of the bounding boxes and grab the indexes to sort\n # (in the case that no probabilities are provided, simply sort on the\n # bottom-left y-coordinate)\n area = boxes[:, 2] * boxes[:, 3] # width * height\n idxs = y2\n\n\n # if probabilities are provided, sort on them instead\n if probabilities is not None:\n idxs = probabilities\n\n # sort the 
indexes\n idxs = np.argsort(idxs)\n # keep looping while some indexes still remain in the indexes\n # list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlap_threshold)[0])))\n # return only the bounding boxes that were picked\n return pick", "def greedyNonMaximumSupression(boxlist,clipthresh=0.05,IOUthresh=0.5):\r\n NMSed_list=[]\r\n if len(boxlist)==0 or clipthresh>1:\r\n return NMSed_list\r\n \r\n # keep every box with largest score while doesn't overlap with all the other\r\n # boxes\r\n NMSed_list.append(boxlist[0])\r\n for i in range(1,len(boxlist)):\r\n keepflag=True\r\n \r\n if boxlist[i][4]<clipthresh:\r\n break # break when score of current box is lower than thresh\r\n else:\r\n #print('----NMS--{}----'.format(i))\r\n for j in range(len(NMSed_list)):\r\n iou=getIoU(boxlist[i],NMSed_list[j])\r\n #print(iou)\r\n if iou>IOUthresh:\r\n keepflag=False\r\n break\r\n if keepflag:\r\n NMSed_list.append(boxlist[i])\r\n \r\n return NMSed_list", "def nms(boxes, thresh, topk=None):\r\n order = np.argsort(boxes[:, 4])[::-1]\r\n keep = []\r\n while order.size > 0:\r\n keep.append(order[0])\r\n overlaps = bbox_overlap(boxes[order[0:1]][:, :4], \r\n boxes[order[1:]][:, :4]).flatten()\r\n\r\n ids = np.where(overlaps<thresh)[0]\r\n order = order[ids + 1]\r\n \r\n if topk:\r\n keep = keep[:topk]\r\n return keep", "def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):\n eps = 1e-15\n \n boxes = np.asarray(boxes, dtype='float32')\n \n pick = []\n x1, y1, x2, y2 = boxes.T\n \n idxs = np.argsort(confs)\n area = (x2 - x1) * (y2 - y1)\n \n while len(idxs) > 0:\n i = idxs[-1]\n \n pick.append(i)\n if len(pick) >= top_k:\n break\n \n idxs = idxs[:-1]\n \n xx1 = np.maximum(x1[i], x1[idxs])\n yy1 = np.maximum(y1[i], y1[idxs])\n xx2 = np.minimum(x2[i], x2[idxs])\n yy2 = np.minimum(y2[i], y2[idxs])\n \n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n I = w * h\n \n overlap = I / (area[idxs] + eps)\n # as in Girshick et. 
al.\n \n #U = area[idxs] + area[i] - I\n #overlap = I / (U + eps)\n \n idxs = idxs[overlap <= overlap_threshold]\n \n return pick", "def non_max_suppression(boxes, max_bbox_overlap, scores=None):\n if len(boxes) == 0:\n return []\n\n boxes = boxes.astype(np.float)\n pick = []\n\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2] + boxes[:, 0]\n y2 = boxes[:, 3] + boxes[:, 1]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n if scores is not None:\n idxs = np.argsort(scores)\n else:\n idxs = np.argsort(y2)\n\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n overlap = (w * h) / area[idxs[:last]]\n\n idxs = np.delete(\n idxs, np.concatenate(\n ([last], np.where(overlap > max_bbox_overlap)[0])))\n\n return pick", "def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms", "def _filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= self.object_threshold)\n\n boxes = boxes[pos]\n classes = box_classes[pos]\n scores = box_class_scores[pos]\n\n return boxes, classes, scores", "def test_boundary_boxes(gt_detection_combo):\n found = False\n overlap_threshold = 0.7\n\n for found_box in gt_detection_combo.detected_boxes:\n if overlap_between(gt_detection_combo.gt_box, found_box) > overlap_threshold:\n found = True\n break\n\n assert found is True", "def draw_boxes(image, results, min_score=0.2, max_boxes=10):\n results = sorted(results, key=lambda x: x['score'])\n results = results[0:max_boxes]\n for r in results:\n if r['score'] < min_score:\n continue\n draw_box(image, r['bounding_box'], labels[r['class_id']], r['score'])", "def non_maximum_suppression(boxes):\n\n boxes = sorted(boxes, key=lambda box: box[2]-box[0], reverse=True)\n nms_boxes = []\n overlap_threshold = 0.5\n\n for box in boxes:\n if not any([overlap_between(box, nms_box) > overlap_threshold for nms_box in nms_boxes]):\n nms_boxes.append(box)\n\n return nms_boxes", "def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):\n idxs = np.argsort(-confs)\n selected = []\n for idx in idxs:\n if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):\n continue\n selected.append(idx)\n if len(selected) >= top_k:\n break\n return selected", "def filter_overlapped_boxes(annotations, iou_thr=0.5):\n new_annotations = dict()\n for img_id, annos in annotations.items(): # loop through images\n annos = np.array(annos).astype(np.float32)\n unique_clses = np.unique(annos[:, 4])\n new_img_boxes = []\n 
for cls in unique_clses: # loop through classes\n idxes = np.where(annos[:, 4] == cls)[0]\n cls_annos = annos[idxes]\n x1, x2 = cls_annos[:, 0], cls_annos[:, 2]\n y1, y2 = cls_annos[:, 1], cls_annos[:, 3]\n\n areas = (x2 - x1) * (y2 - y1)\n order = np.arange(idxes.shape[0])\n new_cls_boxes = []\n while order.size > 0:\n i = order[0]\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n # merge overlap boxes\n inds = np.where(ovr > iou_thr)[0]\n overlap_boxes = np.vstack([cls_annos[i:i+1, :],\n cls_annos[order[inds + 1], :]])\n new_cls_boxes.append(np.mean(overlap_boxes, axis=0))\n\n # update order\n inds = np.where(ovr <= iou_thr)[0]\n order = order[inds + 1]\n new_img_boxes.extend(new_cls_boxes)\n new_annotations[img_id] = np.array(new_img_boxes, dtype=np.float32)\n\n return new_annotations", "def vis_detections(im, class_name, dets, image_name, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n\n# im = im[:, :, (2, 1, 0)]\n# fig, ax = plt.subplots(figsize=(12, 12))\n# ax.imshow(im, aspect='equal')\n # for i in inds:\n # bbox = dets[i, :4]\n # score = dets[i, -1]\n #print max_inds\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n\n# ax.add_patch(\n# plt.Rectangle((bbox[0], bbox[1]),\n# bbox[2] - bbox[0],\n# bbox[3] - bbox[1], fill=False,\n# edgecolor='red', linewidth=3.5)\n# )\n# ax.text(bbox[0], bbox[1] - 2,\n# '{:s} {:.3f}'.format(class_name, score),\n# bbox=dict(facecolor='blue', alpha=0.5),\n# fontsize=14, color='white')\n\n # end for\n #print image_name, class_name\n #print score\n # file.writelines([image_name,'\\t',class_name,'\\t',str(score),'\\n'])\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),fontsize=14)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n\t### SAVE IMAGES ? ###\n save_img_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_img')\n # if not os.path.exists(save_img_dir):\n # os.makedirs(save_img_dir)\n # plt.savefig(os.path.join(save_img_dir, image_name + '_' + class_name))\n\n boxes = {'boxes': ((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1])}\n \n save_mat_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_box')", "def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n polygons = convert_format(boxes)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(polygons[i], polygons[ixs[1:]])\n # Identify boxes with IoU over the threshold. 
This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n\n return np.array(pick, dtype=np.int32)", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def gain_box_score(im, preds):\n if len(preds[0]) == 0:\n cv2.imshow(\"Video detection\", im)\n else:\n for pred in preds:\n for i, box_label in enumerate(zip( pred[\"boxes\"], pred[\"labels\"] )):\n box, label = box_label\n xmin, ymin, xmax, ymax = box\n#-------------------- Create a Rectangle patch ----------------------- \n if label==1:\n class_name='with_mask'\n color = (0, 255, 0)\n elif label==2:\n class_name='without_mask'\n color = (0, 0, 255)\n elif label==3:\n class_name='mask_worn_improperly'\n color = (255, 255 ,0)\n score = pred['scores'][i]\n#--------------------- Bounding Box painting -------------------------- \n if score > 0.65:\n cv2.rectangle(im, (xmin, ymin), (xmax, ymax), color, 1) \n cv2.putText(im, str(class_name)+str(round(score.item(),2)), (xmin,int(ymax-ymax/20)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1) #print class name\n cv2.imshow(\"Video detection\",im)\n print('*****', 'Bbox:', i , '*****' )\n print('Class: ', str(class_name))\n print('Scores: ', str(round(score.item(),2)))\n print('boxes: ',f'{int(xmin)}, {int(ymin)}, {int(xmax)}, {int(ymax)}')\n print('image shape: ', im.shape) \n else:\n cv2.imshow(\"Video detection\", im)\n print('********************','\\n')", "def _filter_img_boxes(boxes, im_info):\n padding = 50\n w_min = -padding\n w_max = im_info[1] + padding\n h_min = -padding\n h_max = im_info[0] + padding\n keep = np.where((w_min <= boxes[:,0]) & (boxes[:,2] <= w_max) & (h_min <= boxes[:,1]) &\n (boxes[:,3] <= h_max))[0]\n return keep", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=10):\n # TOP_K was originally -1, to keep all faces, but trying to filter\n # CANDIDATE_SIZE was originally 200, trying to limit # of faces\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n \n # additional method of discrimination, only the boxes\n # with the largest areas are selected\n new_boxes = box_scores[picked, :]\n areas = []\n for box in new_boxes:\n left_top = np.asarray([box[0], box[1]])\n right_bottom = np.asarray([box[2], box[3]])\n area = area_of(left_top, right_bottom)\n areas.append(area)\n areas = np.asarray(areas)\n biggest = np.argsort(areas)\n last_index = len(biggest) - 1\n middle = max(len(biggest)// 2, 1)\n size = min(middle, candidate_size / 2)\n \n final_boxes = []\n for i in range(size):\n final_boxes.append(new_boxes[biggest[last_index-i]])\n final_boxes = np.asarray(final_boxes)\n \n return final_boxes\n #return box_scores[picked, :]", "def box_results_with_nms_and_limit(scores, boxes, thresh=0.0001):\n num_classes = cfg.MODEL.NUM_CLASSES\n cls_boxes = [[] for _ in range(num_classes)]\n # Apply threshold on detection probabilities and apply NMS\n # Skip j = 0, because it's the background class\n for j in range(1, num_classes):\n inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]\n scores_j = scores[inds, j]\n boxes_j = boxes[inds, j * 4:(j + 1) * 4]\n dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(\n np.float32, copy=False\n )\n if cfg.TEST.SOFT_NMS.ENABLED:\n nms_dets, _ = box_utils.soft_nms(\n dets_j,\n sigma=cfg.TEST.SOFT_NMS.SIGMA,\n overlap_thresh=cfg.TEST.NMS,\n score_thresh=0.0001,\n method=cfg.TEST.SOFT_NMS.METHOD\n )\n else:\n keep = box_utils.nms(dets_j, cfg.TEST.NMS)\n nms_dets = dets_j[keep, :]\n # Refine the post-NMS boxes using bounding-box voting\n if cfg.TEST.BBOX_VOTE.ENABLED:\n nms_dets = box_utils.box_voting(\n nms_dets,\n dets_j,\n cfg.TEST.BBOX_VOTE.VOTE_TH,\n scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD\n )\n cls_boxes[j] = nms_dets\n\n # Limit to max_per_image detections **over all classes**\n if cfg.TEST.DETECTIONS_PER_IM > 0:\n image_scores = np.hstack(\n [cls_boxes[j][:, -1] for j in range(1, num_classes)]\n )\n if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:\n image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]\n for j in range(1, num_classes):\n keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]\n cls_boxes[j] = cls_boxes[j][keep, :]\n\n im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])\n boxes = im_results[:, :-1]\n scores = im_results[:, -1]\n return scores, boxes, cls_boxes", "def nonmax_suppression(pred_labels, probabilities, x0, y0, windowsize, overlap_thr=0.1):\n\n # define list of proposals 
as list of indices over all predictions\n proposals = np.arange(0, len(pred_labels), dtype='int')\n\n # intialize final list of boxes\n final = []\n\n # delete all boxes labeled as \"other\"\n mask_other = [pred!='other' for pred in pred_labels]\n proposals = list(proposals[mask_other])\n\n while len(proposals)>0:\n\n # add the box with the highest confidence to the final selection\n ind_max = probabilities[proposals].argmax()\n select = proposals.pop(ind_max)\n final.append(select)\n\n # delete all boxes which overlap substantially with this last selected box\n delete_i = []\n for i, p in enumerate(proposals):\n\n # compute IoU score\n boxA = (x0[select], y0[select], x0[select]+windowsize[select], y0[select]+windowsize[select])\n boxB = (x0[p], y0[p], x0[p]+windowsize[p], y0[p]+windowsize[p])\n iou = intersection_over_union_from_boxes(boxA, boxB)\n\n if iou >= overlap_thr:\n delete_i.append(i)\n\n # update proposal list\n proposals = [proposals[i] for i in range(len(proposals)) if i not in delete_i]\n\n\n new_pred_labels = np.array(pred_labels)[final]\n new_probabilities = np.array(probabilities)[final]\n new_x0 = np.array(x0)[final]\n new_y0 = np.array(y0)[final]\n new_windowsize = np.array(windowsize)[final]\n\n return new_pred_labels, new_probabilities, new_x0, new_y0, new_windowsize", "def vis_detections(im, class_name, dets, thresh=0.5):\n global CHECK\n global CLASS_NAME\n global INDS\n global RES\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n bbox = dets[inds[0], :4]\n score = dets[inds[0], -1]\n if len(inds) > 1:\n score = -1\n for i in inds:\n temp = dets[i, -1]\n if (temp > score):\n score = temp\n bbox = dets[i, :4]\n if score <= MAX_SCORE[0]:\n return\n else:\n CHECK = 1\n MAX_SCORE[0] = score\n CLASS_NAME = class_name\n # im = im[:, :, (2, 1, 0)]\n # fig, ax = plt.subplots()\n # ax.imshow(im, aspect='equal')\n # ax.add_patch(\n # plt.Rectangle((bbox[0], bbox[1]),\n # bbox[2] - bbox[0],\n # bbox[3] - bbox[1], fill=False,\n # edgecolor='red', linewidth=3.5)\n # )\n # ax.text(bbox[0], bbox[1] - 2,\n # '{:s} {:.3f}'.format(class_name, score),\n # bbox=dict(facecolor='blue', alpha=0.5),\n # fontsize=10, color='white')\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),\n # fontsize=10)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... 
of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = torch.Tensor(scores.size(0)).fill_(0).long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def _filter_boxes(self, boxes, min_size, im_info):\n # Scale min_size to match image scale\n min_size *= im_info[2]\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n x_ctr = boxes[:, 0] + ws / 2.\n y_ctr = boxes[:, 1] + hs / 2.\n keep = np.where((ws >= min_size) & (hs >= min_size) &\n (x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]\n return keep", "def match(boxes, prior_boxes, iou_threshold=0.5):\n ious = compute_ious(boxes, to_point_form(np.float32(prior_boxes)))\n best_box_iou_per_prior_box = np.max(ious, axis=0)\n\n best_box_arg_per_prior_box = reversed_argmax(ious, 0)\n best_prior_box_arg_per_box = reversed_argmax(ious, 1)\n\n best_box_iou_per_prior_box[best_prior_box_arg_per_box] = 2\n # overwriting best_box_arg_per_prior_box if they are the best prior box\n for box_arg in range(len(best_prior_box_arg_per_box)):\n best_prior_box_arg = best_prior_box_arg_per_box[box_arg]\n best_box_arg_per_prior_box[best_prior_box_arg] = box_arg\n matches = boxes[best_box_arg_per_prior_box]\n # setting class value to 0 (background argument)\n matches[best_box_iou_per_prior_box < iou_threshold, 4] = 0\n return matches", "def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. 
This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)", "def apply_nms(all_boxes, all_scores, thres, max_boxes):\n y1 = all_boxes[:, 0]\n x1 = all_boxes[:, 1]\n y2 = all_boxes[:, 2]\n x2 = all_boxes[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n order = all_scores.argsort()[::-1]\n keep = []\n\n while order.size > 0:\n i = order[0]\n keep.append(i)\n\n if len(keep) >= max_boxes:\n break\n\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thres)[0]\n\n order = order[inds + 1]\n return keep", "def apply_nms(all_boxes, all_scores, thres, max_boxes):\n y1 = all_boxes[:, 0]\n x1 = all_boxes[:, 1]\n y2 = all_boxes[:, 2]\n x2 = all_boxes[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n order = all_scores.argsort()[::-1]\n keep = []\n\n while order.size > 0:\n i = order[0]\n keep.append(i)\n\n if len(keep) >= max_boxes:\n break\n\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thres)[0]\n\n order = order[inds + 1]\n return keep", "def detect(self, score_map, geo_map, score_map_thresh=common.SCORE_MAP_THRESH,\n box_thresh=common.BOX_THRESH, nms_thres=common.NMS_TRESH):\n if len(score_map.shape) == 4:\n score_map = score_map[0, :, :, 0]\n geo_map = geo_map[0, :, :, ]\n # filter the score map\n xy_text = np.argwhere(score_map > score_map_thresh)\n # sort the text boxes via the y axis\n xy_text = xy_text[np.argsort(xy_text[:, 0])]\n # restore\n text_box_restored = restore_rectangle(xy_text[:, ::-1] * 4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2\n boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)\n boxes[:, :8] = text_box_restored.reshape((-1, 8))\n boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]\n # nms part\n boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)\n\n if boxes.shape[0] == 0:\n return boxes\n\n # here we filter some low score boxes by the average score map, this is different from the orginal paper\n for i, box in enumerate(boxes):\n mask = np.zeros_like(score_map, dtype=np.uint8)\n cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)\n boxes[i, 8] = cv2.mean(score_map, mask)[0]\n boxes = boxes[boxes[:, 8] > box_thresh]\n\n return boxes", "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n box_predictions = []\n predicted_box_classes = []\n predicted_box_scores = []\n for label in range(len(self.class_names)):\n # for each class\n boxes = []\n class_tmp = []\n score_tmp = []\n for i in range(len(box_classes)):\n if box_classes[i] == label:\n boxes.append(filtered_boxes[i])\n class_tmp.append(box_classes[i])\n score_tmp.append(box_scores[i])\n\n class_tmp = np.array(class_tmp)\n while len(class_tmp) > 0 and np.amax(class_tmp) > -1:\n index = np.argmax(score_tmp)\n box_predictions.append(boxes[index])\n 
predicted_box_classes.append(class_tmp[index])\n predicted_box_scores.append(score_tmp[index])\n score_tmp[index] = -1\n class_tmp[index] = -1\n px1, py1, px2, py2 = boxes[index]\n p_area = (px2 - px1) * (py2 - py1)\n\n for box in range(len(boxes)):\n if class_tmp[box] != -1:\n bx1, by1, bx2, by2 = boxes[box]\n b_area = (bx2 - bx1) * (by2 - by1)\n ox1 = px1 if px1 > bx1 else bx1\n oy1 = py1 if py1 > by1 else by1\n ox2 = px2 if px2 < bx2 else bx2\n oy2 = py2 if py2 < by2 else by2\n if ox2 - ox1 <= 0 or oy2 - oy1 <= 0:\n continue\n # Calculate overlap area and IoU\n o_area = (ox2 - ox1) * (oy2 - oy1)\n u_area = p_area + b_area - o_area\n iou = o_area / u_area\n\n if iou > self.nms_t:\n class_tmp[box] = -1\n score_tmp[box] = -1\n\n box_predictions = np.array(box_predictions)\n predicted_box_classes = np.array(predicted_box_classes)\n predicted_box_scores = np.array(predicted_box_scores)\n return (box_predictions, predicted_box_classes, predicted_box_scores)", "def _filter_box_candidates(self, bboxes, labels):\n bbox_w = bboxes[:, 2] - bboxes[:, 0]\n bbox_h = bboxes[:, 3] - bboxes[:, 1]\n valid_inds = (bbox_w > self.min_bbox_size) & \\\n (bbox_h > self.min_bbox_size)\n valid_inds = np.nonzero(valid_inds)[0]\n return bboxes[valid_inds], labels[valid_inds]", "def nms(bobj, cf_thresh, nms_thresh):\n bboxs = bobj[\"boxs\"]\n scores = bobj[\"scores\"]\n cfvalid_ids = np.where(scores >= cf_thresh)[0]\n if len(cfvalid_ids) == 0:\n return None, None\n bboxs = bobj[\"boxs\"][cfvalid_ids]\n scores = scores[cfvalid_ids]\n ids = bobj[\"ids\"][cfvalid_ids]\n masks = bobj[\"masks\"][cfvalid_ids]\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n # order = scores.argsort()[::-1]\n mask_sizes = np.sum(masks, axis=(1, 2))\n order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n # Because of we split the object cross the boundary in the cropped instance,\n # concatenating it to the original instance, thus we need also mask iou condition for nms\n mask_other = masks[order[1:], :, :]\n mask_cur = masks[i, :, :]\n mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n mask_iou = mask_inter / mask_union\n\n suppress_inds = np.where((iou > nms_thresh) | (mask_iou > nms_thresh))[0]\n sup_i = order[1:][suppress_inds] if suppress_inds.size != 0 else np.array([])\n suppress.append(sup_i)\n\n inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n order = order[inds + 1]\n\n for i, sup in enumerate(suppress):\n if sup.any():\n for sup_id in sup:\n # sup_id = s + 1\n keep_id = keep[i]\n # union the keep mask and the suppress mask\n masks[keep_id, :, :] = masks[keep_id, :, :] | masks[sup_id, :, :]\n if keep:\n return ids[keep], masks[keep]\n else:\n return [], []", "def yolo_filter_boxes(box_confidence: torch.Tensor, boxes: torch.Tensor, box_class_probs: torch.Tensor, threshold: float=.6):\n\n batch_size, num_anchors, _, conv_height, conv_width = box_confidence.shape\n\n box_scores = box_confidence * box_class_probs\n\n box_classes = 
torch.argmax(box_scores, dim=2, keepdim=True)\n\n box_class_scores, _ = torch.max(box_scores, dim=2, keepdim=True)\n\n prediction_mask = box_class_scores > threshold\n\n classes = box_classes[prediction_mask]\n scores = box_class_scores[prediction_mask]\n\n boxes = boxes.permute(0, 1, 3, 4, 2)\n prediction_mask = prediction_mask.permute(0, 1, 3, 4, 2)\n boxes = boxes[prediction_mask.expand_as(boxes)].view(-1, 4)\n\n return boxes, scores, classes", "def boxes_filter(dets, bbox_id=1, class_name='None', color=(255, 255, 255), scale=1.0, thresh=0.5, min_size=(2, 2)):\n _objs = []\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return _objs\n\n for i in inds:\n bbox = dets[i, :4] / scale\n bbox_confidence = dets[i, -1]\n if bbox[3] - bbox[1] <= min_size[0] or bbox[2] - bbox[0] <= min_size[1]:\n continue\n attribute = dict(class_name=class_name, color=color)\n _objs.append(dict(bbox=bbox, bbox_id=bbox_id, bbox_confidence=bbox_confidence, keypoints=[],\n attribute=attribute, person_id=-1, person_confidence=-1, segment=[]))\n\n return _objs", "def vis_detections(im, class_name, dets, thresh=0.8):\n\n dict = {'HolderA': 'Holder', 'WheelA': 'WheelA', 'WheelB': 'WheelB', 'BrakeA': 'Brake', 'SpringA': 'Spring',\n 'BuckleA': 'BuckleA', 'BuckleB': 'BuckleB', 'TubeA': 'Tube', 'NutA': 'NutA', 'ScrewA': 'ScrewA',\n 'NutB': 'NutB', 'ScrewB': 'ScrewB',\n 'WireA': 'Wire', 'PlateA': 'PlateA', 'PlateB': 'PlateB', 'PlateD': 'PlateC', 'PlateE': 'PlateD',\n 'BoltA': 'Bolt', 'LoopB': 'Loop', 'JointA': 'JointA', 'JointB': 'JointB', 'FixatorA': 'Fixator',\n 'BearingA': 'Bearing', 'PlugA': 'Plug'}\n\n for i in range(np.minimum(10, dets.shape[0])):\n bbox = tuple(int(np.round(x)) for x in dets[i, :4])\n score = dets[i, -1]\n if score > thresh:\n # Color site: http://www.wahart.com.hk/rgb.htm\n if class_name == 'HolderA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'WheelA':\n color = (212, 255, 127) # Aquamarina\n elif class_name == 'WheelB':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'BrakeA':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'SpringA':\n color = (180, 130, 70) # SteelBlue\n elif class_name == 'BuckleA':\n color = (205, 0, 0) # MediumBlue\n elif class_name == 'BuckleB':\n color = (170, 205, 102) # MediumAquamarine\n elif class_name == 'BuckleC':\n color = (0, 252, 124) # LawnGreen\n elif class_name == 'BuckleD':\n color = (50, 205, 50) # LimeGreen\n elif class_name == 'TubeA':\n color = (147, 112, 219) # PaleVioletRed\n elif class_name == 'ScrewA':\n color = (240, 32, 160) # Purple\n elif class_name == 'ScrewB':\n color = (0, 165, 255) # Orange1\n elif class_name == 'ScrewC':\n color = (48, 48, 255) # Firebrick1\n elif class_name == 'NutA':\n color = (0, 255, 255) # Yellow\n elif class_name == 'NutB':\n color = (255, 144, 30) # DodgerBlue\n elif class_name == 'NutC':\n color = (180, 238, 180) # DarkSeaGreen2\n elif class_name == 'WireA':\n color = (255, 255, 255) # White\n elif class_name == 'PlateA':\n color = (0, 69, 255) # OrangeRed\n elif class_name == 'PlateB':\n color = (102, 205, 0) # SpringGreen3\n elif class_name == 'PlateD':\n color = (0, 255, 0) # Green\n elif class_name == 'PlateE':\n color = (0, 140, 250) # DarkOrange\n elif class_name == 'BoltA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'LoopB':\n color = (180, 105, 255) # HotPink\n elif class_name == 'JointA':\n color = (105, 140, 255) # Salmon1\n elif class_name == 'JointB':\n color = (255, 0, 255) # Magenta3\n elif class_name == 'FixatorA':\n color = (0, 205, 102) # 
Chartreuse3\n elif class_name == 'BearingA':\n color = (185, 218, 255) # PeachPuff\n elif class_name == 'PlugA':\n color = (193, 193, 255) # RosyBrown1\n else:\n color = (139, 0, 139) # DarkMagenta\n cv2.rectangle(im, bbox[0:2], bbox[2:4], color, 2)\n # cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n # 0.5, color, thickness=1)\n cv2.putText(im, '%s: %.3f' % (dict[class_name], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, color, thickness=1)\n return im", "def nms(self, dets, scores):\n x1 = dets[:, 0] #xmin\n y1 = dets[:, 1] #ymin\n x2 = dets[:, 2] #xmax\n y2 = dets[:, 3] #ymax\n\n areas = (x2 - x1) * (y2 - y1) # the size of bbox\n order = scores.argsort()[::-1] # sort bounding boxes by decreasing order\n\n keep = [] # store the final bounding boxes\n while order.size > 0:\n i = order[0] #the index of the bbox with highest confidence\n keep.append(i) #save it to keep\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(1e-28, xx2 - xx1)\n h = np.maximum(1e-28, yy2 - yy1)\n inter = w * h\n\n # Cross Area / (bbox + particular area - Cross Area)\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n #reserve all the boundingbox whose ovr less than thresh\n inds = np.where(ovr <= self.nms_thresh)[0]\n order = order[inds + 1]\n\n return keep", "def _filter_boxes(self, min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):\r\n\r\n # Step 1: Compute box scores\r\n ### START CODE HERE ### (≈ 1 line)\r\n box_scores = box_confidence * box_class_probs\r\n ### END CODE HERE ###\r\n\r\n # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\r\n ### START CODE HERE ### (≈ 2 lines)\r\n box_classes = tf.argmax(box_scores, axis=-1)\r\n box_class_scores = tf.reduce_max(box_scores, axis=-1)\r\n ### END CODE HERE ###\r\n\r\n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\r\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\r\n ### START CODE HERE ### (≈ 1 line)\r\n filtering_mask = box_class_scores >= threshold\r\n ### END CODE HERE ###\r\n\r\n # Step 4: Apply the mask to scores, boxes and classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = tf.boolean_mask(box_class_scores, filtering_mask)\r\n boxes = tf.boolean_mask(boxes, filtering_mask)\r\n classes = tf.boolean_mask(box_classes, filtering_mask)\r\n ### END CODE HERE ###\r\n\r\n return scores, boxes, classes", "def nms(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\tassert type(bboxes) == list\n\tbboxes = [box for box in bboxes if box[1] > threshold]\n\tbboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n\tbboxes_after_nms = []\n\n\twhile bboxes:\n\t\tchosen_box = bboxes.pop(index=0)\n\t\tbboxes = [box for box in bboxes \n\t\t\t\t\t\t\tif box[0] != chosen_box[0] or intersection_over_union\n\t\t\t\t\t\t\t(torch.tensor(chosen_box[2:]), \n\t\t\t\t\t\t\t\ttorch.tensor(chosen_box[2:]),\n\t\t\t \t\t\t\t\tbox_format=\"midpoint\") < iou_threshold]\n\t\tbboxes_after_nms.append(chosen_box)\n\n\treturn bboxes_after_nms", "def filter_close_teeth(bboxes, pixel_threshold=4):\n # sort bboxes by x-coordinate\n other_bboxes = [bbox for bbox in bboxes if bbox.get_label() != 0]\n teeth_bboxes = [bbox for bbox in bboxes if bbox.get_label() == 0]\n teeth_xmins = [bbox.xmin for bbox in teeth_bboxes]\n xmin_sorted = np.argsort(teeth_xmins)\n teeth_bboxes = np.array(teeth_bboxes)[xmin_sorted]\n teeth_bboxes = teeth_bboxes.tolist()\n filtered_bboxes = []\n\n # get rid of bboxes which are within pixel_threshold in the x-axis\n i = 0\n if teeth_bboxes:\n teeth_bboxes_to_return = [teeth_bboxes[0]]\n else:\n teeth_bboxes_to_return = []\n if len(teeth_bboxes) > 2:\n while i + 2 < len(teeth_bboxes): # iterate over the triples\n bbox1 = teeth_bboxes[i]\n bbox2 = teeth_bboxes[i + 1]\n bbox3 = teeth_bboxes[i + 2]\n xmin1, xmax1, ymin1, ymax1 = convert_bbox_coords_to_pixels(bbox1)\n xmin2, xmax2, ymin2, ymax2 = convert_bbox_coords_to_pixels(bbox2)\n xmin3, xmax3, ymin3, ymax3 = convert_bbox_coords_to_pixels(bbox3)\n if xmin2 - xmax1 < pixel_threshold and xmin3 - xmax2 < pixel_threshold:\n filtered_bboxes.append(bbox2)\n else:\n teeth_bboxes_to_return.append(teeth_bboxes[i + 1])\n i += 1\n\n if len(teeth_bboxes) > 1:\n teeth_bboxes_to_return.append(teeth_bboxes[-1])\n\n bboxes_to_return = teeth_bboxes_to_return + other_bboxes\n return bboxes_to_return, filtered_bboxes", "def _get_best_threshold(self, frame: np.ndarray, save_debug_thresh_images: bool) -> Tuple[float_, Ndarray_]:\n # 1) click_xy --> click_roi (click_xy.center; size = n * MAX_BALL_SIZE -->\n self.click_roi = ROI(frame.shape, self.click_xy, self.CLICK_ZONE_SIZE)\n self.click_roi_img = self.click_roi.extract_img(frame)\n cv.imwrite(f\"images/click_roi_img.png\", self.click_roi_img)\n\n # 2) --> preprocess(gray,blur,dilute) -->\n self.click_roi_gray = self._preprocess_image(self.click_roi_img, \"Start zone\")\n Util.write_bw(f\"images/click_roi_gray.png\", self.click_roi_gray, f\"frame {FrameProcessor.frame_cnt}\")\n # 3) --> find best threshold (one contour of biggest but reasonable size), ball_size, thresh_val -->\n level_results: List[Dict] = []\n for thresh in range(20, 255 - 20, 1):\n _, img_nomorphed = cv.threshold(self.click_roi_gray, thresh, 255, cv.THRESH_BINARY)\n kernel = np.ones((self.BLUR_LEVEL, self.BLUR_LEVEL), np.uint8)\n img = 
cv.morphologyEx(img_nomorphed, cv.MORPH_OPEN, kernel)\n img = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)\n # Util.show_img(img, f\"thresh level = {thresh}\", 1)\n\n contours, _ = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n # log_zone.debug(f\"get_best_threshold: iterating {thresh=} {len(contours)=} {[cv.contourArea(c) for c in contours]=}\")\n # Util.write_bw(f\"images/thresh_{thresh}.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=}\")\n\n if len(contours) != 1: # должен быть только один контур мяча. если несколько - меняем порог\n Util.write_bw(f\"images/{thresh}_not1.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} contours({len(contours)})\")\n continue\n contour = contours[0]\n area = cv.contourArea(contour)\n x, y, w, h = cv.boundingRect(contour)\n if max(w, h) / max(self.click_roi_gray.shape) > self.MAX_RECT_RATIO: # contour is as big as total image - so is useless\n Util.write_bw(f\"images/{thresh}_big.png\", img,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} Big: {w=}{h=} max(shape)={max(self.click_roi_gray.shape)}\")\n continue\n if x == 0 or y == 0 or x + w == self.click_roi.w or y + h == self.click_roi.h:\n Util.write_bw(f\"images/{thresh}_touch.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} Touch: {x=} {y=} {w=} {h=}\")\n continue # contour is touched to border\n hull = cv.convexHull(contour, returnPoints=False)\n defects = cv.convexityDefects(contour, hull)\n max_defect_size = sorted(defects, key=lambda defect: defect[0][3], reverse=True)[0][0][3] if defects is not None else -1\n if max_defect_size > self.MAX_DEFECT_SIZE:\n Util.write_bw(f\"images/{thresh}_defects.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} {max_defect_size=}\")\n continue\n\n result = {\"thresh\": thresh, \"area\": area, \"contour\": contour}\n level_results.append(result)\n Util.write_bw(f\"images/{thresh}_thresh.png\", img,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} area={result['area']} def_size={max_defect_size}\")\n Util.write_bw(f\"images/{thresh}_nomorphed.png\", img_nomorphed,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} area={result['area']} def_size={max_defect_size}\")\n log_zone.debug(f\"get_best_thresh::: level result saved {result['thresh']=} {result['area']=} {ROI(frame.shape, contour=contour)} \")\n\n if len(level_results) == 0: # no appropriate thresh found\n return None, None\n if len(level_results) == 1: # return just the only found thresh\n best_result = level_results[0]\n elif 1 < len(level_results) <= 5: # len(level_results) in (1;5] -- return second best by area if possible\n level_results = sorted(level_results, key=lambda res: res[\"area\"], reverse=True)\n best_result = level_results[1]\n else: # len(level_results) > 5\n best_result = self.get_optimized_thresh_level(level_results)\n\n otsu_thresh, otsu_img = cv.threshold(self.click_roi_gray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n log_zone.debug(f\"{best_result['thresh']=} {best_result['area']=} otsu = {otsu_thresh}\")\n if save_debug_thresh_images:\n Util.write_bw(f\"images/best_{best_result['thresh']}.png\",\n cv.threshold(self.click_roi_gray, best_result['thresh'], 255, cv.THRESH_BINARY)[1],\n f\"{best_result['area']=}\")\n Util.write_bw(f\"images/otsu_{otsu_thresh}.png\", otsu_img)\n return best_result[\"thresh\"], best_result[\"contour\"]", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n\n return keep", "def py_cpu_nms(boxes, scores, thresh=0.55):\n # 
x1、y1、x2、y2、以及score赋值\n boxes = boxes.detach().numpy()\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n scores = scores\n\n # 每一个检测框的面积\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 按照score置信度降序排序\n # order = scores.argsort()[::-1]\n all_scores, order = scores.sort(descending=True)\n\n keep = [] # 保留的结果框集合\n # print(order)\n while int(len(order.detach().numpy())) > 0:\n i = order[0]\n keep.append(i.numpy()) # 保留该类剩余box中得分最高的一个\n # 得到相交区域,左上及右下\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n # 计算相交的面积,不重叠时面积为0\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # 计算IoU:重叠面积 /(面积1+面积2-重叠面积)\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # 保留IoU小于阈值的box\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位\n\n return keep", "def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self", "def filter_det(scores, boxes, start_ind=0, max_per_img=100, thresh=0.001, pre_nms_topn=6000, post_nms_topn=300, nms_thresh=0.3, nms_filter_duplicates=True):\n valid_cls = (scores[:, 1:].data.max(0)[0] > thresh).nonzero() + 1\n if valid_cls.dim() == 0:\n return None\n nms_mask = scores.data.clone()\n nms_mask.zero_()\n for c_i in valid_cls.squeeze(1).cpu():\n scores_ci = scores.data[:, c_i]\n boxes_ci = boxes.data[:, c_i]\n keep = apply_nms(scores_ci, boxes_ci, pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn, nms_thresh=nms_thresh)\n nms_mask[:, c_i][keep] = 1\n dists_all = Variable(nms_mask * scores.data, volatile=True)\n if nms_filter_duplicates:\n scores_pre, labels_pre = dists_all.data.max(1)\n inds_all = scores_pre.nonzero()\n assert inds_all.dim() != 0\n inds_all = inds_all.squeeze(1)\n labels_all = labels_pre[inds_all]\n scores_all = scores_pre[inds_all]\n else:\n nz = nms_mask.nonzero()\n assert nz.dim() != 0\n inds_all = nz[:, 0]\n labels_all = nz[:, 1]\n scores_all = scores.data.view(-1)[inds_all * scores.data.size(1) + labels_all]\n vs, idx = torch.sort(scores_all, dim=0, descending=True)\n idx = idx[vs > thresh]\n if max_per_img < idx.size(0):\n idx = idx[:max_per_img]\n inds_all = inds_all[idx] + start_ind\n scores_all = Variable(scores_all[idx], volatile=True)\n labels_all = Variable(labels_all[idx], volatile=True)\n return inds_all, scores_all, labels_all", "def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = change_box_order(prediction[..., :4], order=\"xywh2xyxy\")\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence 
scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)\n detections = torch.cat(\n (image_pred[:, :5], class_confs.float(), class_preds.float()), 1\n )\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = (\n box_iou(detections[0, :4].unsqueeze(0), detections[:, :4], order=\"xyxy\")\n > nms_thres\n )\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(\n 0\n ) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n return output", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = [x * y for x, y in zip(box_confidences, box_class_probs)]\n box_class_scores = [np.max(x, axis=-1).reshape(-1) for x in box_scores]\n box_class_scores = np.concatenate(box_class_scores)\n box_classes = [np.argmax(x, axis=-1).reshape(-1) for x in box_scores]\n box_classes = np.concatenate(box_classes)\n filtering_mask = box_class_scores >= self.class_t\n list = [np.reshape(x, (-1, 4)) for x in boxes]\n boxes = np.concatenate(list)\n boxes = boxes[filtering_mask]\n scores = box_class_scores[filtering_mask]\n classes = box_classes[filtering_mask]\n return (boxes, classes, scores)", "def thresh(self, thresh=25, total_ratings=False):\n before = self.item_count()\n\n if total_ratings: self.filter(self.n_per_item() >= thresh)\n else: self.filter(np.all(self.lam() >= thresh, axis=0))\n\n after = self.item_count()\n thresh_type = 'on each item total' if total_ratings else 'by each group' \n with msg(f'Applying threshold of {thresh} ratings {thresh_type} : {after} of {before}', done=False, enabled=self.output):pass", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def filter_bboxes_by_visibility(\n original_shape: Sequence[int],\n bboxes: Sequence[BoxType],\n transformed_shape: Sequence[int],\n transformed_bboxes: Sequence[BoxType],\n threshold: float = 0.0,\n min_area: float = 0.0,\n) -> List[BoxType]:\n img_height, img_width = original_shape[:2]\n transformed_img_height, transformed_img_width = transformed_shape[:2]\n\n visible_bboxes = []\n for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):\n if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):\n continue\n bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, 
transformed_img_width)\n if transformed_bbox_area < min_area:\n continue\n visibility = transformed_bbox_area / bbox_area\n if visibility >= threshold:\n visible_bboxes.append(transformed_bbox)\n return visible_bboxes", "def filter_boxes(self, min_score, detections):\n mask = detections['detection_scores'] >= min_score\n filtered_detections = (detections['detection_boxes'][mask], \n detections['detection_scores'][mask], \n detections['detection_classes'][mask])\n return filtered_detections", "def box_non_maximum_suppression(data=None, overlap_thresh=_Null, topk=_Null, coord_start=_Null, score_index=_Null, id_index=_Null, force_suppress=_Null, in_format=_Null, out_format=_Null, out=None, name=None, **kwargs):\n return (0,)", "def box_results_with_nms_and_limit(\n scores, boxes, score_thresh=0.05, nms=0.5, detections_per_img=100\n):\n num_classes = scores.shape[1]\n cls_boxes = []\n cls_scores = []\n labels = []\n device = scores.device\n # Apply threshold on detection probabilities and apply NMS\n # Skip j = 0, because it's the background class\n for j in range(1, num_classes):\n inds = scores[:, j] > score_thresh\n scores_j = scores[inds, j]\n boxes_j = boxes[inds, j * 4 : (j + 1) * 4]\n keep = box_nms(boxes_j, scores_j, nms)\n cls_boxes.append(boxes_j[keep])\n cls_scores.append(scores_j[keep])\n # TODO see why we need the device argument\n labels.append(torch.full_like(keep, j, device=device))\n\n cls_scores = torch.cat(cls_scores, dim=0)\n cls_boxes = torch.cat(cls_boxes, dim=0)\n labels = torch.cat(labels, dim=0)\n number_of_detections = len(cls_scores)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > detections_per_img > 0:\n image_thresh, _ = torch.kthvalue(\n cls_scores.cpu(), number_of_detections - detections_per_img + 1\n )\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep)\n keep = keep.squeeze(1) if keep.numel() else keep\n cls_boxes = cls_boxes[keep]\n cls_scores = cls_scores[keep]\n labels = labels[keep]\n return cls_scores, cls_boxes, labels", "def py_cpu_nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4] # bbox打分\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 打分从大到小排列,取index\n order = scores.argsort()[::-1]\n keep = [] # keep为最后保留的边框\n while order.size > 0:\n i = order[0] # order[0]是当前分数最大的窗口,肯定保留\n keep.append(i) # 计算窗口i与其他所有窗口的交叠部分的面积\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1) # 取两个向量的较小值\n h = np.maximum(0.0, yy2 - yy1 + 1) # 取两个向量的较大值,不大于0就取0\n inter = w * h # 两个框的交集\n # 交/并得到iou值\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # inds为所有与窗口i的iou值小于threshold值的窗口的index,其他窗口此次都被窗口i吸收\n inds = np.where(ovr <= thresh)[0]\n # order里面只保留与窗口i交叠面积小于threshold的那些窗口,由于ovr长度比order长度少1(不包含i),所以inds+1对应到保留的窗口\n order = order[inds + 1]\n\n return keep", "def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):\n\n\t# Step 1: Compute box scores\n\tbox_scores = box_confidence * box_class_probs # [19, 19, 5, 1] * [19, 19, 5, 80] = [19, 19, 5, 80]\n\n\t# Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\n\tbox_classes = K.argmax(box_scores, axis=-1)\n\tbox_class_scores = K.max(box_scores, axis=-1, keepdims=False)\n\n\t# Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\n\t# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\n\tfiltering_mask = box_class_scores >= threshold\n\n\t# Step 4: Apply the mask to scores, boxes and classes\n\tscores = tf.boolean_mask(box_class_scores, filtering_mask)\n\tboxes = tf.boolean_mask(boxes, filtering_mask)\n\tclasses = tf.boolean_mask(box_classes, filtering_mask)\n\n\treturn scores, boxes, classes", "def prune_bbox(receptive_box, bbox, threshold=0):\n xmin = util.where(receptive_box[:, 0] >= bbox[0] - threshold)\n ymin = util.where(receptive_box[:, 1] >= bbox[1] - threshold)\n xmax = util.where(receptive_box[:, 2] < bbox[2] + threshold)\n ymax = util.where(receptive_box[:, 3] < bbox[3] + threshold)\n\n val1 = util.intersect1d(xmin, ymin)\n val2 = util.intersect1d(xmax, ymax)\n valid_ids = torch.sort(torch.unique(util.intersect1d(val1, val2)))[0]\n\n pruned_receptive_box = receptive_box[valid_ids]\n\n return pruned_receptive_box, valid_ids", "def filter_boxes(min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def _nms_boxes(self, boxes, box_confidences):\n x_coord = boxes[:, 0]\n y_coord = boxes[:, 1]\n width = boxes[:, 2]\n height = boxes[:, 3]\n\n areas = width * height\n ordered = box_confidences.argsort()[::-1]\n\n keep = list()\n while ordered.size > 0:\n # Index of the current element:\n i = ordered[0]\n keep.append(i)\n xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])\n yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])\n xx2 = np.minimum(x_coord[i] + width[i],\n x_coord[ordered[1:]] + width[ordered[1:]])\n yy2 = np.minimum(y_coord[i] + height[i],\n y_coord[ordered[1:]] + height[ordered[1:]])\n\n width1 = np.maximum(0.0, xx2 - xx1 + 1)\n height1 = np.maximum(0.0, yy2 - yy1 + 1)\n intersection = width1 * height1\n union = (areas[i] + areas[ordered[1:]] - intersection)\n\n # Compute the Intersection over Union (IoU) score:\n iou = intersection / union\n\n # The goal of the NMS algorithm is to reduce the number of adjacent bounding-box\n # candidates to a minimum. 
In this step, we keep only those elements whose overlap\n # with the current bounding box is lower than the threshold:\n indexes = np.where(iou <= self.nms_threshold)[0]\n ordered = ordered[indexes + 1]\n\n keep = np.array(keep)\n return keep", "def nms(scores: np.ndarray,\n bboxes: np.ndarray,\n thresh: float) -> Tuple[np.ndarray, np.ndarray]:\n valid_idx = bboxes[:, 0] < bboxes[:, 1] # overlap segments\n scores = scores[valid_idx]\n bboxes = bboxes[valid_idx]\n\n # First element in arg_desc is index of most important segment\n arg_desc = scores.argsort()[::-1]\n\n scores_remain = scores[arg_desc]\n bboxes_remain = bboxes[arg_desc]\n\n keep_bboxes = []\n keep_scores = []\n\n while bboxes_remain.size > 0:\n bbox = bboxes_remain[0]\n score = scores_remain[0]\n keep_bboxes.append(bbox)\n keep_scores.append(score)\n\n iou = iou_lr(bboxes_remain, np.expand_dims(bbox, axis=0))\n\n keep_indices = (iou < thresh)\n bboxes_remain = bboxes_remain[keep_indices]\n scores_remain = scores_remain[keep_indices]\n\n keep_bboxes = np.asarray(keep_bboxes, dtype=bboxes.dtype)\n keep_scores = np.asarray(keep_scores, dtype=scores.dtype)\n\n return keep_scores, keep_bboxes", "def getMostContour(img,svm,knn,filterArr,digits,wThresh,hThresh):\r\n # append the filter to filter array, this approach is used in case of \r\n # multiple filter methods would be used.\r\n counts = []\r\n # iterare through every filter\r\n for flt in filterArr:\r\n # copy the image so we don't draw on same image\r\n flt_img = img.copy()\r\n last_img = img.copy()\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh) \r\n if not digits:\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh)\r\n flt_contour_map = []\r\n labels = []\r\n for crop,(x,y,w,h),contour in cropNwriteBBs(img,cntfound_fltr):\r\n #crop = np.array(crop,dtype='float32')\r\n crop = cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)\r\n crop = cv2.resize(crop,(25,25))\r\n # winSize is the size of the image cropped to an multiple of the cell size\r\n hog_fts = hog.compute(crop)\\\r\n .reshape(n_cells[1] - block_size[1] + 1,\r\n n_cells[0] - block_size[0] + 1,\r\n block_size[0], block_size[1], nbins) \\\r\n .transpose((1, 0, 2, 3, 4))\r\n hog_fts = np.resize(hog_fts.flatten(),(1,576))\r\n # make the resulted crop same type with the trained values\r\n hog_fts.dtype = 'float32'\r\n # get predicted labels\r\n label_svm=svm.predict(hog_fts)[1]\r\n label_knn = knn.findNearest(hog_fts,k=5)[1]\r\n # label 10 is considered as 'not digit' or 'thrash'\r\n # so if predicted label is not 10, draw the bounding box\r\n if digits:\r\n if(label_svm!=10 and label_knn != 10 and label_svm!=11 and label_knn != 11):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n else:\r\n if(label_svm!=2 and label_knn != 2):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n last_cnt,last_labels = secondElimination(flt_contour_map,labels)\r\n for cnt in last_cnt:\r\n x,y,w,h = cv2.boundingRect(cnt)\r\n cv2.rectangle(flt_img,(x,y),(x+w,y+h),[0,255,0],2)\r\n #showWait(flt_img,'fltres')\r\n _,xx,res_boxes,_,_ = mergeBoundingBoxes(flt_img,last_cnt,last_labels)\r\n cnt = len(res_boxes)\r\n counts.append([cnt,flt_img,last_cnt,last_labels])\r\n # append resulted image and contours to an array\r\n 
counts = np.asarray(counts)\r\n # get the resulted image which contain more digits (bounding boxes)\r\n tmp = counts[:,0]\r\n resulted_img = counts[np.argmax(tmp),1]\r\n result_labels = counts[np.argmax(tmp),3]\r\n resulted_contour = counts[np.argmax(tmp),2]\r\n return resulted_contour,result_labels,resulted_img", "def get_mask_bbox_and_score(yolact_net: Yolact, img, threshold=0.0, max_predictions=1):\n with torch.no_grad():\n frame = torch.from_numpy(img).cuda().float()\n batch = FastBaseTransform()(frame.unsqueeze(0))\n preds = yolact_net(batch)\n\n h, w, _ = img.shape\n\n save = cfg.rescore_bbox\n cfg.rescore_bbox = True\n t = postprocess(preds, w, h, visualize_lincomb=False, crop_masks=True)\n cfg.rescore_bbox = save\n\n idx = t[1].argsort(0, descending=True)[:max_predictions]\n classes, scores, boxes, masks = [x[idx].cpu().numpy() for x in t[:]]\n\n num_dets_to_consider = min(max_predictions, classes.shape[0])\n # Remove detections below the threshold\n for j in range(num_dets_to_consider):\n if scores[j] < threshold:\n num_dets_to_consider = j\n break\n masks_to_return = boxes_to_return = scores_to_return = None\n if num_dets_to_consider > 0:\n masks = masks[:num_dets_to_consider, :, :, None]\n masks_to_return = []\n boxes_to_return = []\n scores_to_return = []\n for m, b, s in zip(masks, boxes, scores):\n masks_to_return.append(m)\n boxes_to_return.append(b)\n scores_to_return.append(s)\n if len(masks_to_return) == 1:\n masks_to_return = masks_to_return[0]\n if len(boxes_to_return) == 1:\n boxes_to_return = boxes_to_return[0]\n if len(scores_to_return) == 1:\n scores_to_return = scores_to_return[0]\n return masks_to_return, boxes_to_return, scores_to_return", "def vis_detections(im, class_name, dets, thresh=0.5, video= None,fid=0):\n dirname = os.path.dirname(__file__)\n show_dir = os.path.join(dirname, '..', 'show/%s' % os.path.basename(video))\n # print(show_dir)\n if not os.path.exists(show_dir):\n os.makedirs(show_dir)\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n plt.savefig('%s/all_bboxes_%d.jpg' % (show_dir, fid))\n # plt.show()", "def filter_boxes(self, boxes, box_confidence, box_class_probs):\n f_boxes = []\n b_classes = []\n b_scores = []\n for i in range(len(boxes)):\n boxscore = box_confidence[i] * box_class_probs[i]\n maxes = np.amax(boxscore, axis=3)\n keep = np.argwhere(maxes[:, :, :] >= self.class_t)\n\n for kept in keep:\n f_boxes.append(boxes[i][kept[0], kept[1], kept[2]])\n b_classes.append(np.argmax(boxscore[kept[0],\n kept[1], kept[2]]))\n b_scores.append(maxes[kept[0], kept[1], kept[2]])\n \"\"\" muchj easier in tf 2.x\n\n box_class = tf.argmax(boxscore, axis=-1)\n box_score = tf.math.reduce_max(boxscore, axis=-1)\n mask = boxscore >= self.class_t\n\n boxes = tf.compat.v1.boolean_mask(boxes, mask)\n scores = tf.compaat.v1.boolean_mask(boxscore, mask)\n classes = tf.compat.v1.boolean_mask(box_class, mask)\n\n 
f_boxes.append(boxes)\n b_classes.append(classes)\n b_scores.append(scores)\n \"\"\"\n filtered_boxes = np.array(f_boxes)\n box_classes = np.array(b_classes)\n box_scores = np.array(b_scores)\n return (filtered_boxes, box_classes, box_scores)", "def _get_bounding_box(self, detections, confidence):\n for detection in detections[0,0,:,:]:\n if detection[2] >= confidence:\n return detection[3:]\n \n return None", "def _filter_boxes(self, patch, boxes):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (\n center[:, 0] < patch[2]) * (\n center[:, 1] < patch[3])\n return mask", "def find_contours(img_path, **kwargs):\n threshold = kwargs['threshold']\n image = cv2.imread(img_path)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]\n contours, _ = cv2.findContours(thresh.copy(),\n cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n\n contour_areas = [] # TODO: is there a faster way to keep the contors with n-largest areas?\n for i, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n contour_areas.append((i, area))\n\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(image,[box],0,(0,0,255),2)\n cv2.imwrite('rect_{}.png'.format(i), image)\n # Contours that cover the largest areas are whole pieces\n largest_contour_areas = nlargest(10, contour_areas, key=lambda x:x[1])\n #print(largest_contour_areas)\n\n for i, area in largest_contour_areas:\n print (i, area)\n cv2.drawContours(image, contours, i, (100, 155, 100), 3)\n cv2.imwrite('contor_{}.png'.format(i), image)\n return True", "def _thresh_clip(self, xmin, ymin, zmin, xmax, ymax, zmax):\n\n for p in self.points:\n if p.y > ymax or p.y < ymin:\n print p, 1\n self.raster = False\n break\n elif p.x > xmax or p.x < xmin:\n print p, 2\n self.raster = False\n break\n elif p.z > zmax or p.z < zmin:\n print p, 3\n self.raster = False\n break", "def filter_boxes(self, min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def paintings_detection(query_image, mask):\n\n image = cv2.imread(query_image)\n\n image_width = mask.shape[0]\n image_height = mask.shape[1]\n x_box_1, y_box_1, w_box_1, h_box_1, x_box_2, y_box_2, w_box_2, h_box_2 = 0, 0, 0, 0, 0, 0, 0, 0, \n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n \n if (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 == 0):\n x_box_1, y_box_1, w_box_1, h_box_1 = x, y, w, h\n elif (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 != 0):\n x_box_2, y_box_2, w_box_2, h_box_2 = x, y, w, h\n\n if x_box_2 == 0:\n x_value_to_split = 0\n else:\n x_value_to_split = (x_box_1 + w_box_1/2 + x_box_2 + w_box_2/2) / 2\n\n\n return(x_value_to_split)", "def non_max_suppression(prediction, score_thres=0.5, nms_thres=0.4):\n output = [{'boxes':None, 'labels':None, 'scores':None} for _ in range(len(prediction))]\n for image_i, pred in enumerate(prediction):\n boxes = pred['boxes']\n labels = pred['labels'].unsqueeze(1)\n scores = pred['scores'].unsqueeze(1)\n 
image_pred = torch.cat((boxes, scores, labels.float()), 1)\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= score_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n #class_confs, class_preds = image_pred[:, 4:].max(1, keepdim=True)\n detections = image_pred\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i]['boxes'] = torch.stack(keep_boxes)[:,:4]\n output[image_i]['labels'] = torch.stack(keep_boxes)[:,-1]\n output[image_i]['scores'] = torch.stack(keep_boxes)[:,4:-1]\n\n return output", "def nms_all_class(bound_corr_objs, nms_thresh):\n bboxs, scores, masks, labels = [], [], [], []\n for obj in bound_corr_objs:\n bboxs.append(obj['box'])\n scores.append(obj['score'])\n # masks.append(obj['mask'])\n # labels.append(obj['label'])\n bboxs = np.asarray(bboxs)\n scores = np.asarray(scores)\n # masks = np.asarray(masks)\n # labels = np.asarray(labels)\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n order = scores.argsort()[::-1]\n # mask_sizes = np.sum(masks, axis=(1, 2))\n # order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n\n # mask_other = masks[order[1:], :, :]\n # mask_cur = masks[i, :, :]\n # mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n # mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n # mask_iou = mask_inter / mask_union\n\n # inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n inds = np.where(iou <= nms_thresh)[0]\n order = order[inds + 1]\n\n # masks = masks[keep]\n # ids = ids[keep]\n return keep", "def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):\n \"\"\" takes bboxes with the shape of (num_of_box, 6), where 6 => (xmin, ymin, xmax, ymax, score, class) \"\"\"\n \n # remove duplicates in classes\n classes_in_img = list(set(bboxes[:, 5]))\n \n # initialise list to store best bboxes\n best_bboxes = []\n \n # iterate over each class\n for cls in classes_in_img:\n \n # get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class\n cls_mask = (bboxes[:, 5] == cls)\n cls_bboxes = bboxes[cls_mask]\n \n # iterate while there are still bboxes in cls_bboxes\n while len(cls_bboxes) > 0:\n \n # select index of the bbox with the highest score \n max_ind = np.argmax(cls_bboxes[:, 4])\n \n # select bbox 
with highest score \n best_bbox = cls_bboxes[max_ind]\n \n # append to best _bbox list \n best_bboxes.append(best_bbox)\n \n # obtain cls_bboxes without best bbox\n cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])\n \n # calculate iou of remaining bboxes with best bbox \n iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])\n \n weight = np.ones((len(iou), ), dtype = np.float32)\n \n # assert method to be either 'nms' or 'soft_nms'\n assert method in ['nms', 'soft_nms']\n \n if method == 'nms':\n \n # obtain nms iou mask based on threshold\n iou_mask = iou > iou_threshold\n \n # apply mask on weights\n weight[iou_mask.numpy()] = 0.0\n \n if method == 'soft_nms':\n \n # obtain soft_nms weights\n weight = np.exp(-(1.0 * iou ** 2 / sigma))\n \n # apply weights on cls_bboxes\n cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight\n \n # obtain score mask of scores greater than zero\n score_mask = cls_bboxes[:, 4] > 0.\n \n # apply mask on cls_bboxes \n cls_bboxes = cls_bboxes[score_mask]\n\n return best_bboxes", "def nms(dets, scores, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n # scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1] # score从大到小的索引值\n # order = np.argsort(-scores) # 也可以\n\n keep = []\n while order.size > 0:\n i = order[0] # 得到第一个最大的索引值\n keep.append(i) # 保留得分最大的索引值\n # 得到中间inter矩形的坐标\n xx1 = np.maximum(x1[i], x1[order[1:]]) # x1[i]和除了最大的值之外的值作比较\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter) # 第i个box和其它box的iou\n\n # 大于阈值的就不管了(去除掉),小于阈值的就可能是另一个目标框,留下来继续比较\n inds = np.where(ovr <= thresh)[0] # 返回满足条件的order[1:]中的索引值\n order = order[inds + 1] # +1得到order中的索引值\n\n return keep", "def find_instances_in_features(self, features, region):\n for current_window in self.hogger.hog_scan(self.box_size, self.box_size):\n if self.classifier.classify_features(current_window[\"features\"])==1.0:\n off_x = current_window[\"x\"]\n off_y = current_window[\"y\"]\n trans_off_x = int(off_x * self.scaling) + region[0]\n trans_off_y = int(off_y * self.scaling) + region[1]\n\n cv2.rectangle(self.resized_image, (off_x, off_y), (off_x + self.box_size, off_y + self.box_size),\n color=(255, 255, 255), thickness=2)\n cv2.rectangle(self.image, (trans_off_x, trans_off_y), (trans_off_x + self.eff_box_size, trans_off_y + self.eff_box_size),\n color=(255, 255, 255), thickness=2)\n self.boundings.append(((trans_off_x, trans_off_y), (trans_off_x + self.eff_box_size, trans_off_y + self.eff_box_size)))", "def vis_detections(im, class_name, dets, thresh=0.8, highest=False, use_colour=None, h=1.0, w=1.0, rc=1.0, alpha=1.0):\n overlay = im\n for i in range(dets.shape[0]):\n overlay = im.copy()\n output = im.copy()\n bbox = []\n for x, ind in zip(dets[i, :4], range(4)):\n if ind == 0 or ind == 2:\n x = int(np.round(x * w / rc))\n if ind == 1 or ind ==3:\n x = int(np.round(x * h / rc))\n bbox.append(x)\n bbox = tuple(bbox)\n thickness = 2\n if highest:\n colour = (0, 0, 200)\n else:\n colour = (0, 200, 0)\n if use_colour is not None:\n colour = use_colour\n\n if alpha == 1.0:\n cv2.rectangle(overlay, bbox[0:2], bbox[2:4], colour, thickness=thickness)\n cv2.putText(overlay, '%s' % (class_name), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n return overlay\n 
else:\n cv2.rectangle(overlay, bbox[0:2], bbox[2:4], colour, -1)\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\n return output\n return overlay", "def non_max_suppression_all_classes(boxes, scores, labels, iou_threshold=0.5):\n excluded_indices = []\n for i in range(0,len(boxes)):\n obj1_box, _, obj1_label = boxes[i], scores[i], labels[i]\n for j in range(i+1,len(boxes)):\n obj2_box, _, obj2_label = boxes[j], scores[j], labels[j]\n if (get_iou(obj1_box, obj2_box) > iou_threshold):\n #print('excluding idx={}, class={}, score={}, bbox={}'.format(j, obj2_label, obj2_score, obj2_box))\n excluded_indices.append(j)\n \n excluded_indices = list(set(excluded_indices)) #Elimina indices repetidos\n included_indices = [idx for idx in range(len(boxes)) if idx not in excluded_indices]\n #print(included_indices)\n return included_indices", "def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):\n min_wh = 2 # (pixels) minimum box width and height\n\n output = [None] * len(prediction)\n for image_i, pred in enumerate(prediction):\n # Multiply conf by class conf to get combined confidence\n class_conf, class_pred = pred[:, 5:].max(1)\n pred[:, 4] *= class_conf\n\n # Select only suitable predictions\n i = pred[:, 4] > conf_thres\n i &= (pred[:, 2:4] > min_wh).all(1)\n i &= torch.isfinite(pred).all(1)\n\n pred = pred[i]\n\n # If none are remaining => process next image\n if len(pred) == 0:\n continue\n\n # Select predicted classes\n class_conf = class_conf[i]\n class_pred = class_pred[i].unsqueeze(1).float()\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n pred[:, :4] = xywh2xyxy(pred[:, :4])\n\n # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)\n pred = torch.cat((pred[:, :5], class_conf.unsqueeze(1), class_pred), 1)\n\n # Get detections sorted by decreasing confidence scores\n pred = pred[(-pred[:, 4]).argsort()]\n\n det_max = []\n nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental)\n for c in pred[:, -1].unique():\n dc = pred[pred[:, -1] == c] # select class c\n n = len(dc)\n if n == 1:\n det_max.append(dc) # No NMS required if only 1 prediction\n continue\n elif n > 100:\n # limit to first 100 boxes:\n # https://github.com/ultralytics/yolov3/issues/117\n dc = dc[:100]\n\n # Non-maximum suppression\n if nms_style == 'OR': # default\n while dc.shape[0]:\n det_max.append(dc[:1]) # save highest conf detection\n if len(dc) == 1: # Stop if we're at the last detection\n break\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n elif nms_style == 'AND': # requires overlap, single boxes erased\n while len(dc) > 1:\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n if iou.max() > 0.5:\n det_max.append(dc[:1])\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n elif nms_style == 'MERGE': # weighted mixture box\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes\n weights = dc[i, 4:5]\n dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum()\n det_max.append(dc[:1])\n dc = dc[i == 0]\n # soft-NMS https://arxiv.org/abs/1704.04503\n elif nms_style == 'SOFT':\n sigma = 0.5 # soft-nms sigma parameter\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n det_max.append(dc[:1])\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:]\n # decay confidences\n dc[:, 4] *= torch.exp(-iou ** 2 / sigma)\n\n if len(det_max):\n det_max = torch.cat(det_max) # concatenate\n 
output[image_i] = det_max[(-det_max[:, 4]).argsort()] # sort\n\n return output", "def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field=\"scores\"):\n if nms_thresh <= 0:\n return boxlist\n mode = boxlist.mode\n boxlist = boxlist.convert(\"xyxy\")\n boxes = boxlist.bbox\n score = boxlist.get_field(score_field)\n keep = _box_nms(boxes, score, nms_thresh)\n if max_proposals > 0:\n keep = keep[:max_proposals]\n boxlist = boxlist[keep]\n return boxlist.convert(mode)", "def GlobalThresholding(image, kernel_sigma, N_levels, N_classes, step = 1): \n \n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n pixel_count, pixel_count_normalized = CountPixels(image, N_levels);\n mean_g = image.mean(); # global mean\n\n if N_classes == 2: \n interclass_var = np.zeros((N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 1, step): \n\n threshold = ii;\n \n mask_1 = range_array <= threshold;\n mask_2 = range_array > threshold;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = 1 - p_1; # probability of class 2\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2;\n interclass_var[ii] = np.nan_to_num(temp);\n \n threshold = np.argmax(interclass_var);\n mask_1 = image <= threshold;\n mask_2 = image > threshold;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n return mask;\n elif N_classes == 3:\n interclass_var = np.zeros((N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 2, step): \n for jj in range(ii + 1, N_levels - 1, step):\n\n threshold1 = ii;\n threshold2 = jj;\n \n mask_1 = range_array <= threshold1;\n mask_2 = (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = range_array > threshold2;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = 1 - (p_1 + p_2); # probability of class 3\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 3\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + p_3 * (mean_3 - mean_g) ** 2;\n interclass_var[ii, jj] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = image > threshold2;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n return mask;\n elif N_classes == 4:\n interclass_var = np.zeros((N_levels, N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 3, step): \n for jj in range(ii + 1, N_levels - 2, step):\n for kk in range(jj + 1, N_levels - 1, step): \n \n threshold1 = ii;\n threshold2 = jj;\n threshold3 = kk;\n \n mask_1 = range_array <= threshold1;\n mask_2 
= (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = (range_array > threshold2) * (range_array <= threshold3); \n mask_4 = range_array > threshold3;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = pixel_count_normalized[mask_3].sum(); # probability of class 3\n p_4 = 1 - (p_1 + p_2 + p_3); # probability of class 4\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 3\n mean_4 = 1 / p_4 * np.sum(range_array[mask_4] * pixel_count_normalized[mask_4]); # mean of class 4\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + \\\n p_3 * (mean_3 - mean_g) ** 2 + p_4 * (mean_4 - mean_g) ** 2;\n interclass_var[ii, jj, kk] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n threshold3 = threshold[2];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = (image > threshold2) * (image <= threshold3);\n mask_4 = image > threshold3;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n mask[mask_4] = 3;\n return mask;\n else:\n print('max supported N_class == 4. Abort..\\n')\n return None;", "def boxes_filter(dets, PRE_NMS_TOPN, NMS_THRESH, POST_NMS_TOPN, \n CONF_THRESH, USE_GPU=False):\n # speed up nms \n if PRE_NMS_TOPN > 0:\n dets = dets[: min(len(dets), PRE_NMS_TOPN), :]\n\n # apply nms\n if NMS_THRESH > 0 and NMS_THRESH < 1:\n if USE_GPU:\n keep = nms_gpu(dets, NMS_THRESH)\n else:\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n\n if POST_NMS_TOPN > 0:\n dets = dets[: min(len(dets), POST_NMS_TOPN), :]\n\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n dets = dets[inds, :] \n\n return dets", "def filter_bboxes_by_visibility(img, bboxes, transformed_img, transformed_bboxes, threshold):\n img_height, img_width = img.shape[:2]\n transformed_img_height, transformed_img_width = transformed_img.shape[:2]\n\n visible_bboxes = []\n for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):\n if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):\n continue\n bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)\n visibility = transformed_bbox_area / bbox_area\n if visibility >= threshold:\n visible_bboxes.append(transformed_bbox)\n return visible_bboxes", "def textDetectWatershed(thresh, original):\n # According to: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html\n img = resize(original, 3000)\n thresh = resize(thresh, 3000)\n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)\n \n # sure background area\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\n\n # Finding sure foreground area\n dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\n ret, sure_fg = cv2.threshold(dist_transform,0.01*dist_transform.max(),255,0)\n\n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg,sure_fg)\n \n # Marker labelling\n ret, markers = 
cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers += 1\n\n # Now, mark the region of unknown with zero\n markers[unknown == 255] = 0\n \n markers = cv2.watershed(img, markers)\n implt(markers, t='Markers')\n image = img.copy()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # Creating result array\n boxes = []\n for mark in np.unique(markers):\n # mark == 0 --> background\n if mark == 0:\n continue\n\n # Draw it on mask and detect biggest contour\n mask = np.zeros(gray.shape, dtype=\"uint8\")\n mask[markers == mark] = 255\n\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n c = max(cnts, key=cv2.contourArea)\n \n # Draw a bounding rectangle if it contains text\n x,y,w,h = cv2.boundingRect(c)\n cv2.drawContours(mask, c, 0, (255, 255, 255), cv2.FILLED)\n maskROI = mask[y:y+h, x:x+w]\n # Ratio of white pixels to area of bounding rectangle\n r = cv2.countNonZero(maskROI) / (w * h)\n \n # Limits for text\n if r > 0.1 and 2000 > w > 15 and 1500 > h > 15:\n boxes += [[x, y, w, h]]\n \n # Group intersecting rectangles\n boxes = group_rectangles(boxes)\n bounding_boxes = np.array([0,0,0,0])\n for (x, y, w, h) in boxes:\n cv2.rectangle(image, (x, y),(x+w,y+h), (0, 255, 0), 8)\n bounding_boxes = np.vstack((bounding_boxes, np.array([x, y, x+w, y+h])))\n \n implt(image)\n\n # Recalculate coordinates to original size\n boxes = bounding_boxes.dot(ratio(original, img.shape[0])).astype(np.int64)\n return boxes[1:]", "def yolo2_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):\n box_scores = box_confidence * box_class_probs\n box_classes = K.argmax(box_scores, axis=-1)\n box_class_scores = K.max(box_scores, axis=-1)\n prediction_mask = box_class_scores >= threshold\n\n # TODO: Expose tf.boolean_mask to Keras backend?\n boxes = tf.boolean_mask(boxes, prediction_mask)\n scores = tf.boolean_mask(box_class_scores, prediction_mask)\n classes = tf.boolean_mask(box_classes, prediction_mask)\n return boxes, scores, classes", "def component_filter(components, img, edge_boxes, max_horizontal_txt_height=defaults.MAX_HORIZONTAL_TEXT_HEIGHT):\n white_txt_background = False\n text_like_component = []\n num_of_box_with_white_neighbors = 0\n white_neighbors = []\n for component in components:\n mask = np.zeros(img.shape[0:2])\n edges = find_edges(component, edge_boxes) # find edge areas inside the text box\n if len(edges) == 0: continue # no processing if there is no edge in the box\n if max(edge_cluster_contrast(img, edges)) < 50: continue # no processing if the contrast is too low\n adjusted_x, adjusted_y, w, h = edge_cluster_rectangle(\n edges) # adjust the coordinates of the text box to make it tighter\n component = (slice(adjusted_y, adjusted_y + h), slice(adjusted_x, adjusted_x + w))\n ###### create a mask in which edge areas are filled with 1, other areas 0.\n for edge in edges:\n x, y, w, h = edge\n mask[y:y + h, x:x + w] = 1\n ############ crop the mask into the same shape as the box\n mask = mask[component[0].start:component[0].stop, component[1].start:component[1].stop]\n ############ extract the area of the text box from the image #################\n aoi = img[component[0].start:component[0].stop, component[1].start:component[1].stop]\n aoi = clean.binarize(aoi, threshold=180)\n\n ############## compute the white/black ratio #######################\n zero_ratio = density_analysis(aoi)\n if zero_ratio > 0.75: continue # if too many white pixels, drop it\n if zero_ratio < 0.15: continue # if 
too many black pixels, drop it\n\n # print('--------------------------------------------------------')\n # analyze_block_vertical(mask)\n # ax1 = plt.subplot(1,2,1)\n # ax1.imshow(aoi)\n # ax2 = plt.subplot(1,2,2)\n # ax2.imshow(mask)\n # plt.show()\n\n ############### analyze the masks or aois to see whether it is text-like ##########\n\n if analyze_block_vertical(mask, max_horizontal_text_height=max_horizontal_txt_height) \\\n or analyze_block_horizon(mask) \\\n or analyze_block_vertical(aoi / 255, max_horizontal_text_height=max_horizontal_txt_height) \\\n or analyze_block_horizon(aoi / 255):\n # if border_analyze_vertical(mask, vertical_borders) or border_analyze_horizon(mask, horizon_borders):\n text_like_component.append(component)\n ########## extract left, right, upper, lower neighboring areas of the candiate box ########\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n ############# if the candidate box is indeed a text box, it should should have white areas next to it #######\n left_white_ratio = 0\n if component_right_neighbor.shape[1] > 0 and component_right_neighbor.shape[0] > 0:\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = 0\n if component_left_neighbor.shape[0] > 0 and component_left_neighbor.shape[1] > 0:\n right_white_ratio = np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = 0\n if component_up_neighbor.shape[0] > 0 and component_up_neighbor.shape[1] > 0:\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = 0\n if component_low_neighbor.shape[0] > 0 and component_low_neighbor.shape[1] > 0:\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n white_neighbors.append(\n [left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9])\n if all([left_white_ratio > 0.95, right_white_ratio > 0.95, up_white_ratio > 0.95, low_white_ratio > 0.95]):\n num_of_box_with_white_neighbors = num_of_box_with_white_neighbors + 1\n\n if num_of_box_with_white_neighbors >= 2: # if there are at least two boxes having neighbors all white, then all text areas have white background\n white_txt_background = True\n text_like_component = [component for idx, component in enumerate(text_like_component) if\n np.sum(white_neighbors[idx]) >= 2]\n # text_like_component=expand_component_1(img,text_like_component,edge_boxes)\n return text_like_component, white_txt_background", "def nms(bboxs, scores, thresh):\n if len(bboxs) == 0:\n return []\n order = scores.argsort()[::-1]\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n ious = get_iou(bboxs[order], bboxs[i])\n order = order[ious <= thresh]\n return keep", "def non_max_suppress_orig(boxes_in_batch, score_arr, config):\n# FRAME_STEP_SEC = 0.1 # sec\n# FRAME_SIZE_SEC = 2.0 # each window is 
2 sec long\n# \n# OVERLAP_RATIO = 1.0 # non-max suppression\n \n \n overlap_size = int(config.FRAME_SIZE_SEC/config.FRAME_STEP_SEC\n *config.OVERLAP_RATIO)\n # boxes sorted by scores\n box_sorted_by_score = boxes_in_batch[np.argsort(score_arr[boxes_in_batch])[::-1]] \n # [::-1] reverse the sort order from ascending to descending\n # get the ordered values: score_arr[boxes_in_batch][box_sorted_by_score]\n\n # original approach\n# time_start = time.time()\n# boxes_separated = separate_boxes(box_sorted_by_score, overlap_size)\n# print('Method 1: run time is: '+str(time.time() - time_start))\n# \n# time_start2 = time.time()\n# boxes_separated2 = separate_boxes_faster(box_sorted_by_score, overlap_size)\n# print('Method 2: run time is: '+str(time.time() - time_start2))\n\n #time_start3 = time.time()\n boxes_separated = separate_boxes_fasterer(box_sorted_by_score, overlap_size)\n #print('Method 3: run time is: '+str(time.time() - time_start3))\n\n \n # alternative approach\n #boxes_separated = separate_boxes_time(box_sorted_by_score.sort(), overlap_size)\n \n # computer vision approach: Malisiewicz et al.\n #boxes_separated = non_max_suppression_fast(boxes_in_batch, overlapThresh):\n #print(boxes_separated)\n #print(boxes_separated2)\n #print(boxes_separated3)\n \n return boxes_separated", "def get_max(im, class_name, dets, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n return [max_inds,score]", "def bounding_box(alpha):\n assert alpha.ndim == 2\n\n # Take the bounding box of the support, with a certain threshold.\n #print(\"Using alpha\", self.use_alpha, \"support\", self.support)\n supp_axs = [alpha.max(axis=1-i) for i in range(2)]\n\n th = 0.5 \n # Check first and last value of that threshold\n bb = [np.where(supp_axs[i] > th)[0][[0,-1]] for i in range(2)]\n\n # This bb looks like [(x0, x1), (y0, y1)], when we want it as (x0, y0, x1, y1)\n #psize = self.settings['subsample_size']\n #ret = (bb[0][0]/psize[0], bb[1][0]/psize[1], bb[0][1]/psize[0], bb[1][1]/psize[1])\n\n return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])", "def reduce_possibilities_by_box(self):\n x = self.targetCell.x\n y = self.targetCell.y\n if x < 3 and y < 3: #top left\n self.check_box1()\n if x > 2 and x < 6 and y < 3: #middle left\n self.check_box2()\n if x > 5 and y < 3: #bottom left\n self.check_box3()\n if x < 3 and y > 2 and y < 6: #top middle\n self.check_box4()\n if x > 2 and x < 6 and y > 2 and y < 6: #center\n self.check_box5()\n if x > 5 and y > 2 and y < 6: #bottom middle\n self.check_box6()\n if x < 3 and y > 5: #top right\n self.check_box7()\n if x > 2 and x < 6 and y > 5: #middle right\n self.check_box8()\n if x > 5 and y > 5: #bottom right\n self.check_box9()\n self.targetCell.box_neighbour_possibilities = flatten_list(self.targetCell.box_neighbour_possibilities)", "def get_bounding_boxes(outputs, width: int, height: int):\n\n # detected bounding boxes, obtained confidences and class's number\n boxes = []\n scores = []\n classes = []\n\n # this is our threshold for keeping the bounding box\n probability_minimum = 0.5\n\n # iterating through all three outputs\n for result in outputs:\n # going through all bounding boxes from current output layer\n 
for detection in result:\n # getting class for current object\n scores_current = detection[5:]\n class_current = np.argmax(scores_current)\n\n # getting probability for current object\n probability_current = scores_current[class_current]\n\n # getting object confidence for current object\n object_confidence = detection[4]\n\n # eliminating weak predictions by minimum probability\n if probability_current > probability_minimum:\n # if probability_current*object_confidence > probability_minimum: # this is an alternative way\n\n # Scaling bounding box coordinates to the initial image size\n # by element-wise multiplying them with the width and height of the image\n box_current = np.array(detection[0:4]) * np.array([width, height, width, height])\n\n # YOLO data format keeps center of detected box and its width and height\n # here we reconstruct the top left and bottom right corner\n x_center, y_center, box_width, box_height = box_current.astype('int')\n x_min = int(x_center - (box_width / 2))\n y_min = int(y_center - (box_height / 2))\n x_max = int(x_center + (box_width / 2))\n y_max = int(y_center + (box_height / 2))\n\n # adding results into prepared lists\n boxes.append([x_min, y_min, x_max, y_max])\n scores.append(float(probability_current))\n classes.append(class_current)\n\n boxes = np.array(boxes)\n scores = np.array(scores)\n classes = np.array(classes)\n return boxes, scores, classes", "def rle_mask_voting(\n top_masks, all_masks, all_dets, iou_thresh, binarize_thresh, method='AVG'\n):\n if len(top_masks) == 0:\n return\n\n all_not_crowd = [False] * len(all_masks)\n top_to_all_overlaps = mask_util.iou(top_masks, all_masks, all_not_crowd)\n decoded_all_masks = [\n np.array(mask_util.decode(rle), dtype=np.float32) for rle in all_masks\n ]\n decoded_top_masks = [\n np.array(mask_util.decode(rle), dtype=np.float32) for rle in top_masks\n ]\n all_boxes = all_dets[:, :4].astype(np.int32)\n all_scores = all_dets[:, 4]\n\n # Fill box support with weights\n mask_shape = decoded_all_masks[0].shape\n mask_weights = np.zeros((len(all_masks), mask_shape[0], mask_shape[1]))\n for k in range(len(all_masks)):\n ref_box = all_boxes[k]\n x_0 = max(ref_box[0], 0)\n x_1 = min(ref_box[2] + 1, mask_shape[1])\n y_0 = max(ref_box[1], 0)\n y_1 = min(ref_box[3] + 1, mask_shape[0])\n mask_weights[k, y_0:y_1, x_0:x_1] = all_scores[k]\n mask_weights = np.maximum(mask_weights, 1e-5)\n\n top_segms_out = []\n for k in range(len(top_masks)):\n # Corner case of empty mask\n if decoded_top_masks[k].sum() == 0:\n top_segms_out.append(top_masks[k])\n continue\n\n inds_to_vote = np.where(top_to_all_overlaps[k] >= iou_thresh)[0]\n # Only matches itself\n if len(inds_to_vote) == 1:\n top_segms_out.append(top_masks[k])\n continue\n\n masks_to_vote = [decoded_all_masks[i] for i in inds_to_vote]\n if method == 'AVG':\n ws = mask_weights[inds_to_vote]\n soft_mask = np.average(masks_to_vote, axis=0, weights=ws)\n mask = np.array(soft_mask > binarize_thresh, dtype=np.uint8)\n elif method == 'UNION':\n # Any pixel that's on joins the mask\n soft_mask = np.sum(masks_to_vote, axis=0)\n mask = np.array(soft_mask > 1e-5, dtype=np.uint8)\n else:\n raise NotImplementedError('Method {} is unknown'.format(method))\n rle = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]\n top_segms_out.append(rle)\n\n return top_segms_out", "def filter_bboxes(\n bboxes: Sequence[BoxType],\n rows: int,\n cols: int,\n min_area: float = 0.0,\n min_visibility: float = 0.0,\n min_width: float = 0.0,\n min_height: float = 0.0,\n) -> 
List[BoxType]:\n resulting_boxes: List[BoxType] = []\n for bbox in bboxes:\n # Calculate areas of bounding box before and after clipping.\n transformed_box_area = calculate_bbox_area(bbox, rows, cols)\n bbox, tail = cast(BoxType, tuple(np.clip(bbox[:4], 0, 1.0))), tuple(bbox[4:])\n clipped_box_area = calculate_bbox_area(bbox, rows, cols)\n\n # Calculate width and height of the clipped bounding box.\n x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)[:4]\n clipped_width, clipped_height = x_max - x_min, y_max - y_min\n\n if (\n clipped_box_area != 0 # to ensure transformed_box_area!=0 and to handle min_area=0 or min_visibility=0\n and clipped_box_area >= min_area\n and clipped_box_area / transformed_box_area >= min_visibility\n and clipped_width >= min_width\n and clipped_height >= min_height\n ):\n resulting_boxes.append(cast(BoxType, bbox + tail))\n return resulting_boxes", "def draw_boundingbox(image, infer_output, image_width, image_height, conf_thresh):\n\n out_image = image.copy()\n logger.debug(' - input image: [width] %d, [height] %d' % (image.shape[1], image.shape[0]))\n\n def check_valid_range(val, max_val):\n \"\"\" check the coordinate of bbox is inside of an image\"\"\"\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val\n\n valid_obj_num = 0\n valid_obj_bbox = []\n\n for obj_info in infer_output:\n conf = obj_info['conf']\n # filter by the confidence\n if conf >= conf_thresh:\n # calculate bbox coordinate\n xmin = int(obj_info['x_min'] * image_width)\n ymin = int(obj_info['y_min'] * image_height)\n xmax = int(obj_info['x_max'] * image_width)\n ymax = int(obj_info['y_max'] * image_height)\n\n # round up into valid range\n xmin = check_valid_range(xmin, image_width)\n ymin = check_valid_range(ymin, image_height)\n xmax = check_valid_range(xmax, image_width)\n ymax = check_valid_range(ymax, image_height)\n\n # draw bbox\n cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)\n\n valid_obj_num += 1\n valid_obj_bbox.append((xmin, ymin, xmax, ymax))\n logger.debug(' - draw bbox [%d, %d, %d, %d] confidence: %f' % (xmin,ymin,xmax,ymax,conf))\n\n return out_image, valid_obj_num", "def post_process(probability,threshold,min_size):\n rects = []\n mask = cv2.threshold(probability,threshold,1,cv2.THRESH_BINARY)[1]\n num_component,component = cv2.connectedComponents(mask.astype(np.uint8))\n predictions = np.zeros((350,525),np.float32)\n num = 0\n for c in range(1,num_component):\n p = (component == c)\n print(\"p.sum(): {}\".format(p.sum()))\n if p.sum() > min_size:\n predictions[p] = 1\n num += 1\n if num > 0:\n mask_p = predictions.copy()\n contours,hierarchy = cv2.findContours(mask_p.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(contours,key=cv2.contourArea,reverse=True)[:num]\n for c in cnts:\n x,y,w,h = cv2.boundingRect(c)\n rects.append((x,y,w,h))\n print('rect {}'.format((x,y,w,h)))\n return predictions,num,rects", "def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [[0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2], # 512-inf\n ]\n assert area in areas, 
'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in range(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in range(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert (gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert (_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}" ]
[ "0.7311329", "0.7284363", "0.7052062", "0.69620794", "0.6808659", "0.67591953", "0.67545897", "0.6637153", "0.6572565", "0.6438189", "0.6292309", "0.6280824", "0.6277976", "0.62647426", "0.6221004", "0.6205099", "0.6132621", "0.61210424", "0.6105146", "0.6104222", "0.60812354", "0.6081201", "0.60793585", "0.6070803", "0.6066521", "0.6062417", "0.604653", "0.6041", "0.6035929", "0.6029426", "0.6026784", "0.6026784", "0.60226923", "0.60119104", "0.6001998", "0.599053", "0.5974398", "0.59722954", "0.59568614", "0.5955907", "0.59413767", "0.5938237", "0.59378284", "0.59358597", "0.5911856", "0.59064716", "0.58959645", "0.5888006", "0.58866155", "0.58850574", "0.58843905", "0.5878709", "0.5877824", "0.587697", "0.587697", "0.5872546", "0.586785", "0.5862789", "0.58577454", "0.5850379", "0.58436155", "0.58420044", "0.58412343", "0.5830044", "0.5795262", "0.57909787", "0.57814354", "0.5776811", "0.5761598", "0.574994", "0.57468784", "0.57372123", "0.57363147", "0.5726118", "0.57241696", "0.5717714", "0.5697416", "0.5690358", "0.56896526", "0.5688301", "0.5683496", "0.5680036", "0.5674077", "0.56720984", "0.56672114", "0.5656122", "0.5646726", "0.5645878", "0.5645107", "0.5636968", "0.5634166", "0.5624902", "0.56147397", "0.56119734", "0.5610464", "0.5606869", "0.5602008", "0.5599329", "0.5597197", "0.5595043", "0.5594566" ]
0.0
-1
Fill a dataframe row with the others
def filldf(df, response, sorted_selection, params_selection, constant=True, verbose=True):
    """Fill missing values of the `response` column of `df` from pairs of other columns.

    Each entry of `sorted_selection` is a pair of column names (e.g. two other stations)
    and the matching entry of `params_selection` holds the regression coefficients used
    to reconstruct the missing data. Requires pandas imported as ``pd``.
    """
    selections_iter = iter(sorted_selection)
    params_iter = iter(params_selection)
    idxmissing = df[response][df[response].isnull()].index  # select where data is missing
    print("Filling .... ")
    while len(idxmissing) > 0:
        print("There are [" + str(len(idxmissing)) + "] events missing")
        try:
            # Check whether there are still other stations to fill with
            selection = next(selections_iter)
            param = next(params_iter)
        except StopIteration:
            print("NO MORE SELECTED STATIONS")
            break
        try:
            X1 = df.loc[:, selection[0]]
            X2 = df.loc[:, selection[1]]
            select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()
            if constant:
                newdata = param[0] + param[1] * select['X1'] + param[2] * select['X2']  # reconstruct the data
            else:
                newdata = param[0] * select['X1'] + param[1] * select['X2']  # reconstruct the data
            df.loc[idxmissing, response] = newdata.loc[idxmissing]
            idxmissing = df[response][df[response].isnull()].index  # select where data is still missing
        except KeyError:
            if verbose:
                print('Selected stations ' + str(selection) + ' did not fill any events')
        except ValueError:
            if verbose:
                print('The variable ' + response + ' does not exist or there is no data for the multilinear regression')
    return df.loc[:, response]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_row(df, row):\n df.loc[df.shape[0]] = row", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def fill_row(row, x):\n row.append(x)\n return row", "def insert_row(self, row_value, index):\n row = pd.DataFrame(row_value, columns=['lat', 'long', 'alt', 'descr'])\n self.df = pd.concat([self.df.iloc[:index], row, self.df.iloc[index:]]).reset_index(drop=True)", "def fillup_x(self):\n assert not np.all(self.x == None)\n x_df = pd.DataFrame(self.x, columns=self.x_title)\n self.df = pd.concat([self.df, x_df], axis=1)", "def _populate_df(self, df, objs,):\n for obj in objs:\n for prop in df.columns:\n df.loc[obj.name, prop] = getattr(obj, prop)", "def zero_end_interpolation(df: pd.DataFrame):\n end = df.index[-1]\n empty_df = pd.DataFrame(index=np.arange(0, end + 1, 1))\n res = pd.concat([df, empty_df], axis=1)\n res = res.fillna(method='ffill')\n res = res.fillna(method='bfill')\n return res", "def append_row(self, values):\n self.range(self._op.max_row + 1, 1, len(values)).values = values", "def extend_dataset(intial_df):\n all_data = []\n for i,row in intial_df.iterrows():\n all_data.extend(create_all_combination(row))\n\n extended_results = pd.DataFrame(all_data)\n return extended_results", "def add_row(self, row):\n \n new_row = pd.DataFrame(data=[row], columns = self.table.columns) \n self.table = self.table.append(new_row, ignore_index=True)", "def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()", "def getNewDF_X(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF[['R1', 'G1', 'B1', 'R2', 'G2', 'B2', 'R3', 'G3', 'B3']].iloc[:] + unit\n new_temps[unit]['W1'] = originalDF['W1']\n new_temps[unit]['W2'] = originalDF['W2']\n new_temps[unit]['W3'] = originalDF['W3']\n returnVal = pd.concat(new_temps)\n return returnVal", "def append_row(row: pd.DataFrame, df: pd.DataFrame, to_top=True):\n # return pd.concat([row,df], keys=list(get_player_dict().keys())) # persist player_dict so don't have to call func each time\n return df.append(row)", "def repeat(df, n):\n return pd.concat([df] * n, ignore_index=True)", "def _add_rows(df, num, alloc_id, constraint, stuff=False):\n if num == 0:\n return df.copy()\n\n to_add = np.random.choice(df.index.values, num)\n rows_to_add = df.loc[to_add]\n\n # update the new rows' index\n max_idx = df.index.max()\n rows_to_add.index = range(max_idx + 1, max_idx + len(rows_to_add) + 1)\n\n # allocate rows to containers\n _allocate_rows(rows_to_add, alloc_id, constraint, stuff)\n\n return pd.concat([df, rows_to_add])", "def setRow(self, row):\n # Row of the database where the values of the variables are found\n self._row = row\n for e in self.children:\n e.setRow(row)", "def update_old_row(self, data):\n for key, value in data.items():\n _column = self._labels.index([v['display'] for k, v in self.headers.items() if k == key].pop())\n cell = self.item(self._opt_row, _column)\n _cell_data = cell.get_data()\n _cell_data[key] = value\n\n cell.set_content(value, _cell_data)", "def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n 
self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def sample_rows(df, nrows):", "def fill_df(pos, vel, acc, gamma, velmag, tvec, i):\n\n posx = pd.DataFrame(pos[:, 0], index=tvec, columns=[str(i)])\n posz = pd.DataFrame(pos[:, 1], index=tvec, columns=[str(i)])\n\n velx = pd.DataFrame(vel[:, 0], index=tvec, columns=[str(i)])\n velz = pd.DataFrame(vel[:, 1], index=tvec, columns=[str(i)])\n\n accx = pd.DataFrame(acc[:, 0], index=tvec, columns=[str(i)])\n accz = pd.DataFrame(acc[:, 1], index=tvec, columns=[str(i)])\n\n gamm = pd.DataFrame(gamma, index=tvec, columns=[str(i)])\n vmag = pd.DataFrame(velmag, index=tvec, columns=[str(i)])\n\n return posx, posz, velx, velz, accx, accz, gamm, vmag", "def insertLines(data):\n data = pd.DataFrame(data)\n for _,row in data.iterrows():\n insertLine(row)", "def setRow(self, row): \n self.row = row", "def df_add(df,index,column,value):\n\ttry:\n\t\tdf[column]\n\texcept:\n\t\tdf[column]=np.nan\n\ttry:\n\t\tdf.loc[index]\n\texcept:\n\t\tdf.loc[index]=np.nan\n\tdf.loc[index,column]=value\n\treturn df", "def extend_rows(self, assign):\n new_table = dict()\n for key, value in self._table.items():\n new_table[Assignment(key, assign)] = value\n\n self._table = new_table", "def flatten_df(df):\n flat_array = df.values.flatten()\n flat_df = pd.DataFrame(flat_array)\n flat_df.columns = [\"loan\"]\n flat_df[\"row_no\"] = flat_df.reset_index().index\n flat_df = flat_df[[\"row_no\", \"loan\"]]\n flat_df.row_no = flat_df.row_no // 100\n return flat_df", "def fillPositions(self):\r\n if self.th is not None:\r\n self.df['POSITION'] = self.th.positions['Qty']\r\n self.df['REGS'] = self.th.positions['REGS']\r\n self.df['144A'] = self.th.positions['144A']\r\n self.df['POSITION'].fillna(0, inplace=True)\r\n self.df['REGS'].fillna(0, inplace=True)\r\n self.df['144A'].fillna(0, inplace=True)\r\n self.df['RISK'] = -self.df['RISK_MID'] * self.df['POSITION'] / 10000.", "def set_row(self, index, values):\n try:\n idx = self.index_location(index)\n except (IndexError, ValueError):\n idx = self._add_row(index)\n\n column_values = self._column_value_getter(values)\n row = [column_values(values, column) for column in self._columns]\n\n self._data[idx] = row", "def expand_df(df, column):\n expanded2 = pd.DataFrame({\n col: np.repeat(df[col].values, df[column].str.len())\n for col in df.columns.drop(column)}\n ).assign(**{column: list(np.concatenate(df[column].values))})\n return expanded2", "def set_row( self, row, ):\n self.ix_row = row", "def set_data(self, df):\n self.df = df", "def appforth(df, line):\n df.loc[-1]=line\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n return df", "def correct_test_data(df):\n # The row that is in bot the train and test\n id_start = df['date'] == \"2016-09-13 00:00:00\"\n # We copy its information\n df.loc[id_start, :] = df[id_start].fillna(method='bfill')\n # We remove it then and reset the index\n df.drop(np.where(id_start)[0][-1], inplace=True)\n df.reset_index(drop=True, inplace=True)\n # Fill the summer hour info\n df.loc[:, 
'heure_ete'] = df[['heure_ete']].fillna(method='bfill')\n df.loc[:, 'heure_ete'] = df[['heure_ete']].fillna(method='ffill')\n return df", "def set_rows(self, values: List[str]):\n\n values_len = len(values)\n curr_row_count = len(self.row_layout.children())\n\n # adjust row count\n if values_len > curr_row_count:\n for _ in range(values_len - curr_row_count):\n self.add_row()\n elif values_len < curr_row_count:\n for _ in range(curr_row_count - values_len):\n last_row = self.row_layout.children()[-1]\n last_row.itemAt(1).widget().click()\n\n # set values\n idx = 0\n for row in self.row_layout.children():\n if self.possible_values is None:\n row.itemAt(0).widget().setText(values[idx])\n else:\n if values[idx] in self.possible_values:\n row.itemAt(0).widget().setCurrentIndex(self.possible_values.index(values[idx]))\n idx += 1", "def fifty_fifty(dataframe) -> pd.DataFrame:\n dataframe[\"allocation\"] = 0.5\n return dataframe", "def fill_zero(df):\n df = df.fillna(0)\n return df", "def __init__(self, rows, columns, fillValue = None):\n self.data = []\n for row in range(rows):\n dataInRow = []\n for column in range(columns):\n dataInRow.append(fillValue)\n self.data.append(dataInRow)", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def complete_data(self, df):\n\n # Make a copy of the df so we don't modify the original one\n new_df = df.copy()\n\n # For each week:\n for week in range(1, 9):\n # Get the players that don't appear in the week\n cur_players = list(new_df['Player Id'][new_df['week'] == week].unique())\n missing_players = [player for player in self.players if player not in cur_players]\n missing_values = {} # {player: {feature: value}}\n\n # For every missing player, add a new row for that week, that feature, sampling a value from the pdf\n for player in missing_players:\n missing_values[player] = {}\n\n for var in self.features:\n # Create distribution from statistics for that week, and that feature\n pdf = self.generate_distribution(new_df, var, week)\n\n # Store value\n missing_values[player][var] = pdf.rvs(1)[0]\n\n # Now add all the new missing values as rows to the old df\n missing_series = []\n for player, value_dict in missing_values.items():\n value_dict['Player Id'] = player\n value_dict['week'] = week\n missing_series.append(pd.Series(value_dict))\n\n new_df = new_df.append(pd.DataFrame(missing_series), ignore_index=True, sort=True)\n\n return new_df", "def rows(self, row):\n self.row += row", "def _values_to_row(self, values, row_idx):\n cell = WriteOnlyCell(self)\n\n for col_idx, value in enumerate(values, 1):\n if value is None:\n continue\n try:\n cell.value = value\n except ValueError:\n if isinstance(value, Cell):\n cell = value\n else:\n raise ValueError\n\n cell.column = col_idx\n cell.row = row_idx\n\n if cell.hyperlink is not None:\n cell.hyperlink.ref = cell.coordinate\n\n yield cell\n\n # reset cell if style applied\n if cell.has_style or cell.hyperlink:\n cell = WriteOnlyCell(self)", "def as_rows(self, rows: Iterable):\n try:\n out = self.from_rows(rows, columns=self.data.columns, meta_dict=self.meta)\n except AssertionError as exc:\n columns = self.data.columns.tolist()\n firstrow = next(iter(rows))\n raise RuntimeError(\n f\"Passed {len(columns)} columns {columns!r}, but \"\n f\"{len(firstrow)} elements in first row: {firstrow}\"\n ) from exc\n return out", "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = 
pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df", "def next_row(self, row: Row):\n column_types = self._deduct_types(row)\n self._update_nullable(row)\n if self._types is None:\n self._types = column_types\n else:\n if not self._is_consistent(column_types):\n raise SomeError('Inconsistent types')\n self._merge_with(column_types)", "def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)", "def _append():\n df = pd.DataFrame(np.arange(6).reshape(2, 3), index=[0, 1], columns=list('ABC'))\n print(df)\n df = df.append([{'A': 6, 'B': 7, 'C': 8}])\n print(df)\n df = df.append(pd.Series({'A': 9, 'B': 10, 'C': 11}, name=0), ignore_index=True)\n print(df)\n df['D'] = list(\"1234\")\n print(df)\n return", "def make_and_append_negative_data(self):\n negative_df = self.get_negative_data()\n self.df = pd.concat((self.df, negative_df))", "def start_pipeline(df):\n new_df = df.copy()\n new_df = new_df[[\"Title\", \"Genre\", \"Director\", \"Actors\", \"Plot\"]]\n return new_df", "def fillna_negtive1(df, target=None):\n if not target:\n target = ['price', 'image_top_1']\n for col in target:\n df[col] = df[col].fillna(-1)\n return None", "def _initialize_df(self, df):\n df['values'] = (self.tc.instrument_returns['cumulative'] *\n self.tc.starting_cash).mul(self.target_weights, axis=1).values * (1 - self.tc.commission)\n df['allocations'] = self.df['values'].div(df['values'].sum(axis=1), axis=0)\n df['returns'] = (df['values'].sum(axis=1)).pct_change(1).fillna(0)", "def filldf(df,features,CrossMethod):\n for i in CrossMethod.keys():\n for j in features:\n if i in j:\n p = j[1:-1].split(i)\n df[j] = CrossMethod[i](df[p[0]],df[p[1]])\n return df", "def set_data_by_rows(self, row_headers, column_headers, data_rows):\n self.tblGeneric.setRowCount(len(data_rows))\n self.tblGeneric.setColumnCount(len(data_rows[0]))\n\n if row_headers:\n self.tblGeneric.setVerticalHeaderLabels(row_headers)\n else:\n self.tblGeneric.verticalHeader().setVisible(False)\n if column_headers:\n self.tblGeneric.setHorizontalHeaderLabels(column_headers)\n else:\n self.tblGeneric.horizontalHeader().setVisible(False)\n\n row = 0\n for data_row in data_rows:\n col = 0\n for data_value in data_row:\n item = QTableWidgetItem(str(data_value))\n item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n self.tblGeneric.setItem(row, col, item)\n col += 1\n row += 1\n self.tblGeneric.resizeColumnsToContents()\n for row in range(self.tblGeneric.rowCount()):\n self.tblGeneric.setRowHeight(row, 10)", "def fill_numeric_data(df,neighbors = 2):\r\n imputer = KNNImputer(n_neighbors=neighbors, weights=\"uniform\")\r\n cols = df.columns\r\n filled_array = imputer.fit_transform(df)\r\n df_filled = pd.DataFrame(filled_array, columns = cols)\r\n return df_filled", "def add_data(self, df):\n # TODO: improve merging code\n self.data = self.data.append(df, ignore_index=False)\n self.data = self.data[~self.data.index.duplicated(keep='first')]", "def set_rows(self, rows):\r\n rows = [(row_id, parse_date(created)) for row_id, created in rows]\r\n self.get_history_for_student_modules.return_value = rows", "def add_city_state_to_dataframe(dataframe):\n 
dataframe[['city', 'state']] = dataframe.apply(parse_city_state_from_row,\n axis=1)\n dataframe = dataframe[dataframe.state != \"NULL\"]\n return dataframe", "def rosterRowData(self):", "def _set_right_info_row_values(self, row, values):\n print \"Row is \", row\n print \"Values are \", values\n for name in values.keys():\n self.header_values[name][row].set(values[name])", "def __init__(self, df):\n self.original_data = df\n self.cleaned_data = pd.DataFrame()", "def coerce( self ):\n df = self.copy()\n gcond = ['neighbor', 'pdb'] if 'source' not in df.columns else ['neighbor', 'pdb', 'source']\n for frame_id, frame in df.groupby('frame'):\n g = frame.groupby(gcond)\n neighbors = len(g)\n neighbor = list(g.ngroup() + 1)\n position = list(g.cumcount() + frame_id)\n df.loc[(df['frame'] == frame_id), 'neighbors'] = [neighbors] * frame.shape[0]\n df.loc[(df['frame'] == frame_id), 'neighbor'] = neighbor\n df.loc[(df['frame'] == frame_id), 'position'] = position\n return df", "def create_new_date_row(row, dt, df_station):\n new_row = row.copy()\n # Change fields... (temp_mean_c, temp_min_c, temp_max_c, datetime, date)\n #print(new_row)\n \n #print(dt)\n # Replace dates...\n new_row['datetime'] = dt\n new_row['location_date'] = pd.to_datetime(dt).to_pydatetime().strftime(\"%-m/%-d/%Y\")\n # Make temps nan\n new_row['temp_mean_c'] = interpolate_temperature(dt, df_station, field='temp_mean_c')\n new_row['temp_min_c'] = interpolate_temperature(dt, df_station, field='temp_min_c')\n new_row['temp_max_c'] = interpolate_temperature(dt, df_station, field='temp_max_c')\n\n #print(new_row)\n return new_row", "def test_04():\n growth = {\"Switzerland\": {\"2010\": 3.0, \"2011\": 1.8, \"2012\": 1.1, \"2013\": 1.9},\n \"Germany\": {\"2010\": 4.1, \"2011\": 3.6, \"2012\": 0.4, \"2013\": 0.1},\n \"France\": {\"2010\": 2.0, \"2011\": 2.1, \"2012\": 0.3, \"2013\": 0.3},\n \"Greece\": {\"2010\": -5.4, \"2011\": -8.9, \"2012\": -6.6, \"2013\": -3.3},\n \"Italy\": {\"2010\": 1.7, \"2011\": 0.6, \"2012\": -2.3, \"2013\": -1.9}\n }\n growth_frame = pd.DataFrame(growth)\n # growth_frame = growth_frame.reindex([\"2013\", \"2012\", \"2011\", \"2010\"])\n print(growth_frame)\n # Transpose the data\n print(growth_frame.T)", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def update_row(self):\n self.current_row = self.next_row\n self.next_row = self.get_row()", "def _fillinSheet(self,sheet,data,startrow=2):\n i = startrow-1\n for r in data:\n j = 0\n for c in r:\n sheet.write(i,j,c)\n j+=1\n i += 1", "def aitchison_transform_part(df):\n df_aitchison = multiplicative_replacement(df)\n #df_aitchison = closure(df)\n df_idx = df.index\n df_col = df.columns\n df_aitchison = pd.DataFrame(df_aitchison, index = df_idx, columns = df_col)\n return df_aitchison", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def insert(self, row_values):\n if len(row_values) != len(self.columns):\n raise TypeError(\"wrong number of elements\")\n\n self.rows += [dict(zip(self.columns, row_values))]", "def link_datadict_to_dataframe(row, dict):\n d = {}\n for k, v in dict.items():\n d[k] = [row[c] if row[c] == row[c] else None for c in v[\"COLUMN\"]]\n\n if len(d[k]) == 1:\n d[k] = d[k][0]\n elif None in d[k]:\n d[k] = None\n\n return d", "def add_facility_id_unit_id_epa(df):\n if \"facility_id\" not in df.columns:\n df[\"facility_id\"] = np.NaN\n if \"unit_id_epa\" not in df.columns:\n df[\"unit_id_epa\"] = np.NaN\n 
return df", "def fill_col(col, x):\n col.append(x)\n return col", "def fill_end_of_the_row(nono, row):\n ending = nono.limits.get_row_endings(row, -1)\n sth_changed = fill_range_in_row(nono, row,\n range(ending + 1, nono.meta_data.n_cols),\n -1)\n return sth_changed", "def append_to_BOM_df(df,bom):\n for i in range(len(bom)):\n df = df.append(bom[i],ignore_index=True)\n if i == len(bom)-1:\n df = df.append({'Site ID':\"\",'Code':\"\",'Description':\"\",'Quantity':\"\",'Discount':\"\",'Unit list':\"\",'Unit net':\"\",'Total Due':\"\"},ignore_index=True)\n return df", "def __setitem__(\n self,\n index: Union[int, slice],\n value: Union[_RowPrototype, Iterable[_RowPrototype]],\n ) -> None:\n if isinstance(index, slice):\n self._rows[index] = [D2TXTRow(self, row) for row in value]\n else:\n self._rows[index] = D2TXTRow(self, value)", "def fill_nid(df, nid_dict):\n assert 'year' in df.columns, \"DataFrame doesn't have a 'year' column\"\n df['nid'] = df['year'].map(nid_dict)\n return df", "def restructure_data(df_phys, res, ffill=False):\n import pandas as pd\n\n if not df_phys.empty and res != \"\":\n df_phys = df_phys.pivot_table(values=\"Physical Value\", index=pd.Grouper(freq=res), columns=\"Signal\")\n\n if ffill:\n df_phys = df_phys.ffill()\n \n return df_phys", "def rel_matrix(df_long: pd.DataFrame) -> None:\n pass", "def fill_mising(self, dict):\t\n\t\tfor name, df in dict.items():\n\t\t\tdf = df.fillna(method='pad')\n\t\t\tdict[name] = df\n\t\treturn dict", "def assemble_row(r1, r2):\n r1.extend(r2)\n return r1", "def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)", "def rows(self, rows):\n self.grid.items = rows[:]", "def _fit(self, df):\n return df", "def enrich_dataframe(df, name):\n if(name == 'taux_incidence'):\n df['taux_incidence'] = df['P']*100000/df['pop']\n if(name == 'taux_positivite'):\n df['taux_positivite'] = df['P']/df['T'] * 100\n if(name == 'taux_occupation'):\n df['TO'] = df['TO']*100\n if(name == 'vaccins_vaccines_couv_majeurs'):\n df['couv_complet'] = 100 * df['n_cum_complet'] / df['pop']\n if(name == 'vaccins_vaccines_couv_ado_majeurs'):\n df['couv_complet'] = 100 * df['n_cum_complet'] / df['pop']\n if(name == 'taux_classes_fermees'):\n df['taux_classes'] = 100* df['nombre_classes_fermees'] / df['nombre_total_classes']\n if(name == 'taux_structures_fermees'):\n df['taux_structures'] = 100* df['nombre_structures_fermees'] / df['nombre_total_structures']\n\n \n \n return df", "def append_rows(self, rows):\n for row in rows:\n self.append_row(row)", "def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return", "def add_data_from_columns_into_rows(columns: list, fixed_rows: list):\n for column in range(len(max(columns))):\n for row in range(len(columns)):\n try:\n fixed_rows[column].append(columns[row][column])\n except IndexError:\n fixed_rows[column].append('')\n return fixed_rows", "def clone(self):\n row = Row()\n row.copyFromRow(self)\n row._objId = self._objId\n \n return row", "def _populate_row_values(self, charts, all_data):\n # get needed charts\n charts = filter(lambda chart: chart['chart_type'] == 'line/bar' and chart['chart_include_method'] == 'selected values', charts)\n\n \n for chart in charts:\n populated_values = list()\n report_column = None\n # get data for chart\n if chart['report_data_set_pivot_id']:\n # it's pivot chart \n data = all_data[chart['report_data_set_pivot_id']]\n pivot = filter(lambda pivot: pivot['report_data_set_pivot_id'] == chart['report_data_set_pivot_id'], 
self._pivots)[0]\n # get column id from pivot settings\n if chart['bars_or_lines_created_for'] == 'column headers':\n column_id = pivot['pivot_column_value_column_id']\n else:\n column_id = pivot['pivot_row_value_column_id']\n \n \n # get column name \n res = self._db.Query(\"\"\"\n SELECT column_name,\n report_data_set_column_id,\n value_type AS `type` \n FROM report_data_set_column\n WHERE element_id = %s\n AND report_data_set_column_id = %s\"\"\", (self._id, column_id))\n if res:\n report_column = self._db.record[0]\n if report_column['type'] == 'text':\n if chart['bars_or_lines_created_for'] == 'column headers':\n for header in data['header'][1:-1]:\n if header['original_val'] != 'TOTAL':\n populated_values.append(header['original_val'])\n \n elif chart['bars_or_lines_created_for'] == 'row values':\n # get charting column\n orig_headers = [header['original_val'] for header in data['header']]\n if report_column['column_name'] in orig_headers:\n col_index = orig_headers.index(report_column['column_name'])\n #column = data['header'][col_index]\n # run all rows\n for row in data['rows']:\n if row[col_index]['original_val'] != 'TOTAL':\n populated_values.append(row[col_index]['original_val'])\n else:\n if chart['bars_or_lines_created_for'] == 'row values':\n # it's non-pivot chart\n data = all_data[0]\n # get charting column. it's first visible column\n col_index = 0\n for header in data['header']:\n if header['show_column_in_table_display_ind'] == 'Y':\n break\n col_index += 1\n column = data['header'][col_index]\n if column['type'] == 'text':\n # get column id \n res = self._db.Query(\"\"\"\n SELECT report_data_set_column.report_data_set_column_id \n FROM report_data_set_column\n WHERE report_data_set_column.element_id = %s\n AND report_data_set_column.column_name = %s\"\"\", (self._id, column['original_val']))\n \n if res:\n report_column = self._db.record[0]\n # run all rows\n for row in data['rows']:\n if row[col_index]['original_val'] != 'TOTAL':\n populated_values.append(row[col_index]['original_val']) \n \n if report_column and populated_values:\n format_strings = ','.join(['%s'] * len(populated_values))\n \n param = list(populated_values)\n param.append(report_column['report_data_set_column_id'])\n self._db.Query(\"\"\"SELECT report_data_set_row_value_id, row_value\n FROM report_data_set_row_value\n WHERE row_value IN(%s) AND\n report_data_set_column_id = %%s\n \"\"\"% format_strings,tuple(param))\n existed_values = []\n existed_values_ids = []\n for row in self._db.record:\n existed_values.append(row['row_value'])\n existed_values_ids.append(row['report_data_set_row_value_id'])\n # insert new values\n for value in populated_values:\n if not (value in existed_values):\n self._db.Query(\"\"\"INSERT INTO report_data_set_row_value\n SET row_value = %s,\n report_data_set_column_id = %s,\n last_updated_time = NOW()\n \"\"\",(value, report_column['report_data_set_column_id']))\n # update existed values\n if existed_values:\n format_strings = ','.join(['%s'] * len(existed_values))\n param = list(existed_values_ids)\n param.append(report_column['report_data_set_column_id'])\n self._db.Query(\"\"\"UPDATE report_data_set_row_value\n SET last_updated_time = NOW()\n WHERE report_data_set_row_value_id IN(%s) AND\n report_data_set_column_id = %%s\n \"\"\"% format_strings,tuple(param))", "def single_program_df(classify_df: pd.DataFrame) -> pd.DataFrame:\n return classify_df.iloc[[0], :].copy()", "def new_row( self, delta_row = 1, ):\n self.ix_row += delta_row\n self.ix_col = 0", "def 
fill_row(nono, row):\n # Filling inside the blocks\n changed_1 = fill_inside_of_the_blocks(nono, row)\n # Marking as empty beggining of the line\n changed_2 = fill_beggining_of_the_row(nono, row)\n # Marking as empty end of the line\n changed_3 = fill_end_of_the_row(nono, row)\n # Marking as empty area between blocks\n changed_4 = fill_between_the_blocks(nono, row)\n\n sth_changed = changed_1 or changed_2 or changed_3 or changed_4\n\n # if nonogram is in the interactive mode, update plot\n if nono.mode_data.is_interactive_plot_active() and sth_changed:\n nono.update_plot()\n\n return sth_changed", "def set_value(self, value):\n for row in self.rows:\n row.set_values(value)", "def expand_list_in_rows(df, columns=None, reset_index=True):\n if columns is None or columns == df.columns.tolist():\n # Using apply with pd.Series.explode is 30x faster than df.explode(columns)\n df_return = df.apply(pd.Series.explode)\n else:\n df_return = df.explode(columns)\n if reset_index is True:\n return df_return.reset_index(drop=True)\n return df_return", "def fill_hom(patient, gene):\n\n first = 'HR_' + patient + '_First_' + gene + '_Split'\n second = 'HR_' + patient + '_Second_' + gene + '_Split'\n\n for column in data.columns:\n f = re.match(second, column)\n if f:\n data[second] = data[second].fillna(data[first])\n else:\n pass", "def copy(self):\n new=DataTable(self.getName(),self.getColumnNames(),self.getColumnUnits())\n nlig=self.getNbRows()\n for i in range(nlig):\n lig=self.getRow(i)\n new.addRow(lig)\n pass\n return new", "def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:\n return data", "def test_add_new_no_dupl_w_optional(self):\n new_df = pd.DataFrame(np.eye(3) * 2, index=range(3, 6),\n columns=self.req_cols + self.opt_cols)\n self.table.add_new(new=new_df)\n self.assertEqual(len(self.table.index), 6)", "def add_row_to_dataset(self, row : Dict[str, Any]):\n row = {key : [row[key]] for key in row}\n if self.res_dataset is None:\n self.res_dataset = Dataset.from_dict(row) if self.format == \"arrow\" else pd.DataFrame(row)\n else:\n if self.format == \"arrow\":\n self.res_dataset = self.res_dataset.append(row)\n else:\n new_df = pd.DataFrame(row)\n self.res_dataset = pd.concat([self.res_dataset, new_df], ignore_index = True)\n\n self.save_accum += 1\n if self.save_accum >= self.save_every:\n self.save_dataset()\n self.save_accum = 0", "def reframe_df(previous_df, processed_data):\n idx = previous_df.index\n col = previous_df.columns\n df = pd.DataFrame(data=processed_data, index=idx, columns=col)\n return df", "def iter_rows_raw(self, *args):\n\n for row in super().iter_rows_raw(*args):\n row[0] = row[1] # sequential catalog index not right in this case; overwrite to match finder id\n yield row" ]
[ "0.65674084", "0.64855564", "0.64138365", "0.61660343", "0.6115377", "0.5761364", "0.5706634", "0.57038164", "0.56266713", "0.56095374", "0.5561263", "0.5560726", "0.55440325", "0.55188113", "0.5499481", "0.54974097", "0.5496469", "0.54918057", "0.549143", "0.54893225", "0.5451847", "0.5441361", "0.54314804", "0.54179907", "0.54152584", "0.5409858", "0.5407496", "0.54013705", "0.5386779", "0.53813124", "0.5380556", "0.53779614", "0.5377142", "0.53668827", "0.5343879", "0.53229916", "0.53173214", "0.5302011", "0.53016293", "0.5280382", "0.52778184", "0.52391887", "0.5233717", "0.523147", "0.5229054", "0.52266973", "0.5219172", "0.5213196", "0.5211001", "0.5206175", "0.52015454", "0.516251", "0.51580024", "0.51520044", "0.5137627", "0.5133284", "0.51293707", "0.5121769", "0.51203585", "0.51200974", "0.51178074", "0.5117678", "0.5113238", "0.51117855", "0.51056004", "0.51045185", "0.50971675", "0.5097097", "0.5095758", "0.5091514", "0.50909466", "0.5089578", "0.50891685", "0.5084841", "0.50686616", "0.5065494", "0.50654894", "0.50648856", "0.50641805", "0.50568926", "0.5056266", "0.5055222", "0.50537753", "0.50514346", "0.50500125", "0.5047275", "0.5046059", "0.50380534", "0.5037585", "0.50327814", "0.5027132", "0.50155735", "0.5007209", "0.50045365", "0.5000743", "0.49962243", "0.49951446", "0.4992977", "0.4990508", "0.4984372", "0.49758747" ]
0.0
-1
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
def getpredictors_distance(staname, distance):
    distfromsta = distance[staname]
    try:
        del distfromsta[staname]  # remove the station to be filled from the dataframe
    except:
        pass
    distfromsta = distfromsta.sort_values()
    stations = distfromsta.index

    sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])]  # predictor pairs with spacing 1
    sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])]  # predictor pairs with spacing 2

    # interleave the spacing-1 and spacing-2 pairs and keep the first four
    selection = [None] * (len(sel1) + len(sel2))
    selection[::2] = sel1
    selection[1::2] = sel2
    return selection[:4]
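As a usage note (not part of the dataset row): the function above interleaves the spacing-1 and spacing-2 neighbour pairs and keeps the first four. A minimal sketch, assuming the definition above is in scope; the station names and distances are invented for illustration only:

import pandas as pd

# Hypothetical pairwise distances (km) between five invented stations.
names = ['sta1', 'sta2', 'sta3', 'sta4', 'sta5']
values = [[0, 2, 5, 9, 14],
          [2, 0, 3, 7, 12],
          [5, 3, 0, 4, 9],
          [9, 7, 4, 0, 5],
          [14, 12, 9, 5, 0]]
distance = pd.DataFrame(values, index=names, columns=names)

pairs = getpredictors_distance('sta1', distance)
print(pairs)  # expected: [('sta2', 'sta3'), ('sta2', 'sta4'), ('sta3', 'sta4'), ('sta3', 'sta5')]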
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames", "def predict(x):\n file_train = open('trains.pkl', \"rb\")\n train = pkl.load(file_train)\n y = []\n k = 5\n x_train = train[0]\n y_train = train[1]\n for q in range(100):\n distance = []\n for i in range(800):\n distance.append(np.linalg.norm(x[q] - x_train[i]))\n\n # distance.append(np.sqrt(sum((x[q] - x_train[i]) ** 2)))\n # u = (x[0] - x_train) ** 2\n # print(distance)\n # distance = np.sqrt([sum(b) for b in u])\n # print(distance)\n minarg = np.argsort(distance)\n i = np.array(np.zeros(10))\n j = 0\n while k not in i:\n i[y_train[minarg[j]]] += 1\n j += 1\n y.append(np.argmax(i))\n return y", "def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings", "def predict(self, X):\n\n Xn = np.copy(X)\n\n preds = []\n # compute distance from all points\n for x1 in Xn:\n dist = self._euclidian_distance(self.X_data, x1)\n dist = np.vstack((dist, self.y)).T\n dist = dist[dist[:, 0].argsort(axis=0)][:,-1]\n # get a vote from top k\n pred = sts.mode(dist[0:self.k])[0][0]\n preds.append(pred)\n\n return np.array(preds)", "def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) 
was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions", "def predict(self, X):\n labels = []\n for i in range(0,len(X)):\n min_distance = distance.euclidean(X[i],self.best_medoids[0])\n min_distance_index = 0\n\n for j in range(1,len(self.best_medoids)):\n current_distance = distance.euclidean(X[i],self.best_medoids[j])\n if(current_distance < min_distance):\n min_distance = current_distance\n min_distance_index = j\n\n labels.append(min_distance_index)\n return labels\n\n pass", "def predict(self, predPoints=None):", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1] #subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1]# subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def get_prediction(data):\n # load cannabis data\n strains = pd.read_csv(URL)\n # Combine the Effects and Flavors in one column\n strains['Criteria'] = strains['Effects'] + ',' + strains['Flavor']\n\n # Train model on dtm\n nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')\n nn.fit(dtm)\n\n # load request data\n # r = data.args\n entry = [v for k,v in data.items()][1:]\n #print(entry)\n # transform\n new = tf.transform(entry)\n #print(new)\n results = nn.kneighbors(new.todense())\n #print(results)\n # extract top 5 results\n output = [strains['Strain'][results[1][0][i]] for i in range(5)]\n\n return output", "def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]", "def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train", "def 
predict_only(self):", "def get_predictors(self):\n\t\treturn self.predictors", "def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n indices = np.argsort(dists[i])[:k]\n closest_y = self.y_train[indices]\n y_pred_i = mode(closest_y)[0]\n y_pred[i] = y_pred_i\n return y_pred", "def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist", "def predict(self, test):\n test_data = np.asarray(test)\n assert self.x is not None and self.y is not None, \"You must train the classifier before testing\"\n results = []\n for i in range(test_data.shape[0]):\n m = self.x - test_data[i]\n # dist holds the Euclidean distance to every training point\n dist = np.sum(m*m, 1)\n # this call uses a quickselect algo to find k-smallest\n ind = np.argpartition(dist, self.k)[:self.k]\n # take the class present the most among the k closest\n out = int(scipy.stats.mode(self.y[ind], axis=None)[0])\n results.append(out)\n return results", "def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T", "def estimate_dists(self) -> np.array:\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )", "def predict(self,Xtest,nn_list):\n\n self.dist_calc(Xtest)\n xsize = self.dist.shape[0]\n ysize = self.ytrain.shape[1]\n ypred = []\n\n for nn in nn_list:\n\n yp = np.empty((xsize,ysize))\n\n if self.weights =='uniform':\n\n neigh_ind = self.ind[:,0:nn]\n\n for j in range(self.ytrain.shape[1]):\n\n mode = utilities.quick_mode_axis1_keep_nearest_neigh(\n self.ytrain[neigh_ind,j].astype(int))\n yp[:,j] = mode\n\n\n elif self.weights=='distance':\n dist = self.dist[:,0:nn]\n neigh_ind = self.ind[:,0:nn]\n W = 1./(dist+.000001) #to make sure we dont divide by zero\n\n for j in range(self.ytrain.shape[1]):\n mode, _ = utilities.weighted_mode(self.ytrain[neigh_ind,j].astype(int), W, axis=1)\n\n mode = np.asarray(mode.ravel(), dtype=int)\n\n yp[:, j] = mode\n\n ypred.append(yp)\n\n self.ypred = ypred\n\n return ypred", "def predict(self, dists, k=1):\n s = np.argsort(dists, axis=1)\n y_pred = np.zeros(dists.shape[0])\n for i in range(dists.shape[0]):\n y_pred[i] = np.argmax(np.bincount(self.ytr[s[i,:k]]))\n return y_pred", "def predict(self,X,y):\n self.X_test = X\n self.y_test = y\n d = []\n for i in range(self.X_train.shape[0]):\n d.append(self.get_distance(self.X_train.ix[i,:])) # hold all 
distances\n sorted = np.argsort(d)\n k_indices = np.argsort(d)[:self.k] # get indices with lowest distances\n predictions = self.y_train[k_indices]\n unique, counts = np.unique(predictions,return_counts=True)\n\n if (np.where(predictions ==1)[0].shape[0]) >self.p*self.k:\n y_pred = 1\n else:\n y_pred=0\n # {'sample':X_test.name,'d':d,'k_ix':k_indices,'pred':predictions,\n # 'counts':counts,'uniq':unique,'y_pred':y_pred,\n # 'y_test':self.y_test,'y_train':self.y_train,\n # 'sorted':sorted}\n return {'sample':self.X_test.name,\n 'y_pred':y_pred, \n 'y_test':self.y_test}", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def predict(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,0:nn]\n\n if self.weights == 'uniform':\n\n p = np.mean(self.ytrain[neigh_ind], axis=1)\n\n elif self.weights =='distance':\n\n p = np.empty((self.dist.shape[0], self.ytrain.shape[1]), dtype=np.float)\n\n for i in range(self.ytrain.shape[1]):\n p[:,i] = utilities.weighted_mean(self.ytrain[neigh_ind,i], self.dist[:,0:nn])\n\n ypred.append(p)\n\n self.ypred = ypred\n self.nn_list = nn_list\n return ypred", "def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n", "def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances", "def _predict_base(self, X):\n\n # Return the indices of the BMU which matches the input data most\n distances = []\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n\n for x in X:\n distance, prev_activation = self._get_bmus(x, prev_activation=prev_activation)\n distances.append(distance)\n\n return distances", "def _get_closest(centers, features):\n pred_labels = []\n\n features = features\n for feature in features:\n distances = End2End._dist(centers, feature)\n pred_labels.append(distances.argmin().item())\n\n return np.array(pred_labels)", "def predict(self, data):\n\t\treturn closestCluster(data, self.centers)", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n closest_y = self.y_train[np.argsort(dists[i])][0:k]\n closest_y = closest_y.astype(int)\n y_pred[i] = np.bincount(closest_y).argmax()\n return y_pred", "def distances(self):", "def predict(self, instances):\r\n raise NotImplementedError", "def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)", "def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds", "def predict(self, data):\r\n\r\n distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n return classification", "def allYandPred(self):\n y 
= []\n pred = []\n if self.use_dic:\n pb = progressbar.ProgressBar(self.n)\n for data in sorted(self.dic):\n for activity in sorted(self.dic[data]):\n for imsize in sorted(self.dic[data][activity]):\n for img in self.dic[data][activity][imsize]:\n labels = self.dic[data][activity][imsize][img]\n if len(labels) == 2:\n y.append(labels[0])\n pred.append(labels[1])\n else:\n return None\n pb.update()\n else:\n y, pred = self.getYandPred()\n return y, pred", "def predictors(self):\n if hasattr(self, '_predictors'):\n return self._predictors", "def predict(self, data: List):", "def predict(self, xs, **kwargs):", "def predictor(self, support_set, support_set_mean, queries, labels):\n dist = self.distance(support_set, support_set_mean, queries) \n\n # dist: 64, 5 (chars), 5 (distance between each char)\n logit = F.softmax(dist, dim=-1)\n # 64, 5 keys and the 64, 5 queries\n ce_loss = torch.nn.CrossEntropyLoss()\n loss = ce_loss(dist, labels)\n _, y_hat = torch.max(logit, -1)\n\n accuracy = torch.eq(y_hat, labels).float().mean()\n return loss, accuracy", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def mc_similarity_predict(embeddings, topk=1, distance='cosine_distance'):\n y_pred, y = [], []\n for i in range(len(embeddings)):\n # prediction\n distances = np.zeros(4)\n for j in range(0, 4):\n if distance == 'cosine_similarity': \n distances[j] = cosine_similarity(embeddings[i]['question'].reshape(1, -1), \n embeddings[i][f'choice_{j}'].reshape(1, -1))[0][0]\n elif distance == 'cosine_distance':\n distances[j] = cosine_distances(embeddings[i]['question'].reshape(1, -1), \n embeddings[i][f'choice_{j}'].reshape(1, -1))[0][0]\n if distance == 'cosine_similarity':\n if topk == 1:\n y_pred.append(np.argmax(distances))\n else:\n y_pred.append(np.argsort(distances)[::-1][:topk])\n elif distance == 'cosine_distance':\n if topk == 1:\n y_pred.append(np.argmin(distances))\n else:\n y_pred.append(np.argsort(distances)[:topk])\n else:\n raise ValueError(f'{distance} is not supported')\n \n \n # true labels\n y.append(embeddings[i]['correct_answer'])\n return y, y_pred", "def predict_distances(self, inputs, features=None):\n outputs = dict()\n if self.args.pose_model_type == \"shared\":\n # If we are using a shared encoder for both norm and pose,\n # then all images are fed separately through the norm encoder.\n images = torch.cat([inputs[(\"color_aug\", frame_id, 0)] for frame_id in self.args.frame_idxs])\n all_features = self.models[\"encoder\"](images)\n all_features = [torch.split(f, self.args.batch_size) for f in all_features]\n features = dict()\n for i, frame_id in enumerate(self.args.frame_idxs):\n features[frame_id] = [f[i] for f in all_features]\n outputs.update(self.models[\"norm\"](features[0]))\n else:\n # Otherwise, we only feed the target image through the norm encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0]) if features is None else features\n outputs.update(self.models[\"norm\"](features))\n\n return outputs, features", "def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred", "def dist_pred_dict(self, curr):\n dist = {}\n pred = {}\n for currency in self.currencies:\n dist[currency] = 
float('inf') # set all starting vertices to be infinite distance away\n pred[currency] = None\n\n dist[curr] = 0\n\n return dist, pred", "def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result", "def predictTest(k, train, test):\r\n\r\n pred_labels = []\r\n\r\n # for each instance in the testing dataset, calculate all L2 distance from all training instances\r\n for te in range(len(test)):\r\n all_D = np.zeros((len(train), 1))\r\n\r\n # calculate the L2 distance of the testing instance from each training instance\r\n for tr in range(len(train)):\r\n D = 0\r\n for var in range(len(train.columns)-1):\r\n # if feature is real-valued, add (testing value - training value)^2\r\n if train[var].dtype == np.float64 or train[var].dtype == np.int64:\r\n D += (test[var][te] - train[var][tr])**2\r\n # if feature is nominal, add 1 if testing and training values are different\r\n else:\r\n if test[var][te] != train[var][tr]:\r\n D += 1\r\n all_D[tr] = D**(1/2)\r\n\r\n # sort all L2 distances, select K closest neighbors, and choose the most prevalent label\r\n all_D = np.column_stack((all_D, np.array(range(len(train)))))\r\n all_D = all_D[np.argsort(all_D[:, 0])]\r\n prob_labels = train[len(train.columns)-1][all_D[0:k, 1]].as_matrix()\r\n pred_labels.append(Counter(prob_labels).most_common(1)[0][0])\r\n\r\n return pred_labels", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def pred_dset_weight_vote(fx_output, dset, fold_set):\n y_values = []\n for i,y in dset.split_idx[fold_set]:\n x,yi = dset.get(i)\n assert yi==y\n p = fx_output(dset.reshape_batch(x))\n y_pred = p.argmax(axis=1)\n y_weight = p.max(axis=1)\n h = np.bincount(y_pred,weights=y_weight)\n y_pred = h.argmax()\n y_values += [(y, y_pred)]\n \n return np.asarray(y_values)", "def predictedPace(gender,knowntime,distance,newdistance):\n data=[[1,1.82479886,0.15442097,2.02078846,0.152018],\\\n [2,2.81269131,0.24298622,2.94027249,0.19785733],\\\n [3.1,3.21439758,0.20199374,3.38934256,0.17998415],\\\n [4,3.48733053,0.17403963,3.63338083,0.15416951],\\\n [5,3.69988339,0.1828273,3.85497481,0.15342633],\\\n [6.2,3.92248345,0.17043469,4.08229446,0.15731007],\\\n [7,4.03585866,0.15658534,4.15885728,0.12602283],\\\n [8,4.1804725,0.13912475,4.29095087,0.11099218],\\\n [9.3,4.34819542,0.16160759,4.47210575,0.13358565],\\\n [10,4.39081844,0.16460379,4.52142994,0.13755774],\\\n 
[13.1,4.71140604,0.16332366,4.84067277,0.14722737],\\\n [18,5.08558166,0.15552566,5.19199923,0.13465799],\\\n [20,5.07063126,0.15512254,5.18039573,0.12522386],\\\n [26.2,5.50908488,0.18280742,5.62205952,0.16401895],\\\n [37.28,5.9248495,0.17540027,6.01767465,0.15617823],\\\n [50,6.18750376,0.13950345,6.23711374,0.11798467]]\n\n datalog=[[0.0,1.82479886,0.15442097,2.02078846,0.152018],\\\n [0.69314718056,2.81269131,0.24298622,2.94027249,0.19785733],\\\n [1.13140211149,3.21439758,0.20199374,3.38934256,0.17998415],\\\n [1.38629436112,3.48733053,0.17403963,3.63338083,0.15416951],\\\n [1.60943791243,3.69988339,0.1828273,3.85497481,0.15342633],\\\n [1.82454929205,3.92248345,0.17043469,4.08229446,0.15731007],\\\n [1.94591014906,4.03585866,0.15658534,4.15885728,0.12602283],\\\n [2.07944154168,4.1804725,0.13912475,4.29095087,0.11099218],\\\n [2.23001440016,4.34819542,0.16160759,4.47210575,0.13358565],\\\n [2.30258509299,4.39081844,0.16460379,4.52142994,0.13755774],\\\n [2.57261223021,4.71140604,0.16332366,4.84067277,0.14722737],\\\n [2.8903717579,5.08558166,0.15552566,5.19199923,0.13465799],\\\n [2.99573227355,5.07063126,0.15512254,5.18039573,0.12522386],\\\n [3.26575941077,5.50908488,0.18280742,5.62205952,0.16401895],\\\n [3.61845698982,5.9248495,0.17540027,6.01767465,0.15617823],\\\n [3.91202300543,6.18750376,0.13950345,6.23711374,0.11798467]]\n\n gender=gender.lower()\n distance=np.log(distance)\n \n imu=1\n isigma=2\n if gender=='f':\n imu=3\n isigma=4\n\n knownmu=my_interpol(datalog,imu,distance)\n knownsigma=my_interpol(datalog,isigma,distance)\n \n knownpercentage=slowerthan(knowntime,knownmu,knownsigma)\n\n newdistance=np.log(newdistance)\n newmu=my_interpol(datalog,imu,newdistance)\n newsigma=my_interpol(datalog,isigma,newdistance)\n return findTime(knownpercentage,newmu,newsigma)", "def knn(k, Xtrain, Ytrain, Xtest):\n d = euclidean_distances(Xtest, Xtrain, squared=True)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtest.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)", "def get_neighbors(training_set, \r\n labels, \r\n test_instance, \r\n k, \r\n distance=distance):\r\n distances = []\r\n for index in range(len(training_set)):\r\n dist = distance(test_instance, training_set[index])\r\n distances.append((training_set[index], dist, labels[index]))\r\n distances.sort(key=lambda x: x[1])\r\n neighbors = distances[:k]\r\n return neighbors", "def distance(keys_pred, keys_gt, key_num_gt):\n mask = (keys_gt > 1e-10).float()\n dif = keys_pred * mask - keys_gt\n err = dif.norm(dim=2)\n err = err.sum(dim=1)\n err = torch.div(err, key_num_gt)\n return err", "def get_classification_predictions(self):\n predictions = []\n for i, test_batch in enumerate(tqdm.tqdm(self.loader)):\n if self.tta_fn is not None:\n pred_out = self.tta_fn(batch=test_batch[0].cuda())\n else:\n # (batch_size, n_classes)\n pred_out = apply_nonlin(self.model(test_batch[0].cuda()))\n # for each prediction (1,) in pred_out (n, 4): post process\n for pred in pred_out:\n # (4, )\n probability = pred.cpu().detach().numpy()\n for prob_i in probability:\n # (1,)\n predictions.append(prob_i)\n return predictions", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n indices = np.argsort(dists[i])\n indices = indices[range(k)]\n closest_y = self.y_train[indices]\n counts = 
np.bincount(closest_y)\n y_pred[i] = np.argmax(counts)\n\n return y_pred", "def distance_metric(y_true, y_pred):\n diff = y_true - y_pred\n sqr = K.square(diff)\n total = K.sum(sqr, axis=1)\n return K.sqrt(total)", "def predict_multiple():\n req = request.json\n values = pd.DataFrame(data=req['data'])\n pred = clf2.predict(values)\n responses = {}\n for i, rep in enumerate(pred):\n if rep:\n responses[f\"Customer {i + 1}\"] = f\"Input: {', '.join([str(values.loc[i, :][j]) for j in range(values.shape[1])])}, Output: is likely to churn\"\n\n else:\n responses[f\"Customer {i + 1}\"] = f\"Input: {', '.join([str(values.loc[i, :][j]) for j in range(values.shape[1])])}, Output: is a loyal customer\"\n\n return make_response(jsonify(responses))", "def get_pred_ids(predictions):\n le_classes = ['Emotet', 'Mirai', 'Zeus'] \n malwares_dict = {'Emotet': 1, 'Mirai': 2, 'Zeus': 3}\n predicted_ids = []\n \n for idx in predictions:\n pred_name = le_classes[idx]\n pred_id = malwares_dict[pred_name]\n predicted_ids.append(pred_id)\n \n return predicted_ids", "def smaller(self):\n return [x for x in TransitiveIdeal(attrcall('pred'), [self])]", "def predicts(self, data_iter):\n predicteds = []\n logits = []\n\n all_corrects, all_loss, all_size = 0, 0, 0\n step = 0\n for feature, target in data_iter:\n step += 1\n # print(feature)\n # if self._cuda:\n # feature, target = feature.cuda(), target.cuda()\n\n logit = self._model(feature)\n predicted = torch.max(logit.data, 1)[1].view(target.size()).data\n # print(predicted)\n predicteds.extend(predicted)\n logits.extend(logit)\n loss = F.cross_entropy(logit, target, size_average=False)\n\n cur_loss = loss.data[0]\n all_loss += cur_loss\n cur_corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n all_corrects += cur_corrects\n print('Evaluation - average loss: {:.6f} average acc: {:.4f}%'.format(\n float(all_loss) / (int(all_size) + 1), 100 * float(all_corrects) / (int(all_size) + 1)))\n\n return predicteds, logits", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def clf_perceptron(vector_col:str,\n df_train:pd.DataFrame,\n model:Perceptron,\n ) -> list:\n\n #if return_ranking: return list(model.decision_function(df[vector_col].to_list()))\n \n return list(model.predict(df_train[vector_col].to_list()))", "def distances_to(self, pt):\n d = [pt.distance(a) for a in self]\n return np.array(d)", "def predict(self, X):\n \n if self.centers is None:\n raise Exception(\"Not fitted\")\n else:\n out = []\n for i in range(X.shape[0]):\n j_closest_center = self.metric.closest_neighbor_index(X[i,:], self.centers)\n out.append(self.centers[j_closest_center, :])\n \n return np.array(out)", "def discriminant(self, words):\n indicators = self.evaluate_indicators(words)\n indicators = array(indicators, ndmin=2) # I am fine with 1d arrays but scikit-learn raises deprecation warning\n prediction = self.model.predict(indicators)\n return prediction", "def distance(self):\n try:\n import pdb\n pdb.set_trace()\n s = []\n x0,y0 = self.deriv.T\n for thing in everything:\n x1,y1 = thing.deriv.T\n r,p = pearsonr(y0,y1)\n s.append(( p,thing.label ))\n s.sort()\n #print s[-5:]\n print s\n 
except:\n return np.inf", "def predicts(self,X):\n return [self.predict(x) for x in X]", "def compare_distance(model):\n\n dists = []\n outputs = []\n paths = 'images/person/'\n\n for i in range(6):\n img = paths + str(i) + '.jpg'\n image = cv2.imread(img)\n image = process_image(image)\n\n output = model.predict(image)[0]\n outputs.append(output)\n\n vec1 = outputs[0]\n for vec2 in outputs:\n dist = np.linalg.norm(vec1 - vec2)\n dists.append(dist)\n\n print(dists[1:])\n\n plt.bar(range(1, 6), (dists[1:]), color='lightblue')\n plt.xlabel('Person')\n plt.ylabel('Euclidean distance')\n plt.title('Similarity')\n plt.grid(True)\n plt.show()", "def predict(self, X) -> List[str]:\n # Get docID of nearest neighbours\n nn = self.vsm.search(X, limit=self.k)\n\n # Create list of concatenation of all topics, including duplicates\n topics = []\n for docID in nn:\n index = self.docIDs_train[self.docIDs_train == docID].index[0]\n topics += self.Y_train.iloc[index]\n\n # Assign prediction as most common topics that make up at least 50% of the topic labels\n n = len(topics)\n total_prob = 0\n results = []\n topics = Counter(topics).most_common()\n for (topic, count) in topics:\n results.append(topic)\n total_prob += count / n\n if total_prob > 0.5:\n break\n\n return results", "def knnSame(k, Xtrain, Ytrain):\n d = euclidean_distances(Xtrain, squared=True)\n np.fill_diagonal(d, np.inf)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtrain.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)", "def distance_score(x_embeddings_test, x_embeddings_train, y_true_train, K=50):\n num_samples = x_embeddings_test.shape[0]\n num_classes = y_true_train.shape[1]\n y_test_confidence = []\n for i in range(num_samples):\n sample_embedding = x_embeddings_test[i]\n distances = np.square(sample_embedding - x_embeddings_train).sum(axis=-1)\n K_nn = np.argsort(distances)[:K]\n K_nn_distances = np.exp(-np.sqrt(distances[K_nn]))\n K_nn_labels = y_true_train[K_nn, :]\n\n class_indicators = np.eye(num_classes)\n classes_masks = np.matmul(class_indicators, np.transpose(K_nn_labels))\n\n # foreach class we mask away the samples in Knn that belong to other classes\n class_samples_distances = classes_masks * np.expand_dims(K_nn_distances, axis=0) # this gives num_classes X K (100 X 50 matrix)\n sum_distances = np.sum(K_nn_distances)\n D_x = np.sum(class_samples_distances, axis=-1)/sum_distances\n\n y_test_confidence.append(D_x)\n\n return np.vstack(y_test_confidence)", "def get_distance_metrics():\n\n return [HausdorffDistance(),\n AverageDistance(),\n MahalanobisDistance(),\n VariationOfInformation(),\n GlobalConsistencyError(),\n ProbabilisticDistance()]", "def predict_rent(seed):\n X_train, X_test, y_train, y_test, catnums, raw_df = \\\n get_data(\"https://ndownloader.figshare.com/files/7586326\", seed)\n pipe = model_pipeline(catnums)\n pipe.fit(X_train, y_train)\n y_pred = pipe.predict(X_test)\n X_test_index = pd.DataFrame(index=X_test.index)\n return raw_df.join(X_test_index, how='inner').values, y_test.values, y_pred", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def predict_tree(self, testing_data, average=False):\n predictions = []\n for point in testing_data:\n # Loop over each point and find it's k-nearest neighbors\n k_nearest = self.kd_tree.return_nearest_k(point, self.k)\n targets = [self.targets[n.node[1]] for n 
in k_nearest]\n if average:\n predictions.append(round(np.average(targets)))\n else:\n unique, counts = np.unique(targets, return_counts=True)\n max_index = np.argmax(counts)\n predictions.append(unique[max_index])\n return predictions", "def get_neighbor_classes(self, observation: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n distances = np.sqrt(np.sum((self.X_train - observation)**2, axis=1))\n\n # Create an array of training set indices ordered by their\n # distance from the current observation\n indices = np.argsort(distances, axis=0)\n\n selected_indices = indices[:self.k]\n return self.y_train[selected_indices], distances[selected_indices]", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def getPredictorList(self):\n return self.non_miList + self.miList", "def predict(self, x):\n \n\n return predictions", "def pred_dset_bin_vote(fx_output, dset, fold_set):\n labels = []\n for i,y in dset.split_idx[fold_set]:\n x,yi = dset.get(i)\n assert yi==y\n p = fx_output(dset.reshape_batch(x))\n y_pred = np.bincount(p.argmax(axis=1)).argmax()\n labels += [(y, y_pred)]\n \n return np.asarray(labels)", "def nearest_district(df_training, df_test, categories, training_embedding, test_embedding):\n # Creates an empty matrix distance.\n labels = [None] * len(test_embedding)\n for index, test in enumerate(test_embedding):\n # Recover the non-zero indexes.\n non_zero_indexes = np.nonzero(test)[0]\n labels[index] = np.argmin(euclidean_distances(test[non_zero_indexes].reshape(1, -1),\n training_embedding[:, non_zero_indexes]))\n return labels", "def predict(self, samples):\n distance_mat = self.get_distance(samples, self.cluster_centers)\n probs = self._calculate_cluster_probs(distance_mat, self.T_min)\n return probs", "def calculate_distances(train_data, test_datum):\n n = train_data.shape[0]\n dist = []\n for i in range(n):\n distance = np.sqrt(np.sum(np.square(train_data[i]-test_datum)))\n dist.append(distance)\n dist = np.asarray(dist)\n return dist", "def all_predictors():\n from operator import itemgetter\n\n from ..util import itersubclasses\n\n predictors = sorted(\n ((s, s.__name__) for s in itersubclasses(Predictor)), key=itemgetter(1)\n )\n return list(zip(*predictors))[0]", "def single_predict_proba(self, vec, n_nearest):\n\n most_sim_ind = self.annoy_index.get_nns_by_vector(vec, n_nearest)\n most_similar_doc_ids = [self.document_ids[x] for x in most_sim_ind]\n return self.ids2class.loc[most_similar_doc_ids].mean().\\\n sort_values(ascending=False)", "def predict(self, reps):\n return [self.classes_[self.predict_one(rep)] for rep in reps]", "def get_nearest_neighbors ( self, distances: List [ float ] ):\n \n return pipe (\n # Map index to distance\n dict ( enumerate ( distances ) ),\n # Sort the indices based on their value in the mapping and take the 1st k\n lambda distance_map: sorted ( distance_map, key = distance_map.get ) [: self.k ],\n ) # End get_nearest_neighbors()", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def predict(self, X, k=None):\n \n if not hasattr(self, 'n_neighbors'):\n self.fit(X)\n \n if k is None:\n k = self.n_neighbors\n else:\n k = check_n_neighbors(k, X.shape[0])\n \n distances, _ = self.nbrs.kneighbors(X, n_neighbors=k+1)\n #distances = distances[:, 
1:]\n distances[distances[:, 0] == 0., :-1] = distances[distances[:, 0] == 0., 1:]\n distances = distances[:, :-1]\n \n return distances.mean(axis=1)", "def _kendall_distance(Y_true, Y_pred, normalize=True, sample_weight=None):\n (n_samples, n_classes) = Y_true.shape\n dists = np.zeros(n_samples)\n\n for sample in range(n_samples):\n for f_class in range(n_classes - 1):\n for s_class in range(f_class + 1, n_classes):\n a = Y_true[sample, f_class] - Y_true[sample, s_class]\n b = Y_pred[sample, f_class] - Y_pred[sample, s_class]\n\n if a * b < 0:\n dists[sample] += 1\n\n if normalize:\n dists[sample] /= n_classes * (n_classes-1) / 2\n\n return np.average(a=dists, weights=sample_weight)", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def make_doppelganger_vs_clusters(n_clusters_considered,X,X_occam,n_repeats):\n res = []\n for n_clusters in n_clusters_considered:\n res.append([])\n for _ in range(n_repeats):\n X_restricted,restricted_idxs = get_n_random_clusters(X_occam,n_clusters)\n print(X.val.shape)\n print(X_restricted.val.shape)\n evaluator_X = evaluators.EvaluatorWithFiltering(X,X_restricted,leave_out=True,fitter_class=standard_fitter,valid_idxs=valid_idxs[restricted_idxs])\n res[-1].append(evaluator_X.weighted_average) \n return res", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,3))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,3))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def _predict_proba(self, X):\n preds = self._predict(X)\n n_instances = len(preds)\n if hasattr(self, \"n_clusters\") and self.n_clusters is not None:\n n_clusters = self.n_clusters\n else:\n n_clusters = max(preds) + 1\n dists = np.zeros((X.shape[0], n_clusters))\n for i in range(n_instances):\n dists[i, preds[i]] = 1\n return dists", "def getYandPred(self):\n y = []\n pred = []\n if self.use_dic:\n for k in self.alllabels:\n if len(k) == 2:\n y.append(k[0])\n pred.append(k[1])\n else:\n return None\n else:\n for k in self.dic.values():\n if len(k) == 2:\n y.append(k[0])\n pred.append(k[1])\n else:\n return None\n return y, pred", "def predict(self, X):", "def predict(self, X):", "def _get_dst(self, df_train, df_test):\n #train NearestNeighbors(Unsupervised learner)\n neigh = NearestNeighbors(1)\n neigh.fit(df_train[['longitude', 'latitude']])\n #find the K-neighbors of points in df_test\n distances, indices = neigh.kneighbors(df_test[['longitude', 'latitude']])\n return distances", "def distance(dataset: Dataset) -> Dict[str, List[Tuple[int, float]]]:\n distances: Dict[str, List[Tuple[int, float]]] = {}\n\n images = dataset.images\n annotations = dataset.annotations\n\n for image in images:\n distances[image] = []\n image_area = img_area(image)\n\n for detection in annotations[image]:\n distances[image].append((detection.class_index, detection.bounds.area / image_area))\n\n return distances" ]
[ "0.63841313", "0.59920394", "0.5903511", "0.5846713", "0.5797452", "0.5794031", "0.5764465", "0.57157874", "0.57146764", "0.571108", "0.5700389", "0.56553006", "0.5652828", "0.5636438", "0.56238115", "0.5598508", "0.55349874", "0.55291295", "0.55164886", "0.5492974", "0.54692423", "0.5461225", "0.5450445", "0.542296", "0.5393487", "0.5379403", "0.537781", "0.5301012", "0.52867323", "0.52719057", "0.5259735", "0.5248266", "0.5247325", "0.52317524", "0.5227176", "0.52208805", "0.5220796", "0.52146065", "0.5190949", "0.5181991", "0.518092", "0.5177115", "0.51767176", "0.5174934", "0.51736057", "0.5171339", "0.51605666", "0.5151517", "0.51314944", "0.5128932", "0.5125497", "0.51246095", "0.51215744", "0.5119611", "0.51101583", "0.51090586", "0.5104639", "0.5104018", "0.5090774", "0.50895077", "0.50895053", "0.50759816", "0.5075226", "0.5067342", "0.50653744", "0.506135", "0.505967", "0.5051794", "0.504817", "0.50479245", "0.5044842", "0.50419647", "0.50402355", "0.5039397", "0.5039349", "0.503239", "0.5032145", "0.5025802", "0.50232464", "0.501578", "0.5014631", "0.50121003", "0.50114083", "0.50079376", "0.50021404", "0.49951145", "0.49921095", "0.4989966", "0.49875852", "0.49823922", "0.4982152", "0.49804184", "0.49797863", "0.49793372", "0.497927", "0.4978362", "0.49773735", "0.49773735", "0.4975811", "0.49745715" ]
0.6556841
0
DESCRIPTION Check every variable of every station and try to fill it with the variables of the two nearest stations at every time step. INPUT
def fillstation(self, stanames, all=None, plot=None, summary=None, From=None, To=None, by=None, how='mean', variables=None, distance=None, sort_cor=True, constant=True, cor_lim=None): if all == True: stations = self.network.getsta([], all=True).values() else: stations = self.network.getsta(stanames) for station in stations: staname = station.getpara('stanames') if variables == None: newdataframe = station.getData(reindex=True, From=From, To=To, by=by, how=how) # Dataframe which stock the new data of the stations newdataframe['U m/s'] = station.getData('U m/s', reindex=True, From=From, To=To, by=by, how=how) newdataframe['V m/s'] = station.getData('V m/s', reindex=True, From=From, To=To, by=by, how=how) newdataframe['Ua g/kg'] = station.getData('Ua g/kg', reindex=True, From=From, To=To, by=by, how=how) newdataframe['Theta C'] = station.getData('Theta C', reindex=True, From=From, To=To, by=by, how=how) variables_name = newdataframe.columns else: newdataframe = station.getData(var=variables, reindex=True, From=From, To=To, by=by, how=how) # Dataframe which stock the new data of the stations variables_name = variables # select and sort nearest stations selections, selectionsnames = self.__getpredictors_distance(staname, distance) for var in variables_name: print("I" * 30) print("variable -> " + var) try: selections, params = self.__sort_predictors_by_corr(station, selections, var, From, To, by, how, constant=constant, selectionsnames=selectionsnames, sort_cor=sort_cor, cor_lim=cor_lim) selections_iter = iter(selections) params_iter = iter(params) # print newdataframe idxmissing = newdataframe[var][ newdataframe[var].isnull() == True].index # slect where their is missing data while len(idxmissing) > 0: print("Their is [" + str(len(idxmissing)) + "] events missing") try: # Try if their is still other stations to fill with selection = selections_iter.next() param = params_iter.next() except StopIteration: print("NO MORE SELECTED STATIONS") break try: Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled X1 = selection[0].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill X2 = selection[1].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna() if constant: newdata = param[0] + param[1] * select['X1'] + param[2] * select[ 'X2'] # reconstruct the data else: newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data newdataframe.loc[idxmissing, var] = newdata.loc[idxmissing, var] idxmissing = newdataframe[var][ newdataframe[var].isnull() == True].index # slect where their is missing data except KeyError: print("&" * 60) print('Selected stations did not fill any events') except ValueError: print('The variable ' + var + "Does not exist or no data to do the multilinear regression ") if plot == True: df = pd.concat([Y, X1, X2, newdata, newdataframe[var]], keys=['Y', 'X1', 'X2', 'estimated data', 'Estimated replaced'], axis=1, join='outer') self.plotcomparison(df) print("Their is [" + str(len(idxmissing)) + "] FINALLY events missing") # Recalculate the wind direction and speed from the U an V components try: speed, dir = cart2pol(newdataframe['U m/s'], newdataframe['V m/s']) newdataframe['Dm G'] = dir newdataframe['Sm m/s'] = speed except ValueError: print 'No wind found in the dataframe' except KeyError: print('No wind found in the dataframe') self.newdataframes[staname] = newdataframe
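A minimal sketch of the fill step performed above, assuming only pandas and numpy: regress the target station's variable on its two nearest neighbours over the timestamps where all three stations report, then replace the missing values with the fitted prediction. The helper name fill_from_two_neighbors and the sample series are illustrative assumptions, not taken from the dataset.

import numpy as np
import pandas as pd

def fill_from_two_neighbors(y: pd.Series, x1: pd.Series, x2: pd.Series) -> pd.Series:
    """Return y with NaNs replaced by c0 + c1*x1 + c2*x2 fitted on common data."""
    df = pd.concat([y, x1, x2], axis=1, keys=["y", "x1", "x2"])
    train = df.dropna()                                   # timestamps where all three stations report
    A = np.column_stack([np.ones(len(train)), train["x1"], train["x2"]])
    coeffs, *_ = np.linalg.lstsq(A, train["y"].values, rcond=None)
    pred = coeffs[0] + coeffs[1] * df["x1"] + coeffs[2] * df["x2"]
    return y.fillna(pred)                                 # gaps stay NaN where the neighbours are also missing

# Illustrative hourly data (values are made up).
idx = pd.date_range("2014-01-01", periods=6, freq="H")
target = pd.Series([20.1, np.nan, 21.0, np.nan, 22.3, 22.8], index=idx)
near1 = pd.Series([19.8, 20.2, 20.9, 21.5, 22.1, 22.6], index=idx)
near2 = pd.Series([20.4, 20.7, 21.3, 21.9, 22.5, 23.0], index=idx)
print(fill_from_two_neighbors(target, near1, near2))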
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)", "def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)", "def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)", "def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n 
id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations", "def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST 
switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp", "def run(self):\n # Cache parameters and arrays\n nstat = self.north.shape[1]\n ind = self.istart\n solver = self.solver\n cutoff = self.cutoff\n shared = self.shared\n\n # Check if penalties are arrays\n arrflag = [isinstance(arr, np.ndarray) for arr in [self.penn,self.pene,self.penu]]\n arrflag = reduce(operator.mul, arrflag, 1)\n\n # Loop over my portion of GPS stations\n for jj in range(nstat):\n # Unpack component-wise indices of valid observations\n bool_east, bool_north, bool_up = self.bool_list[jj]\n # Extract valid observations\n dnorth, deast, dup = (self.north[bool_north,jj], \n self.east[bool_east,jj], \n self.up[bool_up,jj])\n wn, we, wu = (self.wn[bool_north,jj], \n self.we[bool_east,jj], \n self.wu[bool_up,jj])\n Gn, Ge, Gu = self.G[bool_north,:], self.G[bool_east,:], self.G[bool_up,:]\n # Perform estimation and store weights\n if arrflag:\n northPen, eastPen, upPen = self.penn[jj,:], self.pene[jj,:], self.penu[jj,:]\n else:\n northPen, eastPen, upPen = self.penn, self.pene, self.penu\n shared.m_north[:,ind], qn = solver.invert(dmultl(wn,Gn), wn*dnorth, northPen)\n shared.m_east[:,ind], qe = solver.invert(dmultl(we,Ge), we*deast, eastPen)\n shared.m_up[:,ind], qu = solver.invert(dmultl(wu,Gu), wu*dup, upPen)\n # Now modify the shared penalty array\n if arrflag:\n shared.penn[ind,:] = qn[cutoff:]\n shared.pene[ind,:] = qe[cutoff:]\n shared.penu[ind,:] = qu[cutoff:]\n ind += 1\n\n # done\n return", "def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely", "def multi_velo_inspec(self, n = 60, lat0 = 60, lat1 = 90, pole = \"north\"):\n inds = self.mlat_finder(lat1, lat0, pole)[1]\n NeA = self.NeA[inds]\n NeB = self.NeB[inds]\n NeC = self.NeC[inds]\n\n secondsA = self.secondsA[inds]\n secondsB = self.secondsB[inds]\n secondsC = self.secondsC[inds]\n\n\n mlatA = self.mlatA[inds]\n mlatB = self.mlatB[inds]\n mlatC = self.mlatC[inds]\n\n mean_range = 5\n NeA = self.meanie(NeA, mean_range)\n NeB = self.meanie(NeB, mean_range)\n NeC = self.meanie(NeC, mean_range)\n \n \n N = int((len(NeA)/n*2) - 1) #nr of windows\n \n dx = (secondsB[1]-secondsB[0])*self.velB[0]\n \n nBAs = []\n nBCs = []\n nACs = []\n \n for i in range(N):\n startind = int(i/2*n)\n stopind = int((i/2+1)*n)\n temp_NeA = NeA[startind:stopind]\n temp_NeB = NeB[startind:stopind]\n temp_NeC = NeC[startind:stopind]\n \n temp_secondsA = secondsA[startind:stopind]\n temp_secondsB = secondsB[startind:stopind]\n temp_secondsC = secondsC[startind:stopind]\n \n \n curr_timediff = np.round((temp_secondsB[1:] - temp_secondsB[:-1])-(1/self.fs))\n if 
np.sum(curr_timediff) > 2:\n continue\n \n gradA = (temp_NeA[1:] - temp_NeA[:-1])/dx\n gradB = (temp_NeB[1:] - temp_NeB[:-1])/dx\n gradC = (temp_NeC[1:] - temp_NeC[:-1])/dx\n \n if np.max(gradA) < 0.9:\n continue\n \n stdA = np.std(gradA)\n stdB = np.std(gradB)\n stdC = np.std(gradC)\n \n meanA = temp_secondsB[np.where(gradA == np.max(gradA))][0]\n meanB = temp_secondsB[np.where(gradB == np.max(gradB))][0]\n meanC = temp_secondsB[np.where(gradC == np.max(gradC))][0]\n \n p0A = [1, meanA, stdA]\n p0B = [1, meanB, stdB]\n p0C = [1, meanB, stdB]\n \n poptA, pcovA = curve_fit(self.gaussian, temp_secondsB[:-1], gradA, p0 = p0A)\n poptB, pcovB = curve_fit(self.gaussian, temp_secondsB[:-1], gradB, p0 = p0B)\n poptC, pcovC = curve_fit(self.gaussian, temp_secondsB[:-1], gradC, p0 = p0C)\n \n nBA = poptB[1] - poptA[1]\n nBC = poptB[1] - poptC[1]\n nAC = poptA[1] - poptC[1]\n \n nBAs.append(nBA)\n nBCs.append(nBC)\n nACs.append(nAC)\n \n \n sBA = self.BA_shift/2 #time delay BA\n sBC = self.BC_shift/2 #time delay BC\n sAC = (self.BC_shift - self.BA_shift)/2\n V = self.velA[0]\n for i in range(len(nBAs)):\n VBA = self.along_track_velo(V, sBA, nBAs[i])\n VBC = self.along_track_velo(V, sBC, nBCs[i])\n VAC = self.along_track_velo(V, sAC, nACs[i])\n \n print(VBA)\n print(VBC)\n print(VAC)\n print(\"________________________________________\")", "def check_latlon(self):\n\n for station in list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". Old value : \" + str(station_def['lon'])\n ))", "def gpt2_1w (station, dmjd,dlat,dlon,hell,it):\n\n# need to find diffpod and difflon\n if (dlon < 0):\n plon = (dlon + 2*np.pi)*180/np.pi;\n else:\n plon = dlon*180/np.pi;\n# transform to polar distance in degrees\n ppod = (-dlat + np.pi/2)*180/np.pi; \n\n# % find the index (line in the grid file) of the nearest point\n# \t % changed for the 1 degree grid (GP)\n ipod = np.floor(ppod+1); \n ilon = np.floor(plon+1);\n \n# normalized (to one) differences, can be positive or negative\n#\t% changed for the 1 degree grid (GP)\n diffpod = (ppod - (ipod - 0.5));\n difflon = (plon - (ilon - 0.5));\n\n\n# change the reference epoch to January 1 2000\n print('Modified Julian Day', dmjd)\n dmjd1 = dmjd-51544.5 \n\n pi2 = 2*np.pi\n pi4 = 4*np.pi\n\n# mean gravity in m/s**2\n gm = 9.80665;\n# molar mass of dry air in kg/mol\n dMtr = 28.965E-3 \n# dMtr = 28.965*10^-3 \n# universal gas constant in J/K/mol\n Rg = 8.3143 \n\n# factors for amplitudes, i.e. 
whether you want time varying\n if (it==1):\n print('>>>> no refraction time variation ')\n cosfy = 0; coshy = 0; sinfy = 0; sinhy = 0;\n else: \n cosfy = np.cos(pi2*dmjd1/365.25)\n coshy = np.cos(pi4*dmjd1/365.25) \n sinfy = np.sin(pi2*dmjd1/365.25) \n sinhy = np.sin(pi4*dmjd1/365.25) \n cossin = np.matrix([1, cosfy, sinfy, coshy, sinhy])\n# initialization of new vectors\n p = 0; T = 0; dT = 0; Tm = 0; e = 0; ah = 0; aw = 0; la = 0; undu = 0;\n undul = np.zeros(4)\n Ql = np.zeros(4)\n dTl = np.zeros(4)\n Tl = np.zeros(4)\n pl = np.zeros(4)\n ahl = np.zeros(4)\n awl = np.zeros(4)\n lal = np.zeros(4)\n Tml = np.zeros(4)\n el = np.zeros(4)\n#\n pgrid, Tgrid, Qgrid, dTgrid, u, Hs, ahgrid, awgrid, lagrid, Tmgrid = read_4by5(station,dlat,dlon,hell)\n#\n for l in [0,1,2,3]:\n KL = l #silly to have this as a variable like this \n# transforming ellipsoidal height to orthometric height:\n# Hortho = -N + Hell\n undul[l] = u[KL] \n hgt = hell-undul[l] \n# pressure, temperature at the height of the grid\n T0 = Tgrid[KL,0] + Tgrid[KL,1]*cosfy + Tgrid[KL,2]*sinfy + Tgrid[KL,3]*coshy + Tgrid[KL,4]*sinhy;\n tg = float(Tgrid[KL,:] *cossin.T)\n# print(T0,tg)\n\n p0 = pgrid[KL,0] + pgrid[KL,1]*cosfy + pgrid[KL,2]*sinfy + pgrid[KL,3]*coshy + pgrid[KL,4]*sinhy;\n \n# humidity \n Ql[l] = Qgrid[KL,0] + Qgrid[KL,1]*cosfy + Qgrid[KL,2]*sinfy + Qgrid[KL,3]*coshy + Qgrid[KL,4]*sinhy;\n \n# reduction = stationheight - gridheight\n Hs1 = Hs[KL]\n redh = hgt - Hs1;\n\n# lapse rate of the temperature in degree / m\n dTl[l] = dTgrid[KL,0] + dTgrid[KL,1]*cosfy + dTgrid[KL,2]*sinfy + dTgrid[KL,3]*coshy + dTgrid[KL,4]*sinhy;\n \n# temperature reduction to station height\n Tl[l] = T0 + dTl[l]*redh - 273.15;\n\n# virtual temperature\n Tv = T0*(1+0.6077*Ql[l]) \n c = gm*dMtr/(Rg*Tv) \n \n# pressure in hPa\n pl[l] = (p0*np.exp(-c*redh))/100 \n \n# hydrostatic coefficient ah\n ahl[l] = ahgrid[KL,0] + ahgrid[KL,1]*cosfy + ahgrid[KL,2]*sinfy + ahgrid[KL,3]*coshy + ahgrid[KL,4]*sinhy;\n \n# wet coefficient aw\n awl[l] = awgrid[KL,0] + awgrid[KL,1]*cosfy + awgrid[KL,2]*sinfy + awgrid[KL,3]*coshy + awgrid[KL,4]*sinhy;\n\t\t\t\t\t \n# water vapor decrease factor la - added by GP\n lal[l] = lagrid[KL,0] + lagrid[KL,1]*cosfy + lagrid[KL,2]*sinfy + lagrid[KL,3]*coshy + lagrid[KL,4]*sinhy;\n\t\t\t\t\t \n# mean temperature of the water vapor Tm - added by GP\n Tml[l] = Tmgrid[KL,0] + Tmgrid[KL,1]*cosfy + Tmgrid[KL,2]*sinfy + Tmgrid[KL,3]*coshy + Tmgrid[KL,4]*sinhy;\n\t\t\t\t\t \t\t \n# water vapor pressure in hPa - changed by GP\n e0 = Ql[l]*p0/(0.622+0.378*Ql[l])/100; # % on the grid\n aa = (100*pl[l]/p0)\n bb = lal[l]+1\n el[l] = e0*np.power(aa,bb) # % on the station height - (14) Askne and Nordius, 1987\n \n dnpod1 = np.abs(diffpod); # % distance nearer point\n dnpod2 = 1 - dnpod1; # % distance to distant point\n dnlon1 = np.abs(difflon);\n dnlon2 = 1 - dnlon1;\n \n# pressure\n R1 = dnpod2*pl[0]+dnpod1*pl[1];\n R2 = dnpod2*pl[2]+dnpod1*pl[3];\n p = dnlon2*R1+dnlon1*R2;\n \n# temperature\n R1 = dnpod2*Tl[0]+dnpod1*Tl[1];\n R2 = dnpod2*Tl[2]+dnpod1*Tl[3];\n T = dnlon2*R1+dnlon1*R2;\n \n# temperature in degree per km\n R1 = dnpod2*dTl[0]+dnpod1*dTl[1];\n R2 = dnpod2*dTl[2]+dnpod1*dTl[3];\n dT = (dnlon2*R1+dnlon1*R2)*1000;\n \n# water vapor pressure in hPa - changed by GP\n R1 = dnpod2*el[0]+dnpod1*el[1];\n R2 = dnpod2*el[2]+dnpod1*el[3];\n e = dnlon2*R1+dnlon1*R2;\n \n# hydrostatic\n R1 = dnpod2*ahl[0]+dnpod1*ahl[1];\n R2 = dnpod2*ahl[2]+dnpod1*ahl[3];\n ah = dnlon2*R1+dnlon1*R2;\n \n# wet\n R1 = dnpod2*awl[0]+dnpod1*awl[1];\n R2 = 
dnpod2*awl[2]+dnpod1*awl[3];\n aw = dnlon2*R1+dnlon1*R2;\n \n# undulation\n R1 = dnpod2*undul[0]+dnpod1*undul[1];\n R2 = dnpod2*undul[2]+dnpod1*undul[3];\n undu = dnlon2*R1+dnlon1*R2;\n\n# water vapor decrease factor la - added by GP\n R1 = dnpod2*lal[0]+dnpod1*lal[1];\n R2 = dnpod2*lal[2]+dnpod1*lal[3];\n la = dnlon2*R1+dnlon1*R2;\n\t\t\n# mean temperature of the water vapor Tm - added by GP\n R1 = dnpod2*Tml[0]+dnpod1*Tml[1];\n R2 = dnpod2*Tml[2]+dnpod1*Tml[3];\n Tm = dnlon2*R1+dnlon1*R2; \n\n return p, T, dT,Tm,e,ah,aw,la,undu", "def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. 
\n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150", "def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = 
exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)", "def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)", "def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer 
= refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = 
sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)", "def nearest_loop(row, gdf2,geometry_cols=['geo_lon','geo_lat'],src_column=None,surrounding=False):\r\n def haversine_distance(origin, destination):\r\n lon1, lat1 = origin\r\n lon2, lat2 = destination\r\n radius = 6371000 # meters\r\n \r\n dlat = math.radians(lat2-lat1)\r\n dlon = math.radians(lon2-lon1)\r\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n d = radius * c\r\n return d\r\n\r\n # start the main iteration\r\n if row.geometry.type == 'Polygon':\r\n point_xy = np.array((row.geometry.centroid.x,\r\n row.geometry.centroid.y))\r\n if row.geometry.type in ['Point', 'LineString']:\r\n point_xy = np.array((row.geometry.x, row.geometry.y)) \r\n # Select most current stations datasets.\r\n closest = None\r\n closest_distance = 99999999999\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest = station\r\n closest_distance = d\r\n # if surroung \r\n if surrounding:\r\n closest1 = []\r\n closest_distance = closest_distance+surrounding\r\n i = 0\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest1.append(station)\r\n i += 1\r\n closest = closest1\r\n return closest[src_column]", "def _get_storm_velocities_missing(\n storm_object_table,\n e_folding_radius_metres=DEFAULT_VELOCITY_EFOLD_RADIUS_METRES):\n\n east_velocities_m_s01 = storm_object_table[\n tracking_utils.EAST_VELOCITY_COLUMN].values\n\n north_velocities_m_s01 = storm_object_table[\n tracking_utils.NORTH_VELOCITY_COLUMN].values\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table\n\n unique_times_unix_sec, orig_to_unique_indices = numpy.unique(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,\n return_inverse=True)\n\n num_times = len(unique_times_unix_sec)\n\n # Use neighbouring storms at same time to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n (east_velocities_m_s01[these_indices],\n north_velocities_m_s01[these_indices]\n ) = _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[\n CENTROID_X_COLUMN].values[these_indices],\n y_coords_metres=storm_object_table[\n CENTROID_Y_COLUMN].values[these_indices],\n x_velocities_m_s01=east_velocities_m_s01[these_indices],\n y_velocities_m_s01=north_velocities_m_s01[these_indices],\n e_folding_radius_metres=e_folding_radius_metres)\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use all storms at same time to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n 
if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n (east_velocities_m_s01[these_indices],\n north_velocities_m_s01[these_indices]\n ) = _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[\n CENTROID_X_COLUMN].values[these_indices],\n y_coords_metres=storm_object_table[\n CENTROID_Y_COLUMN].values[these_indices],\n x_velocities_m_s01=east_velocities_m_s01[these_indices],\n y_velocities_m_s01=north_velocities_m_s01[these_indices],\n e_folding_radius_metres=numpy.nan)\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use neighbouring storms at all times to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n these_east_velocities_m_s01, these_north_velocities_m_s01 = (\n _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[CENTROID_X_COLUMN].values,\n y_coords_metres=storm_object_table[CENTROID_Y_COLUMN].values,\n x_velocities_m_s01=east_velocities_m_s01 + 0.,\n y_velocities_m_s01=north_velocities_m_s01 + 0.,\n e_folding_radius_metres=e_folding_radius_metres)\n )\n\n east_velocities_m_s01[these_indices] = these_east_velocities_m_s01[\n these_indices]\n north_velocities_m_s01[these_indices] = these_north_velocities_m_s01[\n these_indices]\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use all storms at all times to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n these_east_velocities_m_s01, these_north_velocities_m_s01 = (\n _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[CENTROID_X_COLUMN].values,\n y_coords_metres=storm_object_table[CENTROID_Y_COLUMN].values,\n x_velocities_m_s01=east_velocities_m_s01 + 0.,\n y_velocities_m_s01=north_velocities_m_s01 + 0.,\n e_folding_radius_metres=numpy.nan)\n )\n\n east_velocities_m_s01[these_indices] = these_east_velocities_m_s01[\n these_indices]\n north_velocities_m_s01[these_indices] = these_north_velocities_m_s01[\n these_indices]\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Replace missing velocities with defaults.\n nan_indices = numpy.where(numpy.isnan(east_velocities_m_s01))[0]\n east_velocities_m_s01[nan_indices] = DEFAULT_EAST_VELOCITY_M_S01\n north_velocities_m_s01[nan_indices] = DEFAULT_NORTH_VELOCITY_M_S01\n\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })", "def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, 
first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate", "def test_stations_by_distance():\n station_list = build_station_list()\n #test for stations closest to cambridge city coordinates\n station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))\n output = [(station.name, distance) for (station, distance) in station_list_sort]\n for n in range(1, len(station_list)):\n #make sure that the distance of the previous station to the point is less than the next one in the list\n assert output[n-1][1] <= output[n][1]", "def forecast_for_closest(\n lat: float, lon: float, lang=_DEFAULT_LANG, num_stations_to_try: int = 3\n) -> Tuple[Dict, Dict]:\n assert lang in _SUPPORTED_LANGS\n\n stations = closest_stations(lat, lon, limit=num_stations_to_try)\n for s in stations:\n o = forecast_for_station(s[\"id\"], lang=lang)\n if o[\"results\"] and not o[\"results\"][0].get(\"err\") and o[\"results\"][0][\"valid\"]:\n return o, s\n\n return forecast_for_station(stations[0][\"id\"], lang=lang), stations[0]", "def equalise_ts(station_list):\n s_y={}\n s_y_size = 5000 #large number for first pass\n for s in station_list:\n s_data = np.loadtxt('station_fuelType/'+s+\"_P98\",delimiter=',')\n s_y[s] = s_data[:,1]\n while float(s_y[s][0]) == 0.0:\n s_y[s]=s_y[s][1:len(s_y[s])]\n if len(s_y[s])<s_y_size:\n s_y_size = len(s_y[s])\n \n for s in s_y:\n if len(s_y[s])>s_y_size:\n s_y[s]=s_y[s][len(s_y[s])-s_y_size:len(s_y[s])]\n \n ts_y = np.ndarray(shape=[0, s_y_size], dtype = 'float')\n for key, v in s_y.items():\n ts_y = np.vstack([ts_y, np.array(v)])\n\n return ts_y", "def main():\n #get_lat_long\n place_name = 'Arlington - Arlington St'\n # sec_fun = get_lat_long(place_name)\n # print(sec_fun)\n # get_nearest_station(sec_fun[0], sec_fun[1]) #\n # get_nearest_station(42.350009, -71.076077)\n print(find_stop_near(place_name))", "def process(date, lat_oi, lon_oi, shared_args, verbose=False):\n \n filename = download(date, shared_args)\n\n atmo_data = data.open_netcdf4(filename)\n\n # choose points\n lat = atmo_data.variables['lat'][:]\n lon = atmo_data.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = funcs.choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n \n t1, t2 = data.closest_hours(atmo_data.variables['time'][:].data,\n atmo_data.variables['time'].units, date)\n t1_dt = num2date(atmo_data.variables['time'][t1], atmo_data.variables['time'].units)\n t2_dt = num2date(atmo_data.variables['time'][t2], atmo_data.variables['time'].units)\n\n index1 = (t1, slice(None), latidx, lonidx)\n index2 = (t2, slice(None), latidx, lonidx)\n\n press = numpy.array(atmo_data.variables['lev'][:])\n\n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data.variables['T'][index2], axis1=1, 
axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data.variables['RH'][index2], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data.variables['H'][index2], axis1=1, axis2=2).T / 1000.0\n\n # interpolate in time, now they are shape (4, N)\n t = interp.interp_time(date, temp1, temp2, t1_dt, t2_dt)\n h = interp.interp_time(date, height1, height2, t1_dt, t2_dt)\n rh = interp.interp_time(date, rhum1, rhum2, t1_dt, t2_dt)\n \n # interpolate in space, now they are shape (1, N)\n height = interp.idw(h, data_coor, [lat_oi, lon_oi])\n temp = interp.idw(t, data_coor, [lat_oi, lon_oi])\n relhum = interp.idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2])\n \n height = height[nr_of_nans:]\n temp = temp[nr_of_nans:]\n relhum = relhum[nr_of_nans:]\n press = press[nr_of_nans:]\n\n # load standard atmosphere for mid-lat summer\n # TODO evaluate standard atmo validity, add different ones for different TOY?\n stan_atmo = numpy.loadtxt(settings.STAN_ATMO, unpack=True)\n stan_height, stan_press, stan_temp, stan_relhum = stan_atmo\n # add standard atmo above cutoff index\n \n cutoff_idx = numpy.abs(stan_press - press[-1]).argmin()\n height = numpy.append(height, stan_height[cutoff_idx:])\n press = numpy.append(press, stan_press[cutoff_idx:])\n temp = numpy.append(temp, stan_temp[cutoff_idx:])\n relhum = numpy.append(relhum, stan_relhum[cutoff_idx:])\n \n # Convert relative humidity to percentage for modtran\n relhum = relhum * 100\n\n # TODO add buoy stuff to bottom of atmosphere\n\n if verbose:\n # send out plots and stuff\n stuff = numpy.asarray([height, press, temp, relhum]).T\n h = 'Height [km], Pressure[kPa], Temperature[k], Relative_Humidity[0-100]' + '\\nCoordinates: {0} Buoy:{1}'.format(data_coor, buoy)\n \n numpy.savetxt('atmosphere_{0}_{1}_{2}.txt'.format('merra', date.strftime('%Y%m%d'), buoy.id), stuff, fmt='%7.2f, %7.2f, %7.2f, %7.2f', header=h)\n\n return height, press, temp, relhum", "def _compute(self, w_beg, w_end, signal, station_availability):\n\n avail_idx = np.where(station_availability == 1)[0]\n sige = signal[0]\n sign = signal[1]\n sigz = signal[2]\n\n p_onset_raw, p_onset = self._compute_p_onset(sigz,\n self.sampling_rate)\n s_onset_raw, s_onset = self._compute_s_onset(sige, sign,\n self.sampling_rate)\n self.data.p_onset = p_onset\n self.data.s_onset = s_onset\n self.data.p_onset_raw = p_onset_raw\n self.data.s_onset_raw = s_onset_raw\n\n ps_onset = np.concatenate((self.data.p_onset, self.data.s_onset))\n ps_onset[np.isnan(ps_onset)] = 0\n\n p_ttime = self.lut.fetch_index(\"TIME_P\", self.sampling_rate)\n s_ttime = self.lut.fetch_index(\"TIME_S\", self.sampling_rate)\n ttime = np.c_[p_ttime, s_ttime]\n del p_ttime, s_ttime\n\n nchan, tsamp = ps_onset.shape\n\n pre_smp = int(round(self.pre_pad * int(self.sampling_rate)))\n pos_smp = int(round(self.post_pad * int(self.sampling_rate)))\n nsamp = tsamp - pre_smp - pos_smp\n\n # Prep empty 4-D coalescence map and run C-compiled ilib.migrate()\n ncell = tuple(self.lut.cell_count)\n map_4d = np.zeros(ncell + (nsamp,), dtype=np.float64)\n ilib.migrate(ps_onset, ttime, pre_smp, pos_smp, nsamp, map_4d,\n 
self.n_cores)\n\n # Prep empty coa and loc arrays and run C-compiled ilib.find_max_coa()\n max_coa = np.zeros(nsamp, np.double)\n grid_index = np.zeros(nsamp, np.int64)\n ilib.find_max_coa(map_4d, max_coa, grid_index, 0, nsamp, self.n_cores)\n\n # Get max_coa_norm\n sum_coa = np.sum(map_4d, axis=(0, 1, 2))\n max_coa_norm = max_coa / sum_coa\n max_coa_norm = max_coa_norm * map_4d.shape[0] * map_4d.shape[1] * \\\n map_4d.shape[2]\n\n tmp = np.arange(w_beg + self.pre_pad,\n w_end - self.post_pad + (1 / self.sampling_rate),\n 1 / self.sampling_rate)\n daten = [x.datetime for x in tmp]\n\n # Calculate max_coa (with correction for number of stations)\n max_coa = np.exp((max_coa / (len(avail_idx) * 2)) - 1.0)\n\n loc = self.lut.xyz2index(grid_index, inverse=True)\n\n return daten, max_coa, max_coa_norm, loc, map_4d", "def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])", "def update_stationlist(time_res='hourly',dbase_dir='dbase'):\r\n\r\n \r\n dwd_abbr = {'air_temperature': 'TU',\r\n 'cloud_type': 'CS', \r\n 'cloudiness': 'N',\r\n 'dew_point' : 'TD',\r\n 'extreme_temperature': 'TX',\r\n 'extreme_wind': 'FX',\r\n 'precipitation': 'RR',\r\n 'pressure': 'P0',\r\n 'soil_temperature': 'EB',\r\n 'solar': 'ST',\r\n 'sun': 'SD',\r\n 'visibility': 'VV',\r\n 'wind': 'FF',\r\n 'wind_synop': 'F'\r\n }\r\n \r\n # lets start\r\n print('Updating station list')\r\n \r\n # create output directory if not existing\r\n \r\n if not os.path.exists(dbase_dir):\r\n os.makedirs(dbase_dir)\r\n \r\n #check whether we have an up-to-date-station-list-already\r\n try:\r\n stations_network_old=[s for s in os.listdir(dbase_dir) if 'dwd_station_network' in s][0]\r\n datetime_network=datetime.date(datetime.strptime(re.findall('\\d+',stations_network_old)[0],'%Y%m%d'))\r\n #update if more than 24hours\r\n dt_today=datetime.date(datetime.now())\r\n if (dt_today-datetime_network)<timedelta(days=1):\r\n print('DWD network list is up-to-date, no update needed')\r\n filename_stations=dbase_dir+'\\\\'+stations_network_old\r\n return filename_stations\r\n else:\r\n print('DWD network list neeeds to be updated')\r\n os.remove(dbase_dir+'\\\\'+stations_network_old)\r\n except:\r\n print('DWD network list neeeds to be updated')\r\n pass\r\n \r\n \r\n # header\r\n stations_network=pd.DataFrame()\r\n \r\n # connect to ftp server and go to the folder\r\n \r\n # Connect to the Server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #change to subfolder\r\n ftp.cwd('/climate_environment/CDC/observations_germany/climate/' + time_res +'/')\r\n #get dwd categories\r\n dwd_categories=ftp.nlst()\r\n #loop through the subfolders to get the station lists\r\n for category in dwd_categories:\r\n print('retrieve stationlist for', category)\r\n #try to get historical data\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/historical/'\r\n ftp.cwd(dir_path)\r\n except Exception as e:\r\n print(e, 'try to download category', category, 'from other folder')\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/'\r\n ftp.cwd(dir_path)\r\n except:\r\n print('Category', category, 'could not have been downloaded')\r\n pass\r\n #retrieve the stationlist\r\n stationlist = 
[]\r\n # try to retrieve file\r\n retrieved=False\r\n filename=dwd_abbr[category]+'_Stundenwerte_Beschreibung_Stationen.txt'\r\n while not retrieved:\r\n try:\r\n ftp.retrlines(\"RETR \" + filename, stationlist.append)\r\n #ftp.retrbinary(\"RETR \" + filestr, stationlist.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(dir_path)\r\n #remove first two lines\r\n stationlist=stationlist[2:]\r\n #delete uncessary blanks\r\n stationlist=[re.sub(' +', ' ', station.rstrip()) for station in stationlist]\r\n #split the list\r\n stationlist=[station.split(\" \")[:7] for station in stationlist]\r\n #read as dataframe\r\n dfstations=pd.DataFrame(stationlist,columns=['STATIONS_ID','date_start','date_end','height','geo_lat','geo_lon','name'])\r\n #add true information to category\r\n dfstations[category]=True\r\n \r\n stations_network=stations_network.append(dfstations,sort=False,ignore_index=True)\r\n #A=[sub.split(\" \") for sub in stationlist] \r\n \r\n #replace all Na by False\r\n stations_network[stations_network.isna()]=0 \r\n #aggregate\r\n stations_network=stations_network.groupby(['STATIONS_ID'],as_index=False).agg('max')\r\n #replace zero by False in order to have pure boolean data\r\n stations_network.replace(0,False,inplace=True)\r\n #fix the error with station 14138 and 05614 and 07325, which does not have pressure cord\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='05614','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='07325','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='01572','pressure']=False\r\n #for temperature the same\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','air_temperature']=False\r\n #save to database writing the time as well\r\n filename_stations=dbase_dir+'\\\\dwd_station_network_'+datetime.now().strftime('%Y%m%d')+'.csv'\r\n stations_network.to_csv(filename_stations,index=False)\r\n \r\n print('Updating station list...finished')\r\n \r\n return filename_stations", "def get_nearest_station(latitude, longitude):\n url = '{}?api_key={}&filter[latitude]={}&filter[longitude]={}&sort=distance'.format(MBTA_BASE_URL,MBTA_API_KEY,latitude,longitude)\n # print(url)\n station_json = get_json(url)\n # print(station_json)\n station_name = station_json['data'][0]['attributes']['name']\n # # print(station_name)\n # # station_description = station_json['data'][0]['attributes']['description']\n # # if station_description:\n # # station_name = station_description\n # # print(station_description)\n wheelchair_boarding = station_json['data'][0]['attributes']['wheelchair_boarding']\n if wheelchair_boarding:\n wheelchair_boarding = \"This station is wheelchair accesible\"\n else:\n wheelchair_boarding = \"Sorry cripple\"\n # print(wheelchair_boarding)\n return station_name, wheelchair_boarding", "def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for 
i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames", "def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif 
self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):\n\n ndts = len(obs[:, 0, 0]) # number of hours in obs.\n roism = 100.0 # smaller radius of influence.\n roibg = 150.0 # bigger radius of influence.\n min_stations = 2 # min # of stns needed for testing.\n level1 = suspflag\n level2 = warnflag\n latdiff = 3.0\n londiff = 3.0\n\n thresholds = {\n 'pressure': (750.0, 1000.0, 1000.0),\n 'temp': (5.556, 8.333, 150.0), # (10degF), (15degF)\n 'dew': (5.556, 8.333, 150.0), # (10degF), (15degF)\n 'wind_speed': (7.65, 10.2, 250.0), # (15kts), (20kts)\n 'wind_dir': (360.0, 360.0, 250.0),\n 'rel_hum': (75.0, 85.0, 250.0),\n\n 'pcp6': (76.2, 101.6, 500.0), # (mm; eq 3 inches), (mm; eq 4 inches)\n 'pcp24': (152.4, 203.2, 500.0), # (mm; eq 6 inches), (mm; eq 8 inches).\n }\n\n try:\n maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]\n except KeyError:\n raise ValueError('Unrecognized variable')\n\n # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)\n if var == 'pcp':\n for d in range(ndts):\n for s in range(nstnnets):\n if obs[d, s, ivar] == trace:\n obs[d, s, ivar] = 0.0 # obs[:,:,ivar]...\n\n # Cliff's simple similar neighbor test.\n\n for d in range(ndts):\n for s in range(nstnnets):\n if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:\n qc_flag[d, s, ivar, ispatialflag] = notestflag\n continue\n\n valsm2 = []\n valbg2 = []\n\n # for each station, check it versus every other station (except itself). First time through get # of\n # stations within radius of influence to determine if we can do this test.\n for ss in range(nstnnets):\n if ss == s or obs[d, ss, ivar] == mvc \\\n or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \\\n or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \\\n or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:\n continue\n if qc_flag[d, ss, ivar, irangeflag] == failflag \\\n or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \\\n or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:\n continue\n\n dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])\n obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])\n\n if dist < roism:\n valsm2.append(obsdiff)\n\n elif dist < roibg:\n valbg2.append(obsdiff)\n\n # !--- If any obs found in roi was <= maxvdiff1, it's a pass. 
If none found <= maxvdiff1,\n # but one is >= maxvdiff1 & < maxvdiff2, it's \"suspect.\" Otherwise it's \"warning.\" Look in big roi too.\n if len(valsm2) >= min_stations:\n mindiffsm = min(valsm2)\n if mindiffsm <= maxvdiff1:\n qc_flag[d, s, ivar, ispatialflag] = passflag\n elif maxvdiff1 < mindiffsm <= maxvdiff2:\n qc_flag[d, s, ivar, ispatialflag] = level1\n else:\n qc_flag[d, s, ivar, ispatialflag] = level2\n elif len(valsm2) < min_stations <= len(valbg2):\n qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1\n\n else: # not enough obs in either roi to do test.\n qc_flag[d, s, ivar, ispatialflag] = notestflag", "def SK(all_black,all_white,all_other):\n real_zone_1=[]\n real_zone_2=[]\n real_zone_3=[]\n global p\n #FIRST defining the zone value since the more center you are, the\n #more value you will have.\n \n #Zone 1: the gratest value zone\n zone_1=[]\n zone_1_val=0.3\n for i in all_other:\n if 125<=int(i[0])<=1100 and 125<=int(i[1])<=825:\n zone_1.append(i)\n\n #zone 2: second greatest value zone\n zone_2=[]\n zone_2_val=0.2\n for i in all_other:\n if 0<=int(i[0])<=125 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 1100<=int(i[0])<=1225 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 0<=int(i[1])<=125:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 825<=int(i[1])<=950:\n zone_2.append(i)\n\n #zone 3: smallest value zone\n zone_3=[]\n zone_3_val=0.1\n for i in all_other:\n if 0<=int(i[0])<=125 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 0<=int(i[0])<=125 and 825<=int(i[1])<=950:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 825<=int(i[1])<=950:\n zone_3.append(i)\n\n if all_black==[] and all_white==[]:\n p=0 #First hand Black\n #all_black.append([25*25,19*25])\n return[25*25,19*25]\n\n\n \n\n #Calculation of the values\n val=0\n value_list=[] #[[coordinate],val]\n if p == 0: #First hand Black\n for i in all_black:\n x=i[0]\n y=i[1]\n #right down↘️\n if [x+25 ,y+25] in all_other:\n val=1\n value_list.append([[x+25,y+25],val])\n #print('右下 if',value_list)\n #print('Right D if',val)\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y+25*a] in all_black:\n val+=1\n elif [x+25*a,y+25*a] in all_other:\n value_list.append([[x+25*a,y+25*a],val])\n #print('Right D',val)\n #print('右下',value_list)\n elif [x+25*a,y+25*a] in all_white:\n break\n \n #left up↖️\n if [x-25,y-25] in all_other:\n val=1\n value_list.append([[x-25,y-25],val])\n #print('Left U if')\n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y-25*a] in all_black:\n val+=1\n elif [x-25*a,y-25*a] in all_other:\n value_list.append([[x-25*a,y-25*a],val])\n #print('Left U')\n elif [x-25*a,y-25*a] in all_white:\n break\n \n #right up↗️ \n if [x+25,y-25] in all_other:\n val=1\n value_list.append([[x+25,y-25],val])\n #print('RU if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y-25*a] in all_black:\n val+=1\n elif [x+25*a,y-25*a] in all_other:\n value_list.append([[x+25*a,y-25*a],val])\n #print('右上')\n elif [x+25*a,y-25*a] in all_white:\n break\n\n #left down↙️\n if [x-25,y+25] in all_other:\n val=1\n value_list.append([[x-25,y+25],val])\n #print('左下 if') \n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y+25*a] in all_black:\n val+=1\n elif [x-25*a,y+25*a] in all_other:\n value_list.append([[x-25*a,y+25*a],val])\n #print('左下')\n elif [x-25*a,y+25*a] in all_white:\n break\n\n #right➡️\n if [x+25,y] in all_other:\n val=1\n value_list.append([[x+25,y],val])\n #print('右',value_list)\n 
#print('右 if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y] in all_black:\n val+=1\n elif [x+25*a,y] in all_other:\n value_list.append([[x+25*a,y],val])\n #print('右')\n elif [x+25*a,y] in all_white:\n break\n\n #left⬅️ \n if [i[0]-25,i[1]] in all_other:\n val=1\n value_list.append([[i[0]-25,i[1]],val])\n #print('左', value_list)\n #print('左 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0]-25*a,i[1]] in all_black:\n val+=1\n elif [i[0]-25*a,i[1]] in all_other:\n value_list.append([[i[0]-25*a,i[1]],val])\n #print('左')\n elif [i[0]-25*a,i[1]] in all_white:\n break\n\n #down⬇️ \n if [i[0],i[1]+25] in all_other:\n val=1\n value_list.append([[i[0],i[1]+25],val])\n #print('下', value_list)\n #print('下 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]+25*a] in all_black:\n val+=1\n elif [i[0],i[1]+25*a] in all_other:\n value_list.append([[i[0],i[1]+25*a],val])\n #print('下')\n elif [i[0],i[1]+25*a] in all_white:\n break\n \n #up⬆️\n if [i[0],i[1]-25] in all_other:\n val=1\n value_list.append([[i[0],i[1]-25],val])\n #print('上',value_list)\n #print('上 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]-25*a] in all_black:\n val+=1\n elif [i[0],i[1]-25*a] in all_other:\n value_list.append([[i[0],i[1]-25*a],val])\n #print('上')\n elif [i[0],i[1]-25*a] in all_white:\n break\n\n\n\n all_val=[]\n #print(value_list,'这是value_list')\n\n \n sum_value=[]\n coord=[]\n for a in value_list:\n if a[0] not in coord:\n coord.append(a[0])\n #print(coord)\n for b in coord:\n he=[]\n for c in value_list:\n if b == c[0]:\n he.append(c[1])\n #print(he,'这是和')\n sum_value.append([b,sum(he)])\n\n\n\n #print(sum_value,'同样坐标下val相加')\n for i in sum_value:\n all_val.append(i[1])\n #print(all_val,'所有的相加之后的val')\n numb=-1\n all_max=[]\n for v in all_val:\n numb+=1\n if v == max(all_val):\n max_val_list = value_list[numb][0] #max (x,y)\n if value_list[numb][0] in all_other:\n all_max.append(value_list[numb])\n \n \n #print(max(all_val),'max val')\n for u in all_max:\n if u[0] in zone_1:\n real_zone_1.append(u[0])\n if u[0] in zone_2:\n real_zone_2.append(u[0])\n if u[0] in zone_3:\n real_zone_3.append(u[0])\n if real_zone_1 != []:\n print('real_1')\n return real_zone_1[0]\n elif real_zone_2 != []:\n print('Its zone 2')\n return real_zone_2[0]\n elif real_zone_3 != []:\n print('Its zone 3')\n return real_zone_3[0]\n else:\n return \"mistake\"", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def populate_agdds(start_date, end_date, 
source, source_id, stations):\r\n # possibly grab ACIS station data (for entire date range)\r\n if source == 'ACIS':\r\n station_ids = []\r\n for station in stations:\r\n station_ids.append(station['char_network_id'])\r\n acis_data = get_acis_climate_data(\",\".join(station_ids), 'mint,maxt,gdd32,gdd50', start_date, end_date)\r\n\r\n for station in stations:\r\n print(station['char_network_id'])\r\n # grab previous days tmin, tmax, and agdd for both bases from mysql agdds table and start over at year breaks\r\n day_before_start_date = start_date - timedelta(days=1)\r\n if day_before_start_date.year == start_date.year:\r\n prev_tmin = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmin')\r\n prev_tmax = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmax')\r\n agdd32 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'agdd')\r\n agdd50 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 50, 'agdd')\r\n else:\r\n prev_tmin = None\r\n prev_tmax = None\r\n agdd32 = None\r\n agdd50 = None\r\n\r\n if prev_tmin is None or prev_tmin == 'M':\r\n prev_tmin = 0\r\n if prev_tmax is None or prev_tmax == 'M':\r\n prev_tmax = 0\r\n if agdd32 is None or agdd32 == 'M':\r\n agdd32 = 0\r\n if agdd50 is None or agdd50 == 'M':\r\n agdd50 = 0\r\n\r\n # possibly find station of interest from ACIS retrieved data\r\n acis_station = None\r\n if source == 'ACIS':\r\n station_found = False\r\n for a_station in acis_data['data']:\r\n if station_found:\r\n break\r\n for sid in a_station['meta']['sids']:\r\n # print(sid)\r\n # print(station['char_network_id'])\r\n if station['char_network_id'] in sid:\r\n station_found = True\r\n acis_station = a_station\r\n break\r\n if not station_found:\r\n print(\"Could not find station \" + station['char_network_id'])\r\n\r\n previous_year = start_date.year\r\n delta = end_date - start_date\r\n for i in range(delta.days + 1):\r\n day = start_date + timedelta(days=i)\r\n doy = day.timetuple().tm_yday\r\n\r\n # reset the agdd to 0 if we go into a new year\r\n if previous_year != day.year:\r\n agdd32 = 0\r\n agdd50 = 0\r\n previous_year = day.year\r\n\r\n missing_data = False\r\n print(day.strftime(\"%Y-%m-%d\"))\r\n\r\n # see if we already have tmin and tmax from local db\r\n # tmin = None\r\n # tmax = None\r\n tmin = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmin')\r\n tmax = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmax')\r\n\r\n already_retrieved = False\r\n if tmin is not None and tmin != 'M' and tmax is not None and tmax != 'M' and source != 'PRISM':\r\n already_retrieved = True\r\n\r\n # don't already have tmin and tmax locally so grab from URMA postgis db or ACIS data\r\n if not already_retrieved:\r\n if source == 'URMA':\r\n if station['char_value'] == 'AK':\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'alaska')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'alaska')\r\n else:\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'conus')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'conus')\r\n # URMA and PRISM are in celsius in our postgis db everything else is Fer so convert here\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif source == 
'PRISM':\r\n tmin = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmin')\r\n tmax = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmax')\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif acis_station is not None:\r\n tmin = acis_station['data'][i][0]\r\n tmax = acis_station['data'][i][1]\r\n\r\n # if tmin or tmax is missing, set to previous day's and mark as missing\r\n if tmin is not None and tmin != 'M':\r\n tmin = float(tmin)\r\n prev_tmin = tmin\r\n else:\r\n missing_data = True\r\n tmin = prev_tmin\r\n if tmax is not None and tmax != 'M':\r\n tmax = float(tmax)\r\n prev_tmax = tmax\r\n else:\r\n missing_data = True\r\n tmax = prev_tmax\r\n\r\n # compute gdd and agdd for both bases\r\n gdd32 = compute_gdd(tmin, tmax, 32)\r\n gdd50 = compute_gdd(tmin, tmax, 50)\r\n\r\n agdd32 += gdd32\r\n agdd50 += gdd50\r\n\r\n if not already_retrieved:\r\n # do an insert or update\r\n add_agdd_row(station['station_id'], source_id, gdd32, agdd32, day.year, doy, day, 32, missing_data, tmin, tmax)\r\n add_agdd_row(station['station_id'], source_id, gdd50, agdd50, day.year, doy, day, 50, missing_data, tmin, tmax)", "def filldf(df, response, sorted_selection, params_selection, constant=True, verbose=True):\n selections_iter = iter(sorted_selection)\n params_iter = iter(params_selection)\n idxmissing = df[response][df[response].isnull() == True].index # slect where their is missing data\n\n print(\"Filling .... \")\n\n while len(idxmissing) > 0:\n print(\"Their is [\" + str(len(idxmissing)) + \"] events missing\")\n\n try: # Try if their is still other stations to fill with\n selection = next(selections_iter)\n param = next(params_iter)\n except StopIteration:\n print(\"NO MORE SELECTED STATIONS\")\n break\n\n try:\n Y = df.loc[:, response]\n X1 = df.loc[:, selection[0]]\n X2 = df.loc[:, selection[1]]\n select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()\n if constant:\n newdata = param[0] + param[1] * select['X1'] + param[2] * select['X2'] # reconstruct the data\n else:\n newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data\n\n df.loc[idxmissing, response] = newdata.loc[idxmissing]\n idxmissing = df[response][df[response].isnull() == True].index # slect where their is missing data\n except KeyError:\n if verbose:\n print('Selected stations ' + str(selection) + 'did not fill any events')\n else:\n pass\n\n except ValueError:\n if verbose:\n print('The variable ' + var + \"Does not exist or no data to do the multilinear regression \")\n else:\n pass\n\n return df.loc[:, response]", "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n 
self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_station_configuration_new(stations_id, station_configuration):\n \n \"\"\" Gets the primary station_id from the station_configuration table. \n station_id is the id taken from the input file.\n First it checks if a primary_id in th estation_conf file matches the station_id, \n otherwise it looks for an element in the list of secondary ids. \n \"\"\"\n\n for s in stations_id:\n s = str(s)\n try:\n si = s.decode('utf-8')\n except:\n si = s \n #if ':' in si:\n # si = si.split(':')[1]\n \n station_id_primary = numpy.string_( '0-20000-0-' + si ) # remove the prefix to the station id \n station_id_primary_alternative = numpy.string_( '0-20001-0-' + si )\n \n \"\"\" First, check for matching primary_id. \n If not found, check for secondary id. Note that secondary is a list, so must loop over the entry to find a matching one \"\"\"\n \n matching_primary = station_configuration.loc[station_configuration['primary_id'] == station_id_primary ]\n matching_primary_alt = station_configuration.loc[station_configuration['primary_id'] == station_id_primary_alternative ]\n \n if len(matching_primary) > 0:\n return matching_primary \n \n elif len(matching_primary_alt) > 0 :\n return matching_primary_alt \n \n else:\n secondary = station_configuration['secondary_id'] \n \n for second in secondary:\n try: # this try is needed when the secondary ids are not defined or wrong, and the primary id cannot be matched with the station_id \n sec_list = second.decode('utf-8') # secondary ids are separated by a comma, so I loop over the list\n except:\n try:\n sec_list = str(second)\n except: \n pass\n \n #if ':' in sec_list: # might be : or C: in the secndary id , e.g. C:5852 \n # sec_list = sec_list.split(':')[1]\n \n if sec_list == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\" \n return sc \n try:\n if str(second) == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\")\n return sc \n except:\n pass \n \n return None", "def nearest_neigbor(self, pc):\n coord = get_coordinates(pc)\n # deliveries\n pdist_deliv = {haversine(coord[0], coord[1], pcoord[1][0], pcoord[1][1]):pc for pc, pcoord in self.state.D_k.items()}\n pdist_list_deliv = list(pdist_deliv.keys())\n if len(pdist_list_deliv) > 0:\n val_deliv_min = min(pdist_list_deliv)\n else:\n val_deliv_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n # pickups\n pdist_pickup = {haversine(coord[0], coord[1], pcoord[-1][0], pcoord[-1][1]):pc for pc, pcoord in self.state.P_k.items()}\n pdist_list_pickup = list(pdist_pickup.keys())\n\n if len(pdist_list_pickup) > 0:\n val_pickup_min = min(pdist_list_pickup)\n else:\n val_pickup_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n\n if val_deliv_min == val_pickup_min and val_deliv_min == 1e6:\n print(\"All jobs completed: go to wait or stop if it's 12pm\")\n return 0\n\n if val_deliv_min < val_pickup_min:\n return pdist_deliv[val_deliv_min]\n\n elif val_deliv_min >= val_pickup_min:\n return pdist_pickup[val_pickup_min]\n else:\n raise valueError('Impossible comparison between val_deliv_min and val_pickup_min ')", "def propagate(satellite):", "def _estimate_velocity_by_neigh(\n x_coords_metres, y_coords_metres, x_velocities_m_s01,\n y_velocities_m_s01, e_folding_radius_metres):\n\n if numpy.isnan(e_folding_radius_metres):\n 
neigh_radius_metres = numpy.inf\n else:\n neigh_radius_metres = 3 * e_folding_radius_metres\n\n orig_x_velocities_m_s01 = x_velocities_m_s01 + 0.\n orig_y_velocities_m_s01 = y_velocities_m_s01 + 0.\n\n nan_flags = numpy.logical_or(\n numpy.isnan(orig_x_velocities_m_s01),\n numpy.isnan(orig_y_velocities_m_s01)\n )\n nan_indices = numpy.where(nan_flags)[0]\n\n for this_index in nan_indices:\n if numpy.isnan(e_folding_radius_metres):\n these_neighbour_indices = numpy.where(numpy.invert(nan_flags))[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n x_velocities_m_s01[this_index] = numpy.mean(\n orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.mean(\n orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n continue\n\n these_x_diffs_metres = numpy.absolute(\n x_coords_metres[this_index] - x_coords_metres)\n these_y_diffs_metres = numpy.absolute(\n y_coords_metres[this_index] - y_coords_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_x_diffs_metres <= neigh_radius_metres,\n these_y_diffs_metres <= neigh_radius_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_neighbour_flags, numpy.invert(nan_flags)\n )\n\n these_neighbour_indices = numpy.where(these_neighbour_flags)[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n these_neighbour_dist_metres = numpy.sqrt(\n these_x_diffs_metres[these_neighbour_indices] ** 2 +\n these_y_diffs_metres[these_neighbour_indices] ** 2\n )\n\n these_neighbour_subindices = numpy.where(\n these_neighbour_dist_metres <= neigh_radius_metres\n )[0]\n if len(these_neighbour_subindices) == 0:\n continue\n\n these_neighbour_indices = these_neighbour_indices[\n these_neighbour_subindices]\n these_neighbour_dist_metres = these_neighbour_dist_metres[\n these_neighbour_subindices]\n\n these_weights = numpy.exp(\n -these_neighbour_dist_metres / e_folding_radius_metres\n )\n these_weights = these_weights / numpy.sum(these_weights)\n\n x_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n return x_velocities_m_s01, y_velocities_m_s01", "def _load_stations(self, nodes: List[OSMNode]) -> None:\n # Process OSM nodes into intermediate stations\n grouped_stations: defaultdict[str, list[IntermediateStation]] = defaultdict(list)\n\n # Iterate thru nodes while popping them from the provided list\n # to allow used nodes to bne garbage collected.\n while nodes:\n node = nodes.pop()\n name_id = node.tags[\"name\"]\n grouped_stations[name_id].append(IntermediateStation(\n node.id,\n name_id,\n node.lat,\n node.lon,\n [k for (k, v) in node.tags.items() if \".\" in k and v == \"yes\"],\n node.tags.get(\"merged\") == \"all\",\n ))\n\n # Convert the intermediate representations to GeoStation\n # (again popping from grouped_stations to allow intermediate representation to be gc-ed)\n while grouped_stations:\n name_id, stations = grouped_stations.popitem()\n merged_all_node = get_merged_all_node(stations)\n\n if len(stations) == 1 and len(stations[0].routes) == 1:\n # Case 1 - one station and one line.\n sta = stations[0]\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n elif len(stations) == 1:\n # Case 2 - one station and multiple lines.\n # Simple parent-children structure, all in one location.\n sta = stations[0]\n parent = GeoStation(\"Merged.\" + name_id, sta.lat, 
sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon, parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n elif merged_all_node:\n # Case 3: many nodes, but all under one parent\n parent = GeoStation(\"Merged.\" + name_id, merged_all_node.lat, merged_all_node.lon)\n self.by_id[parent.id] = parent\n\n for ista in stations:\n for route in ista.routes:\n child = GeoStation(route + \".\" + name_id, ista.lat, ista.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n else:\n # Case 4: many nodes, no parent-of-all\n needs_merged_no = count_multiple_routes(stations) > 1\n merged_no = 1\n\n for sta in stations:\n if len(sta.routes) == 1:\n # Case 4.1 - single line - behavior as in case 1\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n else:\n # Case 4.2 - multiple lines - behavior as in case 2\n parent_prefix = \"Merged.\"\n if needs_merged_no:\n parent_prefix = f\"Merged.{merged_no}.\"\n merged_no += 1\n\n parent = GeoStation(parent_prefix + name_id, sta.lat, sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)", "def _calc_(self):\n self.data = []\n all_xyz_data = self.Var.data.get_xyz_data()\n all_cols = self.Var.data.get_xyz_cols()\n\n # Loop over all the xyz data and cols we have\n for xyz_data, cols in zip(all_xyz_data, all_cols):\n\n at_crds = np.array([i[cols[0] != 'Ne'] for i in xyz_data])\n self.natom = len(at_crds[0])\n self.nstep = len(at_crds)\n self.step_data = {}\n\n # Calculate the nearest neighbour lists for each step\n for step in range(self.nstep):\n self.step_data[step] = {}\n\n # Get coords\n crds = at_crds[step]\n\n # Get distances between neighbours\n self.get_distances(crds)\n\n # Get a sorted list of atom indices by distance\n self.get_nearest_atom_inds()\n\n # If we have some molecule metadata\n if 'atoms_per_molecule' in self.Var.metadata:\n self.at_per_mol = self.Var.metadata['atoms_per_molecule']\n self.nmol = mol_utils.get_nmol(self.natom, self.at_per_mol)\n self.reshape_at_dist()\n self.get_nearest_atom_inds_per_mol()\n self.step_data[step]['closest_atoms_mol_grouped'] = self.closest_at_per_mol\n self.step_data[step]['distances_mol_grouped'] = self.all_dist_per_mol\n\n # Save data in dict\n self.step_data[step]['distances'] = self.all_dist\n self.step_data[step]['closest_atom_indices'] = self.closest_ats\n\n self.data.append(self.step_data)\n\n return self.data", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, 
secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will 
be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time", "def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation", "def test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def main():\n snowdensity=0.35 #from May 1 2010 SNOTEL (2011,2013 were similar, 2014 was 0.4), at the saddle in May 1 2010 it was 0.4\n snodasyears=[2010,2004,2005]\n wdata=[wrf.load(\"wrf/SWE_daily.nc\",extractday=212+5+int(np.round(365.25*year))) for year in [3,4]]\n wdata.extend([wrf.load(\"wrf/SWE_daily.nc\",extractday=212+20+int(np.round(365.25*year))) for year in [3,4]])\n print(len(wdata))\n sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=125) for year in snodasyears]\n sdata.extend([snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=140) for year in snodasyears])\n print(len(sdata))\n # sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=120) for year in range(2004,2013)]\n # sdata.insert(0,sdata.pop(6)) #move year 2010 to the begining of the list\n ldata=lidar.load_fast(loc=\"lidar/\",geofile=\"snow-on-dem.nc\",decimation_factor=10)\n \n print(\"Calculating WRF weights\")\n try:\n wrfweights=mygis.read_nc(\"wrf2lidar_weights.nc\").data\n except:\n wrfweights =gen_weights(ldata.lat,ldata.lon,wdata[0].lat,wdata[0].lon,mask=(ldata.dem>1500))\n 
mygis.write(\"wrf2lidar_weights.nc\",wrfweights)\n \n # wrfbounds =find_bounds(wrfweights)\n print(\"Calculating SNODAS weights\")\n try:\n snodasweights=mygis.read_nc(\"snodas2lidar_weights.nc\").data\n except:\n snodasweights=gen_weights(ldata.lat,ldata.lon,sdata[0].lat,sdata[0].lon,mask=(ldata.dem>1500))\n mygis.write(\"snodas2lidar_weights.nc\",snodasweights)\n \n # snodasbounds =find_bounds(snodasweights)\n \n wdata[0].lc[wrfweights==0]=0\n sdata[0].lc[snodasweights==0]=0\n\n print(\"Binning by elevations...\")\n #dx=4000) #note use dx=lidar_dx because weights are lidar gridcells...\n wrfbyz=[bin_by_elevation(w.data,w.dem,wdata[0].lc,weights=wrfweights,dz=200,dx=10) for w in wdata]\n print(\"Binning by elevations...\")\n snodasbyz=[bin_by_elevation(s.data,sdata[0].dem,sdata[0].lc,weights=snodasweights,dz=150,dx=10) for s in sdata]#dx=926)\n print(\"Binning by elevations...\")\n lidarbyz=bin_by_elevation(ldata.data*snowdensity,ldata.dem,ldata.lc,dz=100,dx=10)\n print(\"Plotting\")\n plot_volumes(wrfbyz,snodasbyz,lidarbyz)\n\n snodasyears=[2010,2004,2005,2010.2,2004.2,2005.2]\n for i in range(len(snodasbyz)):\n plot_elevation_bands(snodasbyz[i],outputfile=\"SNODAS_swe_by_z_{}.png\".format(snodasyears[i]),title=\"SNODAS SWE {}\".format(snodasyears[i]))", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def _make_ties(self) -> None:\n\n # get all hint spaces with adjacent '?'s\n frontier = {neighbor: self._lookup[neighbor] for pos, space in self._unknowns.items() for neighbor in\n space.neighbors.values() if neighbor and 
self._lookup[neighbor].hint.isnumeric()}\n\n # use hints to create \"zones\" of '?'-squares along the frontier,\n # detailing the # of mines left to find in each zone.\n for pos, space in frontier.items():\n local_unknowns = {coord for coord in space.neighbors.values() if coord in self._unknowns}\n for unknown in local_unknowns:\n key = frozenset(local_unknowns)\n self._lookup[unknown].zones[key] = self._lookup[unknown].zones.setdefault(key, space.num_undiscovered)\n self._lookup[unknown].zones[key] = min(space.num_undiscovered, self._lookup[unknown].zones[key])\n self._lookup[unknown].ties |= local_unknowns - {unknown}\n self._remaining_zones.update(self._lookup[unknown].zones)\n\n # split overlapping zones into components\n for unknown in self._unknowns.values():\n for zone, num_undiscovered in list(unknown.zones.items()):\n if zone not in unknown.zones:\n continue\n for other_zone, other_num_undiscovered in list(unknown.zones.items()):\n if other_zone in unknown.zones:\n shared = zone & other_zone\n\n if zone < other_zone or (shared and other_num_undiscovered > num_undiscovered):\n # if \"zone\" & \"other_zone\" share members then\n # it is possible to split the zone w/ the higher # of mines\n # into components, \"shared\" & \"not_shared\".\n\n # unknown.zones.pop(other_zone)\n\n not_shared = other_zone - shared\n unknown.zones[not_shared] = other_num_undiscovered - num_undiscovered\n else:\n print(end='')\n return", "def _add_data(self, model_stations: Iterable[model.Station],\n validate_prefix: str = \"\") -> int:\n valid_station_count = 0\n jreast_merged_codes: dict[model.StationID, str] = load_csv_as_mapping(\n DIR_CURATED / \"jreast_merged_codes.csv\",\n itemgetter(\"sta_id\"),\n itemgetter(\"code\")\n )\n\n # Add data from model stations\n for model_sta in model_stations:\n is_invalid = False\n should_validate = model_sta.id.startswith(validate_prefix)\n\n # Find a matching geo_sta\n geo_sta = self.by_id.get(model_sta.id)\n if not geo_sta:\n if should_validate:\n self.logger.critical(f\"{Color.RED}geo.osm is missing station \"\n f\"{Color.MAGENTA}{model_sta.id}{Color.RESET}\")\n self.valid = False\n continue\n\n # Find a name\n name_id = last_part(geo_sta.id)\n geo_sta.name = self.names.get(name_id)\n if geo_sta.name is None and should_validate:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Copy stop_code\n geo_sta.code = model_sta.code\n\n # Check if station was valid\n if is_invalid:\n self.valid = False\n elif should_validate:\n valid_station_count += 1\n\n # Generate codes and names for mother stations\n for sta in self.by_id.values():\n if not sta.children:\n continue\n\n name_id = last_part(sta.id)\n sta.name = self.names.get(name_id)\n if not sta.name:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Get children codes\n children_codes = []\n jreast_merged_code = jreast_merged_codes.get(sta.id)\n if jreast_merged_code:\n children_codes.append(jreast_merged_code)\n\n for child in sta.children:\n # Ignore JR-East child codes if there's a JR-East merged code\n if child.id.startswith(\"JR-East\") and jreast_merged_code:\n continue\n elif child.code:\n children_codes.append(child.code)\n\n sta.code = \"/\".join(children_codes)\n\n return valid_station_count", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for 
field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def test_compute_after_smooth_goddard_2013(\r\n PM_ds_initialized_3d_full, PM_ds_control_3d_full\r\n):\r\n PM_ds_control_3d_full = smooth_goddard_2013(\r\n PM_ds_control_3d_full,\r\n )\r\n PM_ds_initialized_3d_full = smooth_goddard_2013(\r\n PM_ds_initialized_3d_full,\r\n )\r\n actual = compute_perfect_model(PM_ds_initialized_3d_full, PM_ds_control_3d_full).tos\r\n\r\n north_atlantic = actual.sel(lat=slice(40, 50), lon=slice(-30, -20))\r\n assert not north_atlantic.isnull().any()", "def combine_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n\n pass", "def get_sunspot_data(yy, time1):\n master = []\n num_of_ss = np.max(yy.flatten()) # get number of different SS's\n centroids = []\n sizes = []\n numbers = []\n\n for i in np.arange(1, num_of_ss + 1): # for each SS:\n temp_sunspot = SunSpot(1, 1, 1)\n copy_yy = np.array(yy, copy = True)\n copy_yy[copy_yy != i] = 0 # get only points == i\n copy_yy[copy_yy == i] = 1\n\n indices_x, indices_y = np.where(yy == i)\n\n max_lat = np.max(indices_x)\n min_lat = np.min(indices_x)\n mean_lat = max_lat - (max_lat - min_lat)/2\n \n max_lon = np.max(indices_y)\n min_lon = np.min(indices_y)\n mean_lon = max_lon - (max_lon - min_lon)/2\n \n temp_sunspot.mask = copy_yy\n temp_sunspot.centroid = [mean_lon, mean_lat]\n temp_sunspot.size = len(indices_x)\n temp_sunspot.number = i\n temp_sunspot.x_points = indices_x\n temp_sunspot.y_points = indices_y\n temp_sunspot.timestamp = time1\n temp_sunspot.min_x = min_lon\n temp_sunspot.max_x = max_lon\n temp_sunspot.min_y = min_lat\n temp_sunspot.max_y = max_lat\n\n master.append(temp_sunspot)\n\n return num_of_ss, master", "def extract_loc(ref_lon, ref_lat, tlon, tlat, var):\n\n if var.ndim == 3: # 3D variable\n zmax, imax, jmax = var.shape\n threeD = True\n elif var.ndim == 2: # 2D variable\n imax, jmax = var.shape\n threeD = False\n else:\n print 'extract_loc: check variable dimensions'\n return\n\n # find the indices of the 4 model 
grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n dist[dist==0] = 1.e-15 # avoid division by zero\n\n # arrays to store weights and data to be averaged\n if threeD: # 3D variable\n wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n if MA.isMA(var): # mask weights\n dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)\n else:\n dist_m = N.array(N.resize(dist,var.shape))\n else: # 2D variable\n wghts = MA.zeros((len(Ilist)*len(Jlist)),float)\n data = MA.zeros((len(Ilist)*len(Jlist)),float)\n if MA.isMA(var):\n dist_m = MA.array(dist,mask=var.mask) # mask weights\n else:\n dist_m = N.array(dist)\n\n # get the 4 model grid points and compute weights\n n = 0\n for i in Ilist:\n for j in Jlist:\n wghts[...,n] = 1./dist_m[...,i,j]\n data[...,n] = var[...,i,j]\n n += 1\n\n # compute weighted average\n wavg = MA.average(data,axis=-1,weights=wghts)\n return wavg", "def merge_data():\n\n\tconfig = Config()\n\tfilename_train, filename_test = \"../data/train.csv\", \"../data/test.csv\" \n\n # create datasets\n\ttrain, test = config.load_data(filename_train, filename_test, print_EDA=False)\n\n # 1. datetime features\n\t# diff between weekday and day?\n\t#weekday - Return the day of the week as an integer, where Monday is 0 and Sunday is 6.\n\t#day - Between 1 and the number of days in the given month of the given year.\n\ttrain['pickup_hour'] = train.pickup_datetime.dt.hour.astype('uint8')\n\ttrain['pickup_day'] = train.pickup_datetime.dt.day.astype('uint8')\n\ttrain['pickup_weekday'] = train.pickup_datetime.dt.weekday.astype('uint8')\n\ttrain['pickup_minute'] = train.pickup_datetime.dt.minute.astype('uint8')\n\ttrain['pickup_month'] = train.pickup_datetime.dt.month.astype('uint8')\n\ttrain['pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear\n\ttrain['pickup_weekday_hour'] = train['pickup_weekday']*24 + train['pickup_hour']\n\n\ttest['pickup_hour'] = test.pickup_datetime.dt.hour.astype('uint8')\n\ttest['pickup_day'] = test.pickup_datetime.dt.day.astype('uint8')\n\ttest['pickup_weekday'] = test.pickup_datetime.dt.weekday.astype('uint8')\n\ttest['pickup_minute'] = test.pickup_datetime.dt.minute.astype('uint8')\n\ttest['pickup_month'] = test.pickup_datetime.dt.month.astype('uint8')\n\ttest['pickup_hour_weekofyear'] = test['pickup_datetime'].dt.weekofyear\n\ttest['pickup_weekday_hour'] = test['pickup_weekday']*24 + test['pickup_hour']\n\n\t# 2. 
Location features\n\tdef haversine(lon1, lat1, lon2, lat2):\n\t lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\t dlon = lon2 - lon1\n\t dlat = lat2 - lat1\n\t a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\t c = 2 * np.arcsin(np.sqrt(a))\n\t km = 6367 * c # AVG_EARTH_RADIUS=6367\n\t miles = km * 0.621371\n\t return miles\n\n\t# def dummy_manhattan_distance(lat1, lng1, lat2, lng2):\n\t# a = haversine_array(lat1, lng1, lat1, lng2)\n\t# b = haversine_array(lat1, lng1, lat2, lng1)\n\t# return a + b\n\n\t# def bearing_array(lat1, lng1, lat2, lng2):\n\t# AVG_EARTH_RADIUS = 6371 # in km\n\t# lng_delta_rad = np.radians(lng2 - lng1)\n\t# lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))\n\t# y = np.sin(lng_delta_rad) * np.cos(lat2)\n\t# x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)\n\t# return np.degrees(np.arctan2(y, x))\n\n\ttrain['distance'] = haversine(train.pickup_longitude, train.pickup_latitude,\n\t train.dropoff_longitude, train.dropoff_latitude)\n\ttest['distance'] = haversine(test.pickup_longitude, test.pickup_latitude,\n\t test.dropoff_longitude, test.dropoff_latitude)\n\n\n\t# 3. Use outsource data\n\tweatherdata_filename = \"../data/outsource_data/weather_data_nyc_centralpark_2016.csv\"\n\tfastestroute_data_train = \"../data/outsource_data/fastest_train.csv\"\n\tfastestroute_data_test = \"../data/outsource_data/fastest_routes_test.csv\"\n\n\n\twd = pd.read_csv(weatherdata_filename, header=0)\n\twd['date'] = pd.to_datetime(wd.date, format=\"%d-%m-%Y\")\n\twd['pickup_day'] = wd['date'].dt.day\n\twd['snow fall'] = wd['snow fall'].replace('T', 0.05).astype(np.float32) \n\twd['precipitation'] = wd['precipitation'].replace('T', 0.05).astype(np.float32) \n\twd['snow depth'] = wd['snow depth'].replace('T', 0.05).astype(np.float32) \n\n\t# Merge training data with weather data on pickup_day\n\tprint(\"Merging training data with weather data ....\")\n\twd_train = pd.merge(train, wd, on='pickup_day')\n\twd_train = wd_train.drop(['date','maximum temperature','minimum temperature'],axis=1)\n\tgc.collect()\n\n\t# Merge wd_train with fastestroute_data\n\tfastest = pd.read_csv(fastestroute_data_train, header=0)\n\tprint(\"Merging Location data with weather and training data ....\")\n\twd_train_fastest = pd.merge(wd_train, fastest, on='id', how='outer')\n\n\tgc.collect()\n\n\n\tprint(\"===================== CHECK TRAINING DATA =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Semi-final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\t# Use the same outsource data with test set\n\t# merge outsource data with test data as well\n\tft_test_cols = [ 'id', 'starting_street','end_street','total_distance',\t'total_travel_time',\n\t\t\t\t\t'number_of_steps','street_for_each_step','distance_per_step','travel_time_per_step',\n\t 'step_maneuvers','step_direction',\t'step_location_list']\n\tfastest_test = pd.read_csv(fastestroute_data_test, names=ft_test_cols, header=0)\n\n\tprint(\"Merging test data with Location data ....\")\n\ttest = pd.merge(test, fastest_test, on='id', how='outer')\n\ttest = test.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street',\n\t 'end_street'], axis=1)\n\tprint(\"Merging test data with weather data ....\")\n\ttest = pd.merge(test, wd, 
on='pickup_day')\n\n\tprint(\"===================== CHECK TEST DATA =====================\")\n\n\tprint(test.head(2))\n\tprint(\"Semi-final test data shape is: {}\".format(test.shape))\n\tprint(\"Test data columns: {}\".format(test.columns))\n\n\n\n\t# 4. Do more data munging\n\tmask = ((wd_train_fastest.trip_duration > 60) & (wd_train_fastest.distance < 0.05))\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = (wd_train_fastest.trip_duration < 60) \n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.trip_duration > 79200\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.distance/(wd_train_fastest.trip_duration/3600) > 60\n\twd_train_fastest = wd_train_fastest[~mask]\n\twd_train_fastest.trip_duration = wd_train_fastest.trip_duration.astype(np.uint16)\n\twd_train_fastest = wd_train_fastest[wd_train_fastest.passenger_count > 0]\n\n\t# 5. Do some data maskig based on location to create jfk and lgo features\n\tjfk_lon = -73.778889\n\tjfk_lat = 40.639722\n\tlga_lon = -73.872611\n\tlga_lat = 40.77725\n\n\twd_train_fastest['jfk_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['lga_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['jfk_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\twd_train_fastest['lga_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\n\twd_train_fastest['jfk'] = ((wd_train_fastest['jfk_pickup_dist'] < 2) | (wd_train_fastest['jfk_dropoff_dist'] < 2))\n\twd_train_fastest['lga'] = ((wd_train_fastest['lga_pickup_dist'] < 2) | (wd_train_fastest['lga_dropoff_dist'] < 2))\n\twd_train_fastest = wd_train_fastest.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)\n\twd_train_fastest['workday'] = ((wd_train_fastest['pickup_hour'] > 8) & (wd_train_fastest['pickup_hour'] < 18))\n\n\n\tprint(\"===================== CHECK TRAINING DATA AGAIN =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\treturn wd_train_fastest, test", "def get_station_configuration_f(stations_id, station_configuration):\n \n \"\"\" Gets the primary station_id from the station_configuration table. \n station_id is the id taken from the input file.\n First it checks if a primary_id in th estation_conf file matches the station_id, \n otherwise it looks for an element in the list of secondary ids. \n \"\"\"\n\n for s in stations_id:\n s = str(s)\n try:\n si = s.decode('utf-8')\n except:\n si = s \n if ':' in si:\n si = si.split(':')[1]\n \n station_id_primary = numpy.string_( '0-20000-0-' +si ) # remove the prefix to the station id \n station_id_primary_alternative = numpy.string_( '0-20001-0-' + si )\n \n \n \"\"\" First, check for matching primary_id. \n If not found, check for secondary id. 
Note that secondary is a list, so must loop over the entry to find a matching one \"\"\"\n \n matching_primary = station_configuration.loc[station_configuration['primary_id'] == station_id_primary ]\n matching_primary_alt = station_configuration.loc[station_configuration['primary_id'] == station_id_primary_alternative ]\n \n if len(matching_primary) > 0:\n return matching_primary \n \n elif len(matching_primary_alt) > 0 :\n return matching_primary_alt \n \n else:\n secondary = station_configuration['secondary_id'] \n \n for second in secondary:\n try: # this try is needed when the secondary ids are not defined or wrong, and the primary id cannot be matched with the station_id \n sec_list = second.decode('utf-8') # secondary ids are separated by a comma, so I loop over the list\n except:\n try:\n sec_list = str(second)\n except: \n pass\n \n if ':' in sec_list: # might be : or C: in the secndary id , e.g. C:5852 \n sec_list = sec_list.split(':')[1]\n \n if sec_list == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\" \n return sc \n try:\n if str(second) == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\")\n return sc \n except:\n pass \n \n return None", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def update():\n\n # ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # explode southwest 
corner into two variables\n (sw_lat, sw_lng) = [float(s) for s in request.args.get(\"sw\").split(\",\")]\n\n # explode northeast corner into two variables\n (ne_lat, ne_lng) = [float(s) for s in request.args.get(\"ne\").split(\",\")]\n\n # find stations within view\n if (sw_lng <= ne_lng):\n # doesn't cross the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.and_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n else:\n # crosses the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.or_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n result = geo_stations.dump(stations)\n\n return jsonify(result.data)", "def closest_stations(latlong, df):\n names = df['name'].values\n station_dists = {}\n for (lat, lon, name) in list(df[['Lat', 'Lon', 'name']].value_counts().index):\n if not(np.isnan(lat) or np.isnan(lon)):\n station_dists[name] = haversine(latlong, (lat, lon)) \n \n return sorted(station_dists.items(), key=lambda x: x[1])", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = 
bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models", "def coarsen_byavg(invar,lat,lon,deg,tol,latweight=True,verbose=True,ignorenan=False):\n\n # Make new Arrays\n lon5 = np.arange(0,360+deg,deg)\n lat5 = np.arange(-90,90+deg,deg)\n \n \n # Set up latitude weights\n if latweight:\n _,Y = np.meshgrid(lon,lat)\n wgt = np.cos(np.radians(Y)) # [lat x lon]\n invar *= wgt[None,:,:] # Multiply by latitude weight\n \n # Get time dimension and preallocate\n nt = invar.shape[0]\n outvar = np.zeros((nt,len(lat5),len(lon5)))\n \n # Loop and regrid\n i=0\n for o in range(len(lon5)):\n for a in range(len(lat5)):\n lonf = lon5[o]\n latf = lat5[a]\n \n lons = np.where((lon >= lonf-tol) & (lon <= lonf+tol))[0]\n lats = np.where((lat >= latf-tol) & (lat <= latf+tol))[0]\n \n varf = invar[:,lats[:,None],lons[None,:]]\n \n if latweight:\n wgtbox = wgt[lats[:,None],lons[None,:]]\n if ignorenan:\n varf = np.nansum(varf/np.nansum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n else:\n varf = np.sum(varf/np.sum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n \n \n else:\n if ignorenan: \n varf = np.nanmean(varf,axis=(1,2))\n else:\n varf = varf.mean((1,2))\n \n outvar[:,a,o] = varf.copy()\n i+= 1\n msg=\"\\rCompleted %i of %i\"% (i,len(lon5)*len(lat5))\n print(msg,end=\"\\r\",flush=True)\n return outvar,lat5,lon5", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n 
transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def get_ships_analysis(self):\n \n # Get SHIPS times\n times = self.search_ships()\n if len(times) <= 1:\n raise RuntimeError('SHIPS data is unavailable for the requested storm.')\n \n # Declare dict\n new_dict = {\n 'time': [],\n 'mslp': [],\n 'type': [],\n 'vmax': [],\n 'wmo_basin': [],\n }\n for attr in ['name', 'id', 'operational_id', 'year', 'season', 'basin', 'realtime']:\n new_dict[attr] = self[attr]\n new_dict['ace'] = 0.0\n \n # Construct data\n for time in times:\n ships = self.get_ships(time)\n if ships is None: continue\n if np.isnan(ships.lat[0]) or np.isnan(ships.lon[0]): continue\n\n # Add relevant variables\n new_dict['time'].append(time)\n new_dict['mslp'].append(np.nan)\n for key in ships.dict.keys():\n if key in ['fhr', 'vmax_noland_kt', 'vmax_lgem_kt']: continue\n\n # Special handling for storm type\n if key == 'storm_type':\n subtropical_flag = False\n derived_type = 'EX'\n try:\n if ships.dict['storm_type'][0] == 'SUBT':\n subtropical_flag = True\n derived_type = get_storm_type(ships.dict['vmax_land_kt'][0], subtropical_flag)\n if ships.dict['storm_type'][0] not in ['TROP', 'SUBT']:\n derived_type = 'EX'\n except:\n pass\n new_dict['type'].append(derived_type)\n\n # vmax handling\n elif key == 'vmax_land_kt':\n new_dict['vmax'].append(ships.dict[key][0])\n\n # Normal handling\n elif key in new_dict:\n new_dict[key].append(ships.dict[key][0])\n else:\n new_dict[key] = [ships.dict[key][0]]\n \n # Derive ACE\n if not np.isnan(new_dict['vmax'][-1]):\n new_dict['ace'] += accumulated_cyclone_energy(new_dict['vmax'][-1])\n\n # Derive basin\n new_dict['wmo_basin'].append(get_basin(new_dict['lat'][-1],\n new_dict['lon'][-1],\n self.basin))\n\n # Add other attributes\n new_dict['source_info'] = 'SHIPS Analysis'\n new_dict['source_method'] = 'UCAR SHIPS Archive'\n new_dict['source_url'] = 'https://hurricanes.ral.ucar.edu/'\n new_dict['invest'] = False\n new_dict['source'] = 'ships'\n new_dict['prob_2day'] = 
'N/A'\n new_dict['prob_7day'] = 'N/A'\n new_dict['risk_2day'] = 'N/A'\n new_dict['risk_7day'] = 'N/A'\n \n return Storm(new_dict)", "def detour(src, dst, pitstop):\n options = on_path([src, dst],query='shell gas station', size=10,urgency=0)\n ret = []\n for place in options:\n title = place['title']\n x = place['latlon']\n addr = place['address']\n A_X = dist(src, x); X_B = dist(x, dst)\n consumer_dist = A_X['distance'] + X_B['distance']\n tour_time = A_X['trafficTime']+X_B['trafficTime']\n last_mile_dist = 2*dist(pitstop, x)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n ret.append({\"distance\" : consumer_dist,\n \"latlon\" : x,\n \"title\" : title,\n \"time\" : tour_time,\n \"address\" : addr,\n \"carbon\" : carbon_print})\n ret = sorted(ret, key=lambda loc: loc.get('distance'))\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst carbon\n consumer_dist = dist(src, dst)['distance']\n last_mile_dist = 2*dist(pitstop, dst)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst case time A - C - B\n A_C = dist(src, pitstop)\n C_B = dist(pitstop, dst)\n total_time = A_C['trafficTime'] + C_B['trafficTime']\n return {\"meetpoints\" : ret, 'worst_time' : total_time, \"worst_carbon\" : carbon_print}", "def _do_checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates.\n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Find the observations which are closest in time to our requested time.\n time_order = (abs(self.dates[w] - time)).argsort()\n date1 = self.dates[w][time_order[0]]\n date2 = self.dates[w][time_order[1]]\n weather1 = self.weather[w][time_order[0]]\n weather2 = self.weather[w][time_order[1]]\n # Do interpolation for weather at this particular time.\n weather = (weather2 - weather1) / (date2 - date1) * (time - date1) + weather1\n return weather, weather1", "def combineGPSandPhoneStops(arg):\r\n\r\n # unpack parameters\r\n user_gps, user_cell, dur_constr, spat_constr_gps, spat_cell_split = arg\r\n\r\n # combine cellular stay if it is close to a gps stay\r\n cell_stays = list(set([(trace[6],trace[7]) for d in user_cell for trace in user_cell[d] if int(trace[9]) >= dur_constr]))\r\n gps_stays = list(set([(trace[6],trace[7]) for d in user_gps for trace in user_gps[d] if int(trace[9]) >= dur_constr]))\r\n pairs_close = set()\r\n for cell_stay in cell_stays:\r\n for gps_stay in gps_stays:\r\n if distance(cell_stay[0],cell_stay[1],gps_stay[0],gps_stay[1]) <= spat_constr_gps:\r\n pairs_close.add((gps_stay[0],gps_stay[1],cell_stay[0],cell_stay[1]))\r\n break\r\n # find all pair[1]s in list, and replace it with pair[0]\r\n for pair in list(pairs_close):\r\n for d in user_cell.keys():\r\n for trace in user_cell[d]:\r\n if trace[6] == pair[2] and trace[7] == pair[3]:\r\n trace[5], trace[6], trace[7] = 99, pair[0], pair[1] #pretend as gps\r\n\r\n user = user_gps\r\n for d in user.keys():\r\n if len(user_cell[d]):\r\n user[d].extend(user_cell[d])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr) #OscillationPairList = oscillation_h1_oscill(user, dur_constr)\r\n # ## when replaced, can only replaced with a gps stay; so let modify exchange ping-pong pair in the 
pairList\r\n # gpslist_temp = {(trace[6], trace[7]):int(trace[5]) for d in user.keys() for trace in user[d]}\r\n # for pair_i in range(len(OscillationPairList)):\r\n # if gpslist_temp[(OscillationPairList[pair_i][0],OscillationPairList[pair_i][1])] <= spat_constr_gps:# wrong(2,3)\r\n # OscillationPairList[pair_i] = [OscillationPairList[pair_i][2],OscillationPairList[pair_i][3],\r\n # OscillationPairList[pair_i][0],OscillationPairList[pair_i][1]]\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n for d in user:\r\n phone_index = [k for k in range(len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) == 0: # if no phone trace\r\n continue\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr: # passing phone observ\r\n user[d][i].append('checked')\r\n # combine consecutive obsv on a phone stay into two observ\r\n i = min(phone_index) # i has to be a phone index\r\n j = i + 1\r\n while i < len(user[d]) - 1:\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n break\r\n if int(user[d][j][5]) > spat_cell_split and user[d][j][6] == user[d][i][6] \\\r\n and user[d][j][7] == user[d][i][7] and j < len(user[d]):\r\n j += 1\r\n else:\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n phone_index = [k for k in range(j, len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) < 3: # if no phone trace\r\n break\r\n i = min(phone_index) ##i has to be a phone index\r\n j = i + 1\r\n i = 0 # remove []\r\n while i < len(user[d]):\r\n if len(user[d][i]) == 0:\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # adress phone stay one by one\r\n flag_changed = True\r\n phone_list_check = []\r\n while (flag_changed):\r\n # print('while........')\r\n flag_changed = False\r\n gps_list = []\r\n phone_list = []\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) <= spat_cell_split:#or user[d][i][2] == 'addedphonestay': #changed on 0428\r\n gps_list.append(user[d][i])\r\n else:\r\n phone_list.append(user[d][i])\r\n\r\n phone_list.extend(phone_list_check)\r\n # when updating duration for phone stay, we have to put back passing obs\r\n phone_list = sorted(phone_list, key=itemgetter(0))\r\n # update phone stay\r\n i = 0\r\n j = i\r\n while i < len(phone_list):\r\n if j >= len(phone_list): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n # we don't want to change a pssing into a stay; as we have not process the combine this stay\r\n # this is possible when a stay that prevents two passing is mergeed into gps as gps points\r\n phone_list[k][9] = dur\r\n break\r\n if phone_list[j][6] == phone_list[i][6] and phone_list[j][7] == phone_list[i][7] and j < len(\r\n phone_list):\r\n j += 1\r\n else:\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n phone_list[k][9] = 
dur\r\n i = j\r\n for trace in phone_list: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(phone_list) == 1: phone_list[0][9] = -1\r\n\r\n # update check lable\r\n for i in range(len(phone_list)):\r\n if int(phone_list[i][5]) > spat_cell_split and int(phone_list[i][9]) < dur_constr \\\r\n and phone_list[i][-1] != 'checked':\r\n # passing phone observ\r\n phone_list[i].append('checked')\r\n\r\n # put those not checked together with gps\r\n user[d] = gps_list\r\n phone_list_check = []\r\n for i in range(len(phone_list)):\r\n if phone_list[i][-1] == 'checked':\r\n phone_list_check.append(phone_list[i])\r\n else:\r\n user[d].append(phone_list[i])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # find a stay which is not checked\r\n flag_phonestay_notchecked = False\r\n phonestay_left, phonestay_right = -1, -1\r\n for i in range(max(0, phonestay_right+1), len(user[d])):\r\n phonestay_left, phonestay_right = -1, -1\r\n if int(user[d][i][5]) > spat_cell_split \\\r\n and int(user[d][i][9]) >= dur_constr and user[d][i][-1] != 'checked':\r\n phonestay_left = phonestay_right\r\n phonestay_right = i\r\n if phonestay_left != -1 and phonestay_right != -1 \\\r\n and user[d][phonestay_left][9] == user[d][phonestay_right][9]:\r\n flag_phonestay_notchecked = True\r\n\r\n ## modified on 04152019\r\n if flag_phonestay_notchecked == False or len(phone_list) == 0: # if all phone observation are checked, end\r\n break\r\n # if they are not two consecutive observation\r\n if phonestay_right != phonestay_left + 1: # attention: only phonestay_left is addressed\r\n # not consecutive two observations\r\n if any([int(user[d][j][9]) >= dur_constr for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # found a gps stay in betw\r\n # print('23: found a gps stay in betw, just use one gps stay trade one phone stay')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # find close gps\r\n # print('24: do not found a gps stay in betw')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n if all([(phone_uncernt + int(user[d][j][5])) > 1000 * distance(user[d][j][3], user[d][j][4],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # total uncerty larger than distance\r\n # this case should be rare, as those close gps may be clustered\r\n # print('241: all gps falling betw are close with phone stay')\r\n temp = user[d][phonestay_left][3:] # copy neighbor gps\r\n user[d][phonestay_left][3:] = user[d][phonestay_left + 1][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n flag_changed = True\r\n else:\r\n # print('242: find a gps in betw,\r\n # which is far away with phone stay, contradic with a stay (with phone obsv)')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # if they are two consecutive traces\r\n # two consecutive observation\r\n # if phonestay_left 
!= 0 and phonestay_right < len(user[d]) - 1:\r\n # ignore if they are at the beginning or the end of traj\r\n prev_gps = next_gps = 0 # find prevous and next gps\r\n found_prev_gps = False\r\n found_next_gps = False\r\n for prev in range(phonestay_left - 1, -1, -1):\r\n # if int(user[d][prev][5]) <= spat_cell_split: ########## changed on 04282018\r\n if int(user[d][prev][5]) <= spat_cell_split and int(user[d][prev][9]) >= dur_constr:\r\n prev_gps = prev\r\n found_prev_gps = True\r\n break\r\n for nxt in range(phonestay_right + 1, len(user[d])):\r\n if int(user[d][nxt][5]) <= spat_cell_split and int(user[d][nxt][9]) >= dur_constr:\r\n next_gps = nxt\r\n found_next_gps = True\r\n break\r\n\r\n if found_prev_gps and found_next_gps and user[d][prev_gps][6] == user[d][next_gps][6]:\r\n # this is a phone stay within a gps stay\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 \\\r\n and dist > 1000*spat_constr_gps and speed_dep < 200 and speed_retn < 200:\r\n # print('1111: distance larger than acc, and can travel, add phone stay, shorten gps stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else: # merge into gps stay\r\n # print('1112: distance less than acc, or cannot travel, merge into gps stay')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_prev_gps and found_next_gps and user[d][prev_gps][6] != user[d][next_gps][6]:\r\n phone_uncernt_l = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_l = int(user[d][prev_gps][8])\r\n dist_l = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist_l - phone_uncernt_l - gps_uncernt_l) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n phone_uncernt_r = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_r = int(user[d][next_gps][8])\r\n dist_r = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist_r - phone_uncernt_r - gps_uncernt_r) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n comb_l = 0 #revised on 03202019 to pick up one 
gps stay to combine with; if spatial conti with multi\r\n comb_r = 0\r\n if (dist_l - phone_uncernt_l - gps_uncernt_l) < 0 \\\r\n or dist_l < 1000*spat_constr_gps or speed_dep > 200:\r\n comb_l = 1\r\n if (dist_r - phone_uncernt_r - gps_uncernt_r) < 0 \\\r\n or dist_r < 1000 * spat_constr_gps or speed_retn > 200:\r\n comb_r = 1\r\n if comb_l*comb_r == 1:\r\n if dist_l < dist_r:\r\n comb_r = 0\r\n else:\r\n comb_l = 0\r\n if comb_l:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif comb_r:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else:\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n elif found_prev_gps: # a gps stay #right# before\r\n # print('113: before phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_dep < 200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1132: dist>low_acc, add phone stay')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:\r\n # print('1131: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_next_gps: # a gps stay #right# after\r\n # print('112: after phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][next_gps][8])\r\n dist = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - 
int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_retn<200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1122: dist>low_acc, add phone stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:# remain phone observ, but use gps location\r\n # print('1121: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else: # if don't match any case, just add it\r\n # print('donot match any case, just add it (e.g., consecutive two phone stays)')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n\r\n\r\n # user[d].extend(phone_list_check)\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace[:])\r\n # remove passingby cellular traces\r\n i = 0\r\n while i<len(user[d]):\r\n if user[d][i][5] == 99 and float(user[d][i][9]) < dur_constr:\r\n del user[d][i]\r\n else:\r\n i+=1\r\n # remove passing traces\r\n ## Flag_changed = True\r\n ## while (Flag_changed):\r\n ## Flag_changed = False\r\n # i = 0\r\n # while i < len(user[d]):\r\n # if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr:\r\n # # Flag_changed = True\r\n # del user[d][i]\r\n # else:\r\n # i += 1\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n # update duration\r\n i = 0\r\n j = i\r\n while i < len(user[d]):\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n break\r\n if user[d][j][6] == user[d][i][6] and user[d][j][7] == user[d][i][7] and j < len(\r\n user[d]):\r\n j += 1\r\n else:\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n i = j\r\n for trace in user[d]: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(user[d]) == 1: user[d][0][9] = -1\r\n # remove and add back; because phone stays are distroyed as multiple, should be combined as one\r\n i = 0\r\n while i < len(user[d]):\r\n if user[d][i][2] == 'addedphonestay':\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # add back and sort\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace)\r\n\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # remove temp marks\r\n user[d]=[trace[:12] for trace in user[d]]\r\n\r\n # oscillation\r\n # modify grid\r\n for day in user.keys():\r\n for trace in user[day]:\r\n if float(trace[6]) == -1:\r\n found_stay = False\r\n if found_stay == False:\r\n trace[6] = trace[3] + '000' # in case do not have enough 
digits\r\n trace[7] = trace[4] + '000'\r\n digits = (trace[6].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2]) / 2)\r\n trace[6] = (trace[6].split('.'))[0] + '.' + digits\r\n # trace[6] = trace[6][:5] + str(int(trace[6][5]) / 2) # 49.950 to 49.952 220 meters\r\n digits = (trace[7].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2:4]) / 25)\r\n trace[7] = (trace[7].split('.'))[0] + '.' + digits\r\n # trace[7] = trace[7][:7] + str(int(trace[7][7:9]) / 25) # -122.3400 to -122.3425 180 meters\r\n\r\n # added to address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr)\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n # end addressing oscillation\r\n # those newly added stays should be combined with close stays\r\n user = cluster_incremental(user, spat_constr_gps, dur_constr=dur_constr)\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n # use only one record for one stay\r\n for d in user:\r\n i = 0\r\n while i < len(user[d]) - 1:\r\n if user[d][i + 1][6] == user[d][i][6] and user[d][i + 1][7] == user[d][i][7] \\\r\n and user[d][i + 1][9] == user[d][i][9] and int(user[d][i][9]) >= dur_constr:\r\n del user[d][i + 1]\r\n else:\r\n i += 1\r\n # mark stay\r\n staylist = set() # get unique staylist\r\n for d in user.keys():\r\n for trace in user[d]:\r\n if float(trace[9]) >= dur_constr:\r\n staylist.add((trace[6], trace[7]))\r\n else: # change back keep full trajectory: do not use center for those are not stays\r\n trace[6], trace[7], trace[8], trace[9] = -1, -1, -1, -1 # for non stay, do not give center\r\n staylist = list(staylist)\r\n for d in user.keys():\r\n for trace in user[d]:\r\n for i in range(len(staylist)):\r\n if trace[6] == staylist[i][0] and trace[7] == staylist[i][1]:\r\n trace[10] = 'stay' + str(i)\r\n break\r\n\r\n return user", "def test_exact_matches(self):\n idw = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"idw\"\n )\n nearest = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"nearest\"\n )\n assert abs(idw.efth - nearest.efth).max() == 0", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = 
requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. 
Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. 
Have nive trip!!!')", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def observation_for_closest(\n lat: float, lon: float, lang: str = _DEFAULT_LANG, num_stations_to_try: int = 3\n) -> Tuple[Dict, Dict]:\n assert lang in _SUPPORTED_LANGS\n\n stations = closest_stations(lat, lon, limit=num_stations_to_try)\n for s in stations:\n o = observation_for_station(s[\"id\"], lang=lang)\n if o[\"results\"] and not o[\"results\"][0].get(\"err\") and o[\"results\"][0][\"valid\"]:\n return o, s\n return observation_for_station(stations[0][\"id\"], lang=lang), stations[0]", "def gnss_satellite_position(dset: \"Dataset\") -> None:\n file_path = config.files.path(\"output_satellite_position\", file_vars={**dset.vars, **dset.analysis})\n\n # Add date field to dataset\n if \"date\" not in dset.fields:\n dset.add_text(\"date\", val=[d.strftime(\"%Y/%m/%d %H:%M:%S\") for d in dset.time.datetime], write_level=\"detail\")\n \n # Add fields in case of broadcast ephemeris\n if \"broadcast\" in config.tech.apriori_orbit.list:\n if not \"trans_time_gpsweek\" in dset.fields:\n dset.add_text(\n \"trans_time_gpsweek\",\n val=[\n f\"{t.gps_ws.week:04.0f}{t.gps_ws.day:1.0f}:{t.gps_ws.seconds:06.0f}\" for t in dset.used_transmission_time\n ],\n write_level=\"detail\",\n )\n if not \"toe_gpsweek\" in dset.fields:\n dset.add_text(\n \"toe_gpsweek\",\n val=[f\"{t.gps_ws.week:04.0f}{t.gps_ws.day:1.0f}:{t.gps_ws.seconds:06.0f}\" for t in dset.used_toe],\n write_level=\"detail\",\n )\n if not \"diff_trans_toe\" in dset.fields:\n dset.add_float(\n \"diff_trans_toe\",\n val=(dset.used_transmission_time.gps.mjd - dset.used_toe.gps.mjd) * Unit.day2second,\n unit=\"second\", \n write_level=\"detail\",\n )\n if not \"age_of_ephemeris\" in dset.fields:\n dset.add_float(\n \"age_of_ephemeris\",\n val=(dset.time.gps.mjd - dset.used_toe.gps.mjd) * Unit.day2second,\n unit=\"second\", \n write_level=\"detail\",\n )\n \n # Select fields available in Dataset\n fields = get_existing_fields(dset, FIELDS)\n\n # Put together fields in an array as specified by the 'dtype' tuple list\n output_list = list(zip(*(get_field(dset, f.field, f.attrs, f.unit) for f in fields)))\n output_array = np.array(output_list, dtype=[(f.name, f.dtype) for f in fields])\n \n # Write to disk\n header = get_header(\n fields,\n pgm_version=f\"where {where.__version__}\",\n run_by=util.get_user_info()[\"inst_abbreviation\"] if \"inst_abbreviation\" in util.get_user_info() else \"\",\n summary=\"GNSS satellite position results\",\n )\n np.savetxt(\n file_path,\n output_array,\n fmt=tuple(f.format for f in fields),\n header=header,\n delimiter=\"\",\n encoding=\"utf8\",\n )", "def update_rainfall_obs(target_model, method, timestep, start_time, end_time):\n obs_start = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n try:\n\n # Connect to the database\n curw_obs_pool = get_Pool(host=con_params.CURW_OBS_HOST, user=con_params.CURW_OBS_USERNAME,\n password=con_params.CURW_OBS_PASSWORD, port=con_params.CURW_OBS_PORT,\n db=con_params.CURW_OBS_DATABASE)\n\n curw_obs_connection = curw_obs_pool.connection()\n\n curw_sim_pool = get_Pool(host=con_params.CURW_SIM_HOST, user=con_params.CURW_SIM_USERNAME,\n password=con_params.CURW_SIM_PASSWORD, port=con_params.CURW_SIM_PORT,\n db=con_params.CURW_SIM_DATABASE)\n\n TS = Timeseries(pool=curw_sim_pool)\n\n # [hash_id, station_id, station_name, latitude, longitude]\n active_obs_stations = extract_active_curw_obs_rainfall_stations(start_time=start_time, 
end_time=end_time)[1:]\n obs_stations_dict = { } # keys: obs station id , value: [hash id, name, latitude, longitude]\n\n for obs_index in range(len(active_obs_stations)):\n obs_stations_dict[active_obs_stations[obs_index][1]] = [active_obs_stations[obs_index][0],\n active_obs_stations[obs_index][2],\n active_obs_stations[obs_index][3],\n active_obs_stations[obs_index][4]]\n\n for obs_id in obs_stations_dict.keys():\n meta_data = {\n 'latitude': float('%.6f' % float(obs_stations_dict.get(obs_id)[2])),\n 'longitude': float('%.6f' % float(obs_stations_dict.get(obs_id)[3])),\n 'model': target_model, 'method': method,\n 'grid_id': 'rainfall_{}_{}'.format(obs_id, obs_stations_dict.get(obs_id)[1])\n }\n\n tms_id = TS.get_timeseries_id_if_exists(meta_data=meta_data)\n\n if tms_id is None:\n tms_id = TS.generate_timeseries_id(meta_data=meta_data)\n meta_data['id'] = tms_id\n TS.insert_run(meta_data=meta_data)\n\n TS.update_grid_id(id_=tms_id, grid_id=meta_data['grid_id'])\n\n obs_hash_id = obs_stations_dict.get(obs_id)[0]\n\n obs_timeseries = []\n\n if timestep == 5:\n ts = extract_obs_rain_5_min_ts(connection=curw_obs_connection, start_time=obs_start, end_time=end_time,\n id=obs_hash_id)\n if ts is not None and len(ts) > 1:\n obs_timeseries.extend(process_5_min_ts(newly_extracted_timeseries=ts, expected_start=obs_start)[1:])\n # obs_start = ts[-1][0]\n elif timestep == 15:\n ts = extract_obs_rain_15_min_ts(connection=curw_obs_connection, start_time=obs_start, end_time=end_time,\n id=obs_hash_id)\n if ts is not None and len(ts) > 1:\n obs_timeseries.extend(process_15_min_ts(newly_extracted_timeseries=ts, expected_start=obs_start)[1:])\n # obs_start = ts[-1][0]\n\n # for i in range(len(obs_timeseries)):\n # if obs_timeseries[i][1] == -99999:\n # obs_timeseries[i][1] = 0\n\n if obs_timeseries is not None and len(obs_timeseries) > 0:\n TS.replace_data(timeseries=obs_timeseries, tms_id=tms_id)\n\n except Exception as e:\n traceback.print_exc()\n logger.error(\"Exception occurred while updating obs rainfalls in curw_sim.\")\n finally:\n curw_obs_connection.close()\n destroy_Pool(pool=curw_sim_pool)\n destroy_Pool(pool=curw_obs_pool)", "def match_static_data(self, static_table):\n stationIDs = static_table[0][\"number\"]\n# if not numpy.array_equal(stationIDs, self.number):\n# print \"ERROR in new data! 
Bad station ID numbers\"\n indices = []\n for i in range(len(stationIDs)):\n if(stationIDs[i] == 0): continue\n w = numpy.where(self.number == stationIDs[i])[0]\n nmatch = len(w)\n if nmatch > 1:\n print \"TOO MANY MATCHES?\", nmatch, stationIDs[i]\n jmatch = w[0]\n if nmatch == 1:\n jmatch = w[0]\n else:\n jmatch = -1\n indices.append(jmatch)\n\n # Add blanks to fill out the array to length \"glob.n_stations\".\n # Blank entries will be assigned index -1\n indices = numpy.array(indices)\n print \"TESTING INDIC 0\", len(indices), glob.n_stations - len(indices)\n indices = numpy.append(indices, \n numpy.zeros(glob.n_stations-len(indices),\n dtype='int') - 1)\n\n\n return indices", "def interp_to_site(lon, lat, data, tolat, tolon):\n wlon = numpy.where((lon[:-1] <= tolon)&(lon[1:]>tolon))\n wlon = wlon[0][0]\n wlat = numpy.where((lat[:-1] <= tolat)&(lat[1:]>tolat))\n wlat = wlat[0][0]\n \n p = data[:,wlat:wlat+2,wlon:wlon+2]\n fx = (tolon-lon[wlon])/(lon[wlon+1]-lon[wlon])\n fy = (tolat-lat[wlat])/(lat[wlat+1]-lat[wlat])\n a = p[:,0,0]*(1-fx) + p[:,0,1]*fx\n b = p[:,1,0]*(1-fx) + p[:,1,1]*fx\n c = a*(1-fy) + b*fy\n\n return c", "def test_lat_not_loc_1(self):\n patient = Semiology('lat_not_loc', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, num_query_loc = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries as these are from the CL/IL zeros:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n # now we know only the CL data remains in this dummy data, which is on the RIGHT.\n gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 0)\n assert(lat_not_loc_result['Lateralising'].sum() == 1)\n\n # all_combined_gifs assertions\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == False\n )\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # test MTG on right 155 gif # gives 1:\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert 156 not in heatmap # left\n assert heatmap[155] == 1 # right", "def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1", "def station_from_lat_lon(lat, lon, stations, n_nearest=3):\n lat, lon = float(lat), float(lon)\n distances = [(distance(lat, lon, st['lat'], st['lon']), st)\n for st in stations\n if (st['is_renting'] and st['is_installed'])]\n distances = sorted(distances)\n return [pair[1] for pair in distances[:n_nearest]]", "def import_stations(time_res='hourly',time_format='%Y%m%d%H',\r\n campaign_time=[datetime(2018,12,9), datetime(2018,12,12)],\r\n data_category='air_temperature', station_ids=['00044','00091'],\r\n dbase_dir='dbase', table_dir='tables',Output=True,\r\n memory_save=True):\r\n timeranges=['recent','historical']\r\n #%%load the 
datasets available at each timestep\r\n dwd_datasets_meta=dwd_datasets_meta=json.load(open(table_dir+\"\\\\dwd_station_meta.txt\"))\r\n #try to get a variable from the category, otherwise use interpolation of higher frequency data\r\n resample_frequency=None\r\n time_res_dbase=time_res\r\n try:\r\n dwd_datasets_meta[time_res][data_category]\r\n except Exception:\r\n if time_res=='daily':\r\n try:\r\n dwd_datasets_meta['hourly'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of hourly data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='hourly'\r\n except Exception:\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of 10_minutes data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n if time_res=='hourly':\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, hourly_mean of 10_minutes data used instead')\r\n resample_frequency='H'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n \r\n \r\n #%% download from dwd if necessary\r\n #connect to server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #get the mean time of the campaign\r\n date_mean=campaign_time[0]+(campaign_time[1]-campaign_time[0])/2 \r\n # load the inititial ds\r\n dbase_path=dbase_dir+'\\\\db_stations_'+time_res+'_'+data_category+'.nc'\r\n if os.path.exists(dbase_path):\r\n with xr.open_dataset(dbase_path) as dwd_dbase:\r\n dwd_dbase.load()\r\n print('Existing database imported')\r\n #get the non_nans stations\r\n current_stations=np.array(dwd_dbase[list(dwd_dbase.keys())[0]].sel(time=date_mean,method='nearest').dropna('STATIONS_ID').coords['STATIONS_ID'])\r\n else:\r\n print(dbase_path, 'does not exist, we create a new netcdf_file')\r\n dwd_dbase=xr.Dataset()\r\n current_stations=np.array((-9999)).reshape(1)\r\n #change directory on server\r\n for timerange in timeranges:\r\n archive_url='/climate_environment/CDC/observations_germany/climate/'+time_res_dbase+'/'+data_category+'/'+timerange \r\n ftp.cwd(archive_url)\r\n #get the archive\r\n for station_id in station_ids:\r\n #we check whether the station is in the database with this parameter already\r\n if int(station_id) in current_stations:\r\n print('Station', station_id, 'with category', data_category,'in ',timerange,'dbase already')\r\n continue\r\n try:\r\n archive_name=[s for s in ftp.nlst() if station_id in s][0]\r\n except:\r\n print('No ',timerange,'data for station',station_id)\r\n continue\r\n print('Retrieving {}...'.format(archive_name))\r\n retrieved = False\r\n archive = io.BytesIO()\r\n # try to retrieve file\r\n while not retrieved:\r\n try:\r\n ftp.retrbinary(\"RETR \" + archive_name, archive.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(archive_url)\r\n archive.seek(0)\r\n with ZipFile(archive) as myzip:\r\n for f in myzip.infolist():\r\n # This is the data file\r\n #print('zip content:', f.filename)\r\n if f.filename.startswith('produkt_'):\r\n product = io.StringIO(str(myzip.read(f.filename),'utf-8'))\r\n #get dataframe from product \r\n dwd_product=pd.read_csv(product,sep=';',skipinitialspace=True)\r\n #get datetime\r\n 
dwd_product['time']=pd.to_datetime(dwd_product['MESS_DATUM'],format=time_format) \r\n dwd_product=dwd_product.rename(columns=dwd_datasets_meta[time_res_dbase][data_category])\r\n dwd_product=dwd_product.reset_index()\r\n dwd_product=dwd_product.set_index(['time','STATIONS_ID'])\r\n dwd_product=dwd_product.drop(columns=['MESS_DATUM','quality_level_of_next_columns','end_of_record','index'])\r\n #append to database\r\n dwd_xr=dwd_product.to_xarray()\r\n #replace all values equal to -999 to nan\r\n for data_var in dwd_xr.data_vars:\r\n dwd_xr[data_var]=dwd_xr[data_var].where(dwd_xr[data_var]>-999)\r\n if station_id=='05009':\r\n print('ok') \r\n #only add relevant dates if available memoryis rather small\r\n \r\n if memory_save and timerange=='historical':\r\n dwd_xr=dwd_xr.sel(time=slice(campaign_time[0]-timedelta(days=1),campaign_time[1]+timedelta(days=1)))\r\n #dwd_xr=dwd_xr.squeeze()\r\n \r\n try:\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr])\r\n except Exception as e:\r\n print(e)\r\n print('try merging with compat=override')\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr],compat='override')\r\n print(archive_name,' added to database')\r\n #upscale to required temporal resolution\r\n if resample_frequency is not None:\r\n dwd_dbase=dwd_dbase.resample(time=resample_frequency).mean(skipna=True)\r\n print('DWD data upscaled to',time_res,'averages')\r\n if Output==True:\r\n dwd_dbase.to_netcdf(dbase_path)\r\n print('Updated database' ,dbase_path)\r\n return dwd_dbase", "def fix_map_exceptions(stations, addresses, lines):\n for i in range(0, len(stations)):\n station = stations[i]\n address = addresses[i]\n curlines = lines[i]\n\n if station == \"Wtc - Cortlandt\" or station == \"Park Place Station\" or station == \"World Trade Center\":\n stations[i] = \"World Trade Center\"\n addresses[i] = \"79 Church St\"\n lines[i] = \"1,2,3,A,C,E,N,Q,R,W\"\n if station == \"51 St\" or station == \"Lexington Av/53 St\":\n stations[i] = \"Lexington Av/53 St\"\n addresses[i] = \"201 East 53rd St\"\n lines[i] = \"4,6,6X,E,M\"\n if station == \"Lexington Av/63 St\" or station == \"Lexington Av / 59 St\":\n stations[i] = \"Lexington Av / 59 St\"\n addresses[i] = \"743 Lexington Ave\"\n lines[i] = \"4,5,6,F,N,Q,R\"\n if station == \"Broadway-Lafayette St\" or station == \"Bleecker St\":\n stations[i] = \"Bleecker St\"\n addresses[i] = \"338 Lafayette Street\"\n lines[i] = \"4,6,6X,B,D,F,M\"\n if station == \"E 180th\":\n lines[i] = \"2,5\"\n if station == \"61 St\":\n stations[i] = \"New Utrecht Av\"\n addresses[i] = \"1462 62nd St\"\n lines[i] = \"D,N,W\"\n if station == \"Canal St\" and address == \"257 Canal Street\":\n lines[i] = \"N,Q,R,J,Z,4,6\"\n if station == \"East 174 Street Station Subway\":\n lines[i] = \"2,5\"\n if station == \"Jay St - Metrotech\":\n lines[i] = \"A,C,F,N,Q,R\"\n if station == \"Court St\":\n lines[i] = \"N,Q,R\"\n if station == \"Rector St\" and address == \"33 Trinity Place\":\n lines[i] = \"N,Q,R\"\n if station == \"City Hall\":\n lines[i] = \"N,Q,R\"\n if station == \"Whitehall St\":\n lines[i] = \"N,Q,R,W\"\n if station == \"45 St\":\n lines[i] == \"N,R\"\n\n\n return stations, addresses, lines", "def test_nearest_neighbour_regular_1d():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20:22, 10:12] = 7\n\n # the four nearest values for the second point\n data[17:19, 13:15] = 8\n\n # the actual test\n res = 
enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lat, lon)\n data2 = np.zeros((10, 50, 100))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with only one neighbour or only one target point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data)\n np.testing.assert_array_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 13.2, 17.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(8, 18, 1).reshape(10, 1))", "def test_test_nearest_neighbour_dmean():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20, 10] = 7\n\n # the four nearest values for the second point\n data[17, 13] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10, 13), (20, 17), npoints=2, method=\"d-mean\")(data)\n np.testing.assert_array_almost_equal(res, [5.6, 6.4])", "def GetAllLocalSensors(sm_dict, northing, easting, current_time):\n current_SM_dict = {\"sm_sensor\" : [], \"dist\" : [], \"sm_val\" : []}; SM_ind = -99\n for sensor in sm_dict.keys():\n if SM_ind < -1: SM_ind = Precip.GetLastPrecipInd(sm_dict[sensor]['SM_df'], current_time, 'Year', 'DOY') #only need this once \n current_SM_dict['sm_sensor'].append(sensor); current_SM_dict['sm_val'].append(sm_dict[sensor]['SM_df']['SM'][SM_ind])\n current_SM_dict['dist'].append(math.sqrt((sm_dict[sensor]['Northing'] - northing)**2 + (sm_dict[sensor]['Easting'] - easting)**2))\n return pd.DataFrame(current_SM_dict)", "def get_storm_velocities(\n storm_object_table,\n min_time_difference_sec=DEFAULT_MIN_VELOCITY_TIME_SEC,\n max_time_difference_sec=DEFAULT_MAX_VELOCITY_TIME_SEC,\n test_mode=False):\n\n error_checking.assert_is_integer(min_time_difference_sec)\n error_checking.assert_is_integer(max_time_difference_sec)\n error_checking.assert_is_geq(\n max_time_difference_sec, min_time_difference_sec)\n\n error_checking.assert_is_boolean(test_mode)\n\n num_storm_objects = len(storm_object_table.index)\n east_velocities_m_s01 = numpy.full(num_storm_objects, numpy.nan)\n north_velocities_m_s01 = numpy.full(num_storm_objects, numpy.nan)\n\n for i in range(num_storm_objects):\n if numpy.mod(i, 100) == 0:\n print('Found velocity for {0:d} of {1:d} storm objects...'.format(\n i, num_storm_objects))\n\n these_predecessor_rows = find_predecessors(\n storm_object_table=storm_object_table, target_row=i,\n num_seconds_back=max_time_difference_sec)\n\n if len(these_predecessor_rows) == 0:\n continue\n\n these_time_diffs_seconds = (\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[i] -\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_predecessor_rows]\n )\n\n these_subrows = numpy.where(\n these_time_diffs_seconds >= min_time_difference_sec\n )[0]\n\n if len(these_subrows) == 0:\n continue\n\n these_time_diffs_seconds = 
these_time_diffs_seconds[these_subrows]\n these_predecessor_rows = these_predecessor_rows[these_subrows]\n this_num_predecessors = len(these_predecessor_rows)\n\n this_end_latitude_deg = storm_object_table[\n tracking_utils.CENTROID_LATITUDE_COLUMN].values[i]\n this_end_longitude_deg = storm_object_table[\n tracking_utils.CENTROID_LONGITUDE_COLUMN].values[i]\n\n these_east_displacements_metres = numpy.full(\n this_num_predecessors, numpy.nan)\n these_north_displacements_metres = numpy.full(\n this_num_predecessors, numpy.nan)\n\n for j in range(this_num_predecessors):\n this_start_latitude_deg = storm_object_table[\n tracking_utils.CENTROID_LATITUDE_COLUMN\n ].values[these_predecessor_rows[j]]\n\n this_start_longitude_deg = storm_object_table[\n tracking_utils.CENTROID_LONGITUDE_COLUMN\n ].values[these_predecessor_rows[j]]\n\n if test_mode:\n these_east_displacements_metres[j] = (\n this_end_longitude_deg - this_start_longitude_deg\n )\n these_north_displacements_metres[j] = (\n this_end_latitude_deg - this_start_latitude_deg\n )\n else:\n these_east_displacements_metres[j] = geodesic(\n (this_start_latitude_deg, this_start_longitude_deg),\n (this_start_latitude_deg, this_end_longitude_deg)\n ).meters\n\n these_north_displacements_metres[j] = geodesic(\n (this_start_latitude_deg, this_start_longitude_deg),\n (this_end_latitude_deg, this_start_longitude_deg)\n ).meters\n\n east_velocities_m_s01[i] = numpy.mean(\n these_east_displacements_metres / these_time_diffs_seconds\n )\n north_velocities_m_s01[i] = numpy.mean(\n these_north_displacements_metres / these_time_diffs_seconds\n )\n\n storm_object_table = storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n return _get_storm_velocities_missing(storm_object_table=storm_object_table)", "def GetTimeIntervalsForEachStation(PathInfo):\r\n\tTimeIntervalAtStation = {}\r\n\r\n\tif not PathInfo: return TimeIntervalAtStation\r\n\tif len(PathInfo) < 2: return TimeIntervalAtStation\r\n\r\n\tfor i in range(1, len(PathInfo)):\r\n\t\tConnection1 = PathInfo[i-1]\r\n\t\tConnection2 = PathInfo[i]\r\n\r\n\t\tTripID1 = Connection1[ConnInfoInd['travel_id']]\r\n\t\tTripID2 = Connection2[ConnInfoInd['travel_id']]\r\n\r\n\t\t# check if customer makes a change at station\r\n\t\t# if TripID1 == TripID2: continue\r\n\r\n\t\tstation = Connection2[ConnInfoInd['station_from']]\r\n\r\n\t\tArrivalMin = Connection1[ConnInfoInd['arrival_hour']]*60 + Connection1[ConnInfoInd['arrival_min']]\r\n\t\tDepartureMin = Connection2[ConnInfoInd['departure_hour']]*60 + Connection2[ConnInfoInd['departure_min']]\r\n\r\n\t\tif TimeIntervalAtStation.has_key(station):\r\n\t\t\tTimeIntervalAtStation[station].append((ArrivalMin, DepartureMin))\r\n\t\telse:\r\n\t\t\tTimeIntervalAtStation[station] = [(ArrivalMin, DepartureMin)]\r\n\treturn TimeIntervalAtStation", "def update_conditions(self) -> None:\n self.log.debug(\"Updating conditions.\")\n\n self.models[\"sky\"].update(self.models[\"observatory_state\"].time)\n\n if self.is_night is None:\n self.log.debug(\"Driver not initialized yet. Computing night parameters.\")\n # Driver was not initialized yet. 
Need to compute night\n # boundaries\n\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n self.log.debug(\n f\"Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise}, \"\n f\"sun @ {self.parameters.night_boundary} degrees.\"\n )\n\n is_night = self.is_night\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n # Only compute night boundaries when we transition from nighttime to\n # daytime. Possibilities are:\n # 1 - self.is_night=True and is_night = True: During the night (no need\n # to compute anything).\n # 2 - self.is_night=False and is_night = True: Transitioned from\n # night/day (need to recompute night boundaries).\n # 3 - self.is_night=True and is_night = False: Transitioned from\n # day/night (no need to compute anything).\n # 4 - self.is_night=False and is_night = False: During the day, no need\n # to compute anything.\n if not self.is_night and is_night:\n self.log.debug(\n \"Night over. Computing next night boundaries. \"\n f\"Assuming sun elevation of {self.parameters.night_boundary}.\"\n )\n self.night += 1\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.log.debug(\n f\"[{self.night}]: Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise} \"\n )", "def run():\n\n # Build list of stations\n stations = build_station_list()\n \n # Update latest level data for all stations\n update_water_levels(stations)\n \n # Stations at which the current relative level is over 0.8\n z= stations_level_over_threshold(stations, 0.8)\n for a in z:\n print(a[0],a[1])\n print(\".\") \n print(\".\")", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def _find_nearest_grid(self,lon,lat,period):\n\t\tgroup = self['%g_sec'%( period )]\n\t\tlonArr = group['lonArr'].value\n\t\tlatArr = group['latArr'].value\n\t\tdiff_Arr = np.dstack((lonArr, latArr)) - np.array([lon, lat]) # 3-d array ( , ,2)\n\t\tdiff_Arr[:,:,0] = diff_Arr[:,:,0] * np.cos(lat/180.*np.pi)\n\t\tdist_sq = np.sum(diff_Arr**2,axis=-1)\n\t\tind1, ind2 = np.where(dist_sq == np.min(dist_sq))\n\t\treturn ind1[0], ind2[0]", "def _calculate_secondary_vars(self):\n print(\"\\nSystem: Now calculating secondary variables based on data provided.\")\n self.data_length = self.data_year.shape[0]\n self.station_pressure = 101.3 * (((293 - (0.0065 * self.station_elev)) / 293) ** 5.26) # units kPa, EQ 3 ASCE\n\n # Calculate DOY from Y/M/D values\n self.data_doy = []\n for i in range(self.data_length):\n # Create list of string DOY values\n self.data_doy.append(dt.date(self.data_year[i], self.data_month[i], self.data_day[i]).strftime(\"%j\"))\n\n self.data_doy = np.array(list(map(int, self.data_doy))) # Converts list of string values into ints\n\n # Calculate tavg if it is not provided by dataset\n if self.column_df.tavg == -1:\n # Tavg not provided\n self.data_tavg = np.array((self.data_tmax + self.data_tmin) / 2.0)\n else:\n # Tavg is provided, no action needs to be taken\n pass\n\n # Figure out which humidity variables are provided and calculate Ea and TDew if needed\n (self.data_ea, self.data_tdew) = data_functions.\\\n calc_humidity_variables(self.data_tmax, self.data_tmin, self.data_tavg, self.data_ea, self.column_df.ea,\n 
self.data_tdew, self.column_df.tdew, self.data_rhmax, self.column_df.rhmax,\n self.data_rhmin, self.column_df.rhmin, self.data_rhavg, self.column_df.rhavg)\n\n # Calculates secondary temperature values and mean monthly counterparts\n (self.delta_t, self.mm_delta_t, self.k_not, self.mm_k_not, self.mm_tmin, self.mm_tdew) = data_functions.\\\n calc_temperature_variables(self.data_month, self.data_tmax, self.data_tmin, self.data_tdew)\n\n # Calculates rso and grass/alfalfa reference evapotranspiration from refet package\n np.warnings.filterwarnings('ignore', 'invalid value encountered') # catch invalid value warning for nans\n (self.rso, self.mm_rs, self.eto, self.etr, self.mm_eto, self.mm_etr) = data_functions.\\\n calc_rso_and_refet(self.station_lat, self.station_elev, self.ws_anemometer_height, self.data_doy,\n self.data_month, self.data_tmax, self.data_tmin, self.data_ea, self.data_ws,\n self.data_rs)\n np.warnings.resetwarnings() # reset warning filter to default\n\n #########################\n # Back up original data\n # Original data will be saved to output file\n # Values are also used to generate delta values of corrected data - original data\n self.original_df = self.data_df.copy(deep=True) # Create an unlinked copy of read-in values dataframe\n self.original_df['rso'] = self.rso\n self.original_df['etr'] = self.etr\n self.original_df['eto'] = self.eto\n\n # Create datetime variables that will be used by bokeh plot and correction functions\n self.dt_array = []\n for i in range(self.data_length):\n self.dt_array.append(dt.datetime(self.data_year[i], self.data_month[i], self.data_day[i]))\n self.dt_array = np.array(self.dt_array, dtype=np.datetime64)\n self.mm_dt_array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n self.data_null = np.empty(self.data_length) * np.nan", "def bruteforce(self):\n import time\n t1 = time.time()\n for i in range(self.td.shape[0]):\n #Get the latitude at the start of the row, this is used for the entire row\n\n if i % config.LATITUDE_STEP == 0:\n startlat = i + config.LATITUDE_STEP #move to the center of the step\n startlat += self.start #Offset for parallel segmentation\n\n # This is the latitude at the center of the tile defined by\n # the image width, and the latitude_step\n x = int(self.td.shape[1] / 2)\n y = int((startlat + config.LATITUDE_STEP) / 2)\n latitude, _ = self.temperature.pixel_to_latlon(x,y)\n\n lat_f = PchipInterpolator(self.latitudenodes, self.lookup, extrapolate=False, axis=0)\n #The reshape corresponds to the dimensions of the OLAP cube\n # 5 elevations, 5 slope azimuths, 3 slopes, 3 opacities, 3 albedos, and finally 20 TI\n data = lat_f(latitude)\n compressedlookup = data.reshape(6,5,3,3,3,20)\n # Compute the PChip interpolation function for elevation\n elevation_interp_f = PchipInterpolator(np.array([-5.0, -2.0, -1.0, 1.0, 6.0, 8.0]), compressedlookup, extrapolate=False, axis=0)\n \n for j in range(self.td.shape[1]):\n # Each interpolation is composed in 2 parts.\n # 1. The interpolation function is computed.\n # 2. 
The interpolation function is applied.\n #print(self.reference[i,j], self.r_ndv)\n # If either the reference or the input THEMIS have no data\n if (self.td[i,j] == self.ndv) or (self.reference[i,j] == self.r_ndv):\n #The pixel is no data in the input, propagate to the output\n self.resultdata[i,j] = self.ndv\n continue\n\n #Interpolate elevation\n try:\n new_elevation = elevation_interp_f(self.ed[i,j])\n except:\n # The elevation is bad.\n self.resultdata[i,j] = self.ndv\n self.log[i,j] = self.error_codes['elevation_out_of_bounds']\n continue\n #Interpolate Slope Azimuth\n slopeaz_f = self.compute_interpolation_function(sorted(self.slopeaz_lookup.keys()),\n new_elevation,\n config.SLOPEAZ_INTERPOLATION)\n new_slopeaz = slopeaz_f(self.sz[i,j])\n #Interpolate Slope\n slope_f = self.compute_interpolation_function(sorted(self.slope_lookup.keys()),\n new_slopeaz,\n config.SLOPE_INTERPOLATION)\n capped_slope = self.sd[i,j]\n if capped_slope > 60.0:\n capped_slope = 60.0\n new_slope = slope_f(capped_slope)\n # I am having problems here with pulling TAU properly - check montabone!\n #Interpolate Tau\n tau_f = PchipInterpolator(sorted(self.tau_lookup.keys()),\n new_slope,\n extrapolate=False,\n axis=0)\n new_tau = tau_f(self.od[i,j])\n #Interpolate Albedo\n albedo_f = self.compute_interpolation_function(sorted(self.albedo_lookup.keys()),\n new_tau,\n config.ALBEDO_INTERPOLATION)\n new_albedo = albedo_f(self.ad[i,j])\n #Interpolate Inertia\n self.resultdata[i,j] = self.extract_monotonic(self.td[i,j],\n new_albedo)", "def get_nearest_station(latitude, longitude):\n url1 = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\n params1 = {'api_key':'lm1M_mXgq0O6dsH9xduPAQ','lat':latitude,'lon':longitude,'format':'json'}\n req1 = requests.get(url1,params=params1)\n stat1 = req1.status_code\n stop_name = req1.json()['stop'][0]['stop_name']\n distance = req1.json()['stop'][0]['distance']\n return stop_name, distance", "def main():\n\n print(\"-------------------------\")\n print(\"| codedrome.com |\")\n print(\"| Great Circle Distance |\")\n print(\"-------------------------\\n\")\n\n starting_cities = [{\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275}]\n\n destination_cities = [{\"name\": \"Tokyo\", \"latitude1_degrees\": 35.683333, \"longitude1_degrees\": 139.683333},\n {\"name\": \"New York\", \"latitude1_degrees\": 40.7127, \"longitude1_degrees\": -74.0059},\n {\"name\": \"New Delhi\", \"latitude1_degrees\": 28.613889, \"longitude1_degrees\": 77.208889},\n {\"name\": \"Sydney\", \"latitude1_degrees\": -33.865, \"longitude1_degrees\": 151.209444},\n {\"name\": \"Cape Town\", \"latitude1_degrees\": -33.925278, \"longitude1_degrees\": 18.423889},\n {\"name\": \"Rio de Janeiro\", \"latitude1_degrees\": -22.908333, \"longitude1_degrees\": -43.196389},\n {\"name\": \"Oblivion\", \"latitude1_degrees\": 91, \"longitude1_degrees\": 360}]\n\n gc = greatcircle.GreatCircle()\n\n for i in range(0, 
len(starting_cities)):\n\n gc.name1 = starting_cities[i][\"name\"]\n gc.latitude1_degrees = starting_cities[i][\"latitude1_degrees\"]\n gc.longitude1_degrees = starting_cities[i][\"longitude1_degrees\"]\n\n gc.name2 = destination_cities[i][\"name\"]\n gc.latitude2_degrees = destination_cities[i][\"latitude1_degrees\"]\n gc.longitude2_degrees = destination_cities[i][\"longitude1_degrees\"]\n\n gc.calculate()\n\n output(gc)", "def main():\n # Constants\n groundstation_name = 'Wallops Antenna'\n groundstation_address = 'Radar Road, Temperanceville, VA 23442'\n satnum = 25544 # ISS = 25544\n saturl=\"http://www.celestrak.com/NORAD/elements/stations.txt\"\n gs_minimum_elevation_angle = 10.0\n\n # Alternate constants\n gs_alt_lat = 37.854886 # Only needed if address not found\n gs_alt_lon = -75.512936 # Ditto\n gs_alt_el_meters = 3.8 # Ditto\n gs_alt_tz_offset_seconds = -18000.0 # Ditto\n gs_tzname = 'US/Eastern'\n\n # Construct the ground station info\n try:\n # Try to use the address...\n gs = GroundStation.from_address(groundstation_address, \\\n groundstation_name, \\\n gs_minimum_elevation_angle)\n except:\n # Otherwise, use explicit location data...\n gs = GroundStation.from_location(gs_alt_lat, gs_alt_lon, \\\n gs_alt_el_meters, \\\n gs_tzname, \\\n groundstation_name, \\\n gs_minimum_elevation_angle)\n\n # Times we need\n now = datetime.now()\n gs_today = gs.get_tz().localize(datetime(now.year, now.month, now.day))\n gs_today_start = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 0, 0, 0)) \n gs_today_end = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 23, 59, 59))\n\n # Get the InviewCalculator and compute the inviews\n st = SatelliteTle(satnum, tle_url=saturl)\n ic = InviewCalculator(gs, st)\n inviews = ic.compute_inviews(gs_today_start, gs_today_end)\n\n # Print the results\n print_satellite_header(st)\n print_inview_header(gs.get_minimum_elevation_angle(), gs_today, gs)\n print_inviews(gs, inviews)\n print_azeltables(inviews, ic)", "def test_latnotloc_and_latandloc_2(self):\n patient = Semiology('lat_', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, _ = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries - should be only the IL left ones which aren't MTG of TL:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n\n gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.drop(index=156).index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 1)\n assert(lat_not_loc_result['Lateralising'].sum() == 2)\n\n # all_combined_gifs assertions\n # all except GIF 156 (L MTG) are in the right GIFs:\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == True\n )\n # assert using shape as all pt #s are 1:\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # check that latnotloc gives 1 and latandloc adds zero 
to right MTG GIF #155\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert heatmap[155] == 1 # right", "def dwn_stationarity(t_a1, t_a2):\n # See: https://arxiv.org/pdf/1302.6219.pdf, text after (3.2).\n t_x0 = (\n +4.0 * tf.einsum('mi,mjkl->ijkl', t_a1, t_a2)\n -3.0 * tf.einsum('mnij,nklm->ijkl', t_a2, t_a2))\n t_x0_real = tf.math.real(t_x0)\n t_x0_imag = tf.math.imag(t_x0)\n tc_sd = tf.constant(get_proj_35_8888(True))\n tc_asd = tf.constant(get_proj_35_8888(False))\n t_x_real_sd = tf.einsum('aijkl,ijkl->a', tc_sd, t_x0_real)\n t_x_imag_asd = tf.einsum('aijkl,ijkl->a', tc_asd, t_x0_imag)\n return (tf.einsum('a,a->', t_x_real_sd, t_x_real_sd) +\n tf.einsum('a,a->', t_x_imag_asd, t_x_imag_asd))", "def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()", "def __save_all():\n \n # Use directory listing from stilt-web data. Ignore stations that\n # may be in the queue but are not finished yet.\n allStations = [s for s in os.listdir(CPC.STILTPATH) if os.path.exists(CPC.STILTPATH + s)]\n\n \n # read lis of ICOS stations\n icosStations = cpstation.getIdList()\n icosStations = list(icosStations['id'][icosStations.theme=='AS'])\n \n # dictionary to return\n stations = {}\n\n # fill dictionary with ICOS station id, latitude, longitude and altitude\n for ist in tqdm(sorted(allStations)):\n \n stations[ist] = {}\n # get filename of link (original stiltweb directory structure) and extract location information\n \n loc_ident = os.readlink(CPC.STILTPATH+ist)\n clon = loc_ident[-13:-6]\n lon = float(clon[:-1])\n if clon[-1:] == 'W':\n lon = -lon\n clat = loc_ident[-20:-14]\n lat = float(clat[:-1])\n if clat[-1:] == 'S':\n lat = -lat\n alt = int(loc_ident[-5:])\n\n stations[ist]['lat']=lat\n stations[ist]['lon']=lon\n stations[ist]['alt']=alt\n stations[ist]['locIdent']=os.path.split(loc_ident)[-1]\n \n # set the name and id\n stations[ist]['id'] = ist\n \n # set a flag if it is an ICOS station\n stn = ist[0:3].upper()\n if stn in icosStations:\n stations[ist]['icos'] = cpstation.get(stn).info()\n lat = stations[ist]['icos']['lat']\n lon = stations[ist]['icos']['lon']\n else:\n stations[ist]['icos'] = False \n lat = stations[ist]['lat']\n lon = stations[ist]['lon']\n \n stations[ist]['geoinfo'] = country.get(latlon=[lat,lon])\n \n return stations", "def get_mesowest_radius(attime, within,\n extra='&radius=kslc,30',\n variables=default_vars,\n verbose=True):\n # Convert attime to string required for MesoWest API query.\n attime = attime.strftime(\"%Y%m%d%H%M\")\n tz = 'utc'\n\n URL = 'http://api.mesowest.net/v2/stations/nearesttime?&token=' + token \\\n + '&attime=' + attime \\\n + '&within=' + str(within) \\\n + '&obtimezone=' + tz \\\n + extra \\\n + '&vars=' + variables\n\n try:\n # Open URL and read JSON content. 
Convert JSON string to some python\n # readable format.\n f = urllib2.urlopen(URL)\n data = f.read()\n data = json.loads(data)\n\n # Store the data we will return in this new dictionary\n return_this = {'URL': URL,\n 'NAME': np.array([]),\n 'STID': np.array([]),\n 'LAT': np.array([]),\n 'LON': np.array([]),\n 'ELEVATION': np.array([]), # Note: Elevation is in feet.\n 'DATETIME': np.array([])\n }\n\n # Create a new key for each possible variable\n for v in data['UNITS'].keys():\n return_this[str(v)] = np.array([])\n\n # Since some observation times between variables for the same station\n # *could* be different, I will store the datetimes from each variable\n # with a similar name as the variable.\n return_this[str(v) + '_DATETIME'] = np.array([])\n\n for i in range(0, len(data['STATION'])):\n stn = data['STATION'][i] # this represents the station\n\n # Store basic metadata for each station in the dictionary.\n return_this['NAME'] = np.append(return_this['NAME'], str(stn['NAME']))\n return_this['STID'] = np.append(return_this['STID'], str(stn['STID']))\n return_this['LAT'] = np.append(return_this['LAT'],\n float(stn['LATITUDE']))\n return_this['LON'] = np.append(return_this['LON'],\n float(stn['LONGITUDE']))\n try:\n return_this['ELEVATION'] = np.append(return_this['ELEVATION'],\n float(stn['ELEVATION']))\n except:\n return_this['ELEVATION'] = np.append(return_this['ELEVATION'], np.nan)\n\n # Dynamically store data from each available variable.\n for v in data['UNITS'].keys():\n\n key_name = str(v) # Same as the API variable name\n set_num = 0 # Always get the first set: value_1 or value_1d\n # May need to write some exceptions to this rule\n\n try:\n # If value exists, then append with the data\n grab_this_set = str(stn['SENSOR_VARIABLES']\n [key_name].keys()[set_num])\n variable_data = float(stn['OBSERVATIONS']\n [grab_this_set]['value'])\n date_data = MWdate_to_datetime(stn['OBSERVATIONS']\n [grab_this_set]['date_time'])\n\n return_this[key_name] = \\\n np.append(return_this[key_name], variable_data)\n return_this[key_name + '_DATETIME'] = \\\n np.append(return_this[key_name + '_DATETIME'], date_data)\n\n except:\n # If it doesn't exist, then append with np.nan\n return_this[key_name] = \\\n np.append(return_this[key_name], np.nan)\n return_this[key_name + '_DATETIME'] = \\\n np.append(return_this[key_name + '_DATETIME'], np.nan)\n\n return return_this\n except:\n # If it doens't work, then return the URL for debugging.\n if verbose==True:\n print 'Errors loading:', URL\n return 'ERROR'", "def optimize(self):\n # Loop through every WD and WS individually\n wd_array = self.fi_subset.floris.flow_field.wind_directions\n ws_array = self.fi_subset.floris.flow_field.wind_speeds\n for nwsi, ws in enumerate(ws_array):\n\n self.fi_subset.reinitialize(wind_speeds=[ws])\n\n for nwdi, wd in enumerate(wd_array):\n # Find turbines to optimize\n turbs_to_opt = self._turbs_to_opt_subset[nwdi, nwsi, :]\n if not any(turbs_to_opt):\n continue # Nothing to do here: no turbines to optimize\n\n # Extract current optimization problem variables (normalized)\n yaw_lb = self._minimum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n yaw_ub = self._maximum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n bnds = [(a, b) for a, b in zip(yaw_lb, yaw_ub)]\n x0 = self._x0_subset_norm[nwdi, nwsi, turbs_to_opt]\n\n J0 = self._farm_power_baseline_subset[nwdi, nwsi]\n yaw_template = self._yaw_angles_template_subset[nwdi, nwsi, :]\n turbine_weights = self._turbine_weights_subset[nwdi, nwsi, :]\n yaw_template = 
np.tile(yaw_template, (1, 1, 1))\n turbine_weights = np.tile(turbine_weights, (1, 1, 1))\n\n # Define cost function\n def cost(x):\n x_full = np.array(yaw_template, copy=True)\n x_full[0, 0, turbs_to_opt] = x * self._normalization_length\n return (\n - 1.0 * self._calculate_farm_power(\n yaw_angles=x_full,\n wd_array=[wd],\n turbine_weights=turbine_weights\n )[0, 0] / J0\n )\n\n # Perform optimization\n residual_plant = minimize(\n fun=cost,\n x0=x0,\n bounds=bnds,\n method=self.opt_method,\n options=self.opt_options,\n )\n\n # Undo normalization/masks and save results to self\n self._farm_power_opt_subset[nwdi, nwsi] = -residual_plant.fun * J0\n self._yaw_angles_opt_subset[nwdi, nwsi, turbs_to_opt] = (\n residual_plant.x * self._normalization_length\n )\n\n # Finalize optimization, i.e., retrieve full solutions\n df_opt = self._finalize()\n return df_opt", "def nearest_neighbors(self, t, s):\n # fit to S\n nn_s = NearestNeighbors(1).fit(self.data[s])\n if t == s:\n # find distances from s to s\n d = nn_s.kneighbors()[0]\n else:\n # find distances from t to s\n d = nn_s.kneighbors(self.data[t])[0]\n return t, s, d" ]
[ "0.6480234", "0.6318352", "0.6269479", "0.6215623", "0.6206157", "0.6110717", "0.60025865", "0.5942108", "0.5934246", "0.5904729", "0.5811288", "0.5711787", "0.57106215", "0.56911725", "0.5684194", "0.5649491", "0.5626808", "0.56212485", "0.55674076", "0.5553363", "0.5552577", "0.5550375", "0.5518935", "0.5510778", "0.54994935", "0.5495334", "0.5485145", "0.54698217", "0.54626256", "0.54444784", "0.54340124", "0.5421569", "0.5400976", "0.5369203", "0.53660303", "0.5332901", "0.5325814", "0.5318677", "0.5315941", "0.52962583", "0.5295616", "0.52868176", "0.5278198", "0.527239", "0.5271548", "0.5267424", "0.52631724", "0.526147", "0.52576345", "0.5257149", "0.52567786", "0.52560264", "0.5255774", "0.52549475", "0.5235435", "0.52336425", "0.52326405", "0.5231525", "0.5229067", "0.5227798", "0.5217147", "0.52153856", "0.5210066", "0.5201921", "0.5187117", "0.5183035", "0.518232", "0.5181685", "0.51671845", "0.5161072", "0.5158338", "0.51313347", "0.5120243", "0.51150966", "0.5108852", "0.510671", "0.5106316", "0.51016515", "0.5098156", "0.5091996", "0.5090695", "0.50796986", "0.5079613", "0.5079284", "0.50776076", "0.50773984", "0.50708276", "0.5069745", "0.5067509", "0.5066863", "0.5058058", "0.50522506", "0.5043869", "0.50352603", "0.5034825", "0.50266266", "0.50228435", "0.5017625", "0.50120276", "0.5011841" ]
0.6406745
1
DESCRIPTION Write the bootstrapped in a file
def WriteDataFrames(self, Outpath):
    newdataframes = self.newdataframes
    for staname in newdataframes.keys():
        fname = staname + '.TXT'
        newdataframes[staname].to_csv(Outpath + fname, float_format="%.2f")
    print('--------------------')
    print('Writing dataframe')
    print('--------------------')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_bootstrap(self):\n\n for line in Bootstrap.BOOTSTRAP:\n self.write_line(line)\n\n self.write_call(\"Sys.init\", 0)", "def write_bootstrap(output_file, curr_function):\n # initialize stack\n output_file.write(\"@256\" + \"\\n\" +\n \"D = A\" + \"\\n\" +\n \"@SP\" + \"\\n\" +\n \"M = D\" + \"\\n\"\n )\n # call the Sys.init function\n write_call(output_file, \"Sys.init\", \"0\", curr_function)", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def write(self):\n if self.skip_bootloader: # pylint: disable=no-member\n return\n\n if self.update_only: # pylint: disable=no-member\n self.update()\n return\n\n try:\n os.sync()\n self.stage2_device.format.sync(root=util.getTargetPhysicalRoot()) # pylint: disable=no-member\n self.install()\n finally:\n self.write_config() # pylint: disable=no-member", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def _write_master(self):\n text = _condor_master_template % {\n 'meds_files':self.meds_files,\n }\n master_script=files.get_condor_master_path(self['run'])\n print('writing master:',master_script)\n with open(master_script,'w') as fobj:\n fobj.write(text)\n\n os.system('chmod 755 %s' % master_script)", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()", "def write(self, fname):\n pass", "def write_scram_toolfile(self, contents, filename):\n with open(self.spec.prefix.etc + '/scram.d/' + filename, 'w') as f:\n f.write(contents)\n f.close()", "def beginFileOutput(self):\n self._outputFilepath = self.dataSet[self._outputFileLabel]\n self._outputFile = open(self._outputFilepath, 'w')", "def sirsam_bs_out(sirsam_bootstrap, sirsam_target_path):\n return os.path.join(sirsam_bootstrap, 'out')", "def write_manifest(self):\n import time\n import sys\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n hout.write(' '.join(sys.argv) + '\\n')\n for k, v in self.table.items():\n hout.write(';'.join([k] + v) + '\\n')", "def Write(self):\n if self._project_definition.name in self._PROJECTS_WITH_PYTHON3_AS_DEFAULT:\n shebang = '#!/usr/bin/env python3'\n else:\n shebang = '#!/usr/bin/env python'\n\n template_mappings = {\n 'project_name': self._project_definition.name,\n 'shebang': shebang,\n }\n\n if self._project_definition.name == 'plaso':\n template_file = 'check_dependencies-with_url.py'\n else:\n template_file = 'check_dependencies.py'\n\n template_file = os.path.join(\n self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_file)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n 
file_object.write(file_content)", "def write(self, ext_file_action=ExtFileAction.copy_relative_paths):\n if self.simulation_data.auto_set_sizes:\n self._update_size_defs()\n\n # create any folders in path\n package_file_path = self.get_file_path()\n package_folder = os.path.split(package_file_path)[0]\n if package_folder and not os.path.isdir(package_folder):\n os.makedirs(os.path.split(package_file_path)[0])\n\n # open file\n fd = open(package_file_path, \"w\")\n\n # write flopy header\n if self.simulation_data.write_headers:\n dt = datetime.datetime.now()\n header = (\n \"# File generated by Flopy version {} on {} at {}.\"\n \"\\n\".format(\n __version__,\n dt.strftime(\"%m/%d/%Y\"),\n dt.strftime(\"%H:%M:%S\"),\n )\n )\n fd.write(header)\n\n # write blocks\n self._write_blocks(fd, ext_file_action)\n\n fd.close()", "def write_init_file(name, data, path=\"\"):\n\n # find the resource and exclude it from the file\n data = data.copy()\n\n # Removes the Visa resource if needed\n try:\n data.remove(\"Visa_Resource\")\n except:\n pass\n\n if os.path.isfile(os.path.abspath(str(path) + str(name.split(\".\")[0]) + \".yaml\")):\n\n os.remove(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\"))\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n yaml.dump(data, filename, indent=4)\n close_file(filename)\n\n elif not os.path.isfile(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\")):\n\n # directory = path[:len(path) - len(path.split(\"/\")[-1])]\n\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n\n yaml.dump(data, filename, indent=4)\n\n close_file(filename)\n\n # Debricated\n # for items in data.items():\n # if type(items[1]) != type([]):\n # string = str(items[0]) + \" = \\\"\" + str(items[1]) + \"\\\"\\n\"\n # os.write(filename, str(string))\n # else:\n # string = str(items[0]) + \" = \\\"\"\n # for i in items[1]:\n # string += str(i).strip(\"'\").strip(\"[\").strip(\"]\") + \",\"\n # string = string[:-1]\n # string += \"\\\"\\n\"\n # print string\n # os.write(filename, string)\n\n else:\n return -1", "def _print_breakdown(cls, savedir, fname, data):\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n\n with open(os.path.join(savedir, fname), 'w') as fout:\n fout.write(data)", "def save_file(self, sub):\n fileout = os.path.join(self.saving_dir, 'output_skeleton_' + str(sub) + '.nii.gz')\n print('writing altered skeleton to', fileout)\n aims.write(self.skel, fileout)", "def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)", "def Write(self):\n template_mappings = {}\n\n template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n file_content = file_content.encode('utf-8')\n\n with open(self.PATH, 'wb') as file_object:\n file_object.write(file_content)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def write_inits(project_name, root_dir):\r\n \r\n #Create 
our file paths first...\r\n test_init_path = get_file_path(root_dir, \"tests\", \"__init__.py\")\r\n project_init_path = get_file_path(root_dir, project_name, \"__init__.py\")\r\n \r\n #Write the test_init file first\r\n test_init = open(test_init_path, 'w')\r\n test_init.close()\r\n print_file(test_init_path)\r\n \r\n #Write the NAME_init second\r\n project_init = open(project_init_path, 'w')\r\n project_init.close()\r\n print_file(project_init_path)", "def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)", "def writeToFile(self, basedir, write_code=0):", "def write_setup(project_name, root_dir):\r\n setup_path = get_file_path(root_dir, None, \"setup.py\") #Get the path for setup.py\r\n setup_content = get_setup_text(project_name)\r\n \r\n setup_file = open(setup_path, 'w')\r\n setup_file.write(setup_content)\r\n setup_file.close()\r\n print_file(setup_path, \" +++\")", "def _create_pbs_file(self, jobname, num_jobs, pf_pbs, pf_input_package_template, pf_output_package_template):\n\n # create unique compute directory\n pd_compute = None # run_shell_cmd(\"mktemp --tmpdir={}\".format(self._prl_options[\"pbs-pd-root-compute\"]))\n\n pbs_text = PBS._generate_pbs_header_array(num_jobs, jobname, self._prl_options, pd_compute=pd_compute)\n\n pbs_text += \"\\n{}\\n\".format(\n PBS._generate_call_command(self._env,\n pf_input_package_template,\n pf_output_package_template,\n self._prl_options,\n pd_compute=pd_compute\n )\n )\n\n # write to file\n write_to_file(pbs_text, pf_pbs)", "def test_write_pdf_bootstrap_tree(self):\r\n\r\n tree = parse_newick(\r\n \"((tax7:0.1,tax3:0.2)node0:.98,tax8:.3, tax4:.3)node1:.4\",\r\n PhyloNode)\r\n bootstraps = {'node0': .7, 'node1': .4}\r\n fd, f = mkstemp(prefix='make_bootstrapped_tree_test',\r\n suffix='.pdf')\r\n close(fd)\r\n self._paths_to_clean_up.append(f)\r\n write_pdf_bootstrap_tree(tree, f, bootstraps)\r\n assert(os.path.exists(f))", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def persist(self, filepath):\n joblib.dump('hello-steppy', filepath)", "def Write(self):\n template_mappings = {\n 'pypi_token': self._project_definition.pypi_token or ''}\n\n file_content = []\n\n template_data = self._GenerateFromTemplate('environment', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'pypi_token', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('matrix', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('install', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name != 'l2tdevtools':\n template_data = self._GenerateFromTemplate(\n 'install_l2tdevtools', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name in self._PROJECTS_WITHOUT_BUILD:\n template_filename = 'build_off'\n else:\n template_filename = 'build'\n\n template_data = self._GenerateFromTemplate(\n template_filename, template_mappings)\n 
file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('test_script', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n template_data = self._GenerateFromTemplate('artifacts', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'deploy_script', template_mappings)\n file_content.append(template_data)\n\n file_content = ''.join(file_content)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)", "def write_job_manifest(self):\n import time\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n for k, v in self.job.items():\n hout.write(';'.join([k, v]) + '\\n')", "def write_to_file(self, filename: str) -> None:", "def write (self, file):\n\t\tfile.write (self.pack ())", "def writeDependencyFile():\n # if not wbuildVersionIsCurrent():\n # print(bcolors.WARNING + \"Version of the project's static .wBuild lib is not the same as the dynamically loaded \"\n # \"wBuild\"\n # \"version. It is strongly recommended to update .wBuild lib using \\'wbuild update\\'; \"\n # \"otherwise, the consistency of the build can not be guaranteed.\" + bcolors.ENDC)\n logger.info(\"Structuring dependencies...\")\n conf = Config()\n htmlOutputPath = conf.get(\"htmlOutputPath\")\n logger.debug(\"Loaded config.\\n html output path (key htmlOutputPath): \" + htmlOutputPath + \"\\n\")\n scriptsPath = conf.get(\"scriptsPath\")\n readmePath = conf.get(\"readmePath\")\n wbData = parseWBInfosFromRFiles(script_dir=scriptsPath, htmlPath=htmlOutputPath)\n mdData = parseMDFiles(script_dir=scriptsPath, htmlPath=htmlOutputPath, readmePath=readmePath)\n dependFile = tempfile.NamedTemporaryFile('w',delete=False)\n with dependFile as f: #start off with the header\n f.write('######\\n')\n f.write('#This is a autogenerated snakemake file by wBuild\\n')\n f.write('#wBuild by Leonhard Wachutka\\n')\n f.write('######\\n')\n\n # write build index rule\n writeIndexRule(wbData, mdData, f)\n logger.info(\"Dependencies file generated at: {}\\n\".format(dependFile.name))\n\n return dependFile.name", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def write_output(self):", "def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with 
(self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def write(self, fn):\n with open(fn, 'w') as f:\n self.config.write(f)", "def generate_overlayfs_stacking(self, working_file_name):\n\n # Reopenthe working file\n working_file = open(working_file_name, \"a\")\n\n\n working_file.write(\"generate_overlayfs_stacking\\n\")\n\n # We are done here, now close the file\n working_file.close()", "def write_config(self, config_file):\n \n # write root paths\n \n # write reference data\n \n # write tool paths\n \n pass", "def write_file(country, season, final, var):\n if var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()", "def write_contents(self):\n dfile = open(os.path.join(self.directory, self.file_name), 'w')\n dfile.write(self.contents.strip())", "def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass", "def bootstrap():\n Bootstrap()", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def writeto(self, filename=None):\n if filename is None:\n if self.fileout is not None:\n filename = self.fileout\n else:\n raise IOError(\"filename is None and no self.fileout set.\")\n \n with open(filename, \"w\") as f:\n f.writelines(\"\\n\".join(self.get_config(\"configfile\")))", "def _write_init(self):\n\n content = ('# -*- coding: utf-8 -*-'\n '\\n'\n '\\n'\n \"__version__ = '${version}'\"\n '\\n')\n intFile = os.path.join(self.packageDir, '__init__.py')\n return self.write_file(string.Template(content), intFile)", "def write_to(self, filepath):\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')", "def save(self):\n f=open(\"{}/{}.html\".format(self.path,self.name),\"w\")\n f.write(\"<html>\\n <head>\\n\")\n for c in self.css:\n f.write(\" <link rel=\\\"Stylesheet\\\" href=\\\"{}\\\" />\\n\".format(c))\n f.write(\" </head>\\n</body>\\n\")\n for line in self.template.split(\"\\n\"):\n f.write(\" {}\\n\".format(line))\n f.write(\" </body>\\n</html>\")\n f.close()", "def _setupFiles(self):\r\n with open(self._conf, 'w') as f:\r\n # Write base config\r\n f.write('lxc.utsname = {0}\\n'.format(self._hostname))\r\n f.write('\\n')\r\n f.write('lxc.rootfs = {0}\\n'.format(self._rootfs))\r\n f.write('lxc.mount = {0}\\n'.format(self._fstab))\r\n\r\n # Write interface config\r\n for name, link, ip, up, down in self._ifs:\r\n f.write('\\n')\r\n f.write('lxc.network.type = veth\\n')\r\n f.write('lxc.network.flags = up\\n')\r\n f.write('lxc.network.name = {0}\\n'.format(name))\r\n\r\n if link:\r\n f.write('lxc.network.link = {0}\\n'.format(link))\r\n\r\n if ip:\r\n f.write('lxc.network.ipv4 = {0}/24\\n'.format(ip))\r\n\r\n if up:\r\n f.write('lxc.network.script.up = {0}\\n'.format(up))\r\n\r\n if down:\r\n f.write('lxc.network.script.down = {0}\\n'.format(down))\r\n\r\n\r\n # Write cgroup config\r\n f.write(_CONFIG_CGROUP)\r\n\r\n # 
Write capabilities config\r\n # TODO: Add at some point?\r\n # f.write(_CONFIG_CAP)\r\n\r\n with open(self._fstab, 'w') as f:\r\n f.write(_FSTAB_BASE.format(proc=pjoin(self._rootfs, 'proc'),\r\n devpts=pjoin(self._rootfs, 'dev/pts'),\r\n sysfs=pjoin(self._rootfs, 'sys')))\r\n\r\n for src, dst, ro in self._fstabExt:\r\n f.write(_FSTAB_BIND.format(srcDir=src, dstDir=dst,\r\n ro=',ro' if ro else ''))", "def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()", "def writeBlade(self):\n\n ofname = self.blade1_file ### note, assuming they're all the same\n ofh = open(ofname,'w')\n\n for line in self.lines_blade:\n ofh.write(line)\n ofh.close()", "def bootstrap(self):\n None", "def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def create_file(self):\n with open(self.get_path(), 'w', encoding='utf8') as file:\n print(\"- {}\".format(time2str(self.start)), file=file)", "def write(filename):\n print(uc.write(filename))", "def write_to_init_file(self, init_dir_path, application_name):\n init_file_name = '__init__.py'\n init_file_path = os.path.join(init_dir_path, init_file_name)\n if not os.path.exists(init_dir_path):\n os.makedirs(init_dir_path)\n with open(init_file_path, 'w') as init_file:\n for model_name in self.app_models_dict[application_name]:\n for serializer_name in self.app_model_serializer_dict[model_name]:\n init_file.write(\"from .\" 
+ serializer_name[:-10] + \"_auto_view import \" + serializer_name[:-10] + \"ViewSet\\n\")\n init_file.close()\n self.write_init_file = False", "def write_to_file(self, stream, application_name, serializer_name, model_name):\n project_path = settings.BASE_DIR\n application_path = os.path.join(project_path, application_name)\n is_serializer_file = os.path.exists(os.path.join(application_path, 'auto_serializers.py'))\n if is_serializer_file:\n viewset_dir_path = application_path\n viewset_name = 'auto_views.py'\n file_mode = 'a'\n self.write_init_file = False\n else:\n viewset_dir_path = os.path.join(application_path, 'auto_views')\n viewset_name = serializer_name[:-10] + '_auto_view.py'\n file_mode = 'w'\n self.file_write_flag = False\n viewset_file_path = os.path.join(viewset_dir_path, viewset_name)\n dir_name = os.path.dirname(viewset_file_path)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n if self.write_init_file is True:\n self.write_to_init_file(viewset_dir_path, application_name)\n with open(os.path.join(viewset_dir_path, viewset_name), file_mode) as output_file:\n if (file_mode == 'w') or (file_mode == 'a' and self.file_write_flag is True):\n output_file.write(\"from rest_framework import viewsets\\n\")\n if file_mode == 'a':\n for mdl_name in self.app_models_dict[application_name]:\n output_file.write(\"from \" + application_name + \".models import \" + mdl_name + \"\\n\")\n for srlzr_name in self.app_model_serializer_dict[mdl_name]:\n output_file.write(\"from \" + application_name + \".auto_serializers import \" + srlzr_name +\n \"\\n\")\n else:\n output_file.write(\"from \" + application_name + \".models import \" + model_name + \"\\n\")\n output_file.write(\"from \" + application_name + \".auto_serializers import \" + serializer_name + \"\\n\")\n output_file.write(stream)\n self.file_write_flag = False\n output_file.close()", "def __init__(self, output_file):\n self.file = open(output_file, \"w\")", "def write_towhee_restart(self, filename, pbc=(0, 0, 0)):\n seeds = [5054388, 14604618, 6176650, 7526479, 6525, 10097385, 9059353,\n 14349506, 535, 7287374, 12195841, 7272997, 5692437, 11292972,\n 1589479, 16351161, 14342867, 3500530, 14385737, 2924396,\n 11857489, 6765405, 12074244, 5940539, 3050519]\n version = 6\n numcycles = 0\n numboxes = 1\n nummoltypes = 1\n maxtranssingle = 0.5\n maxtranscom = 0.15305334\n maxrot = 0.672082\n maxvoldis = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]\n if type(pbc) is list or type(pbc) is tuple:\n pbc = np.asarray(pbc)\n length = np.asarray(self.length) + pbc\n hmatrix = np.diag(length)\n maxvoldis.extend(list(hmatrix))\n with open(filename, 'w') as f:\n f.write(f\"\\t\\t{version}\\n\")\n f.write(\"\\t\" + \"\\t \".join(map(str, seeds)) + \"\\n\")\n f.write(f\"\\t{numcycles} \\t\\t{numboxes} \\t\\t{nummoltypes}\\n\")\n f.write(f\"\\t{maxtranssingle:20.15f}\\n\")\n f.write(f\"\\t{maxtranscom:20.15f}\\n\")\n f.write(f\"\\t{maxrot:20.15f}\\n\\n\")\n for row in maxvoldis:\n f.write(f\"\\t{row[0]:20.15f} \\t{row[1]:20.15f} \\t{row[2]:20.15f}\\n\")\n f.write(f\"\\t{self.numatom // self.molsize}\\n\")\n f.write(f\"\\t{self.molsize}\\n\")\n f.write(\"\\t \".join(map(str, list(self.contents['Mol_type'].astype(int)))) + \"\\n\")\n f.write(\"\\t \".join(map(str, list(self.contents['Box_ID'].astype(int)))) + \"\\n\")\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)", "def write_bootstrap_support_files(master_tree, bootstraps, output_dir,\r\n num_support_trees):\r\n\r\n # master tree as 
passed\r\n fname = os.path.join(output_dir, \"master_tree.tre\")\r\n f = open(fname, 'w')\r\n f.write(master_tree.getNewick(with_distances=True))\r\n f.close()\r\n\r\n # support of nodes in tab delimited text\r\n f = open(os.path.join(output_dir, 'jackknife_support.txt'), 'w')\r\n f.write('#total support trees considered: ' +\r\n str(num_support_trees) + '\\n')\r\n f.write('#node support is fractional - in range [0,1]\\n')\r\n for key, val in bootstraps.items():\r\n f.write(\"\\t\".join([str(key), str(val)]) + \"\\n\")\r\n f.close()\r\n\r\n # tree with nodes named by support values\r\n pseudo_newick_master = copy.deepcopy(master_tree)\r\n f = open(os.path.join(output_dir, 'jackknife_named_nodes.tre'), 'w')\r\n for name, support_val in bootstraps.items():\r\n node = pseudo_newick_master.getNodeMatchingName(name)\r\n node.Name = str(support_val)\r\n f.write(pseudo_newick_master.getNewick(with_distances=True))\r\n f.close()", "def create_file(file_name: str, startup_text: str) -> None:\n with open(file_name, 'w') as f:\n f.write(startup_text)", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def make_file(self):\n\n f = open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self", "def write_to_file(self, file_name=None, sub_path=None) -> None:\n super().write_to_file(file_name, settings.ARTILLERY_FOLDER)\n\n self.set_yaml_config()\n self.write_file_to_output(\n settings.ARTILLERY_YAML, self.yaml_config, append_mode=False, project_sub_folder=settings.ARTILLERY_FOLDER\n )", "def write(self):\n with open(self.filename, 'w') as outfile:\n [outfile.write(element) for element in self.preamble]\n [outfile.write(element) for element in self.body]", "def write(self, f):\n \n with new_child_context() as ctx:\n columns = ctx.fallback(self.columns, 'ninja.file_columns', DEFAULT_COLUMNS)\n strict = ctx.fallback(self.strict, 'ninja.file_columns_strict', False)\n if strict and (columns is not None) and (columns < _MINIMUM_COLUMNS_STRICT):\n columns = _MINIMUM_COLUMNS_STRICT\n\n with _Writer(f, columns, strict) as w:\n ctx.current.writer = w\n ctx.current.phase_outputs = StrictDict(key_type=str, value_type=list)\n ctx.current.project = self._project\n ctx.current.project_outputs[self._project] = ctx.current.phase_outputs\n \n # Header\n w.comment('Ninja file for {}'.format(self._project))\n w.comment('Generated by Rōnin on {}'\n .format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')))\n if columns is not None:\n w.comment('Columns: {:d} ({})'\n .format(columns, 'strict' if strict else 'non-strict'))\n \n w.line()\n w.line('builddir = {}'.format(pathify(self._project.output_path)))\n \n # Rules\n for phase_name, phase in self._project.phases.items():\n verify_type(phase, Phase)\n self._write_rule(ctx, phase_name, phase)", "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('ddos_bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "def starting_new_file(self):", "def create(self):\n self.create_file()", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def write_all(self):\n self.write_config()\n self.write_wq()", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n 
ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def _jobfile(self):\n job = self.job.format(fnum=self.fnum)\n with open(job, 'w') as f:\n f.write('#!/bin/sh\\n' + self.phast_cmmd + self.cleanup_cmmd)", "def write_to_file(self):\n \"\"\"\n Saves this app to a file in it's protobuf notation\n This way, it can be parsed using the same constructor as aps received over the internet\n The file extension stands for Protocol buffer Apk INformation\n \"\"\"\n file_name = f'{self.package_name()}({self.version_code()}).pain'\n dir_path = self.path()\n os.makedirs(dir_path, exist_ok=True)\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as file:\n file.write(self.proto.SerializeToString())\n LOGGER.debug(f'Wrote metadata for {self.package_name()} to {file_path}')\n return file_path", "def test1_write():\n with open(FILE_DIR + FILE_NAME, mode='w', encoding='utf-8') as f:\n f.write(DATA)", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def filewrite(self, filename):\n io.write(self, filename)", "def _make_slice_file(self):\n import_str = '\\n'.join([self._scad_include_str.format(i) for i in self.scad_includes])\n object_str = '\\n'.join([m + ';' for m in self.scad_object_modules])\n key_str = '\\n'.join([m + ';' for m in self.scad_key_modules])\n\n contents = SLICER_TEMPLATE.substitute(import_str=import_str,\n object_str=object_str,\n key_str=key_str,\n openscad=self._openscad_command)\n LOG.debug('.scad file contents: %s', contents)\n\n with open(self.scad_filename, 'w') as f:\n f.write(contents)\n LOG.info('Created .scad file: %s', self.scad_filename)", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def write(self):", "def write(self):", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def create_file(self, file_name=\"result\", extension=\"html\"):\n with open(f\"{file_name}.{extension}\", \"a\") as f:\n f.write(\"<!DOCTYPE html>\\n<html>\\n<head>\\n <meta charset='utf-8'>\")\n for head_element in self.head:\n f.write(head_element)\n f.write(\"\\n</head>\\n<body>\")\n for body_element in self.body:\n f.write(body_element)\n f.write(\"\\n</body>\\n</html>\")", "def write(self, fp):\n if self._defaults:\n s = str(\"[%s]\\n\" % configparser.DEFAULTSECT)\n\n # This is python version dependent. 
Again :(\n if sys.version_info[0] == 2:\n fp.write(s)\n\n for (key, value) in self._defaults.items():\n self._write_item(fp, key, value)\n fp.write(\"\\n\")\n elif sys.version_info[0] == 3:\n fp.write(bytes(s, 'UTF-8'))\n\n for (key, value) in self._defaults.items():\n self._write_item(fp, key, value)\n fp.write(bytes(\"\\n\"), 'UTF-8')\n else:\n raise Exception(\"Unknown python version\")\n\n for section in self._sections:\n\n if sys.version_info[0] == 2:\n s = str(\"[%s]\\n\" % section)\n fp.write(s)\n\n for (key, value) in self._sections[section].items():\n self._write_item(fp, key, value)\n s = str(\"\\n\")\n fp.write(s)\n elif sys.version_info[0] == 3:\n s = str(\"[%s]\\n\" % section)\n fp.write(bytes(s, 'UTF-8'))\n for (key, value) in self._sections[section].items():\n self._write_item(fp, key, value)\n s = str(\"\\n\")\n fp.write(bytes(s, 'UTF-8'))\n else:\n raise Exception(\"Unknown python version\")", "def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())", "def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)", "def file(self, sentence):\n req = open(\"python.py\" , 'w+')\n req.write(sentence)\n req.close()", "def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1", "def open_output_files(self):\n if not os.path.exists(self.outputDictionaryPath):\n os.makedirs(self.outputDictionaryPath)\n\n self.XMLfile = open(os.path.join(self.outputDictionaryPath, 'MyDictionary.xml'), 'w+', encoding='utf-8') # this is the output file\n self.Makefile = open(os.path.join(self.outputDictionaryPath, 'Makefile'), 'w+', encoding='utf-8')\n self.MyInfoFile = open(os.path.join(self.outputDictionaryPath, 'MyInfo.plist'), 'w+', encoding='utf-8')", "def write_initdata(xy0, v0, NL, BND, h, beta, outdir):\n dio.ensure_dir(outdir)\n M = np.hstack((xy0, v0))\n np.savetxt(outdir + 'NL.txt', NL, fmt='%i', delimiter=',', header='NL (Neighbor List)')\n np.savetxt(outdir + 'BND.txt', BND, fmt='%i', header='BND (Boundary List)')\n np.savetxt(outdir + 'xyv0.txt', M, delimiter=',', header='xy0 (initial positions) v0 (initial velocities)')\n with open(outdir + 'h.txt', \"w\") as hfile:\n hfile.write(\"# h (time step) \\n{0:4f}\".format(h))\n if beta != 'none':\n with open(outdir + 'beta.txt', \"w\") as betafile:\n betafile.write(\"# beta (damping coeff) \\n{0:4f}\".format(beta))", "def write_ctl_file(self):\n # Make sure all paths are relative to the working directory\n try:\n self._set_rel_paths()\n except (AttributeError, ValueError) as error:\n raise error\n with open(self.ctl_file, 'w') as ctl_handle:\n ctl_handle.write(\"seqfile = {0}\\n\".format(self._rel_alignment))\n ctl_handle.write(\"outfile = {0}\\n\".format(self._rel_out_file))\n for option in self._options.items():\n if option[1] == None:\n # If an option has a value of None, there's no need\n # to write it in the control file; it's 
normally just\n # commented out.\n continue\n ctl_handle.write(\"{0} = {1}\\n\".format(option[0], \n option[1]))", "def write(self, data_pref)\n\n def _writeToAddama(self, addama_dir):", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)" ]
[ "0.71933657", "0.68223804", "0.6309295", "0.62452126", "0.6161541", "0.6161541", "0.60435784", "0.60038215", "0.59648716", "0.5935721", "0.5925566", "0.58335197", "0.5824003", "0.58197004", "0.58056873", "0.58021903", "0.5799778", "0.57911575", "0.57786596", "0.57492787", "0.5725511", "0.5712414", "0.5678266", "0.5677597", "0.56420195", "0.5637757", "0.562947", "0.5628967", "0.5586574", "0.5564605", "0.5562713", "0.55564564", "0.55564165", "0.5545869", "0.55446255", "0.55381835", "0.55362904", "0.5506221", "0.5500667", "0.5495424", "0.549262", "0.54868513", "0.54817283", "0.54784656", "0.5465986", "0.54657066", "0.5464725", "0.54641384", "0.5463309", "0.5458612", "0.54437155", "0.54417634", "0.5440476", "0.5439343", "0.5425226", "0.54240483", "0.54181933", "0.5415917", "0.5410719", "0.5407512", "0.5406624", "0.540416", "0.53942287", "0.53913057", "0.53903025", "0.53873056", "0.5382533", "0.53707135", "0.535489", "0.53544945", "0.5352945", "0.5349412", "0.5349125", "0.5348334", "0.53371966", "0.5336728", "0.5331236", "0.5324024", "0.5323876", "0.53212017", "0.5320224", "0.53151023", "0.53144765", "0.5313769", "0.53045315", "0.53045315", "0.53021365", "0.52996755", "0.52990603", "0.52974", "0.52974", "0.52965325", "0.52960825", "0.5293301", "0.5291637", "0.5289741", "0.5289486", "0.528798", "0.52835774", "0.52814615", "0.527947" ]
0.0
-1
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
def __getpredictors_distance(self, staname, distance):
    distfromsta = distance[staname]
    del distfromsta[staname]  # remove the station to be filled from the dataframe
    distfromsta = distfromsta.sort_values()

    stations = self.network.getsta(distfromsta.index.values)
    # station = self.network.getsta(staname)

    # Only 3 closest stations
    # sel1 = [(i, e) for i, e in zip(stations[0:2], stations[1:3])]  # selection predictors with spacing 1
    # sel2 = [(i, e) for i, e in zip(stations[0:2], stations[2:4])]  # selection predictors with spacing 2

    # Use all stations
    sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])]  # selection predictors with spacing 1
    sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])]  # selection predictors with spacing 2
    # sel3 = [(i, e) for i, e in zip(stations[0:-3], stations[3:])]  # selection predictors with spacing 3
    # sel4 = [(i, e) for i, e in zip(stations[0:-4], stations[4:])]  # selection predictors with spacing 4

    # Only 3 closest stations
    # sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:2], stations[1:3])]  # selection predictors with spacing 1
    # sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:2], stations[2:4])]  # selection predictors with spacing 2

    # Use all stations
    sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:-1], stations[1:])]  # selection predictors with spacing 1
    sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:-2], stations[2:])]  # selection predictors with spacing 2
    # sel3names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:-3], stations[3:])]  # selection predictors with spacing 3
    # sel4names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in zip(stations[0:-4], stations[4:])]  # selection predictors with spacing 4

    # Interleave the spacing-1 and spacing-2 pairs, nearest stations first
    # (izip_longest is the Python 2 name; zip_longest in Python 3)
    selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]
    selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]

    return selection, selectionnames
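For illustration, a minimal standalone sketch of the interleaved pairing pattern produced above. The station names are hypothetical, the list is assumed to be already sorted by distance to the target station, and itertools.zip_longest is the Python 3 spelling of the izip_longest call used in the method.

import itertools

stations = ['S1', 'S2', 'S3', 'S4', 'S5']        # assumed: already sorted by distance
sel1 = list(zip(stations[:-1], stations[1:]))    # spacing 1: (S1,S2), (S2,S3), ...
sel2 = list(zip(stations[:-2], stations[2:]))    # spacing 2: (S1,S3), (S2,S4), ...
pairs = [x for x in itertools.chain.from_iterable(itertools.zip_longest(sel1, sel2)) if x]
print(pairs)
# [('S1', 'S2'), ('S1', 'S3'), ('S2', 'S3'), ('S2', 'S4'), ('S3', 'S4'), ('S3', 'S5'), ('S4', 'S5')]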
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getpredictors_distance( staname, distance):\n\n distfromsta = distance[staname]\n try:\n del distfromsta[staname] # remove the station to be fill from the dataframe\n except:\n pass\n distfromsta = distfromsta.sort_values()\n\n stations = distfromsta.index\n\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n selection= [None] * (len(sel1) + len(sel2))\n selection[::2] = sel1\n selection[1::2] = sel2\n\n return selection[:4]", "def predict(x):\n file_train = open('trains.pkl', \"rb\")\n train = pkl.load(file_train)\n y = []\n k = 5\n x_train = train[0]\n y_train = train[1]\n for q in range(100):\n distance = []\n for i in range(800):\n distance.append(np.linalg.norm(x[q] - x_train[i]))\n\n # distance.append(np.sqrt(sum((x[q] - x_train[i]) ** 2)))\n # u = (x[0] - x_train) ** 2\n # print(distance)\n # distance = np.sqrt([sum(b) for b in u])\n # print(distance)\n minarg = np.argsort(distance)\n i = np.array(np.zeros(10))\n j = 0\n while k not in i:\n i[y_train[minarg[j]]] += 1\n j += 1\n y.append(np.argmax(i))\n return y", "def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings", "def predict(self, X):\n\n Xn = np.copy(X)\n\n preds = []\n # compute distance from all points\n for x1 in Xn:\n dist = self._euclidian_distance(self.X_data, x1)\n dist = np.vstack((dist, self.y)).T\n dist = dist[dist[:, 0].argsort(axis=0)][:,-1]\n # get a vote from top k\n pred = sts.mode(dist[0:self.k])[0][0]\n preds.append(pred)\n\n return np.array(preds)", "def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions", "def predict(self, X):\n labels = []\n for i in range(0,len(X)):\n min_distance = distance.euclidean(X[i],self.best_medoids[0])\n min_distance_index = 0\n\n for j in range(1,len(self.best_medoids)):\n current_distance = distance.euclidean(X[i],self.best_medoids[j])\n if(current_distance < min_distance):\n min_distance = current_distance\n min_distance_index = j\n\n labels.append(min_distance_index)\n return 
labels\n\n pass", "def predict(self, predPoints=None):", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1] #subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1]# subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def get_prediction(data):\n # load cannabis data\n strains = pd.read_csv(URL)\n # Combine the Effects and Flavors in one column\n strains['Criteria'] = strains['Effects'] + ',' + strains['Flavor']\n\n # Train model on dtm\n nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')\n nn.fit(dtm)\n\n # load request data\n # r = data.args\n entry = [v for k,v in data.items()][1:]\n #print(entry)\n # transform\n new = tf.transform(entry)\n #print(new)\n results = nn.kneighbors(new.todense())\n #print(results)\n # extract top 5 results\n output = [strains['Strain'][results[1][0][i]] for i in range(5)]\n\n return output", "def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]", "def predict_only(self):", "def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train", "def get_predictors(self):\n\t\treturn self.predictors", "def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n indices = np.argsort(dists[i])[:k]\n closest_y = self.y_train[indices]\n y_pred_i = mode(closest_y)[0]\n y_pred[i] = y_pred_i\n return y_pred", "def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist", "def predict(self, test):\n test_data = np.asarray(test)\n assert self.x is not None and self.y is not None, \"You must train the classifier before testing\"\n results = []\n for i in range(test_data.shape[0]):\n m = self.x - test_data[i]\n # dist holds the Euclidean distance to every training point\n dist = np.sum(m*m, 1)\n # this call uses a quickselect algo to find k-smallest\n 
ind = np.argpartition(dist, self.k)[:self.k]\n # take the class present the most among the k closest\n out = int(scipy.stats.mode(self.y[ind], axis=None)[0])\n results.append(out)\n return results", "def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T", "def estimate_dists(self) -> np.array:\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )", "def predict(self,Xtest,nn_list):\n\n self.dist_calc(Xtest)\n xsize = self.dist.shape[0]\n ysize = self.ytrain.shape[1]\n ypred = []\n\n for nn in nn_list:\n\n yp = np.empty((xsize,ysize))\n\n if self.weights =='uniform':\n\n neigh_ind = self.ind[:,0:nn]\n\n for j in range(self.ytrain.shape[1]):\n\n mode = utilities.quick_mode_axis1_keep_nearest_neigh(\n self.ytrain[neigh_ind,j].astype(int))\n yp[:,j] = mode\n\n\n elif self.weights=='distance':\n dist = self.dist[:,0:nn]\n neigh_ind = self.ind[:,0:nn]\n W = 1./(dist+.000001) #to make sure we dont divide by zero\n\n for j in range(self.ytrain.shape[1]):\n mode, _ = utilities.weighted_mode(self.ytrain[neigh_ind,j].astype(int), W, axis=1)\n\n mode = np.asarray(mode.ravel(), dtype=int)\n\n yp[:, j] = mode\n\n ypred.append(yp)\n\n self.ypred = ypred\n\n return ypred", "def predict(self, dists, k=1):\n s = np.argsort(dists, axis=1)\n y_pred = np.zeros(dists.shape[0])\n for i in range(dists.shape[0]):\n y_pred[i] = np.argmax(np.bincount(self.ytr[s[i,:k]]))\n return y_pred", "def predict(self,X,y):\n self.X_test = X\n self.y_test = y\n d = []\n for i in range(self.X_train.shape[0]):\n d.append(self.get_distance(self.X_train.ix[i,:])) # hold all distances\n sorted = np.argsort(d)\n k_indices = np.argsort(d)[:self.k] # get indices with lowest distances\n predictions = self.y_train[k_indices]\n unique, counts = np.unique(predictions,return_counts=True)\n\n if (np.where(predictions ==1)[0].shape[0]) >self.p*self.k:\n y_pred = 1\n else:\n y_pred=0\n # {'sample':X_test.name,'d':d,'k_ix':k_indices,'pred':predictions,\n # 'counts':counts,'uniq':unique,'y_pred':y_pred,\n # 'y_test':self.y_test,'y_train':self.y_train,\n # 'sorted':sorted}\n return {'sample':self.X_test.name,\n 'y_pred':y_pred, \n 'y_test':self.y_test}", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def predict(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,0:nn]\n\n if self.weights == 'uniform':\n\n p = np.mean(self.ytrain[neigh_ind], axis=1)\n\n elif self.weights =='distance':\n\n p = np.empty((self.dist.shape[0], self.ytrain.shape[1]), dtype=np.float)\n\n for i in range(self.ytrain.shape[1]):\n p[:,i] = utilities.weighted_mean(self.ytrain[neigh_ind,i], self.dist[:,0:nn])\n\n ypred.append(p)\n\n self.ypred = ypred\n self.nn_list = nn_list\n return ypred", "def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n", "def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet 
item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances", "def _predict_base(self, X):\n\n # Return the indices of the BMU which matches the input data most\n distances = []\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n\n for x in X:\n distance, prev_activation = self._get_bmus(x, prev_activation=prev_activation)\n distances.append(distance)\n\n return distances", "def _get_closest(centers, features):\n pred_labels = []\n\n features = features\n for feature in features:\n distances = End2End._dist(centers, feature)\n pred_labels.append(distances.argmin().item())\n\n return np.array(pred_labels)", "def predict(self, data):\n\t\treturn closestCluster(data, self.centers)", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n closest_y = self.y_train[np.argsort(dists[i])][0:k]\n closest_y = closest_y.astype(int)\n y_pred[i] = np.bincount(closest_y).argmax()\n return y_pred", "def predict(self, instances):\r\n raise NotImplementedError", "def distances(self):", "def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)", "def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds", "def allYandPred(self):\n y = []\n pred = []\n if self.use_dic:\n pb = progressbar.ProgressBar(self.n)\n for data in sorted(self.dic):\n for activity in sorted(self.dic[data]):\n for imsize in sorted(self.dic[data][activity]):\n for img in self.dic[data][activity][imsize]:\n labels = self.dic[data][activity][imsize][img]\n if len(labels) == 2:\n y.append(labels[0])\n pred.append(labels[1])\n else:\n return None\n pb.update()\n else:\n y, pred = self.getYandPred()\n return y, pred", "def predict(self, data):\r\n\r\n distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n return classification", "def predictors(self):\n if hasattr(self, '_predictors'):\n return self._predictors", "def predict(self, data: List):", "def predict(self, xs, **kwargs):", "def predictor(self, support_set, support_set_mean, queries, labels):\n dist = self.distance(support_set, support_set_mean, queries) \n\n # dist: 64, 5 (chars), 5 (distance between each char)\n logit = F.softmax(dist, dim=-1)\n # 64, 5 keys and the 64, 5 queries\n ce_loss = torch.nn.CrossEntropyLoss()\n loss = ce_loss(dist, labels)\n _, y_hat = torch.max(logit, -1)\n\n accuracy = torch.eq(y_hat, labels).float().mean()\n return loss, accuracy", "def mc_similarity_predict(embeddings, topk=1, distance='cosine_distance'):\n y_pred, y = [], []\n for i in range(len(embeddings)):\n # prediction\n distances = np.zeros(4)\n for j in range(0, 4):\n if distance == 'cosine_similarity': \n distances[j] = cosine_similarity(embeddings[i]['question'].reshape(1, -1), \n embeddings[i][f'choice_{j}'].reshape(1, -1))[0][0]\n elif distance == 'cosine_distance':\n distances[j] = cosine_distances(embeddings[i]['question'].reshape(1, -1), \n embeddings[i][f'choice_{j}'].reshape(1, 
-1))[0][0]\n if distance == 'cosine_similarity':\n if topk == 1:\n y_pred.append(np.argmax(distances))\n else:\n y_pred.append(np.argsort(distances)[::-1][:topk])\n elif distance == 'cosine_distance':\n if topk == 1:\n y_pred.append(np.argmin(distances))\n else:\n y_pred.append(np.argsort(distances)[:topk])\n else:\n raise ValueError(f'{distance} is not supported')\n \n \n # true labels\n y.append(embeddings[i]['correct_answer'])\n return y, y_pred", "def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred", "def predict_distances(self, inputs, features=None):\n outputs = dict()\n if self.args.pose_model_type == \"shared\":\n # If we are using a shared encoder for both norm and pose,\n # then all images are fed separately through the norm encoder.\n images = torch.cat([inputs[(\"color_aug\", frame_id, 0)] for frame_id in self.args.frame_idxs])\n all_features = self.models[\"encoder\"](images)\n all_features = [torch.split(f, self.args.batch_size) for f in all_features]\n features = dict()\n for i, frame_id in enumerate(self.args.frame_idxs):\n features[frame_id] = [f[i] for f in all_features]\n outputs.update(self.models[\"norm\"](features[0]))\n else:\n # Otherwise, we only feed the target image through the norm encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0]) if features is None else features\n outputs.update(self.models[\"norm\"](features))\n\n return outputs, features", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def dist_pred_dict(self, curr):\n dist = {}\n pred = {}\n for currency in self.currencies:\n dist[currency] = float('inf') # set all starting vertices to be infinite distance away\n pred[currency] = None\n\n dist[curr] = 0\n\n return dist, pred", "def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result", "def predictTest(k, train, test):\r\n\r\n pred_labels = []\r\n\r\n # for each instance in the testing dataset, calculate all L2 distance from all training instances\r\n for te in range(len(test)):\r\n all_D = np.zeros((len(train), 1))\r\n\r\n # calculate the L2 distance of the testing instance from each training instance\r\n for tr in range(len(train)):\r\n D = 0\r\n for var in range(len(train.columns)-1):\r\n # if feature is real-valued, add (testing value - training value)^2\r\n if train[var].dtype == np.float64 or train[var].dtype == np.int64:\r\n D += (test[var][te] - train[var][tr])**2\r\n # if feature is nominal, add 1 if testing and training values are different\r\n else:\r\n if test[var][te] != train[var][tr]:\r\n D += 1\r\n all_D[tr] = D**(1/2)\r\n\r\n # sort all L2 distances, select K closest neighbors, and choose the most prevalent label\r\n all_D = np.column_stack((all_D, np.array(range(len(train)))))\r\n all_D = all_D[np.argsort(all_D[:, 0])]\r\n prob_labels = 
train[len(train.columns)-1][all_D[0:k, 1]].as_matrix()\r\n pred_labels.append(Counter(prob_labels).most_common(1)[0][0])\r\n\r\n return pred_labels", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def pred_dset_weight_vote(fx_output, dset, fold_set):\n y_values = []\n for i,y in dset.split_idx[fold_set]:\n x,yi = dset.get(i)\n assert yi==y\n p = fx_output(dset.reshape_batch(x))\n y_pred = p.argmax(axis=1)\n y_weight = p.max(axis=1)\n h = np.bincount(y_pred,weights=y_weight)\n y_pred = h.argmax()\n y_values += [(y, y_pred)]\n \n return np.asarray(y_values)", "def predictedPace(gender,knowntime,distance,newdistance):\n data=[[1,1.82479886,0.15442097,2.02078846,0.152018],\\\n [2,2.81269131,0.24298622,2.94027249,0.19785733],\\\n [3.1,3.21439758,0.20199374,3.38934256,0.17998415],\\\n [4,3.48733053,0.17403963,3.63338083,0.15416951],\\\n [5,3.69988339,0.1828273,3.85497481,0.15342633],\\\n [6.2,3.92248345,0.17043469,4.08229446,0.15731007],\\\n [7,4.03585866,0.15658534,4.15885728,0.12602283],\\\n [8,4.1804725,0.13912475,4.29095087,0.11099218],\\\n [9.3,4.34819542,0.16160759,4.47210575,0.13358565],\\\n [10,4.39081844,0.16460379,4.52142994,0.13755774],\\\n [13.1,4.71140604,0.16332366,4.84067277,0.14722737],\\\n [18,5.08558166,0.15552566,5.19199923,0.13465799],\\\n [20,5.07063126,0.15512254,5.18039573,0.12522386],\\\n [26.2,5.50908488,0.18280742,5.62205952,0.16401895],\\\n [37.28,5.9248495,0.17540027,6.01767465,0.15617823],\\\n [50,6.18750376,0.13950345,6.23711374,0.11798467]]\n\n datalog=[[0.0,1.82479886,0.15442097,2.02078846,0.152018],\\\n [0.69314718056,2.81269131,0.24298622,2.94027249,0.19785733],\\\n [1.13140211149,3.21439758,0.20199374,3.38934256,0.17998415],\\\n [1.38629436112,3.48733053,0.17403963,3.63338083,0.15416951],\\\n [1.60943791243,3.69988339,0.1828273,3.85497481,0.15342633],\\\n [1.82454929205,3.92248345,0.17043469,4.08229446,0.15731007],\\\n [1.94591014906,4.03585866,0.15658534,4.15885728,0.12602283],\\\n [2.07944154168,4.1804725,0.13912475,4.29095087,0.11099218],\\\n [2.23001440016,4.34819542,0.16160759,4.47210575,0.13358565],\\\n [2.30258509299,4.39081844,0.16460379,4.52142994,0.13755774],\\\n [2.57261223021,4.71140604,0.16332366,4.84067277,0.14722737],\\\n [2.8903717579,5.08558166,0.15552566,5.19199923,0.13465799],\\\n [2.99573227355,5.07063126,0.15512254,5.18039573,0.12522386],\\\n [3.26575941077,5.50908488,0.18280742,5.62205952,0.16401895],\\\n [3.61845698982,5.9248495,0.17540027,6.01767465,0.15617823],\\\n [3.91202300543,6.18750376,0.13950345,6.23711374,0.11798467]]\n\n gender=gender.lower()\n distance=np.log(distance)\n \n imu=1\n isigma=2\n if gender=='f':\n imu=3\n isigma=4\n\n knownmu=my_interpol(datalog,imu,distance)\n knownsigma=my_interpol(datalog,isigma,distance)\n \n 
knownpercentage=slowerthan(knowntime,knownmu,knownsigma)\n\n newdistance=np.log(newdistance)\n newmu=my_interpol(datalog,imu,newdistance)\n newsigma=my_interpol(datalog,isigma,newdistance)\n return findTime(knownpercentage,newmu,newsigma)", "def knn(k, Xtrain, Ytrain, Xtest):\n d = euclidean_distances(Xtest, Xtrain, squared=True)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtest.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)", "def get_neighbors(training_set, \r\n labels, \r\n test_instance, \r\n k, \r\n distance=distance):\r\n distances = []\r\n for index in range(len(training_set)):\r\n dist = distance(test_instance, training_set[index])\r\n distances.append((training_set[index], dist, labels[index]))\r\n distances.sort(key=lambda x: x[1])\r\n neighbors = distances[:k]\r\n return neighbors", "def distance(keys_pred, keys_gt, key_num_gt):\n mask = (keys_gt > 1e-10).float()\n dif = keys_pred * mask - keys_gt\n err = dif.norm(dim=2)\n err = err.sum(dim=1)\n err = torch.div(err, key_num_gt)\n return err", "def get_classification_predictions(self):\n predictions = []\n for i, test_batch in enumerate(tqdm.tqdm(self.loader)):\n if self.tta_fn is not None:\n pred_out = self.tta_fn(batch=test_batch[0].cuda())\n else:\n # (batch_size, n_classes)\n pred_out = apply_nonlin(self.model(test_batch[0].cuda()))\n # for each prediction (1,) in pred_out (n, 4): post process\n for pred in pred_out:\n # (4, )\n probability = pred.cpu().detach().numpy()\n for prob_i in probability:\n # (1,)\n predictions.append(prob_i)\n return predictions", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n indices = np.argsort(dists[i])\n indices = indices[range(k)]\n closest_y = self.y_train[indices]\n counts = np.bincount(closest_y)\n y_pred[i] = np.argmax(counts)\n\n return y_pred", "def predict_multiple():\n req = request.json\n values = pd.DataFrame(data=req['data'])\n pred = clf2.predict(values)\n responses = {}\n for i, rep in enumerate(pred):\n if rep:\n responses[f\"Customer {i + 1}\"] = f\"Input: {', '.join([str(values.loc[i, :][j]) for j in range(values.shape[1])])}, Output: is likely to churn\"\n\n else:\n responses[f\"Customer {i + 1}\"] = f\"Input: {', '.join([str(values.loc[i, :][j]) for j in range(values.shape[1])])}, Output: is a loyal customer\"\n\n return make_response(jsonify(responses))", "def distance_metric(y_true, y_pred):\n diff = y_true - y_pred\n sqr = K.square(diff)\n total = K.sum(sqr, axis=1)\n return K.sqrt(total)", "def get_pred_ids(predictions):\n le_classes = ['Emotet', 'Mirai', 'Zeus'] \n malwares_dict = {'Emotet': 1, 'Mirai': 2, 'Zeus': 3}\n predicted_ids = []\n \n for idx in predictions:\n pred_name = le_classes[idx]\n pred_id = malwares_dict[pred_name]\n predicted_ids.append(pred_id)\n \n return predicted_ids", "def smaller(self):\n return [x for x in TransitiveIdeal(attrcall('pred'), [self])]", "def predicts(self, data_iter):\n predicteds = []\n logits = []\n\n all_corrects, all_loss, all_size = 0, 0, 0\n step = 0\n for feature, target in data_iter:\n step += 1\n # print(feature)\n # if self._cuda:\n # feature, target = feature.cuda(), target.cuda()\n\n logit = self._model(feature)\n predicted = torch.max(logit.data, 1)[1].view(target.size()).data\n # print(predicted)\n predicteds.extend(predicted)\n 
logits.extend(logit)\n loss = F.cross_entropy(logit, target, size_average=False)\n\n cur_loss = loss.data[0]\n all_loss += cur_loss\n cur_corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n all_corrects += cur_corrects\n print('Evaluation - average loss: {:.6f} average acc: {:.4f}%'.format(\n float(all_loss) / (int(all_size) + 1), 100 * float(all_corrects) / (int(all_size) + 1)))\n\n return predicteds, logits", "def clf_perceptron(vector_col:str,\n df_train:pd.DataFrame,\n model:Perceptron,\n ) -> list:\n\n #if return_ranking: return list(model.decision_function(df[vector_col].to_list()))\n \n return list(model.predict(df_train[vector_col].to_list()))", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def predict(self, X):\n \n if self.centers is None:\n raise Exception(\"Not fitted\")\n else:\n out = []\n for i in range(X.shape[0]):\n j_closest_center = self.metric.closest_neighbor_index(X[i,:], self.centers)\n out.append(self.centers[j_closest_center, :])\n \n return np.array(out)", "def distances_to(self, pt):\n d = [pt.distance(a) for a in self]\n return np.array(d)", "def discriminant(self, words):\n indicators = self.evaluate_indicators(words)\n indicators = array(indicators, ndmin=2) # I am fine with 1d arrays but scikit-learn raises deprecation warning\n prediction = self.model.predict(indicators)\n return prediction", "def distance(self):\n try:\n import pdb\n pdb.set_trace()\n s = []\n x0,y0 = self.deriv.T\n for thing in everything:\n x1,y1 = thing.deriv.T\n r,p = pearsonr(y0,y1)\n s.append(( p,thing.label ))\n s.sort()\n #print s[-5:]\n print s\n except:\n return np.inf", "def predicts(self,X):\n return [self.predict(x) for x in X]", "def predict(self, X) -> List[str]:\n # Get docID of nearest neighbours\n nn = self.vsm.search(X, limit=self.k)\n\n # Create list of concatenation of all topics, including duplicates\n topics = []\n for docID in nn:\n index = self.docIDs_train[self.docIDs_train == docID].index[0]\n topics += self.Y_train.iloc[index]\n\n # Assign prediction as most common topics that make up at least 50% of the topic labels\n n = len(topics)\n total_prob = 0\n results = []\n topics = Counter(topics).most_common()\n for (topic, count) in topics:\n results.append(topic)\n total_prob += count / n\n if total_prob > 0.5:\n break\n\n return results", "def compare_distance(model):\n\n dists = []\n outputs = []\n paths = 'images/person/'\n\n for i in range(6):\n img = paths + str(i) + '.jpg'\n image = cv2.imread(img)\n image = process_image(image)\n\n output = model.predict(image)[0]\n outputs.append(output)\n\n vec1 = outputs[0]\n for vec2 in outputs:\n dist = np.linalg.norm(vec1 - vec2)\n dists.append(dist)\n\n print(dists[1:])\n\n plt.bar(range(1, 6), (dists[1:]), color='lightblue')\n plt.xlabel('Person')\n plt.ylabel('Euclidean distance')\n plt.title('Similarity')\n plt.grid(True)\n plt.show()", "def knnSame(k, Xtrain, Ytrain):\n d = euclidean_distances(Xtrain, squared=True)\n np.fill_diagonal(d, np.inf)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtrain.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return 
np.array(pred)", "def predict_rent(seed):\n X_train, X_test, y_train, y_test, catnums, raw_df = \\\n get_data(\"https://ndownloader.figshare.com/files/7586326\", seed)\n pipe = model_pipeline(catnums)\n pipe.fit(X_train, y_train)\n y_pred = pipe.predict(X_test)\n X_test_index = pd.DataFrame(index=X_test.index)\n return raw_df.join(X_test_index, how='inner').values, y_test.values, y_pred", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def distance_score(x_embeddings_test, x_embeddings_train, y_true_train, K=50):\n num_samples = x_embeddings_test.shape[0]\n num_classes = y_true_train.shape[1]\n y_test_confidence = []\n for i in range(num_samples):\n sample_embedding = x_embeddings_test[i]\n distances = np.square(sample_embedding - x_embeddings_train).sum(axis=-1)\n K_nn = np.argsort(distances)[:K]\n K_nn_distances = np.exp(-np.sqrt(distances[K_nn]))\n K_nn_labels = y_true_train[K_nn, :]\n\n class_indicators = np.eye(num_classes)\n classes_masks = np.matmul(class_indicators, np.transpose(K_nn_labels))\n\n # foreach class we mask away the samples in Knn that belong to other classes\n class_samples_distances = classes_masks * np.expand_dims(K_nn_distances, axis=0) # this gives num_classes X K (100 X 50 matrix)\n sum_distances = np.sum(K_nn_distances)\n D_x = np.sum(class_samples_distances, axis=-1)/sum_distances\n\n y_test_confidence.append(D_x)\n\n return np.vstack(y_test_confidence)", "def get_distance_metrics():\n\n return [HausdorffDistance(),\n AverageDistance(),\n MahalanobisDistance(),\n VariationOfInformation(),\n GlobalConsistencyError(),\n ProbabilisticDistance()]", "def predict_tree(self, testing_data, average=False):\n predictions = []\n for point in testing_data:\n # Loop over each point and find it's k-nearest neighbors\n k_nearest = self.kd_tree.return_nearest_k(point, self.k)\n targets = [self.targets[n.node[1]] for n in k_nearest]\n if average:\n predictions.append(round(np.average(targets)))\n else:\n unique, counts = np.unique(targets, return_counts=True)\n max_index = np.argmax(counts)\n predictions.append(unique[max_index])\n return predictions", "def get_neighbor_classes(self, observation: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n distances = np.sqrt(np.sum((self.X_train - observation)**2, axis=1))\n\n # Create an array of training set indices ordered by their\n # distance from the current observation\n indices = np.argsort(distances, axis=0)\n\n selected_indices = indices[:self.k]\n return self.y_train[selected_indices], distances[selected_indices]", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def getPredictorList(self):\n return self.non_miList + self.miList", "def predict(self, x):\n \n\n return predictions", "def pred_dset_bin_vote(fx_output, dset, fold_set):\n labels = []\n for i,y in dset.split_idx[fold_set]:\n x,yi = dset.get(i)\n assert yi==y\n p = fx_output(dset.reshape_batch(x))\n y_pred = np.bincount(p.argmax(axis=1)).argmax()\n labels += [(y, y_pred)]\n \n return np.asarray(labels)", "def predict(self, samples):\n distance_mat = self.get_distance(samples, self.cluster_centers)\n probs = self._calculate_cluster_probs(distance_mat, self.T_min)\n return probs", "def 
nearest_district(df_training, df_test, categories, training_embedding, test_embedding):\n # Creates an empty matrix distance.\n labels = [None] * len(test_embedding)\n for index, test in enumerate(test_embedding):\n # Recover the non-zero indexes.\n non_zero_indexes = np.nonzero(test)[0]\n labels[index] = np.argmin(euclidean_distances(test[non_zero_indexes].reshape(1, -1),\n training_embedding[:, non_zero_indexes]))\n return labels", "def all_predictors():\n from operator import itemgetter\n\n from ..util import itersubclasses\n\n predictors = sorted(\n ((s, s.__name__) for s in itersubclasses(Predictor)), key=itemgetter(1)\n )\n return list(zip(*predictors))[0]", "def calculate_distances(train_data, test_datum):\n n = train_data.shape[0]\n dist = []\n for i in range(n):\n distance = np.sqrt(np.sum(np.square(train_data[i]-test_datum)))\n dist.append(distance)\n dist = np.asarray(dist)\n return dist", "def single_predict_proba(self, vec, n_nearest):\n\n most_sim_ind = self.annoy_index.get_nns_by_vector(vec, n_nearest)\n most_similar_doc_ids = [self.document_ids[x] for x in most_sim_ind]\n return self.ids2class.loc[most_similar_doc_ids].mean().\\\n sort_values(ascending=False)", "def predict(self, reps):\n return [self.classes_[self.predict_one(rep)] for rep in reps]", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def get_nearest_neighbors ( self, distances: List [ float ] ):\n \n return pipe (\n # Map index to distance\n dict ( enumerate ( distances ) ),\n # Sort the indices based on their value in the mapping and take the 1st k\n lambda distance_map: sorted ( distance_map, key = distance_map.get ) [: self.k ],\n ) # End get_nearest_neighbors()", "def predict(self, X, k=None):\n \n if not hasattr(self, 'n_neighbors'):\n self.fit(X)\n \n if k is None:\n k = self.n_neighbors\n else:\n k = check_n_neighbors(k, X.shape[0])\n \n distances, _ = self.nbrs.kneighbors(X, n_neighbors=k+1)\n #distances = distances[:, 1:]\n distances[distances[:, 0] == 0., :-1] = distances[distances[:, 0] == 0., 1:]\n distances = distances[:, :-1]\n \n return distances.mean(axis=1)", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def getYandPred(self):\n y = []\n pred = []\n if self.use_dic:\n for k in self.alllabels:\n if len(k) == 2:\n y.append(k[0])\n pred.append(k[1])\n else:\n return None\n else:\n for k in self.dic.values():\n if len(k) == 2:\n y.append(k[0])\n pred.append(k[1])\n else:\n return None\n return y, pred", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,3))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,3))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def _predict_proba(self, X):\n preds = self._predict(X)\n n_instances = len(preds)\n if hasattr(self, \"n_clusters\") and self.n_clusters is not None:\n n_clusters = self.n_clusters\n else:\n n_clusters = max(preds) + 1\n dists = np.zeros((X.shape[0], n_clusters))\n for i in range(n_instances):\n dists[i, preds[i]] = 1\n return dists", "def predict(self, X):", "def predict(self, X):", "def _kendall_distance(Y_true, Y_pred, normalize=True, sample_weight=None):\n (n_samples, n_classes) = 
Y_true.shape\n dists = np.zeros(n_samples)\n\n for sample in range(n_samples):\n for f_class in range(n_classes - 1):\n for s_class in range(f_class + 1, n_classes):\n a = Y_true[sample, f_class] - Y_true[sample, s_class]\n b = Y_pred[sample, f_class] - Y_pred[sample, s_class]\n\n if a * b < 0:\n dists[sample] += 1\n\n if normalize:\n dists[sample] /= n_classes * (n_classes-1) / 2\n\n return np.average(a=dists, weights=sample_weight)", "def make_doppelganger_vs_clusters(n_clusters_considered,X,X_occam,n_repeats):\n res = []\n for n_clusters in n_clusters_considered:\n res.append([])\n for _ in range(n_repeats):\n X_restricted,restricted_idxs = get_n_random_clusters(X_occam,n_clusters)\n print(X.val.shape)\n print(X_restricted.val.shape)\n evaluator_X = evaluators.EvaluatorWithFiltering(X,X_restricted,leave_out=True,fitter_class=standard_fitter,valid_idxs=valid_idxs[restricted_idxs])\n res[-1].append(evaluator_X.weighted_average) \n return res", "def __query_pairs(self):\n\n probs = self.clf.predict_proba(self.all_features)[:,1] # unlabeled_features\n\n probs_df = pd.DataFrame(probs, index=self.all_features.index.values, columns=['proba'])\n probs_df['certainty'] = abs(0.5 - probs_df.proba)\n probs_df.sort_values(by='certainty', axis=0, inplace=True)\n\n uncertain_pairs = probs_df[:self.n_uncertain]\n match_pairs = probs_df[probs_df.proba > 0.5].sample(self.n_match)\n notmatch_pairs = probs_df[probs_df.proba < 0.5].sample(self.n_notmatch)\n\n pairs_to_label = pd.concat([uncertain_pairs,\n match_pairs,\n notmatch_pairs], axis=0, ignore_index=False)\n\n return pairs_to_label.index.values", "def _get_dst(self, df_train, df_test):\n #train NearestNeighbors(Unsupervised learner)\n neigh = NearestNeighbors(1)\n neigh.fit(df_train[['longitude', 'latitude']])\n #find the K-neighbors of points in df_test\n distances, indices = neigh.kneighbors(df_test[['longitude', 'latitude']])\n return distances" ]
[ "0.65554714", "0.59923244", "0.5906513", "0.5849182", "0.5799373", "0.5795326", "0.5768941", "0.5718649", "0.57175404", "0.5713419", "0.5701458", "0.5656988", "0.5656353", "0.56391543", "0.562515", "0.56004006", "0.553719", "0.5531781", "0.55137074", "0.5489938", "0.5472367", "0.5462772", "0.54522556", "0.54234236", "0.5396595", "0.5379135", "0.5375029", "0.5300419", "0.5286575", "0.52725637", "0.52616566", "0.52498883", "0.5244988", "0.5236045", "0.5230724", "0.52235377", "0.52222854", "0.5217793", "0.5193867", "0.51855475", "0.51831603", "0.5176649", "0.5175919", "0.51740193", "0.5172947", "0.5172279", "0.51606214", "0.51540416", "0.5134902", "0.5131294", "0.5125628", "0.5124694", "0.51209116", "0.5117749", "0.51129466", "0.51110804", "0.51069915", "0.51027083", "0.50923425", "0.5091188", "0.5090467", "0.50775015", "0.5072366", "0.50663596", "0.5064414", "0.50620055", "0.5055409", "0.5054068", "0.50507385", "0.5045653", "0.50441974", "0.5042392", "0.5042133", "0.5040418", "0.5037686", "0.5035084", "0.5031807", "0.5029069", "0.5025424", "0.5019438", "0.50171894", "0.50128263", "0.50108224", "0.50047827", "0.5004693", "0.49974957", "0.49958202", "0.49905637", "0.49888232", "0.49831328", "0.49824464", "0.49822703", "0.4982182", "0.4981841", "0.49806848", "0.49806848", "0.49803963", "0.4978635", "0.49742678", "0.49742183" ]
0.63834083
1
Return the selections sorted by the correlation (R-squared) scores
def __sort_predictors_by_corr(self, station, selections, var, From, To, by, how, constant=True,
                              selectionsnames=None, sort_cor=True, cor_lim=None):
    scores_corel = pd.DataFrame(index=np.arange(0, len(selections)),
                                columns=['corel', 'selections', 'params', 'selectionname'])

    # correlation of each selection with the variable to be filled
    for i, (selection, selectionname) in enumerate(zip(selections, selectionsnames)):
        try:
            Y = station.getData(var, From=From, To=To, by=by, how=how)  # variable to be filled
            X1 = selection[0].getData(var, From=From, To=To, by=by, how=how)  # stations variable used to fill
            X2 = selection[1].getData(var, From=From, To=To, by=by, how=how)  # stations variable used to fill

            data = pd.concat([Y, X1, X2], keys=['Y', 'X1', 'X2'], axis=1, join='outer').dropna()
            est = self.__MLR(data[['X1', 'X2']], data['Y'], constant=constant)
            rsquared = est.rsquared

            scores_corel.loc[i, 'corel'] = rsquared
            scores_corel.loc[i, 'selections'] = selection
            scores_corel.loc[i, 'selectionname'] = selectionname

            if constant:
                scores_corel.loc[i, 'params'] = [est.params[0], est.params[1], est.params[2]]
            else:
                scores_corel.loc[i, 'params'] = [est.params[0], est.params[1]]
        except ValueError:
            print('No data to do the multilinear regression. Put correlation = 0')
            scores_corel.loc[i, 'selections'] = selection
            scores_corel.loc[i, 'selectionname'] = selectionname
            scores_corel.loc[i, 'corel'] = 0
            scores_corel.loc[i, 'params'] = np.nan

    if sort_cor:
        scores_corel = scores_corel.sort_values('corel', ascending=False)

    if cor_lim:
        scores_corel = scores_corel[scores_corel['corel'] > cor_lim]
    else:
        scores_corel = scores_corel[scores_corel['corel'] > 0]

    scores_corel.index = np.arange(0, len(scores_corel.index))

    selections = scores_corel['selections'].values
    params = scores_corel['params'].values

    print("u" * 30)
    print("Correlation coefficient of the multilinear regression")
    print("u" * 30)
    print(scores_corel[['corel', 'selectionname']])
    print("u" * 30)

    return selections, params
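For illustration, a minimal sketch of the same ranking idea on synthetic data: each candidate predictor pair is scored by the R-squared of an ordinary least-squares fit and the pairs are sorted best-first. The data, the pair names, and the direct use of statsmodels' OLS (instead of the class's __MLR helper) are assumptions made only for this sketch.

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
y = pd.Series(rng.normal(size=100), name='Y')
candidates = {
    'pair_a': pd.DataFrame(rng.normal(size=(100, 2)), columns=['X1', 'X2']),
    'pair_b': pd.DataFrame(rng.normal(size=(100, 2)), columns=['X1', 'X2']),
}

scores = []
for name, X in candidates.items():
    est = sm.OLS(y, sm.add_constant(X)).fit()   # fit Y ~ X1 + X2 with an intercept
    scores.append((name, est.rsquared))

scores.sort(key=lambda t: t[1], reverse=True)   # highest R-squared first
print(scores)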
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def corr_list(self):\n c = self.df.corr().abs()\n s = c.unstack()\n so = s.sort_values(ascending=False)\n i = int(len(so) ** (1/2))\n charts = so[i:]\n charts = charts[::2]\n if len(charts) > 3:\n charts = charts[:3]\n return charts.index, charts.values", "def compute_correlations(struc_df, option, gamma, alpha):\n n_states = len(np.unique(struc_df.objnum))\n nodes = network.temp_node_info()\n adjacency = network.adjacency_mat(nodes)\n L = compute_limit_matrix(0.5, adjacency, n_states)\n L_vector = L.flatten()\n M = learn_sr(struc_df, gamma, alpha)\n M = M[2, 6]\n M_vector = M.flatten()\n\n if option == \"norm\":\n print(\"Norm of L - M: \")\n print(la.norm(L_vector - M_vector, np.inf))\n\n if option == \"correlation\":\n print(\"Correlation of L, M: \")\n print(np.dot(L_vector, M_vector) /\n (la.norm(L_vector) * la.norm(M_vector)))", "def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass", "def build_retrieved_list(self, scores):\n\n res = self.index.rank(scores)\n tmp_res = []\n # keep scores too\n tmp_scores = []\n\n # build the list\n tmp_res = []\n #print rank, \"<--\"\n for i, k in res:\n tmp_res.append( self.indices[i] )\n tmp_scores.append( k )\n\n\n # compute the difference with the difference\n diff = list(set(self.indices.values())-set(tmp_res))\n\n # shuffle to fill the rest of the list\n np.random.shuffle(diff)\n\n scores_diff = np.zeros( (len(diff,)) )\n\n final = []\n final_scores = []\n\n final.extend(tmp_res)\n final.extend(diff)\n\n final_scores.extend(tmp_scores)\n final_scores.extend(scores_diff)\n\n # remove extension for evaluation\n f = lambda x: x.split('.')[0]\n final = map(f, final)\n\n return final, final_scores", "def comparator(self):\n return self.get_scores()", "def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]", "def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}", "def __pick_clusters(self, 
mothur_results):\r\n # Sanity check\r\n if not 0 <= self.Params['Similarity'] <= 1:\r\n raise ValueError(\r\n 'Similarity threshold must be number between 0 and 1 '\r\n '(received %)' % similarity_threshold)\r\n\r\n # A lower mothur score means more otu's. To find otu's that\r\n # satisfy a similarity threshold of 0.9, we must find the\r\n # largest score less than or equal to (1 - 0.9 =) 0.1.\r\n score_threshold = 1 - self.Params['Similarity']\r\n\r\n my_score, my_otus = mothur_results.next()\r\n for score, otus in mothur_results:\r\n\r\n # Sanity check\r\n if score < my_score:\r\n raise ValueError(\r\n 'Mothur results not in ascending order. This is an error '\r\n 'in the Mothur application controller, and it should be '\r\n 'reported to the PyCogent developers.')\r\n\r\n if score <= score_threshold:\r\n my_score, my_otus = score, otus\r\n else:\r\n # Scores are only getting larger, so bail out now\r\n break\r\n return my_otus", "def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}", "def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs", "def getCorrelationForSpecies(self, current_species, threshold):\n grouped = self.groupAllSamples()\n if self.corr_matrix is None or self.corr_signature is None or self.corr_signature[0] != grouped.iloc[:,len(self.tax_levels):-1].columns.tolist():\n corr_matrix = grouped.iloc[:,len(self.tax_levels):-2]\n corr_matrix.index = grouped[self.tax_level]\n self.corr_matrix = corr_matrix.transpose().corr(method='spearman')\n self.corr_signature = (list(corr_matrix.columns), self.tax_levels[0]) \n\n corr_matrix = self.corr_matrix.loc[:,current_species]\n text = 'spearman (rank) correlation >= ' + str(threshold) + ':\\n'\n\n corr_series = corr_matrix[abs(corr_matrix) >= threshold].sort_values(ascending=False)\n \n corr_matrix = grouped.iloc[:,len(self.tax_levels):-1]\n corr_matrix.index = grouped[self.tax_level]\n corr_list = []\n\n current_abundance = corr_matrix.loc[current_species,:corr_matrix.columns[-2]]\n list_index = []\n for name in corr_matrix.index:\n new_abundance = corr_matrix.loc[name,:][:-1]\n corr = '{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman'))\n if corr != 'nan' and abs(float(corr)) >= threshold and current_species != name:\n corr_list.append('{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman')))\n list_index.append(name)\n #rho, pval = spearmanr(current_abundance, new_abundance)\n #if rho != 'nan' and abs(float(corr)) >= threshold and current_species != name and pval <= 0.05:\n # 
#corr_list.append('{0:.3}'.format(rho))\n # #list_index.append(name)\n # print(name + '\\t' + str(rho) + '\\t' + str(pval))\n \n #for i in xrange(len(corr_list)):\n # if corr_list[i] != 'nan' and abs(float(corr_list[i])) >= threshold and current_species != corr_matrix.index[i]:\n # print(corr_matrix.index[i] + '\\t' + corr_list[i])\n corr_series = pd.Series(corr_list, index=list_index)\n return text, corr_series\n #return text, corr_series.map('{0:.3}'.format)", "def calc_rocchio(original, relevant_vectors, nonrelevant_vectors):\n print('orig' + str(len(original)))\n if len(relevant_vectors) > 0: print('rv 1st len' + str(len(relevant_vectors[0])))\n if len(nonrelevant_vectors) > 0: print('nr 1st len' + str(len(nonrelevant_vectors[0])))\n rv_count = len(relevant_vectors)\n nr_count = len(nonrelevant_vectors)\n rv_sum = np.add.reduce(relevant_vectors)\n print('rv_sum' + str(rv_sum) + 'rv_count' + str(rv_count))\n nr_sum = np.add.reduce(nonrelevant_vectors)\n print('nr_sum' + str(nr_sum) + 'nr_count' + str(nr_count))\n updated_relevance = cg.ROCCHIO_ALPHA * original \\\n + cg.ROCCHIO_BETA * (1/rv_count if rv_count else 1) * rv_sum \\\n - cg.ROCCHIO_GAMMA * (1/nr_count if nr_count else 1) * nr_sum\n #only keep terms above minimum threshold (also serves to exclude negative values)\n print('before')\n print(updated_relevance[:40])\n updated_relevance = [0 if wgt < cg.ROCCHIO_MIN else wgt for wgt in updated_relevance]\n print('after')\n print(updated_relevance[:40])\n return updated_relevance", "def correlation(data, method, caption):\n columns = list(data)\n coefficients = data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)", "def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)", "def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())", "def eval_concreteness(scores: np.ndarray, word_pairs, num=100, gt_divisor=10, vecs_names=None, tablefmt='simple'):\n\n # Sort scores by first and second word's concreteness scores\n def print_conc(synset_agg, title):\n ids12 = wn_concreteness_for_pairs(word_pairs, synset_agg)\n # plot_scores(scores[ids1], gt_divisor, vecs_names, title=title)\n # plot_scores(scores[ids2], gt_divisor, vecs_names, title=title)\n # plot_scores(scores[ids12][:100], gt_divisor, vecs_names, title=title + ' - 100 least concrete')\n # plot_scores(scores[ids12][-100:], gt_divisor, vecs_names, title=title + ' - 100 most concrete')\n print(f'\\n-------- {num} least concrete - {title} -------\\n')\n print_correlations(scores[ids12][:num], name_pairs='gt', common_subset=False, tablefmt=tablefmt)\n print(f'\\n-------- {num} most concrete - {title} -------\\n')\n print_correlations(scores[ids12][-num:], name_pairs='gt', common_subset=False, tablefmt=tablefmt)\n\n # plots both for median concreteness of synsets and for the most concrete synset of words\n print_conc('median', 'Median synset concreteness')\n print_conc('most_conc', 'Most concrete synsets')", "def 
abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list", "def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]", "def _select_matches(self, matches):\n matches = sorted(matches, key=lambda x: x.distance)\n matches = matches[:int(self._config['best_matches_percentage'] * len(matches))]\n return matches", "def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def _cont_cat_corr_features_anova(self, p_val = 0.01, subsamplesize = 100, p_seed = 0):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n # TODO add option to do Bonferroni correction to adjust p-value depending on number of variables\n \n warnings.filterwarnings('ignore')\n # List of pairs along with correlation above threshold\n cont_cat_corr_list = []\n \n seed(p_seed)\n rand_vals = sample(range(self._n_rows), k=subsamplesize)\n \n # Search for the highly correlated pairs\n for i in self._cont_index_predictors: \n for j in self._cat_index_predictors:\n formula = self._dataset.columns[i] + \" ~ \" + self._dataset.columns[j] \n model_fit = ols(formula, data=self._dataset.iloc[rand_vals,:]).fit()\n anova_model = anova_lm(model_fit)\n p = anova_model.iloc[0,4]\n if p < p_val:\n cont_cat_corr_list.append([p,i,j]) #store correlation and columns index\n \n # Order variables by level of correlation \n s_cont_cat_corr_list = sorted(cont_cat_corr_list,key=lambda x: abs(x[0]))\n \n cont_cat_corr_features = []\n\n for v,i,j in s_cont_cat_corr_list:\n cont_cat_corr_features.append([self._dataset.columns[i],self._dataset.columns[j],v])\n \n return cont_cat_corr_features", "def run_grouped_correlation(md_vals, otu_arrays, test, test_choices,\r\n pval_assignment_method, permutations=None):\r\n test_fn = test_choices[test]\r\n sample_sizes = map(len, md_vals)\r\n\r\n def _rho(otu_vals, md_vals):\r\n return test_fn(otu_vals, md_vals)\r\n # find the correlations. 
rhos is list of 1D arrays.\r\n rhos = []\r\n for i in range(len(md_vals)):\r\n rhos.append(apply_along_axis(_rho, 1, otu_arrays[i], md_vals[i]))\r\n pvals = []\r\n for i, group_rhos in enumerate(rhos):\r\n pvals_i = zeros(len(group_rhos))\r\n for j, rho in enumerate(group_rhos):\r\n pvals_i[j] = assign_correlation_pval(rho, sample_sizes[i],\r\n pval_assignment_method, permutations, test_fn, otu_arrays[\r\n i][j],\r\n md_vals[i])\r\n pvals.append(array(pvals_i))\r\n # calculate combined stats\r\n fisher_pvals = apply_along_axis(fisher, 0, array(pvals))\r\n fisher_rho_and_h = apply_along_axis(fisher_population_correlation, 0,\r\n array(rhos), sample_sizes)\r\n return (\r\n (rhos, pvals, fisher_pvals, fisher_rho_and_h[0], fisher_rho_and_h[1])\r\n )", "def compute_cost_clarans(data, _cur_choice):\n # modified from that of CLARA\n total_cost = 0.0\n medoids = {}\n for idx in _cur_choice:\n medoids[idx] = []\n\n for i in list(data.index):\n choice = -1\n min_cost = np.inf\n for m in medoids:\n # fast_euclidean from CLARA\n tmp = np.linalg.norm(data.loc[m] - data.loc[i])\n if tmp < min_cost:\n choice = m\n min_cost = tmp\n\n medoids[choice].append(i)\n total_cost += min_cost\n # print(\"total_cost: \", total_cost)\n return total_cost, medoids", "def calc_qcorr(self) -> Dict[int, float]:\n return self._calc_qcorr", "def getSorteScoresFromScoreDict(queryRunDict):\n return list(sorted(queryRunDict.items(), key=lambda x: (x[1], x[0]), reverse=True))", "def __get_score_ordered(scores, idx):\t\n\treturn [x[1][idx] for x in sorted(scores.items())]", "def scatter_chart_score(self, grouped):\n score = np.abs(np.corrcoef(grouped.keys(), grouped.values)[0][1])\n if score > 0.3:\n score = 3\n return score", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def build_sorted_corelation(player_attributes):\n player_attributes_wo_na = player_attributes.dropna()\n player_attributes_corr = player_attributes_wo_na.corr()\n df_overall_rating_corr = player_attributes_corr[\"overall_rating\"]\n\n df_single_corr = pd.DataFrame(df_overall_rating_corr)\n df_single_corr = df_single_corr.sort_values(by=[\"overall_rating\"], ascending=True)\n\n # drop first three rows since we only care about positive correlation\n index_headers = list(df_single_corr[3:].index)\n plt.figure(figsize=(20, 10))\n plt.barh(\n y=index_headers,\n width=df_single_corr.overall_rating[3:],\n height=1,\n linewidth=0.5,\n )\n plt.show()", "def compute_clf_based_correlation_score(stats, columns, col_name):\n full_col_data = columns[col_name]\n\n dt_clf = DecisionTreeClassifier()\n\n other_feature_names = []\n other_features = []\n for other_col_name in columns.columns:\n if other_col_name == col_name:\n continue\n\n other_feature_names.append(other_col_name)\n le = LabelEncoder()\n _stringified_col = list(map(str,columns[other_col_name]))\n le.fit(_stringified_col)\n other_features.append(list(le.transform(_stringified_col)))\n\n other_features_t = np.array(other_features, dtype=object).transpose()\n\n le = LabelEncoder()\n _stringified_col = list(map(str,full_col_data))\n le.fit(_stringified_col)\n y = le.transform(_stringified_col)\n dt_clf.fit(other_features_t,y)\n prediction_score = dt_clf.score(other_features_t,y)\n corr_scores = list(dt_clf.feature_importances_)\n 
highest_correlated_column = max(corr_scores)\n return {\n 'correlation_score': round(10 * (1 - prediction_score * highest_correlated_column))\n ,'highest_correlation': max(corr_scores)\n ,'most_correlated_column': other_feature_names[corr_scores.index(max(corr_scores))]\n ,'similarity_score_description':\"\"\"\n A high value for this score means that two of your columns are highly similar. This is done by trying to predict one column using the other via a simple DT.\n \"\"\"\n }", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def score(self,ytest,how='score'):\n scores = []\n #iterate through each pred for each nn value\n for pred in self.ypred:\n sc = np.empty(pred.shape[1]) #need to store the scores\n\n for i in range(pred.shape[1]):\n\n p = pred[:,i]\n\n if how == 'score':\n sc[i] = utilities.score(p, ytest[:,i])\n\n if how == 'corrcoef':\n\n sc[i] = utilities.corrcoef(p, ytest[:,i])\n\n scores.append(sc)\n\n scores = np.vstack(scores)\n return scores", "def get_rcs():\n kk = np.loadtxt(source+\"/kids_data/rcslens2.csv\", delimiter=\",\",\n skiprows=1, max_rows=sample)\n global maxra\n maxra = max(kk[:sample, 0])\n global minra\n minra = min(kk[:sample, 0])\n global maxdec\n maxdec = max(kk[:sample, 1])\n global mindec\n mindec = min(kk[:sample, 1])\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([kk[:sample, 0], kk[:sample, 1]])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n # gamma_shear = -k[:,2]*np.cos\n return ctree, kk[:sample, 2], kk[:sample,\n 3], kk[:sample, 4], kk[:sample, 5]", "def get_similarities(self, query, n=5):\n scores = self.get_scores(query)\n rank_n = np.argsort(scores)[::-1]\n if n > 0:\n rank_n = rank_n[:n]\n return [self.corpus[i] for i in rank_n]", "def score(self, views: Iterable[np.ndarray], y=None, **kwargs):\n # by default return the average pairwise correlation in each dimension (for 2 views just the correlation)\n pair_corrs = self.pairwise_correlations(views, **kwargs)\n # sum all the pairwise correlations for each dimension. Subtract the self correlations. Divide by the number of views. 
Gives average correlation\n dim_corrs = np.sum(pair_corrs, axis=(0, 1)) - pair_corrs.shape[0]\n # number of pairs is n_views choose 2\n num_pairs = (self.n_views_ * (self.n_views_ - 1)) / 2\n dim_corrs = dim_corrs / (2 * num_pairs)\n return dim_corrs", "def compute_correlations(scores: (np.ndarray, list), name_pairs: List[Tuple[str, str]] = None,\n common_subset: bool = False, leave_out=False):\n if name_pairs == 'gt':\n name_pairs = [('ground_truth', nm) for nm in scores[0].dtype.names\n if nm != 'ground_truth']\n elif name_pairs == 'all':\n name_pairs = None\n if not name_pairs: # Correlations for all combinations of 2\n name_pairs = list(combinations(scores.dtype.names, 2))\n\n if common_subset: # Filter rows where any of the scores are missing for a word pair\n ids = set(range(scores.shape[0]))\n for n in scores.dtype.names:\n ids = ids.intersection(set(np.where(scores[n] != MISSING)[0]))\n scs = np.array(np.empty(len(ids)), dtype=scores.dtype)\n for n in scores.dtype.names:\n scs[n] = scores[n][list(ids)]\n else:\n scs = scores\n\n correlations = {}\n for nm1, nm2 in name_pairs:\n # Filter pairs which the scores, coming from any of the two embeddings, don't cover\n if (scs[nm1] == MISSING).all():\n warnings.warn(f'{nm1} has 0 coverage.')\n correlations[' | '.join([nm1, nm2])] = (0, 0, 0)\n elif (scs[nm2] == MISSING).all():\n warnings.warn(f'{nm2} has 0 coverage.')\n correlations[' | '.join([nm1, nm2])] = (0, 0, 0)\n else:\n scores1, scores2 = zip(*[(s1, s2) for s1, s2 in\n zip(scs[nm1], scs[nm2]) if s1 != MISSING and s2 != MISSING])\n assert len(scores1) == len(scores2)\n if leave_out:\n lp = len(scores1)\n keep = 1 - 1 / leave_out\n idx = list(range(lp))\n random.shuffle(idx)\n idx = idx[:int(lp * keep)]\n scores1 = [s for i, s in enumerate(scores1) if i in idx]\n scores2 = [s for i, s in enumerate(scores2) if i in idx]\n corr = spearmanr(scores1, scores2)\n correlations[' | '.join([nm1, nm2])] = (corr.correlation, corr.pvalue, len(scores1))\n\n return correlations", "def organize(select, strain, equals):\n scores = []\n data = list(strainer(select, strain, equals))\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores", "def quaternary_tournament(population, scores, next_gen_number, random_seed=42):\n\n np.random.seed(random_seed)\n\n indices = list(range(len(population)))\n indices_array = np.array(indices)\n\n selected = []\n for i in range(next_gen_number):\n best_score = math.inf\n picked = None\n selected_indices = np.random.choice(indices_array, size=4)\n\n for indx in selected_indices:\n if scores[indx] < best_score:\n best_score = scores[indx]\n picked = population[indx]\n\n selected.append(picked)\n\n return selected", "def calculate_correlations(input_data, index_col, cat_features, exclu_elements): \r\n try:\r\n # encode the categorical features\r\n encoded_data = pd.get_dummies(input_data,columns=cat_features,drop_first=True)\r\n\r\n pd_transposed_data = encoded_data.set_index('Style_display_code').T\r\n\r\n # get the number of items\r\n items_list = [str(a) for a in pd_transposed_data.columns]\r\n\r\n print(\"Number of items to correlate :{}_Timestamp:{}\".format(str(len(items_list)), \r\n format(str(datetime.now()))))\r\n \r\n\r\n #compute correlations and save the pickle file\r\n# matrix = pd_transposed_data.corr().values\r\n# pickle.dump(matrix, open(staging_dir+ '/corr_matrix_output_py3.p', 'wb'))\r\n \r\n # read from the saved pickle file - ONLY FOR CONSECUTIVE RUNS, TO SAVE TIME\r\n matrix = 
pickle.load(open(staging_dir+ '/corr_matrix_output_py3.p', \"rb\" ) )\r\n\r\n print(\"Corr Matrix size:{}_Timestamp:{}\".format(str(matrix.size),\r\n format(str(datetime.now()))))\r\n\r\n except Exception as e:\r\n print(\" Error !!\", e)\r\n \r\n # return the top correlated items\r\n return top_correlateditems(items_list,matrix, index_col, exclu_elements)", "def compute_score(self, gts, res):\n\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n\n cider_scorer = CiderScorer(n=self._n, sigma=self._sigma, \n document_frequency=self._doucument_frequency, \n ref_len=self._ref_len)\n\n for id in imgIds:\n hypo = res[id]\n ref = gts[id]\n\n # Sanity check.\n assert(type(hypo) is list)\n assert(len(hypo) == 1)\n assert(type(ref) is list)\n assert(len(ref) > 0)\n\n cider_scorer += (hypo[0], ref)\n\n (score, scores) = cider_scorer.compute_score()\n\n return score, scores", "def get_score_list(self, query: list, top_k: int = 0, e: int = 0.5):\n scores = list()\n for i in range(self.document_count):\n node = (i, self.get_score(query=query, index=i, e=e))\n scores.append(node)\n scores.sort(key=lambda x: x[1], reverse=True)\n\n return scores if top_k == 0 else scores[:top_k]", "def _forward_selected_new(data, response,d_val=False):\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = \"{} ~ {} + 1\".format(response,\n ' + '.join(selected + [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n elif current_score == best_new_score:\n current_score +=1 #break the while loop if score stays the same\n return selected", "def sort_priors(self):\n return", "def get_best_matches(\n results: Dict[Result, float], score_threshold: float\n) -> List[Tuple[Result, float]]:\n\n result_items = list(results.items())\n\n # Sort results by highest score\n sorted_results = sorted(result_items, key=lambda x: x[1], reverse=True)\n\n best_score = sorted_results[0][1]\n\n return [\n result\n for result in sorted_results\n if (best_score - result[1]) <= score_threshold\n ]", "def corrGroups(df:pd.DataFrame,corr_thresh:float=0.9) -> list:\n \n corrMatrix = df.corr().abs()\n corrMatrix.loc[:,:] = np.tril(corrMatrix, k=-1)\n corrMatrix = corrMatrix[corrMatrix >= corr_thresh].dropna(how='all').dropna(axis=1,how='all')\n corrMatrix['corr_groups'] = corrMatrix.apply(lambda x:sum([[x.name],x.index[x.notna()].tolist()],[]), axis=1)\n corrMatrix['max'] = corrMatrix.max(axis=1)\n \n corrVars = [i for i in corrMatrix.sort_values('max',ascending=False).corr_groups]\n \n remove=[]\n for i in corrVars:\n for j in range(0,len(corrVars)):\n if set(i).issubset(corrVars[j]) and i!=corrVars[j] and i not in remove:\n remove.append(i)\n \n for rm in remove:\n corrVars.remove(rm)\n \n return corrVars", "def correlations_cont_cat(self):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n \n warnings.filterwarnings('ignore')\n \n # Print correlations and column names\n print('One-way ANOVA p-values - Predictors')\n for i,j,v in self.cont_cat_distance:\n print('{} and {} = {:.2}'.format(i,j,v))\n \n # Box plot of 
the highly correlated pairs\n for i,j,v in self.cont_cat_distance:\n fg,ax = plt.subplots(figsize=(12, 8))\n fg = self._dataset.boxplot(i, j, ax=ax, grid=False)\n plt.xticks(rotation=90)\n plt.show()", "def getRecommendations(prefs,person,similarity=sim_pearson):\n weighted_similarities = dict((\n (other, similarity(prefs, person, other)) \n for other in prefs.keys() if other != person))\n # Eliminate critics with negative correlation (I'm not sure why\n # this is a good idea)\n for critic, sim in weighted_similarities.items():\n if sim <= 0:\n del weighted_similarities[critic]\n sum_ratings = defaultdict(int) # int() initializes to 0\n sum_weights = defaultdict(int)\n for other, weight in weighted_similarities.items():\n for movie, rating in prefs[other].items():\n sum_ratings[movie] += rating * weight\n sum_weights[movie] += weight\n recommendations = [(sum_ratings[movie]/sum_weights[movie], movie)\n for movie in sum_ratings.keys()\n if movie not in prefs[person].keys()]\n recommendations.sort()\n recommendations.reverse()\n return recommendations", "def sort_results(boxes):\n return sorted(results[k], key=lambda x : x['score'], reverse=True)", "def select_corresponding(self, theory):\n\n Qth, Rth = theory\n return [None if x_data is None\n else (x_data.Q, np.interp(x_data.Q, Qth, x_th))\n for x_data, x_th in zip(self.xs, Rth)]", "def _scorr(\n sim: xr.DataArray,\n ref: xr.DataArray,\n *,\n dims: Sequence | None = None,\n group: str | Grouper = \"time\",\n):\n if dims is None:\n dims = [d for d in sim.dims if d != \"time\"]\n\n refcorr = _pairwise_spearman(ref, dims)\n simcorr = _pairwise_spearman(sim, dims)\n S_corr = (simcorr - refcorr).sum([\"_spatial\", \"_spatial2\"])\n return S_corr.assign_attrs(units=\"\")", "def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted", "def __get_top(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productid', keep=\"first\")\n print(result)\n result = result[:top].sort_values(by=\"final_score\", ascending=False).productid\n\n return list(result)", "def get_top_corr_features(self, feature_name):\n\n correlations = abs(self.train_correlations[feature_name][self.feature_names]) # abs!\n correlations.sort_values(ascending=False, inplace=True)\n return correlations.index[1:] # exlude itsself", "def find_n_best_hough_circles(radii, hough_res, n):\n\n n_radii = len(radii)\n\n max_by_radii = [(np.max(hough_res[r,:,:]), r) for r in range(n_radii)]\n max_by_radii.sort(reverse=True)\n\n best_scores = max_by_radii[:2]\n\n def flatten_where_result(where_result):\n return [e[0] for e in where_result]\n\n circles = []\n for score, index in best_scores:\n x, y = flatten_where_result(np.where(hough_res[index,:,:]==score))\n r = radii[index]\n circles.append((x, y, r))\n\n return circles", "def find_n_best_hough_circles(radii, hough_res, n):\n\n n_radii = len(radii)\n\n max_by_radii = [(np.max(hough_res[r,:,:]), r) for r in range(n_radii)]\n max_by_radii.sort(reverse=True)\n\n best_scores = max_by_radii[:2]\n\n def 
flatten_where_result(where_result):\n return [e[0] for e in where_result]\n\n circles = []\n for score, index in best_scores:\n x, y = flatten_where_result(np.where(hough_res[index,:,:]==score))\n r = radii[index]\n circles.append((x, y, r))\n\n return circles", "def top_matches(self, prefs, p1):\n #print 'top_matches', prefs, p1\n #print '\\n'\n return [(p2, self.similarity(prefs[p1], prefs[p2])) for p2 in prefs if p2 != p1]", "def calc_meas_qcorr(self) -> None:\n \n total_counts = np.zeros(self.rows_a)\n shots = 0\n for results in self._results:\n shots += results[0].results[0].shots\n counts = []\n for res in results:\n counts.extend([v for k, v in sorted(res.get_counts().items())])\n total_counts += np.array(counts)\n self._meas_qcorr = total_counts/shots", "def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs", "def output_new_correlation_values():\n correlation_builder = _get_correlation_values()\n bv_data = correlation_builder.correlation_store.query(\n criteria={'property_x': 'bulk_modulus',\n 'property_y': 'vickers_hardness'}\n )\n \n print(\"Bulk Modulus/Vickers Hardness\")\n \n for item in bv_data:\n print(\"'{}': {},\".format(item['correlation_func'],\n item['correlation']))\n\n vb_data = correlation_builder.correlation_store.query(\n criteria={'property_y': 'bulk_modulus',\n 'property_x': 'vickers_hardness'}\n )\n\n print(\"Vickers Hardness/Bulk Modulus\")\n\n for item in vb_data:\n print(\"'{}': {},\".format(item['correlation_func'],\n item['correlation']))\n \n print('linlsq correlation values')\n\n bg_ad = correlation_builder.correlation_store.query_one(\n criteria={'property_x': 'band_gap_pbe',\n 'property_y': 'atomic_density',\n 'correlation_func': 'linlsq'}\n )\n\n bm_vh = correlation_builder.correlation_store.query_one(\n criteria={'property_x': 'bulk_modulus',\n 'property_y': 'vickers_hardness',\n 'correlation_func': 'linlsq'}\n )\n \n print(\"[{}, {}]\".format(bg_ad['correlation'], \n bm_vh['correlation']))", "def line_chart_score(self, grouped):\n keys = []\n score = []\n line_score = 0\n for i in range(len(grouped.keys())):\n keys.append(i+1)\n score.append(np.abs(np.corrcoef(keys, grouped.values)[0][1])) #linear match\n score.append(np.abs(np.corrcoef(keys, np.log(grouped.values))[0][1])) #log match\n score.append(np.abs(np.corrcoef(np.log(keys), grouped.values)[0][1])) #exponential match\n\n final_score = np.max(score)\n if final_score > 0.3:\n line_score = 3\n return line_score", "def sort_solutions(self, solutions):\r\n if self.breeding_rules.sorting_order is ScoresSortingOrder.ASCENDING:\r\n reverse = False\r\n else:\r\n reverse = True\r\n return sorted(solutions, reverse=reverse, key=lambda solution: solution.score)", "def select_most_similar_max_only(calculated_matches, selected_values, selected):\n\n new_calculated_matches = {}\n for cmdb in calculated_matches:\n for db in calculated_matches.get(cmdb):\n if db not in new_calculated_matches:\n new_calculated_matches[db] = {}\n new_calculated_matches[db][cmdb] = calculated_matches.get(\n cmdb).get(db)\n else:\n new_calculated_matches[db][cmdb] = calculated_matches.get(\n cmdb).get(db)\n\n calculated_matches = {}\n for key in new_calculated_matches:\n calculated_matches[key] = {k: v for k, v in sorted(new_calculated_matches.get(\n key).items(), 
key=lambda item: item[1], reverse=True)}\n\n for db in calculated_matches:\n if len(calculated_matches.get(db)) > 0:\n values = list(calculated_matches.get(db).values())\n fst = None\n if len(values) > 0:\n if values.count(values[0]) > 1:\n same = [x for x in calculated_matches.get(\n db) if calculated_matches.get(db).get(x) == values[0]]\n if len(same) > 1:\n fst = select_option(same, db)\n elif len(same) == 1:\n fst = same[0]\n else:\n fst = list(calculated_matches.get(db).keys())[0]\n selected_values[db] = {fst: calculated_matches.get(db).get(fst)}\n\n return selected_values", "def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))", "def _get_competent_detectors(self, scores):\n\n # create histogram of correlation scores\n scores = scores.reshape(-1, 1)\n\n # TODO: handle when Pearson score is 0\n # if scores contain nan, change it to 0\n if np.isnan(scores).any():\n scores = np.nan_to_num(scores)\n\n if self.n_bins > self.n_clf:\n warnings.warn(\n \"The number of histogram bins is greater than the number of \"\n \"classifiers, reducing n_bins to n_clf.\")\n self.n_bins = self.n_clf\n hist, bin_edges = np.histogram(scores, bins=self.n_bins)\n\n # find n_selected largest bins\n max_bins = argmaxn(hist, n=self.n_selected)\n candidates = []\n\n # iterate through bins\n for max_bin in max_bins:\n # determine which detectors are inside this bin\n selected = np.where((scores >= bin_edges[max_bin])\n & (scores <= bin_edges[max_bin + 1]))\n\n # add to list of candidates\n candidates = candidates + selected[0].tolist()\n\n return candidates", "def get_sorted_disciplines(self):\n results = self.__create_discipline_and_grade_dto()\n results.sort(self.__compare_dtos_on_grade)\n return results", "def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue", "def compute_ranking_correls(ranked_means):\n ranked_means = [w.split(\":\")[-1].lower() for (r,w) in ranked_means]\n ranked_means = [w[:-4] if w.endswith(\" (v)\") else w for w in ranked_means]\n\n correlations = []\n starostin = load_starostin_ranking()\n starostin = [s for s in starostin if s in ranked_means]\n posterior = [p for p in ranked_means if p in starostin]\n starostin = [starostin.index(w) for w in posterior]\n posterior = [posterior.index(w) for w in posterior]\n correlations.append(scipy.stats.spearmanr(posterior, starostin)[0])\n\n swadesh = load_swadesh_ranking()\n swadesh = [s for s in swadesh if s in ranked_means]\n posterior = [p for p in ranked_means if p in swadesh]\n swadesh = [swadesh.index(w) for w in posterior]\n posterior = [posterior.index(w) for w in posterior]\n correlations.append(scipy.stats.spearmanr(posterior, swadesh)[0])\n\n pagel = load_pagel_ranking()\n pagel = [s for s in pagel if s in ranked_means]\n posterior = [p for p in ranked_means if p in pagel]\n pagel = [pagel.index(w) for w in posterior]\n posterior = [posterior.index(w) for w in posterior]\n correlations.append(scipy.stats.spearmanr(posterior, pagel)[0])\n\n starostin = load_starostin_ranking()\n swadesh = load_swadesh_ranking()\n starostin = [s for s in starostin if s in ranked_means and s in swadesh]\n swadesh = [s for s in swadesh if s in ranked_means and s in starostin]\n posterior = [p for p in 
ranked_means if p in starostin and p in swadesh]\n starostin = [starostin.index(w) for w in posterior]\n swadesh = [swadesh.index(w) for w in posterior]\n posterior = [posterior.index(w) for w in posterior]\n mean = [(st+sw)/2.0 for st,sw in zip(starostin,swadesh)]\n correlations.append(scipy.stats.spearmanr(posterior, mean)[0])\n\n for l, c in zip((\"starostin\", \"swadesh\", \"pagel\", \"mean\"), correlations):\n fp = open(\"%s_correlation.txt\" % l, \"w\")\n fp.write(\"%.2f\" % c)\n fp.close()", "def get_cv_scores ( self, X: np.ndarray, y: np.ndarray ):\n # Create the parameter grid\n param_grid = list ( GridSearchCV.create_param_grid ( self.param_grid ) )\n\n # Zip the grid to the results from a single fit\n return zip (\n param_grid,\n [\n self.get_single_fitting_iteration (\n X, y, model = self.model_callable ( ** param_set )\n )\n for param_set in param_grid\n ],\n ) # End get_cv_scores", "def pairwise_correlations(self, views: Iterable[np.ndarray], **kwargs):\n transformed_views = self.transform(views, **kwargs)\n all_corrs = []\n for x, y in itertools.product(transformed_views, repeat=2):\n all_corrs.append(\n np.diag(\n np.corrcoef(x.T, y.T)[\n : self.latent_dimensions, self.latent_dimensions :\n ]\n )\n )\n try:\n all_corrs = np.array(all_corrs).reshape(\n (self.n_views_, self.n_views_, self.latent_dimensions)\n )\n except:\n print()\n return all_corrs", "def plot_by_concreteness(scores: np.ndarray, word_pairs, ax1, ax2, common_subset=False, vecs_names=None,\n concrete_num=100, title_prefix='', pair_score_agg='sum', show=False):\n for synset_agg, ax in zip(['median', 'most_conc'], [ax1, ax2]):\n corrs_by_conc = defaultdict(list)\n ids12, concs = wn_concreteness_for_pairs(word_pairs, synset_agg, pair_score_agg=pair_score_agg)\n scs = scores[ids12]\n for i in range(0, len(ids12), concrete_num):\n corrs = compute_correlations(scs[i:i + concrete_num], 'gt', common_subset=common_subset)\n for k, v in corrs.items():\n corrs_by_conc[k].append(v[0]) # Append correlations score for each embedding\n\n corrs_by_conc_a = dict2struct_array(corrs_by_conc)\n\n vnames = [n for n in corrs_by_conc_a.dtype.names if 'fmri' not in n and 'frcnn' not in n]\n labels = [Embeddings.get_label(n.split(NAME_DELIM)[1]) for n in vnames]\n\n colours, linestyles, alphas = PlotColour.colour_by_modality(labels)\n labelpad = 10\n\n # Concreteness scores on different axis but the same plot\n axn = ax\n axn.plot(concs, color='blue')\n axn.set_xlabel('Word pairs', labelpad=labelpad)\n axn.set_ylabel('WordNet concreteness', labelpad=labelpad)\n axn.yaxis.label.set_color('blue')\n # Xticklabels by step size\n n = scores.shape[0]\n step = 500\n xtlabels = [i for i in range(concrete_num, n) if i % step == 0] + [n]\n axn.xaxis.set_ticks([i - 1 for i in xtlabels])\n axn.set_xticklabels(xtlabels)\n\n # Plot for Spearman's correlations\n axp = axn.twiny().twinx()\n axp = plot_scores(corrs_by_conc_a,\n vecs_names=vnames,\n labels=None,\n colours=colours,\n linestyles=linestyles,\n title='',\n alphas=alphas,\n xtick_labels=None,\n ax=axp,\n show=show)\n axp.set_ylabel(\"Spearman's correlation\", labelpad=labelpad - 3)\n # TODO: Doesn't show, order of axn.twiny().twinx() matters...\n axp.set_xlabel('WordNet concreteness splits by 100 pairs', labelpad=labelpad)\n n = corrs_by_conc_a.shape[0]\n axp.xaxis.set_ticks([i for i in range(-1, n)])\n axp.set_xticklabels(['' for i in axp.get_xticklabels()])\n syna = {'median': 'Median', 'most_conc': 'Most Concrete'}[synset_agg]\n axp.set_title(f'{title_prefix} - Synset Agg {syna}')", "def 
get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data", "def generalization_feature_selection(data1, data2, measure, cutoff):\n cor1 = np.corrcoef(np.transpose(data1))\n cor2 = np.corrcoef(np.transpose(data2))\n num = data1.shape[1]\n cor = []\n if measure == \"pearson\":\n for i in range(num):\n cor.append(\n np.corrcoef(\n np.vstack(\n (\n list(cor1[:i, i]) + list(cor1[(i + 1) :, i]),\n list(cor2[:i, i]) + list(cor2[(i + 1) :, i]),\n )\n )\n )[0, 1]\n )\n elif measure == \"ccc\":\n for i in range(num):\n cor.append(\n calculate_concordance_correlation_coefficient(\n np.array(list(cor1[:i, i]) + list(cor1[(i + 1) :, i])),\n np.array(list(cor2[:i, i]) + list(cor2[(i + 1) :, i])),\n )\n )\n cor = np.array(cor)\n fid = np.argsort(-cor)[: int(cutoff)]\n return fid", "def forward_selected(data, response):\n remaining = set(data.columns)\n remaining.remove(response)\n selected = []\n current_score, best_new_score = 0.0, 0.0\n while remaining and current_score == best_new_score:\n scores_with_candidates = []\n for candidate in remaining:\n formula = \"{} ~ {} + 1\".format(response,\n ' + '.join(selected + [candidate]))\n score = smf.ols(formula, data).fit().rsquared_adj\n scores_with_candidates.append((score, candidate))\n scores_with_candidates.sort()\n best_new_score, best_candidate = scores_with_candidates.pop()\n if current_score < best_new_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n current_score = best_new_score\n formula = \"{} ~ {} + 1\".format(response,\n ' + '.join(selected))\n model = smf.ols(formula, data).fit()\n\n print(selected)\n return model", "def sortByScore(scores,boxes):\r\n \r\n fullboxlist=[]\r\n for i in range(len(scores)):\r\n boxdict={}\r\n boxdict['scores']=scores[i]\r\n boxdict['y_min']=boxes[i][0]\r\n boxdict['x_min']=boxes[i][1]\r\n boxdict['y_max']=boxes[i][2]\r\n boxdict['x_max']=boxes[i][3]\r\n fullboxlist.append(boxdict)\r\n \r\n fullboxlist.sort(key=getClass1Score, reverse=True)\r\n boxlist=[]\r\n for boxdict in fullboxlist:\r\n # if class 0 has highest prob, find second highest as class\r\n class_code=np.where(boxdict['scores']==np.amax(boxdict['scores']))[0][0]\r\n if class_code==0:\r\n class_code = 1+ np.where(boxdict['scores'][1:]==np.amax(boxdict['scores'][1:]))[0][0]\r\n\r\n \r\n boxlist.append([boxdict['y_min'],\r\n boxdict['x_min'],\r\n boxdict['y_max'],\r\n boxdict['x_max'],\r\n boxdict['scores'][class_code],\r\n class_code\r\n ])\r\n \r\n return boxlist,fullboxlist", "def _correlation_test_helper(\n X: Union[np.ndarray, spmatrix],\n Y: np.ndarray,\n n_perms: Optional[int] = None,\n seed: Optional[int] = None,\n confidence_level: float = 0.95,\n **kwargs,\n) -> 
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n\n def perm_test_extractor(\n res: Sequence[Tuple[np.ndarray, np.ndarray]]\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n pvals, corr_bs = zip(*res)\n pvals = np.sum(pvals, axis=0) / float(n_perms)\n\n corr_bs = np.concatenate(corr_bs, axis=0)\n corr_ci_low, corr_ci_high = np.quantile(corr_bs, q=ql, axis=0), np.quantile(\n corr_bs, q=qh, axis=0\n )\n\n return pvals, corr_ci_low, corr_ci_high\n\n if not (0 <= confidence_level <= 1):\n raise ValueError(\n f\"Expected `confidence_level` to be in interval `[0, 1]`, found `{confidence_level}`.\"\n )\n\n n = X.shape[1] # genes x cells\n ql = 1 - confidence_level - (1 - confidence_level) / 2.0\n qh = confidence_level + (1 - confidence_level) / 2.0\n\n if issparse(X) and not isspmatrix_csr(X):\n X = csr_matrix(X)\n\n corr = _mat_mat_corr_sparse(X, Y) if issparse(X) else _mat_mat_corr_dense(X, Y)\n\n # see: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient#Using_the_Fisher_transformation\n mean, se = np.arctanh(corr), 1.0 / np.sqrt(n - 3)\n z_score = (np.arctanh(corr) - np.arctanh(0)) * np.sqrt(n - 3)\n\n z = norm.ppf(qh)\n corr_ci_low = np.tanh(mean - z * se)\n corr_ci_high = np.tanh(mean + z * se)\n pvals = 2 * norm.cdf(-np.abs(z_score))\n\n return corr, pvals, corr_ci_low, corr_ci_high", "def _get_indices(scores: np.ndarray, shuffle_prop: float) -> np.ndarray:\n return _shuffle_subset(scores.argsort().argsort(), shuffle_prop)", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def select_best(self,dataframe: pd.DataFrame):\n \n # create a Dataframe only for categorical variables\n # categorical_df = pd.get_dummies(dataframe[self.cat_feats])\n categorical_df = dataframe[self.cat_feats]\n \n for feats in self.cat_feats:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(dataframe[feats].values)\n categorical_df.loc[:,feats] = lbl.transform(dataframe[feats].values)\n \n # select only Top 5 variables \n selector = SelectKBest(chi2,k=5)\n # give the targetcolumn and the rest of the data to the scalar to fit\n selector.fit(categorical_df,dataframe[self.target_cols])\n # get the indicies of the selected columns\n cols = selector.get_support(indices=True)\n\n # For display purpose Only\n dfscores = pd.DataFrame(selector.scores_)\n dfcolumns = pd.DataFrame(categorical_df.columns)\n\n #concat two dataframes for better visualization \n featureScores = pd.concat([dfcolumns,dfscores],axis=1)\n featureScores.columns = ['Features','Score'] #naming the dataframe columns\n featureScores = featureScores.sort_values(by='Score', ascending=False)\n \n utils.bar_plot(\n x_data= featureScores['Features'],\n y_data=featureScores['Score'],\n title=\"Select_K_Best using CHI2 For Categorical Features\",\n x_title=\"Features\",\n y_title=\"CHI2 Score\",\n output_path= os.path.join(self.output_path,\"select_k_best_chi2.html\")\n )\n \n self.cat_feats = featureScores['Features'].values.tolist()[:self.num_best]\n # drop the columns which did not qualify\n for feats in self.dataframe_d_copy.columns:\n if feats not in self.cat_feats:\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feats,axis=1)\n return self.cat_feats", "def analyse_and_sort(self, df):\n if (type(df) is pd.Series):\n df = df.to_frame(\"score\")\n elif (type(df) is pd.DataFrame):\n df.columns = [\"score\"]\n df = self._filter(df)\n df = self._argrelmax(df)\n df = self._drop_close_extrema(df) # by = [deb1[0]]\n return df.sort_values(by=[\"score\"])[::-1]", "def correlation(x_items, y_items):\r\n return 
correlation_test(x_items, y_items, method='pearson', tails=None,\r\n permutations=0)[:2]", "def __call__(self, relsSortedByScores, qrelDict):\n idcg = self._dcg(sorted(qrelDict.values(), reverse=True))\n return self._dcg(relsSortedByScores) / idcg if idcg > 0 else 0", "def meas_qcorr(self) -> List[float]:\n return self._meas_qcorr", "def sorting_by_criteria(self, result):\r\n\t\tresult = sorted(result, key=lambda r: r[0])\r\n\t\tflag = False\r\n\t\tm = result[0][0]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][0] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" in prewin status, compare useful_amount only \"\"\"\r\n\t\tif (result[0][0] == 0):\r\n\t\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\t\t\ttest = \"\"\r\n\t\t\tfor r in result:\r\n\t\t\t\ttest += \"[{0}, {1}, {2}, {3}], \".format(r[0], r[1], r[2], r[3])\r\n#\t\t\tprint \"prewin status: {0}\".format(test)\r\n\t\t\tself.current_best_state = [result[0][0], result[0][1], result[0][2]]\r\n\t\t\treturn result[0][3]\r\n\r\n\t\t\"\"\" sort by score (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[2], reverse=True)\r\n\t\tflag = False\r\n\t\tm = result[0][2]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][2] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" sort by useful card amount (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\r\n\t\t\"\"\" choose one to discard \"\"\"\r\n\t\tdcard = result[0][3]\r\n\t\tm = result[0][1]\r\n\t\tbest = result[0]\r\n\t\tfor r in result:\r\n\t\t\tif (r[1] != m): break\r\n\t\t\tctype = GameBoard.CardType(r[3])\r\n\t\t\tif (ctype == 4) and (self.word_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\t\tif (ctype == 5) and (self.wind_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\tself.current_best_state = [r[0], r[1], r[2]]\r\n\t\treturn dcard", "def find_correlation(convergence_data, radii, plot_correlation=False, plot_radii=False, fis=False, mu_diff=None,\n impact=False):\n correlations = []\n correlation_errs = []\n for cone_radius in radii:\n if fis or impact:\n pickle_in = open(\"MICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n if mu_diff is None:\n mu_diff = SN_data[f\"Radius{str(cone_radius)}\"][\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n else:\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # redshift_cut = [SN_data['SNZ'] > 0.2]\n if mu_diff is None:\n mu_diff = SN_data[\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n\n conv_rank = rankdata(conv)\n mu_rank = rankdata(mu_diff)\n # print(mu_diff)\n diff = np.abs(conv_rank - mu_rank)\n rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2)\n rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1))\n correlations.append(rho)\n correlation_errs.append(rho_err)\n\n if plot_correlation:\n edges = np.linspace(-0.0065, 0.011, 6)\n bins = (edges[1:] + edges[:-1]) / 2\n mean_dmu = []\n standard_error = []\n for bin in bins:\n dmus = []\n for kappa, dmu in zip(conv, mu_diff):\n if bin - 0.007 / 4 < kappa <= bin + 0.0007 / 4:\n dmus.append(dmu)\n mean_dmu.append(np.mean(dmus))\n standard_error.append(np.std(dmus) / np.sqrt(len(dmus)))\n\n plt.plot([min(conv), max(conv)], [0, 0], color=grey, 
linestyle='--')\n plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0])\n # plt.plot(conv, fit, color=colours[1], label=f'$\\Delta\\mu = {round(float(grad),3)}\\kappa$')\n plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='')\n plt.xlabel('$\\kappa$')\n plt.ylabel('$\\Delta\\mu$')\n plt.xlim([-0.008, 0.011])\n plt.legend(frameon=0, loc='lower right')\n plt.ylim([-0.3, 0.3])\n plt.text(0.0038, -0.19, f'$\\\\rho$ = {round(rho, 3)} $\\pm$ {round(rho_err, 3)}', fontsize=16)\n plt.show()\n\n if plot_radii:\n u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))]\n d_err = [correlations[i] - correlation_errs[i] for i in range(len(correlations))]\n smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4)\n smooth_u_err = savgol_filter(u_err, 11, 4)\n smooth_d_err = savgol_filter(d_err, 11, 4)\n plt.plot([0, 30], [0, 0], color=grey, linestyle='--')\n plt.plot(radii, smooth_corr, color=colours[0])\n plt.plot(radii, [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1],\n linestyle='')\n plt.fill_between(radii, smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4)\n\n plt.xlabel('Cone Radius (arcmin)')\n plt.ylabel(\"Spearman's Rank Coefficient\")\n plt.gca().invert_yaxis()\n plt.show()\n return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)]\n\n return correlations, correlation_errs", "def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list", "def corr_list(df, target, thresh=0.1, sort=True, fill=True):\n if fill:\n interest = df.corr().fillna(0)[target]\n else:\n interest = df.corr()[target]\n interest = interest[np.abs(interest) > thresh]\n if len(interest) > 0:\n if sort:\n return interest.sort_values(ascending=False)\n else:\n return interest\n else:\n return []", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, 
percentile_test_features_selected, percentile_mask", "def sort_results(self):\n pass", "def spearman(mystery_ranks, language_ranks):\n\tspearman_numbers = [] \n\tfor language in language_ranks:\n\t\tnumber = spearman_correlation(language, mystery_ranks)\n\t\tspearman_numbers.append(number)\n\n\treturn spearman_numbers", "def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]", "def __find_correlations(self, results):\n\n for result in results[:self.__result_limit]:\n\n # pub without venue\n if len(result['ven']) == 0:\n result['alternative'] = []\n\n with self.vix.searcher(weighting=Frequency) as vs:\n vq_parse = QueryParser('key', self.vix.schema).parse(result['pub']['crossref'])\n tresult = vs.search(vq_parse, limit=None, )\n if len(tresult) != 0:\n result['ven'] = {}\n result['added'] = 1\n for attr in tresult[0].items():\n result['ven'][attr[0]] = attr[1]\n\n self.__output.append(result)\n\n # venue without pub or venue with a list of pubs\n elif len(result['pub']) == 0 or (\n isinstance(result['pub'], list) and len(result['pub']) > 1):\n result['alternative'] = []\n\n with self.pix.searcher(weighting=Frequency) as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(result['ven']['key'])\n tresult = ps.search(pq_parse, limit=None, )\n\n if len(tresult):\n plist = []\n tmp = dict()\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in result['pub']]:\n plist.append(attr[1])\n break\n\n result['alternative'] = plist\n self.__output.append(result)\n\n # mixed case\n elif len(self.__output) == 0 or not result['ven']['key'] in [x['key'] for x in self.__output]:\n lis = [x for x in results if len(x['ven']) and x['ven']['key'] == result['ven']['key']]\n tmp = {}\n if len(lis) <= 1:\n tmp = {'key': result['pub']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n else:\n tmp = {'key': result['ven']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n plist = []\n with self.pix.searcher() as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(tmp['key'])\n tresult = ps.search(pq_parse, limit=None, )\n if len(tresult):\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in tmp['pub']]:\n plist.append(attr[1])\n break\n\n tmp['alternative'] = plist\n self.__output.append(tmp)", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def all_order_selection(C):\n W = np.linalg.eigvalsh(C)\n thresholds = list(np.linspace(np.min(W), np.max(W), 100))\n orders = [np.sum(W >= threshold) for threshold in thresholds]\n return thresholds, orders", "def get_recommendations(prefs, person, similarity=sim_pearson):\n totals = {}\n similarity_sums = {}\n\n for other in prefs:\n if other == person:\n continue\n\n sim = similarity(prefs, person, other)\n\n if sim <= 0:\n continue\n\n for item in prefs[other]:\n if item not in prefs[person] or prefs[person][item] == 0:\n totals.setdefault(item, 0)\n totals[item] += prefs[other][item] * sim\n similarity_sums.setdefault(item, 0)\n similarity_sums[item] += sim\n\n # Normalized list\n rankings = [(total / similarity_sums[item], item)\n for item, total in 
totals.items()]\n\n # Returns normalized score, not an r that would be between -1 and 1\n rankings.sort()\n rankings.reverse()\n return rankings", "def monte_carlo_sample(self):\n\t\tresult = dict()\n\t\tfor n in self.topological_sort():\n\t\t\tpvals = tuple(result[p] for p in n.parents)\n\t\t\tresult[n.name] = n.cpt.rand_result(pvals)\n\t\treturn result", "def r2(preds, actual):\n preds = np.reshape(preds, (-1,))\n r = np.corrcoef(preds, actual)[0,1]\n rsq = r ** 2\n return r,rsq", "def get_scored_list(self, rows, wordids):\n totalscores = dict([(row[0], 0) for row in rows])\n # This is where we'll put our scoring functions\n weights = [(1.0, location_score(rows)),\n (1.0, frequency_score(rows)),\n (1.0, distance_score(rows)),\n (1.0, self.page_rank_score(rows)),\n (1.0, self.link_text_score(rows, wordids)),\n # (5.0, self.nnscore(rows, wordids))\n ]\n # Sum up weighted scores\n for (weight, scores) in weights:\n for url in totalscores:\n totalscores[url] += weight * scores[url]\n return totalscores", "def cv_score_table(res_sprm_cv):\n \n n_settings = len(res_sprm_cv.cv_results_['params'])\n etas = [res_sprm_cv.cv_results_['params'][i]['eta'] for i in range(0,n_settings)]\n components = [res_sprm_cv.cv_results_['params'][i]['n_components'] for i in range(0,n_settings)]\n cv_score_table_ = ps.DataFrame({'etas':etas, 'n_components':components, 'score':res_sprm_cv.cv_results_['mean_test_score']})\n return(cv_score_table_)", "def rankPairs (self):\n def key (matrix, pair):\n # majority is positive, we want larger ones first\n major = matrix[pair[0]][pair[1]]\n # minority is negative because we want the smaller ones first\n minor = -1*matrix[pair[1]][pair[0]]\n return (major,minor)\n\n self.pairs = [(x,y) for x in self.poller.candidates for y in self.poller.candidates if x != y]\n matrix = self.poller.voteMatrix()\n # reverse=true to indicate descending sort\n self.pairs.sort(key=lambda pair: key(matrix,pair), reverse=True)\n self.weights = { pair : key(matrix,pair) for pair in self.pairs }\n self.pairs = [pair for pair in self.pairs if self.weights[pair][0] > -1*self.weights[pair][1]]", "def feature_selection(x_train, y_train, nb_feats=150):\n cs = np.zeros(x_train.shape[1])\n for f in range(x_train.shape[1]):\n if np.isclose(np.sum(x_train[:, f]), 0):\n cs[f] = 0\n continue\n\n cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))\n select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]\n return select" ]
[ "0.5838652", "0.55438906", "0.5539825", "0.5511799", "0.5483375", "0.54549277", "0.54273504", "0.5422278", "0.5394091", "0.536099", "0.53258604", "0.53121966", "0.5309838", "0.5289699", "0.52868915", "0.5261606", "0.5250422", "0.52314734", "0.5225502", "0.5213998", "0.52039325", "0.51935524", "0.516331", "0.5158532", "0.5155567", "0.515291", "0.5152596", "0.51503474", "0.51387125", "0.5133462", "0.512954", "0.5125265", "0.5122881", "0.5122401", "0.5120716", "0.512067", "0.511712", "0.5114616", "0.5107376", "0.5098367", "0.5089563", "0.50889945", "0.5085631", "0.50760865", "0.5074344", "0.5067185", "0.50590825", "0.505759", "0.5052397", "0.5051906", "0.5051167", "0.5049832", "0.50484717", "0.50456107", "0.50456107", "0.5044287", "0.5032156", "0.5026754", "0.5025897", "0.5024754", "0.50222486", "0.5017585", "0.5011001", "0.50066525", "0.5000348", "0.49925277", "0.49886438", "0.49849862", "0.4974436", "0.49706605", "0.49605286", "0.49503404", "0.4949182", "0.49371734", "0.493592", "0.4935602", "0.49346483", "0.49282858", "0.49250466", "0.49201557", "0.4917003", "0.49107042", "0.49084008", "0.49063957", "0.48999354", "0.4896587", "0.4889881", "0.48848134", "0.48828086", "0.48739696", "0.4872824", "0.48717257", "0.4871216", "0.48702532", "0.48592645", "0.48527822", "0.48483828", "0.4840211", "0.48388737", "0.48332888" ]
0.62526226
0
Gets a remote file of a bucket using a connection
def _get(conn, remote_file, bucket_name=BUCKET_NAME): contents = None try: reply = conn.get(bucket_name, remote_file) contents = reply.body if reply.http_response.status != 200: print 'Failed to fetch current_remote metadata' contents = None except: contents = None return contents
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def download(bucket, key):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n # do a buffered download\n bytes_io = io.BytesIO()\n client.download_fileobj(bucket, key, bytes_io)\n\n # hope that stuff is not too big, and just return content\n return bytes_io.getvalue()", "def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data", "def remote_resource(cloud_config):\n remote_uri = 'http://storage.googleapis.com/{}/'.format(\n cloud_config.storage_bucket)\n\n return lambda path, tmpdir: fetch_gcs_resource(\n remote_uri + path.strip('/'), tmpdir)", "def download(self, bucket, object, filename=None):\n service = self.get_conn()\n downloaded_file_bytes = service \\\n .objects() \\\n .get_media(bucket=bucket, object=object) \\\n .execute()\n\n # Write the file to local file path, if requested.\n if filename:\n write_argument = 'wb' if isinstance(downloaded_file_bytes, bytes) else 'w'\n with open(filename, write_argument) as file_fd:\n file_fd.write(downloaded_file_bytes)\n\n return downloaded_file_bytes", "def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)", "def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)", "def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def _do_retrieve(bucket_name, key_path, number_retries=DEFAULT_S3_RETRIES):\n try:\n return conn.get_object(Bucket=bucket_name, Key=key_path, ResponseContentType='string')\n except Exception:\n if 
number_retries > 0:\n print(\"s3_retrieve failed with incomplete read, retrying on %s\" % key_path)\n return _do_retrieve(bucket_name, key_path, number_retries=number_retries - 1)\n raise", "def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl", "def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob", "def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()", "def get_bucket_file_url(bucket, key):\n\t#https://s3.amazonaws.com/link-checker/2018-05-27-235740.txt\n\tfile_url = \"https://s3.amazonaws.com/\" + bucket + \"/\" + key\n\treturn file_url", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)", "def get_file_s3(bucket, key):\n \n client = boto3.client('s3')\n return client.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')", "def download_file(bucket_name: str, object_name: str, file_path: str):\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.utils import safe_getenv\n\n minio_client = Minio(\n safe_getenv(constants.MINIO_ENDPOINT.value),\n access_key=safe_getenv(constants.MINIO_ACCESS_KEY.value),\n secret_key=safe_getenv(constants.MINIO_SECRET_KEY.value),\n )\n minio_client.fget_object(bucket_name, object_name, file_path)", "def download(self, bucket_name, file_name, file_path):\n\n self.client.download_file(bucket_name, file_name, file_path)", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n try:\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n \n blob.download_to_filename(destination_file_name)\n \n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name)) \n except:\n print(\"User does not have access to that bucket. 
Trying public link:\")\n gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket':bucket_name, 'file':source_blob_name}\n urllib.urlretrieve(gcs_url, destination_file_name)\n print (\"Download complete\")", "def get(self, src):\n tarName = \"%s/%s.tar\" % (self.bucket, src)\n if not self.client.exists(os.path.dirname(tarName)):\n return None\n # k = \"\"\n try:\n # with self.client.open(self.bucket + \"/\" + src) as f:\n # k = f.read()\n # copy_to_local(src: str, localdest: str, **kwargs)\n if not os.path.exists(os.path.dirname(tarName)):\n os.makedirs(os.path.dirname(tarName))\n self.client.copy_to_local(tarName, tarName)\n self.extractTar(os.path.dirname(tarName), tarName)\n except Exception as e:\n logger.info(\"Exception during get: %s\" % str(e))\n # return k", "def get_blob(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n response = self.s3.get_object(Bucket=bucket_name,\n Key=key)\n return response['Body'].read().decode()", "def get_remote_file(url, success=200, timeout=10):\n try:\n app.logger.info(\"GET: %s\" % url)\n auth = None\n res = requests.get(url, stream=True, timeout=timeout, auth=auth)\n if res.status_code == success:\n return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data\n except:\n pass\n return None, None", "def _download_file(bucket: str, key: str) -> str:\n tmp_file_name = f\"/tmp/logs\"\n\n try:\n with open(tmp_file_name, \"wb\") as data:\n s3cl.download_fileobj(bucket, key, data)\n except Exception as e:\n print(type(e).__name__, e)\n f = open(tmp_file_name, \"w\")\n f.write(\"\")\n f.close()\n try:\n with gzip.open(tmp_file_name, mode=\"rt\") as f:\n x = f.read()\n return x\n except Exception as e:\n print(type(e).__name__, e, key)\n return \"\"", "def get(self, bucket: str, object_name: str) -> bytes:\n raise NotImplementedError()", "def scp_get_file(self, source_file, dest_file):\n self.scp_client.get(source_file, dest_file)", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)", "def get_blob(self, blob_name):\n return self.bucket.get_blob(blob_name)", "def download_blob(bucket_name, source_blob_name):\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n return blob.download_as_string().decode()", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blobs = storage_client.list_blobs(bucket_name)\n for blob in blobs:\n print(blob.name)\n if (blob.name == source_blob_name):\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. 
As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n \n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )", "def download_bam_file_from_s3(self, remote_bam, local_bam):\n self.cmd(\"{s3cmd} get {remote} {local}\"\n .format(\n s3cmd=self.cmds[\"s3cmd\"],\n remote=remote_bam,\n local=local_bam,\n ),\n shell=True)", "def get_object(self, bucket_name, key, stream=False, extra_get_args={}):\n url = self.__key_url(bucket_name, key)\n res = self.infinispan_client.get(url, headers=self.headers, auth=self.basicAuth)\n data = res.content\n return data", "def download_file(self, bucket_name, object_name, file_name):\n self._client.download_file(bucket_name, object_name, file_name)", "def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except requests.exceptions.RequestException as e:\n # catastrophic error. bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file", "def download_reference_file_from_s3(self, s3_file, local_file, unzip=False):\n if unzip:\n rc = subprocess.call(\"{s3cmd} get {s3_file} - | gunzip > {local_file}\"\n .format(\n s3cmd=self.cmds[\"s3cmd\"],\n s3_file=s3_file,\n local_file=local_file, \n ), shell=True)\n else:\n rc = subprocess.call(\"{s3cmd} get {s3_file} {local_file}\"\n .format(\n s3cmd=self.cmds[\"s3cmd\"],\n s3_file=s3_file,\n local_file=local_file,\n ), shell=True)\n return rc", "def download_specific_blob(bucket_name, path_to_storage_file_name, download_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, path_to_storage_file_name)\r\n\r\n # set the path to source file\r\n blob.download_to_filename(download_file_name)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"download blob '{path_to_storage_file_name}' succeed\")\r\n\r\n return None", "def get_bucket_file(\n self,\n organization_id: str,\n bucket_id: str,\n file_id: str) -> dict:\n path = '/organizations/{}/buckets/{}/files/{}'.format(\n organization_id, bucket_id, file_id)\n res = self._connection.api_request(method='GET', path=path)\n return res", "def get_file_fromMinio(bucket_name, minio_object_path, local_object_path):\n minio_client = get_minio_client()\n minio_client.fget_object(\n bucket_name=bucket_name, \n object_name=minio_object_path,\n file_path=local_object_path\n )", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def get_file(self, sys_id):\n url = \"{}/file\".format(self._target(sys_id))\n r = self._client.session.get(url, stream=True)\n return r", "def openBucket(name):\n conn = boto.connect_s3() \n print \"Connecting to bucket: {}\".format(name)\n bucket = conn.get_bucket(name, validate=True)\n return bucket", "def 
download_file_from_s3_public_bucket(bucket, object, output_file):\n botocore_config = Config(signature_version=UNSIGNED)\n s3_client = boto3.client(\"s3\", config=botocore_config)\n s3_client.download_file(bucket, object, output_file)", "def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)", "def fetch_file(uri, file=None, username=None, password=None):\r\n boto.log.info('Fetching %s' % uri)\r\n if file == None:\r\n file = tempfile.NamedTemporaryFile()\r\n try:\r\n if uri.startswith('s3://'):\r\n bucket_name, key_name = uri[len('s3://'):].split('/', 1)\r\n c = boto.connect_s3(aws_access_key_id=username, aws_secret_access_key=password)\r\n bucket = c.get_bucket(bucket_name)\r\n key = bucket.get_key(key_name)\r\n key.get_contents_to_file(file)\r\n else:\r\n if username and password:\r\n passman = urllib2.HTTPPasswordMgrWithDefaultRealm()\r\n passman.add_password(None, uri, username, password)\r\n authhandler = urllib2.HTTPBasicAuthHandler(passman)\r\n opener = urllib2.build_opener(authhandler)\r\n urllib2.install_opener(opener)\r\n s = urllib2.urlopen(uri)\r\n file.write(s.read())\r\n file.seek(0)\r\n except:\r\n raise\r\n boto.log.exception('Problem Retrieving file: %s' % uri)\r\n file = None\r\n return file", "def storage_get_file(self, group='', key=''):\n try:\n obj = None\n content = None\n if key != '':\n if self.config['type'] == 's3':\n obj = self.s3.Object(bucket_name=self.bucket, key='corr-{0}s/{1}'.format(group,key))\n res = obj.get()\n content = res['Body'].read()\n elif self.config['type'] == 'filesystem':\n with open('{0}/corr-{1}s/{2}'.format(self.storage_path, group, key), \"rb\") as obj:\n content = obj.read()\n else:\n content = None\n\n except:\n print(traceback.print_exc())\n content = None\n\n try:\n if self.config['type'] == 's3':\n file_buffer = BytesIO()\n elif self.config['type'] == 'filesystem':\n file_buffer = BytesIO()\n file_buffer.write(content)\n file_buffer.seek(0)\n return file_buffer\n except:\n self.app.logger.error(traceback.print_exc())\n print(traceback.print_exc())\n return None", "def download_object(self, bucket, key, dest_path) -> None:\n self.resource.Bucket(bucket).download_file(key, dest_path)", "def blob_download(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobc = blob.download()\n return blobc", "def retrieve_s3_contents ( s3_conn, bucket_name, key_name, stored_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if key.exists( ) :\n if stored_filename :\n key.get_contents_to_filename( stored_filename )\n return stored_filename\n\n return key.get_contents_as_string( )\n\n return None", "def download_file(bucket,file_name):\n with open(file_name, 'wb') as f:\n s3.download_fileobj(bucket, file_name,f)\n print(file_name, \": is downloaded\")", "def read_file_from_s3(bucket, key, profile):\n s3_client = boto3.Session(profile_name=profile).client('s3')\n resp = s3_client.get_object(Bucket=bucket, Key=key)\n # Python 3.8/3.9 can't download files over 2GB via HTTP, so file is \n # streamed in chunks just in case\n content = ''.join([\n chunk.decode() for chunk in resp['Body'].iter_chunks()\n ])\n \n return content", "async def read(self, size=-1):\n # read the object using the bucket and path already determined in\n # __init__, and using the connection object\n try:\n # get the file size first\n file_size = await self._getsize()\n if size== -1:\n range_start = 0\n range_end = file_size\n range_size = 
file_size\n else:\n range_start = self._seek_pos\n range_end = self._seek_pos+size-1\n if range_end > file_size:\n range_end = file_size-1\n range_size = range_end-range_start+1\n\n # if multipart download is not supported\n if not self._multipart_download:\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n body = s3_object['Body']\n data = await body.read()\n # if the file is smaller than the MAXIMUM_PART_SIZE\n elif (range_size < self._part_size):\n # the requested range is the full file, it is fastest to\n # not specify the range\n if (range_start == 0 and range_size == file_size):\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n # a portion of the file is requested\n else:\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n Range = 'bytes={}-{}'.format(\n range_start, range_end\n )\n )\n body = s3_object['Body']\n data = await body.read()\n # multipart download version\n else:\n \"\"\"Use range get to split up a file into the MAXIMUM_PART_SIZE\n and download each part asynchronously.\"\"\"\n # calculate the number of necessary parts\n n_parts = int(range_size / self._part_size + 1)\n # don't go above the maximum number downloadable\n if n_parts > self._max_parts:\n n_parts = self._max_parts\n # (re)calculate the download size\n part_size = float(range_size) / n_parts\n # create the tasks and assign the return data buffer\n tasks = []\n data_buf = io.BytesIO()\n\n for p in range(0, n_parts):\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._read_partial_file(\n p, part_size\n ))\n tasks.append(task)\n # wait for all the tasks to finish\n results = await asyncio.gather(*tasks)\n # read each chunk of data and write into the global buffer\n for r in results:\n data_buf.write(r)\n r = None # indicate ready for garbage collection\n data_buf.seek(0)\n data = data_buf.read()\n\n except ClientError as e:\n raise IOException(\n \"Could not read from object {} {}\".format(self._path, e)\n )\n except AttributeError as e:\n self._handle_connection_exception(e)\n return data", "def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass", "def get_bytes(bucket: str, key: str) -> bytes:\n logger.debug(f'Reading from s3://{bucket}/{key}')\n response = client().get_object(Bucket=bucket, Key=key)\n return response['Body'].read()", "def do_part_download(args):\r\n bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries = args\r\n conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n conn.is_secure = secure\r\n\r\n # Make the S3 request\r\n resp = conn.make_request(\"GET\", bucket=bucket_name,\r\n key=key_name, headers={'Range':\"bytes=%d-%d\" % (min_byte, max_byte)})\r\n\r\n # Open the target file, seek to byte offset\r\n fd = os.open(fname, os.O_WRONLY)\r\n logger.debug(\"Opening file descriptor %d, seeking to %d\" % (fd, min_byte))\r\n os.lseek(fd, min_byte, os.SEEK_SET)\r\n\r\n chunk_size = min((max_byte-min_byte), split*1024*1024)\r\n logger.debug(\"Reading HTTP stream in %dM chunks\" % (chunk_size/1024./1024))\r\n t1 = time.time()\r\n s = 0\r\n try:\r\n while True:\r\n data = resp.read(chunk_size)\r\n if data == \"\":\r\n break\r\n os.write(fd, data)\r\n s += len(data)\r\n t2 = time.time() - t1\r\n os.close(fd)\r\n s = s / 1024 / 1024.\r\n logger.debug(\"Downloaded %0.2fM in %0.2fs at 
%0.2fMBps\" % (s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries)", "def download_blob(url: str) -> io.BytesIO:\n storage_client = storage.Client()\n bucket_name = get_bucket_name(url)\n source_blob_name = get_blob_name(url)\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n f = io.BytesIO(blob.download_as_bytes())\n return f", "def read_file(remote_path):\n conn = _connection()\n try:\n assert conn.connect(IP_ADDRESS, SMB_PORT)\n bytesIO = BytesIO()\n conn.retrieveFile(NAME_OF_SMB_SHARE, remote_path, bytesIO)\n finally:\n conn.close()\n bytesIO.seek(0)\n return bytesIO", "def download_file(bucket, key, filename):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n client.download_file(bucket, key, filename)", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name))", "async def get(src: str, dst: str = None) -> Union[bytes, bool]:\n _ = src.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n async with _create_client() as client:\n try:\n resp = await client.get_object(Bucket=bucket, Key=key)\n except ClientError:\n return False\n async with resp['Body'] as stream:\n if dst is not None:\n async with aiofiles.open(dst, 'w+b') as f:\n async for chunk in stream.iter_chunked(FILE_CHUNK_SIZE):\n await f.write(chunk)\n logger.info(f'Get object \"{src}\" to \"{dst}\".')\n return True\n else:\n logger.info(f'Get object \"{src}\" as return.')\n return await stream.read()", "def run(\n self,\n bucket: str = None,\n blob: str = None,\n project: str = None,\n chunk_size: int = None,\n credentials: dict = None,\n encryption_key: str = None,\n request_timeout: Union[float, Tuple[float, float]] = 60,\n ) -> str:\n # create client\n client = get_storage_client(project=project, credentials=credentials)\n\n # retrieve bucket\n bucket = self._retrieve_bucket(\n client=client, bucket=bucket, create_bucket=False\n )\n\n # identify blob name\n blob = self._get_blob(\n bucket,\n blob,\n chunk_size=chunk_size,\n encryption_key=encryption_key,\n )\n # Support GCS < 1.31\n return (\n blob.download_as_bytes(timeout=request_timeout)\n if hasattr(blob, \"download_as_bytes\")\n else blob.download_as_string(timeout=request_timeout)\n )", "def _retrieve_blob(self, object_key):\n return self.s3_resource.Object(self.CVE_BUCKET, object_key).get()['Body'].read()", "def DownloadFile(self, gcs_file_name, io_base):\n bucket, 
bucket_path = self._ParseBucketAndPath(gcs_file_name)\n\n # Check the size of the remote file. If it's empty, we have to return early\n # because the chunked downloader will crash. There aren't any contents to\n # retrieve in that case, anyway.\n object_data = self._RunWithRetries(\n self._service.objects().get(bucket=bucket, object=bucket_path).execute,\n self._CommonErrorMatcher)\n if ('name' not in object_data or object_data['name'] != bucket_path\n or 'size' not in object_data):\n raise CloudStorageApiError('Object data for %s is malformed.' %\n GcsPath(bucket, bucket_path))\n if int(object_data['size']) == 0:\n return\n\n request = self._service.objects().get_media(bucket=bucket,\n object=bucket_path)\n downloader = gapi_http.MediaIoBaseDownload(\n io_base, request, chunksize=1024*1024)\n done = False\n while not done:\n # The first return value indicates download progress, which we won't do\n # anything fancy with for now.\n _, done = self._RunWithRetries(downloader.next_chunk,\n self._CommonErrorMatcher)", "def get_file(object_name: str, **kwargs) -> HTTPResponse:\n data = client.get_object(DATASETS_BUCKET, object_name, **kwargs)\n return data", "def get(host, username, remotepath, localpath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('geting file from remote:%s -> %s', remotepath, localpath)\n if not localpath:\n localpath = os.path.split(remotepath)[1]\n cmd = 'scp -P %s %s@%s:%s %s' % (port, username, host, remotepath, localpath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not retrieve %s file from %s: Error %s', remotepath, host, e)", "def read(self, table: Union[GemTable, Tuple[str, int]], local_path: str):\n # NOTE: this uses the pre-signed S3 download url. If we need to download larger files,\n # we have other options available (using multi-part downloads in parallel , for example).\n if isinstance(table, Tuple):\n table = self.get(table[0], table[1])\n\n data_location = table.download_url\n data_location = rewrite_s3_links_locally(data_location, self.session.s3_endpoint_url)\n response = requests.get(data_location)\n write_file_locally(response.content, local_path)", "def read(path, bucket=None, show_progressbar=True,\n *args, **kwargs):\n path = s3_path_utils.clean_path(path)\n bucket = bucket or s3_path_utils.get_default_bucket()\n bucket = s3_path_utils.clean_bucket(bucket)\n\n filetype = s3_path_utils.get_filetype(path)\n read_fn = get_storage_fn(filetype, 'read')\n\n s3 = boto3.client('s3')\n s3_kwargs = get_s3_client_kwargs(path, bucket,\n operation='read',\n show_progressbar=show_progressbar)\n\n with NamedTemporaryFile(suffix='.' 
+ filetype) as tmpfile:\n inform('Downloading from s3://{}/{}...'.format(bucket, path))\n s3.download_file(bucket, path, tmpfile.name, **s3_kwargs)\n inform('Reading from tempfile...')\n obj = read_fn(tmpfile, *args, **kwargs)\n return obj", "def download_from_s3(s3_resource, photo):\n try:\n bucket, key = photo.replace(\"s3://\", \"\").split(\"/\", 1)\n local_file = os.path.basename(photo)\n except ValueError as err:\n logger.exception(\"Couldn't get S3 info for %s: %s\", photo)\n raise\n\n try:\n logger.info(\"Downloading %s\", photo)\n s3_resource.Bucket(bucket).download_file(key, local_file)\n except ClientError:\n logger.exception(\"Couldn't download %s from S3.\", photo)\n raise\n\n return local_file", "def download_keypair ( s3_infra_conn, aws_account_type, region_name, keypair_type ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keypair_type )\n keypair_bucket = get_admin_bucket_name( region_name = region_name )\n return retrieve_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = keypair_bucket,\n key_name = get_keypair_keypath( aws_account_type ) + get_keypair_keyname( keypair_name ),\n stored_filename = keypair_name )", "def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def get_bucket(self, bucket):\n msg = \"get_bucket not implemented\"\n raise NotImplementedError(msg)", "def download(\n bucket: str, key: str, file_path: str, session: Optional[boto3.Session] = None\n) -> str:\n s3_client = _get_client(session)\n\n LOGGER.info(\"downloading s3://%s/%s to %s...\", bucket, key, file_path)\n s3_client.download_file(Bucket=bucket, Key=key, Filename=file_path)\n return file_path", "def get_bucket():\n return FileBucket(os.path.join(context.site.data_path, 'buckets'))", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def get_blob_url(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n location = self.s3.generate_presigned_url(\n ClientMethod='get_object',\n ExpiresIn=36*60*60,\n Params={'Bucket': bucket_name, 'Key': key})\n return location", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))", "def get_raw(key: str, bucket: google.cloud.storage.bucket.Bucket) -> bytes:\n blob = google.cloud.storage.blob.Blob(name=key, bucket=bucket)\n return blob.download_as_string()", "def get(self, url, path):\n rpath = urllib.parse.urlparse(url).path\n try:\n self.sftp.get(rpath, path)\n except Exception as e:\n 
osaka.utils.LOGGER.warning(\n \"Encountered exception: {}\\n{}\".format(e, traceback.format_exc())\n )\n raise osaka.utils.OsakaFileNotFound(\"File {} doesn't exist.\".format(url))", "def test_get_url(self):\n package = make_package()\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + self.storage.get_path(package))\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['storage.access_key'])", "def download(self, file_url):\n url = self.base_url + \"/storage-service/cloud-storage/s3/file/download?url={0}\".format(file_url)\n\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response", "def create_connection(bucket_name):\n conn = boto.connect_s3()\n bucket = conn.get_bucket(bucket_name)\n return conn, bucket", "def get_bucket(self, bucket, access_key=None, secret_key=None):\n msg = \"get_bucket not implemented\"\n raise NotImplementedError(msg)", "def get_s3_content(bucket, key, access_id, access_secret):\n try:\n response = s3_client(access_id, access_secret).get_object(Bucket=bucket, Key=key)\n body = response['Body'].read()\n return body\n except ClientError as error:\n LOGGER.error(error)\n return None", "def download(self, bucket_name=None,\n object_key=None,\n dest=None):\n\n if bucket_name == None or \\\n object_key == None or \\\n dest == None:\n u_print(\" Error - argument is missing\")\n\n u_print_d('S3.download() - bucket=[{}] key=[{}] dest=[{}]'.format(bucket_name,\n object_key,\n dest))\n return self.s3.Object(bucket_name, object_key).download_file(dest)", "def get_file(bucket_name):\n\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n my_bucket = resource.Bucket(bucket_name)\n obj = client.get_object(Bucket=bucket_name, Key='cyber_attack_subset_new.csv')\n\n # Get column names for streaming data\n feature_list = pd.read_csv(obj['Body'], nrows=1, header=None).values.tolist()[0]\n\n return(feature_list)", "def _download_from_bucket(self, ext_filename, local_filename, force=False):\n if os.path.exists(local_filename) and not force:\n logging.info('File {} already exists. Not overwriting...'.format(local_filename))\n return\n if os.path.exists(local_filename) and force:\n logging.info('File {} already exists. Overwriting...'.format(local_filename))\n else:\n logging.info('File {} does not exist. 
Downloading...'.format(local_filename))\n\n Path(os.path.dirname(local_filename)).mkdir(parents=True, exist_ok=True)\n\n if self.s3:\n self.bucket.download_file(ext_filename, local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.download_to_filename(local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n except:\n logging.warning('Downloading failed')\n\n i += 1", "def download_file_from_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.download_file(Bucket=bucket, Key=key, Filename=local_file_name)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` downloaded from ICOS and saved locally as `{}`.'.format(key, local_file_name))", "def get(self, filepath):\n try:\n collname = '%s.files' % self.bucketname\n coll = Collection(self.db, collname)\n if coll:\n doc = coll.find_one({'filename': str(filepath)}, sort=[('uploadDate', -1)])\n if doc:\n id = doc['_id']\n gout = self.gridfs.get(ObjectId(id))\n if gout:\n content = gout.read()\n gout.close()\n return content\n except Exception, e:\n print e\n return None", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def download_file(self, instance, file, where, local):\n\n instance = self.get_instance(instance)\n\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output([\"scp\", key, username + \":\" + self.default_path_aws + where + file, local])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n subprocess.check_output(\n [\"scp\", \"-i\", key, username + ':' + self.default_path_aws + where + file, local])\n return \"Success to download file \" + self.default_path_aws + where + file + \" to \" + local\n except:\n return \"Faile to access the instance\"", "def get_gcs_file(self, no_copy=False):\n return self.get_file(uri_type=URI_GCS, no_copy=no_copy)", "def get_s3_object(self, key):\n try:\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n response = s3_client.get_object(Bucket=bucket_name, Key=key)\n return response['Body'].read()\n except Exception:\n return None", "def local_fetch_s3_artifact(uri, local_dest='.'):\n local('aws s3 cp {} {}'.format(uri, local_dest))", "def download_files(\n self, mcg_obj, awscli_pod, bucket_to_read, result_folder, s3_creds=None\n ):\n ns_bucket_path = f\"s3://{bucket_to_read}\"\n\n if s3_creds:\n # Read data directly from target bucket (uls) to result dir\n sync_object_directory(\n awscli_pod,\n ns_bucket_path,\n result_folder,\n signed_request_creds=s3_creds,\n )\n else:\n # Read data from NS bucket to result dir\n sync_object_directory(awscli_pod, ns_bucket_path, result_folder, mcg_obj)", "def _get_server_bucket_object(uri):\n # First split the uri into the network location and path, and build the\n # server\n url_p = urlparse(uri)\n # check that the uri contains a scheme and a netloc\n if url_p.scheme == '' or url_p.netloc == '':\n raise APIException(\n \"URI supplied to 
s3aioFileObject is not well-formed: {}\". format(uri)\n )\n server = url_p.scheme + \"://\" + url_p.netloc\n split_path = url_p.path.split(\"/\")\n # get the bucket\n try:\n bucket = split_path[1]\n except IndexError as e:\n raise APIException(\n \"URI supplied has no bucket contained within it: {}\".format(uri)\n )\n # get the path\n try:\n path = \"/\".join(split_path[2:])\n except IndexError as e:\n raise APIException(\n \"URI supplied has no path contained within it: {}\".format(uri)\n )\n return server, bucket, path", "def read(self, local_path): # noqa: D402\n data_location = self.download_url\n data_location = rewrite_s3_links_locally(data_location)\n response = requests.get(data_location)\n write_file_locally(response.content, local_path)" ]
[ "0.68474334", "0.68474334", "0.6793478", "0.6766179", "0.6609071", "0.6568017", "0.6541442", "0.6517729", "0.6445589", "0.64432293", "0.64367956", "0.6386914", "0.6360486", "0.630485", "0.62856895", "0.62814856", "0.62789845", "0.62708944", "0.62665856", "0.62540734", "0.62386733", "0.62182367", "0.62100065", "0.61916614", "0.61643773", "0.61561275", "0.61527926", "0.6138786", "0.6133349", "0.61329275", "0.61030006", "0.6094974", "0.60938084", "0.607788", "0.60653347", "0.6045556", "0.60422844", "0.6040264", "0.60385644", "0.6017402", "0.6007477", "0.60046995", "0.59981924", "0.5985898", "0.5977035", "0.59719175", "0.59673357", "0.5961811", "0.595345", "0.5943101", "0.5941813", "0.5922852", "0.59197694", "0.59084827", "0.5896813", "0.58965117", "0.58959657", "0.58900267", "0.5887713", "0.587944", "0.5855549", "0.58483744", "0.5846863", "0.5840993", "0.58369035", "0.5835362", "0.58327657", "0.5815447", "0.5815285", "0.58134097", "0.58062047", "0.57988644", "0.5783176", "0.57825613", "0.57785374", "0.57771826", "0.5771508", "0.5767387", "0.5755125", "0.5739543", "0.57382226", "0.57287484", "0.57244104", "0.5715728", "0.57093215", "0.5706955", "0.56900275", "0.56821066", "0.5681712", "0.56742525", "0.5671657", "0.5669573", "0.5661175", "0.5659599", "0.5656793", "0.56413704", "0.56398076", "0.5639734", "0.5639353", "0.5638951" ]
0.7393641
0
Put some contents into a remote_file of a bucket using connection conn. Optionally the headers can be specified.
def _put(conn, remote_file, contents, bucket_name=BUCKET_NAME, headers=None):
    error_msg = 'Failed to upload to %s' % remote_file
    try:
        reply = conn.put(bucket_name, remote_file, S3.S3Object(contents), headers)
        if reply.http_response.status != 200:
            print error_msg
    except:
        print error_msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_file(bucket, local_file_path, remote_destination_path):\n bucket = get_bucket(bucket)\n k = Key(bucket)\n k.key = remote_destination_path\n k.set_contents_from_filename(local_file_path)", "def upload_file(conn, filename_local, filename_s3, gzip=False):\n\n filename_s3 = filename_s3.lstrip('./')\n\n file_descriptor = open(filename_local, 'rb')\n content = file_descriptor.read()\n\n content_type = _get_content_type(file_descriptor)\n headers = _get_headers(content_type)\n\n #should compress if the file is compressable and gzip is enabled\n can_be_gzipped = _file_can_be_compressed(filename_local)\n if gzip and can_be_gzipped:\n content = _compress_string(content)\n headers['Content-Length'] = str(len(content))\n headers['Content-Encoding'] = 'gzip'\n extension = mimetypes.guess_extension(content_type)\n #we should not overwrite the original file in the server.\n #We change extensions: style.css --> style.gz.css, for instance\n filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension\n\n #if gzip is enabled and it is not compressable, don't upload nothing at all\n elif gzip and not can_be_gzipped:\n return\n\n #upload\n print 'Uploading %s to %s' % (filename_local, filename_s3)\n _put(conn, filename_s3, content, headers=headers)\n file_descriptor.close()", "def send_file(self, fp, headers=None, cb=None, num_cb=10,\r\n query_args=None, chunked_transfer=False):\r\n provider = self.bucket.connection.provider\r\n\r\n def sender(http_conn, method, path, data, headers):\r\n http_conn.putrequest(method, path)\r\n for key in headers:\r\n http_conn.putheader(key, headers[key])\r\n http_conn.endheaders()\r\n if chunked_transfer:\r\n # MD5 for the stream has to be calculated on the fly, as\r\n # we don't know the size of the stream before hand.\r\n m = md5()\r\n else:\r\n fp.seek(0)\r\n\r\n save_debug = self.bucket.connection.debug\r\n self.bucket.connection.debug = 0\r\n # If the debuglevel < 3 we don't want to show connection\r\n # payload, so turn off HTTP connection-level debug output (to\r\n # be restored below).\r\n # Use the getattr approach to allow this to work in AppEngine.\r\n if getattr(http_conn, 'debuglevel', 0) < 3:\r\n http_conn.set_debuglevel(0)\r\n if cb:\r\n if chunked_transfer:\r\n # For chunked Transfer, we call the cb for every 1MB\r\n # of data transferred.\r\n cb_count = (1024 * 1024)/self.BufferSize\r\n self.size = 0\r\n elif num_cb > 2:\r\n cb_count = self.size / self.BufferSize / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = total_bytes = 0\r\n cb(total_bytes, self.size)\r\n l = fp.read(self.BufferSize)\r\n while len(l) > 0:\r\n if chunked_transfer:\r\n http_conn.send('%x;\\r\\n' % len(l))\r\n http_conn.send(l)\r\n http_conn.send('\\r\\n')\r\n else:\r\n http_conn.send(l)\r\n if cb:\r\n total_bytes += len(l)\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes, self.size)\r\n i = 0\r\n if chunked_transfer:\r\n m.update(l)\r\n l = fp.read(self.BufferSize)\r\n if chunked_transfer:\r\n http_conn.send('0\\r\\n')\r\n http_conn.send('\\r\\n')\r\n if cb:\r\n self.size = total_bytes\r\n # Get the md5 which is calculated on the fly.\r\n self.md5 = m.hexdigest()\r\n else:\r\n fp.seek(0)\r\n if cb:\r\n cb(total_bytes, self.size)\r\n response = http_conn.getresponse()\r\n body = response.read()\r\n http_conn.set_debuglevel(save_debug)\r\n self.bucket.connection.debug = save_debug\r\n if ((response.status == 500 or response.status == 503 or\r\n response.getheader('location')) and not chunked_transfer):\r\n # we'll 
try again.\r\n return response\r\n elif response.status >= 200 and response.status <= 299:\r\n self.etag = response.getheader('etag')\r\n if self.etag != '\"%s\"' % self.md5:\r\n raise provider.storage_data_error(\r\n 'ETag from S3 did not match computed MD5')\r\n return response\r\n else:\r\n raise provider.storage_response_error(\r\n response.status, response.reason, body)\r\n\r\n if not headers:\r\n headers = {}\r\n else:\r\n headers = headers.copy()\r\n headers['User-Agent'] = UserAgent\r\n if self.base64md5:\r\n headers['Content-MD5'] = self.base64md5\r\n if self.storage_class != 'STANDARD':\r\n headers[provider.storage_class_header] = self.storage_class\r\n if headers.has_key('Content-Encoding'):\r\n self.content_encoding = headers['Content-Encoding']\r\n if headers.has_key('Content-Type'):\r\n self.content_type = headers['Content-Type']\r\n elif self.path:\r\n self.content_type = mimetypes.guess_type(self.path)[0]\r\n if self.content_type == None:\r\n self.content_type = self.DefaultContentType\r\n headers['Content-Type'] = self.content_type\r\n else:\r\n headers['Content-Type'] = self.content_type\r\n if not chunked_transfer:\r\n headers['Content-Length'] = str(self.size)\r\n headers['Expect'] = '100-Continue'\r\n headers = boto.utils.merge_meta(headers, self.metadata, provider)\r\n resp = self.bucket.connection.make_request('PUT', self.bucket.name,\r\n self.name, headers,\r\n sender=sender,\r\n query_args=query_args)\r\n self.handle_version_headers(resp, force=True)", "def set_contents_from_file(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None, md5=None,\r\n res_upload_handler=None):\r\n provider = self.bucket.connection.provider\r\n headers = headers or {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n if hasattr(fp, 'name'):\r\n self.path = fp.name\r\n if self.bucket != None:\r\n if not md5:\r\n md5 = self.compute_md5(fp)\r\n else:\r\n # Even if md5 is provided, still need to set size of content.\r\n fp.seek(0, 2)\r\n self.size = fp.tell()\r\n fp.seek(0)\r\n self.md5 = md5[0]\r\n self.base64md5 = md5[1]\r\n if self.name == None:\r\n self.name = self.md5\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n if res_upload_handler:\r\n res_upload_handler.send_file(self, fp, headers, cb, num_cb)\r\n else:\r\n # Not a resumable transfer so use basic send_file mechanism.\r\n self.send_file(fp, headers, cb, num_cb)", "def ingest_httpfile(self, url, dest, name=None, metadata={}, mimetype='application/octet-stream'):\n parsed = urlparse(url)\n if name is None:\n name = basename(parsed.path)\n try:\n tempfilename = download_tempfile(url)\n logger.debug(\"Downloaded file to: \"+tempfilename)\n with closing(open(tempfilename, 'rb')) as f:\n res = get_client().put(dest + name,\n f,\n metadata=metadata,\n mimetype=mimetype)\n if not res.ok():\n raise IOError(str(res))\n cdmi_info = res.json()\n logger.debug(\"put success for {0}\".format(json.dumps(cdmi_info)))\n except IOError as e:\n raise self.retry(exc=e)\n finally:\n os.remove(tempfilename)", "def set_contents_from_stream(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None,\r\n reduced_redundancy=False, query_args=None):\r\n\r\n provider = self.bucket.connection.provider\r\n if not provider.supports_chunked_transfer():\r\n raise BotoClientError('%s does not support chunked transfer'\r\n % provider.get_provider_name())\r\n\r\n # Name of the Object should be specified explicitly for Streams.\r\n if not self.name or self.name == '':\r\n raise 
BotoClientError('Cannot determine the destination '\r\n 'object name for the given stream')\r\n\r\n if headers is None:\r\n headers = {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n\r\n # Set the Transfer Encoding for Streams.\r\n headers['Transfer-Encoding'] = 'chunked'\r\n\r\n if reduced_redundancy:\r\n self.storage_class = 'REDUCED_REDUNDANCY'\r\n if provider.storage_class_header:\r\n headers[provider.storage_class_header] = self.storage_class\r\n\r\n if self.bucket != None:\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n self.send_file(fp, headers, cb, num_cb, query_args,\r\n chunked_transfer=True)", "def upload_file_handle(\n self,\n bucket: str,\n object_name: str,\n src_file_handle: typing.BinaryIO):\n raise NotImplementedError()", "def test_put_object_from_file(self):\n self.get_file(20)\n response = self.bos.put_object_from_file(self.BUCKET, self.KEY, self.FILENAME)\n self.check_headers(response, [\"etag\"])", "def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n file_name, bucket_name, file_name\n )\n )", "def set_contents_from_file(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None, md5=None,\r\n reduced_redundancy=False, query_args=None,\r\n encrypt_key=False):\r\n provider = self.bucket.connection.provider\r\n if headers is None:\r\n headers = {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n if encrypt_key:\r\n headers[provider.server_side_encryption_header] = 'AES256'\r\n\r\n if reduced_redundancy:\r\n self.storage_class = 'REDUCED_REDUNDANCY'\r\n if provider.storage_class_header:\r\n headers[provider.storage_class_header] = self.storage_class\r\n # TODO - What if provider doesn't support reduced reduncancy?\r\n # What if different providers provide different classes?\r\n if hasattr(fp, 'name'):\r\n self.path = fp.name\r\n if self.bucket != None:\r\n if not md5:\r\n md5 = self.compute_md5(fp)\r\n else:\r\n # even if md5 is provided, still need to set size of content\r\n fp.seek(0, 2)\r\n self.size = fp.tell()\r\n fp.seek(0)\r\n self.md5 = md5[0]\r\n self.base64md5 = md5[1]\r\n if self.name == None:\r\n self.name = self.md5\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n self.send_file(fp, headers, cb, num_cb, query_args)", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass", "def _add_files(self, category, files, session, bucket=None):\n\n with session[category].make_commit('master') as commit:\n for filename, content in files.items():\n if bucket:\n commit.put_file_url(\n filename,\n 's3://%s/%s' % (bucket, content)\n )\n else:\n commit.put_file_bytes(\n filename,\n content\n )", "def cp(self, source: str, filename: str) -> None:\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%a, %d %b %Y %H:%M:%S GMT')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', '0'),\n ('Date', timestamp),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', _EMPTY_SHA256_HASH),\n ('x-amz-copy-source', '/%s%s' % 
(self.bucket, source)),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, _EMPTY_SHA256_HASH)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n if not self.conn:\n self.conn = http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT', filename, headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))", "def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content", "def put(self, path: str, filename: str) -> None:\n\n payload_hash, content_md5, length = _hash(path)\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%Y%m%dT%H%M%SZ')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', str(length)),\n ('Content-MD5', content_md5),\n ('Content-Type', 'application/zip'),\n ('Date', now.strftime('%a, %d %b %Y %H:%M:%S GMT')),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', payload_hash),\n ('x-amz-date', timestamp),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, payload_hash)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n with open(path, 'rb') as file_stream:\n if not self.conn:\n self.conn = 
http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT',\n filename,\n file_stream,\n headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))", "def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None", "def upload_file_to_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.upload_file(Filename=local_file_name, Bucket=bucket, Key=key)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` uploaded to ICOS as `{}`.'.format(local_file_name, key))", "def store_s3_contents ( s3_conn, bucket_name, key_name, key_contents = None, key_contents_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if ( key_contents_filename ) :\n key.set_contents_from_filename( key_contents_filename )\n else :\n key.set_contents_from_string( key_contents )", "def _s3cmd_put(src_path, bucket):\n if not os.path.exists(env.s3cmd_cfg):\n abort(\"Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.\" % env)\n\n with lcd(env.sites_path):\n local('fablib/bin/s3cmd --config=%s put' \\\n ' --rexclude \".*/\\.[^/]*$\"' \\\n ' --acl-public' \\\n ' --add-header=\"Cache-Control:max-age=300\"' \\\n ' -r %s/ s3://%s/' \\\n % (env.s3cmd_cfg, src_path, bucket))", "def put_object(self, account, container, object, content):#put a file to server\n \n pass", "def upload_file(self, instance, local_obj, remote_file):\n client = self.connect(instance)\n try:\n sftp = client.open_sftp()\n try:\n self._send_file(sftp, local_obj, remote_file)\n finally:\n sftp.close()\n finally:\n client.close()", "def upload_file(self, keyUrl='', body='', ContentType='', bucket=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n #Verificamos si existe body\n if body is None:\n body=''\n \n try:\n self.get_s3_client().put_object(Bucket=bucket, Key=keyUrl, Body=body, ACL='public-read', ContentType=ContentType)\n return True\n \n except ClientError as e:\n return False", "def putFile(filename, file = None, localFilename = None):\n if not file and not filename:\n print(\"Please pass a valid file or filename\")\n\n if filename and not file:\n file = open(filename, \"rb\")\n\n print(\"filename: {}, file: {}\".format(filename, file))\n r = requests.put(\"{host}/{filename}\".format(host = host, filename = filename), files = {\"file\": file})\n return (r.ok, r.status_code, r.text)", "def upload(self, remote, local, force = False):\n fl = self.list([ remote ])\n if force == False and remote in fl:\n remote_hash = fl[remote]\n h = hashlib.sha256()\n commonl.hash_file(h, local)\n if remote_hash == h.hexdigest():\n # remote hash is the same, no need to upload\n return\n\n with io.open(local, \"rb\") as inf:\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"POST\",\n file_path = remote,\n files = { 'file': inf })", "def test_put_object_from_file_user_headers(self):\n\n user_headers = 
{\"Cache-Control\":\"private\", \n \"Content-Disposition\":\"attachment; filename=\\\"abc.txt\\\"\", \n \"Expires\":\"123456\"}\n\n self.get_file(5)\n response = self.bos.put_object_from_file(bucket=self.BUCKET,\n key=\"test_put_file_user_headers\",\n file_name=self.FILENAME,\n user_headers=user_headers)\n self.check_headers(response)\n\n response = self.bos.get_object_meta_data(bucket_name=self.BUCKET, \n key='test_put_file_user_headers')\n self.assertEqual(response.metadata.expires, \"123456\")\n self.assertEqual(response.metadata.content_disposition, 'attachment; filename=\"abc.txt\"')\n self.assertEqual(response.metadata.cache_control, 'private')", "def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)", "def get_file(self, fp, headers=None, cb=None, num_cb=10,\r\n torrent=False, version_id=None, override_num_retries=None,\r\n response_headers=None):\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = self.size / self.BufferSize / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = total_bytes = 0\r\n cb(total_bytes, self.size)\r\n save_debug = self.bucket.connection.debug\r\n if self.bucket.connection.debug == 1:\r\n self.bucket.connection.debug = 0\r\n\r\n query_args = []\r\n if torrent:\r\n query_args.append('torrent')\r\n # If a version_id is passed in, use that. If not, check to see\r\n # if the Key object has an explicit version_id and, if so, use that.\r\n # Otherwise, don't pass a version_id query param.\r\n if version_id is None:\r\n version_id = self.version_id\r\n if version_id:\r\n query_args.append('versionId=%s' % version_id)\r\n if response_headers:\r\n for key in response_headers:\r\n query_args.append('%s=%s' % (key, response_headers[key]))\r\n query_args = '&'.join(query_args)\r\n self.open('r', headers, query_args=query_args,\r\n override_num_retries=override_num_retries)\r\n for bytes in self:\r\n fp.write(bytes)\r\n if cb:\r\n total_bytes += len(bytes)\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes, self.size)\r\n i = 0\r\n if cb:\r\n cb(total_bytes, self.size)\r\n self.close()\r\n self.bucket.connection.debug = save_debug", "def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)", "def put_object(self, bucket, key, local_file_path=None, file_bytes=None) -> None:\n def upload_to_s3(byte_array):\n self.resource.Object(bucket, key).put(Body=byte_array)\n\n if file_bytes:\n upload_to_s3(file_bytes)\n else:\n with open(local_file_path, 'rb') as local_file:\n self.resource.Object(bucket, key).put(Body=local_file)", "def upload(cls, local_file, remote_file='', bucket_name=QINIU_BUCKET_NAME):\n if remote_file == '':\n remote_file = cls.__gen_uuid()\n local_file = cls.__get_abs_path(local_file)\n url = \"/v1/qiniu/upload?key=%s&localFile=%s&token=root-weimiyun-9@usstpwd!\" % (remote_file, local_file)\n try:\n conn = httplib.HTTPConnection(UPLOAD_API_HOST)\n conn.request(method=\"POST\", url=url)\n response = conn.getresponse()\n res = response.read()\n if AUTO_DELETE:\n os.remove(local_file)\n return res, True\n except Exception, e:\n return 'Connection refused', False", 
"def upload_file(local_path, s3_path):\n with open(local_path, 'rb') as binary_data:\n s3.Bucket(bucket_name).put_object(Key=s3_path, Body=binary_data)", "def upload_file_helper(CREATED_BY, remote_file, obj):\n try:\n\n upload_file(CREATED_BY, remote_file,\n filename=obj['display_name'],\n file_extension=obj['file_extension'],\n description=obj['description'],\n display_name=obj['display_name'],\n data_id=obj['data_id'],\n format_id=obj['format_id'],\n status=obj['status'],\n topic_id=obj['topic_id'],\n is_public=obj['is_public'],\n is_in_spell=obj['is_in_spell'],\n is_in_browser=obj['is_in_browser'],\n file_date=obj['file_date'],\n readme_file_id=obj['readme_file_id'],\n source_id=obj['source_id']\n )\n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)", "def _safe_put(localfile, remotefile):\n _suffix = '.%s.bak' % datetime.datetime.now().strftime('%Y-%m-%d_%H%M')\n if exists(remotefile):\n run('mv %s %s' % (remotefile, remotefile+_suffix))\n #~ print('put %s. Backup: %s' % (remotefile, remotefile+_suffix))\n put(localfile, remotefile)", "def upload_file(bucket_name, filename, file):\n client = get_client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(filename)\n blob.upload_from_file(file)", "def upload_fileobj(self, bucket_name, file_obj, key):\n self._client.upload_fileobj(Fileobj=file_obj, Bucket=bucket_name, Key=key)", "def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,\r\n num_cb=10, policy=None, md5=None):\r\n if self.key_type & self.KEY_STREAM_WRITABLE:\r\n raise BotoClientError('Stream is not writable')\r\n elif self.key_type & self.KEY_STREAM_READABLE:\r\n key_file = self.fp\r\n else:\r\n if not replace and os.path.exists(self.full_path):\r\n return\r\n key_file = open(self.full_path, 'wb')\r\n shutil.copyfileobj(fp, key_file)\r\n key_file.close()", "def put_file(self, file_name: str, value: BytesIO):\n value.seek(0)\n self.client.upload_fileobj(value, self.bucket, file_name)", "def sync_up(self, bucket, remote_path, local_path):\n # TODO: make sync_down; both can probably use generic sync code\n b = self.conn.get_bucket(bucket)\n remote_ls = b.list(remote_path)\n remote_ls = [f.name for f in remote_ls]\n local_ls = os.listdir(local_path)\n for local_file in local_ls:\n remote_file = remote_path + local_file\n if remote_file not in remote_ls:\n logger.info('Transferring file to S3: %s', remote_file)\n key = b.new_key(remote_file)\n key.set_contents_from_filename(os.path.join(local_path, local_file))", "def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def _upload_file(sftp, local_file, remote_file) -> None:\n # Check if local_file is a file-like object and use the proper\n # paramiko function to upload it to the remote machine.\n if hasattr(local_file, \"read\"):\n sftp.putfo(local_file, remote_file)\n else:\n sftp.put(local_file, remote_file)", "def put_bytes(buf: bytes, bucket: str, key: str, tags: dict = {}, acl: str = 'private') -> Tuple[str, str, int]:\n logger.debug(f'Writing {len(buf)} bytes to s3://{bucket}/{key}')\n tagging = urllib.parse.urlencode(tags)\n 
client().put_object(Bucket=bucket, Key=key, Body=buf, Tagging=tagging, ACL=acl)\n return (bucket, key, len(buf))", "def scp_put_file(self, source_file, dest_file):\n self.scp_client.put(source_file, dest_file)", "def upload(self, path, data, headers={}):\n\n client = AsyncHTTPClient()\n method = 'PUT'\n url = self.generate_url(path)\n url_object = urlparse(url)\n params = {\n 'SignatureMethod': 'AWS4-HMAC-SHA256'\n }\n\n headers.update({\n 'Content-Length': str(len(data)),\n 'Content-Type': self._guess_mimetype(path),\n 'Date': self._rfc822_datetime(),\n 'Host': url_object.hostname,\n 'X-Amz-Content-sha256': hashlib.sha256(data).hexdigest(),\n })\n\n try:\n response = yield client.fetch(\n self.sign_request(\n url_object.hostname,\n url_object.path,\n params,\n headers,\n method,\n data\n ),\n method=method,\n body=data,\n connect_timeout=AWS_S3_CONNECT_TIMEOUT,\n request_timeout=AWS_S3_REQUEST_TIMEOUT,\n headers=headers\n )\n except HTTPError as error:\n log.error(error)\n if error.response:\n log.error(error.response.body)\n raise Return(None)\n\n raise Return(response)", "def put(self, key, headers, value, metadata=None):", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def get_contents_to_file(self, fp, headers=None,\r\n cb=None, num_cb=10,\r\n torrent=False,\r\n version_id=None,\r\n res_download_handler=None,\r\n response_headers=None):\r\n if self.bucket != None:\r\n if res_download_handler:\r\n res_download_handler.get_file(self, fp, headers, cb, num_cb,\r\n torrent=torrent,\r\n version_id=version_id)\r\n else:\r\n self.get_file(fp, headers, cb, num_cb, torrent=torrent,\r\n version_id=version_id,\r\n response_headers=response_headers)", "def push_file_to_server(cnc_bot, filename, content, encryption_key=None):\r\n c = content\r\n if encryption_key is not None:\r\n c = rc4.encrypt(c, encryption_key, salt_length=0) # encrypt content via rc4\r\n cfg = {'filename': filename, 'content': c}\r\n cnc_bot.host_orders(cPickle.dumps(cfg)) # upload a serialized dict\r", "def put(self, conn):\r\n\r\n try:\r\n self._queue.put(conn, block=False)\r\n except queue.Full:\r\n conn.close()", "def put_file(cls, filegen, signed_url):\n headers = {\"Content-Type\": \"application/octet-stream\"}\n try:\n resp = requests.put(signed_url, headers=headers, data=filegen)\n except Exception as err:\n raise exceptions.FilePutError(err)\n else:\n return resp", "def put_file(container, filepath, content):\n return put_files(container, [(filepath, content)])", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def put(host, username, localpath, remotepath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('sending file from local:%s -> %s', localpath, remotepath)\n if not remotepath:\n remotepath = os.path.split(localpath)[1]\n cmd = 'scp -P %s %s %s@%s:%s' % (port, localpath, username, host, remotepath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not send %s file to %s: Error %s', localpath, host, e)", "def FilePut(self, source_paths: list, remote_destination: str):\n lastChar = remote_destination[len(remote_destination)-1]\n if lastChar != '/':\n remote_destination += '/'\n\n try:\n paths = [p for pat in source_paths for p in self.expandPath(pat)]\n g = self.fileChunkGenerator(paths, True, remote_destination)\n status = self.filemanager.Put(g)\n print('# Copied {} 
files'.format(status.total_files))\n print('# Copied {} bytes'.format(status.total_bytes))\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e", "def upload(bucket, key, content, extra_agrs):\n # validate_content(content)\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n if extra_agrs:\n client.put_object(Body=content, Bucket=bucket, Key=key, ContentType=extra_agrs['ContentType'])\n else:\n client.put_object(Body=content, Bucket=bucket, Key=key)", "def put_file(uptoken, key, localfile, extra=None):\r\n if extra is not None and extra.check_crc == 1:\r\n extra.crc32 = _get_file_crc32(localfile)\r\n with open(localfile, 'rb') as f:\r\n return put(uptoken, key, f, extra)", "def put_file(self, key=None, local_file=None, rename=None):\n if local_file is None:\n raise NameError('upload file to qiniu error!')\n\n policy = rs.PutPolicy(bucket_name)\n policy.saveKey = rename\n up_token = policy.token()\n\n ret, err = io.put_file(up_token, key, local_file)\n if err is not None:\n sys.stderr.write('error: %s ' % err)\n return ret['key']\n else:\n # print 'upload local: %s file ok.' % local_file\n pass\n return ret", "def insert_from_s3(self, schema, table, path):\n client_s3 = ClientS3(S3['bucket'])\n temp = tempfile.NamedTemporaryFile()\n temp.close()\n client_s3.download(path, temp.name)\n\n try:\n self.insert_from_csv(schema, table, temp.name)\n except Exception as e:\n logger.error(\"No found: {0}\".format(e.message))", "def upload(self, bucket_name, key_name, fname):\n bucket = self.s3_.get_bucket(bucket_name)\n key = boto.s3.key.Key(bucket)\n with open(fname, 'rb') as infile:\n key.key = key_name\n return key.set_contents_from_file(infile)", "def put_file_scp (host, user, files, remote_path='.', recursive=False):\n ssh_giveup_timeout = env_vars['ssh_giveup_timeout']\n private_key = paramiko.RSAKey.from_private_key_file(env_vars[\"priv_key_path\"])\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)\n scpc=SCPClient(ssh.get_transport())\n scpc.put(files, remote_path, recursive)\n ssh.close()", "def upload(\n bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None\n) -> None:\n s3_client = _get_client(session)\n LOGGER.info(\"uploading %s to s3://%s/%s...\", filename, bucket, key)\n s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key)", "def upload(self, bucket, object, filename, mime_type='application/octet-stream'):\n service = self.get_conn()\n media = MediaFileUpload(filename, mime_type)\n response = service \\\n .objects() \\\n .insert(bucket=bucket, name=object, media_body=media) \\\n .execute()", "def persist_file(self, path, buf, info, meta=None, headers=None):\n headers = {\n \"Authorization\": \"UPYUN: {}:{}\".format(self.OPERATOR, self.SIGNATURE),\n \"Date\": format_date_time(int(time.time())),\n }\n url = \"http://v0.api.upyun.com:5000/{}/{}{}\".format(\n self.bucket, self.prefix, path)\n\n def upload():\n try:\n res = requests.put(url, headers=headers, data=buf)\n if res.status_code != 200:\n logger.info(\n \"failed to upload file %s to upyun, response code: %s, text:\\n%s\",\n path, res.status_code, res.text)\n else:\n logger.debug(\"uploaded file %s to upyun\", path)\n except Exception:\n logger.warn(\"upload 
file %s to upyun failed\",\n path, exc_info=True)\n return threads.deferToThread(upload)", "def upload_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n file_content = open(local_path, 'rb')\n lwr_AIES.put_object(Key=s3_path, Body=file_content)", "def put_bucket_cors(Bucket=None, CORSConfiguration=None):\n pass", "def upload(iid, file_obj, content_type):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n s3.Bucket(BUCKET_NAME).put_object(Key=iid,\n Body=file_obj,\n ContentType=content_type)\n return StorageType.S3\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # store locally in temp dir (tests, local development)\n store_temp_file(iid, file_obj)\n return StorageType.TMP\n return None", "def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)", "def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response", "def scp_put_file(self, source_file=None, dest_file=None):\n try:\n scp = SCPConn(ssh_conn=self)\n scp.scp_put_file(source_file=source_file, dest_file=dest_file)\n except Exception:\n raise\n finally:\n scp.close()", "def send_file(self, key, fp, headers, cb=None, num_cb=10):\r\n\r\n if not headers:\r\n headers = {}\r\n\r\n fp.seek(0, os.SEEK_END)\r\n file_length = fp.tell()\r\n fp.seek(0)\r\n debug = key.bucket.connection.debug\r\n\r\n # Use num-retries from constructor if one was provided; else check\r\n # for a value specified in the boto config file; else default to 5.\r\n if self.num_retries is None:\r\n self.num_retries = config.getint('Boto', 'num_retries', 5)\r\n progress_less_iterations = 0\r\n\r\n while True: # Retry as long as we're making progress.\r\n server_had_bytes_before_attempt = self.server_has_bytes\r\n try:\r\n etag = self._attempt_resumable_upload(key, fp, file_length,\r\n headers, cb, num_cb)\r\n # Upload succceded, so remove the tracker file (if have one).\r\n self._remove_tracker_file()\r\n self._check_final_md5(key, etag)\r\n if debug >= 1:\r\n print 'Resumable upload complete.'\r\n return\r\n except self.RETRYABLE_EXCEPTIONS, e:\r\n if debug >= 1:\r\n print('Caught exception (%s)' % e.__repr__())\r\n if isinstance(e, IOError) and e.errno == errno.EPIPE:\r\n # Broken pipe error causes httplib to immediately\r\n # close the socket (http://bugs.python.org/issue5542),\r\n # so we need to close the connection before we resume\r\n # the upload (which will cause a new connection to be\r\n # opened the next time an HTTP request is sent).\r\n key.bucket.connection.connection.close()\r\n except ResumableUploadException, e:\r\n if (e.disposition ==\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting but retaining tracker file' %\r\n e.message)\r\n raise\r\n elif (e.disposition ==\r\n ResumableTransferDisposition.ABORT):\r\n if debug >= 1:\r\n print('Caught non-retryable ResumableUploadException '\r\n '(%s); aborting and removing tracker file' %\r\n e.message)\r\n self._remove_tracker_file()\r\n raise\r\n else:\r\n if debug >= 1:\r\n print('Caught ResumableUploadException (%s) - will '\r\n 
'retry' % e.message)\r\n\r\n # At this point we had a re-tryable failure; see if made progress.\r\n if self.server_has_bytes > server_had_bytes_before_attempt:\r\n progress_less_iterations = 0\r\n else:\r\n progress_less_iterations += 1\r\n\r\n if progress_less_iterations > self.num_retries:\r\n # Don't retry any longer in the current process.\r\n raise ResumableUploadException(\r\n 'Too many resumable upload attempts failed without '\r\n 'progress. You might try this upload again later',\r\n ResumableTransferDisposition.ABORT_CUR_PROCESS)\r\n\r\n # Use binary exponential backoff to desynchronize client requests\r\n sleep_time_secs = random.random() * (2**progress_less_iterations)\r\n if debug >= 1:\r\n print ('Got retryable failure (%d progress-less in a row).\\n'\r\n 'Sleeping %3.1f seconds before re-trying' %\r\n (progress_less_iterations, sleep_time_secs))\r\n time.sleep(sleep_time_secs)", "def upload_bucket_file(\n self, organization_id: str, bucket_id: str, file_obj: IO,\n file_location: str, content_type: str,\n metadata: dict=None, lifetime: str=None) -> dict:\n path = '/organizations/{}/buckets/{}/files'.format(\n organization_id, bucket_id)\n\n if str(convert_to_valid_path(file_location)) != file_location:\n error_message = \"'file_location' must not start with '/' and must not include \" \\\n \"a relative path of '..' and '.'. '{}'\".format(file_location)\n raise BadRequest(\n error=error_message,\n error_description=error_message,\n status_code=400)\n\n if not metadata:\n metadata = {}\n encoded_metadata = encode_metadata(metadata)\n headers = dict()\n headers.update(encoded_metadata)\n\n params = {}\n if lifetime:\n params['lifetime'] = lifetime\n params = BytesIO(json.dumps(params).encode())\n\n files = {\n 'file': (file_location, file_obj, content_type),\n 'parameters': ('params.json', params, 'application/json')\n }\n res = self._connection.api_request(\n method='POST', headers=headers, path=path, files=files)\n return decode_file_metadata_if_exist(res)", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def upload_file_to_s3(bucket, artefact, bucket_key):\n try:\n client = boto3.client('s3')\n\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n\n try:\n kwargs = {\n \"Body\": open(artefact, 'rb'),\n \"Bucket\": bucket,\n \"Key\": bucket_key\n }\n\n mime_type, encoding = mimetypes.guess_type(artefact)\n\n if mime_type is None:\n file_name, file_ext = os.path.splitext(artefact)\n\n if file_ext == \".icon\" :\n kwargs[\"ContentType\"] = \"image/vnd.microsoft.icon\"\n\n elif file_ext == \".woff2\" :\n kwargs[\"ContentType\"] = \"application/font-woff\"\n \n else:\n kwargs[\"ContentType\"] = mime_type\n\n client.put_object(**kwargs)\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True", "def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = 
None\n return contents", "def downloadFile(remote_path, fobj):\n logger.msg(\n \"downloading file\", remote_path=remote_path, function='downloadFile'\n )\n\n def file_writer(data):\n fobj.write(data)\n\n remote_path = remote_path.encode('utf-8')\n r = yield treq.get(remote_path, timeout=5)\n try:\n yield treq.collect(r, file_writer)\n except Exception as e:\n print e\n raise", "def send_file(self, local_path, remote_path):\n try:\n with SCPClient(self.ssh_client.get_transport()) as scp:\n scp.put(local_path, remote_path, preserve_times=True)\n except SCPException:\n raise SCPException.message", "def put(self, url, localfile):\n\n cachedir = self._cachedir(url)\n filename = localfile.name\n\n logger.debug(f\"Storing {localfile} in cache for {url}\")\n shutil.copy2(localfile, cachedir / filename)\n self._writefilename(cachedir, filename)", "async def put(src: Union[str, bytes],\n dst: str,\n replace: bool = True,\n public_read: bool = False) -> bool:\n if replace is False and await has(dst) is True:\n return True\n _ = dst.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n # about multipart upload, see\n # https://skonik.me/uploading-large-file-to-s3-using-aiobotocore/\n if isinstance(src, str) is True:\n file = Path(src).resolve()\n if dst.endswith('/'):\n key = key + '/' + file.name\n try:\n async with _create_client() as client, \\\n aiofiles.open(file, 'rb') as f:\n if file.stat().st_size <= FILE_CHUNK_SIZE: # 5MB small file\n await client.put_object(\n Bucket=bucket,\n Key=key,\n Body=await f.read(),\n ACL='public-read' if public_read else 'private')\n logger.info(f'Put object \"{src}\" to \"{dst}\".')\n return True\n else: # large file upload, using multipart\n src_size = file.stat().st_size\n chunks_count = ceil(src_size / FILE_CHUNK_SIZE)\n create_mp_upload_resp = \\\n await client.create_multipart_upload(\n Bucket=bucket,\n Key=key,\n ACL='public-read' if public_read else 'private')\n upload_id = create_mp_upload_resp['UploadId']\n tasks = []\n\n async def upload_chunk(client, file, upload_id,\n chunk_number, src_size, bucket,\n key):\n offset = chunk_number * FILE_CHUNK_SIZE\n remaining_bytes = src_size - offset\n bytes_to_read = min((FILE_CHUNK_SIZE, remaining_bytes))\n part_number = chunk_number + 1\n\n file.seek(offset)\n data = await file.read(bytes_to_read)\n resp = await client.upload_part(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n Body=data,\n PartNumber=part_number,\n )\n global _part_info\n _part_info['Parts'].append({\n 'PartNumber': part_number,\n 'ETag': resp['ETag']\n })\n\n for chunk_number in range(chunks_count):\n tasks.append(\n upload_chunk(\n client=client,\n file=f,\n upload_id=upload_id,\n chunk_number=chunk_number,\n src_size=src_size,\n bucket=bucket,\n key=key,\n ))\n await asyncio.gather(*tasks)\n\n list_parts_resp = await client.list_parts(\n Bucket=bucket, Key=key, UploadId=upload_id)\n\n part_list = sorted(_part_info['Parts'],\n key=lambda k: k['PartNumber'])\n _part_info['Parts'] = part_list\n\n if len(list_parts_resp['Parts']) == chunks_count:\n await client.complete_multipart_upload(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n MultipartUpload=_part_info)\n logger.info(f'Put object \"{src}\" to \"{dst}\".')\n return True\n else:\n await client.abort_multipart_upload(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n )\n return False\n except (ClientError, FileNotFoundError):\n return False\n elif isinstance(src, bytes) is True:\n if dst.endswith('/'):\n key = key + '/'\n try:\n async with _create_client() as client:\n 
if (src_size := len(src)) <= FILE_CHUNK_SIZE:\n await client.put_object(\n Bucket=bucket,\n Key=key,\n Body=src,\n ACL='public-read' if public_read else 'private')\n logger.info(f'Put {src_size} bytes to \"{dst}\".')\n return True\n else:\n chunks_count = ceil(src_size / FILE_CHUNK_SIZE)\n\n create_mp_upload_resp = \\\n await client.create_multipart_upload(\n Bucket=bucket,\n Key=key,\n ACL='public-read' if public_read else 'private')\n upload_id = create_mp_upload_resp['UploadId']\n tasks = []\n\n async def upload_chunk(client, file, upload_id,\n chunk_number, bucket, key):\n offset = chunk_number * FILE_CHUNK_SIZE\n part_number = chunk_number + 1\n\n data = file[offset:offset + FILE_CHUNK_SIZE]\n resp = await client.upload_part(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n Body=data,\n PartNumber=part_number,\n )\n global _part_info\n _part_info['Parts'].append({\n 'PartNumber': part_number,\n 'ETag': resp['ETag']\n })\n\n for chunk_number in range(chunks_count):\n tasks.append(\n upload_chunk(\n client=client,\n file=src,\n upload_id=upload_id,\n chunk_number=chunk_number,\n bucket=bucket,\n key=key,\n ))\n await asyncio.gather(*tasks)\n\n list_parts_resp = await client.list_parts(\n Bucket=bucket, Key=key, UploadId=upload_id)\n\n part_list = sorted(_part_info['Parts'],\n key=lambda k: k['PartNumber'])\n _part_info['Parts'] = part_list\n\n if len(list_parts_resp['Parts']) == chunks_count:\n await client.complete_multipart_upload(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n MultipartUpload=_part_info)\n logger.info(f'Put {src_size} bytes to \"{dst}\".')\n return True\n else:\n await client.abort_multipart_upload(\n Bucket=bucket,\n Key=key,\n UploadId=upload_id,\n )\n return False\n except ClientError:\n return False\n finally:\n del src # release memory", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n storage_client = storage.Client()\n \n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n blob = bucket.blob(destination_blob_name)\n blob.make_public()\n\n url = blob.public_url\n\n message = ('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))\n\n return (message,url)", "def putFile(self, filename):\n basename = os.path.basename(filename)\n fp = open(filename, 'rb')\n self.ftp.storbinary('stor ' + basename, fp)\n fp.close();", "def test_upload_file_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_file(\n file_path=\"test/test_resources/test_file\", file_name=\"foofile\", bucket_name=\"foobucket\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', []):\n contents.append(content.get('Key'))\n\n self.assertEqual(contents, [\"foofile\"])", "def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)", "def upload(self, path, key, extra_args={}):\n if key.endswith(\"/\"):\n key += os.path.basename(path)\n if key.startswith(\"/\"):\n key = key[1:]\n remote_path = self.base.full_cell + \"/\" + key\n self.s3.meta.client.upload_file(path, self.bucket, remote_path, ExtraArgs=extra_args)\n print \"UPLOADED {} to s3://{}/{}\".format(path, self.bucket, 
remote_path)", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print('File {} uploaded to {}.'.format(\n Crabbie.queue_file,\n destination_blob_name))", "def put_raw(key: str, data: bytes, project: str, bucket: str) -> None:\n bucket = bucket_construct(project, bucket)\n blob = google.cloud.storage.blob.Blob(name=key, bucket=bucket)\n\n blob.upload_from_string(data, 'application/octet-stream')\n LOG.info(f'Successfully uploaded file to {key} in {bucket}')", "def upload(filename, records):\n client = storage.Client()\n bucket = client.bucket(TEST_BUCKET)\n if records is not None:\n blob = bucket.blob(filename)\n blob.upload_from_string(convert_to_csv(records))\n return bucket", "def set_contents_from_filename(self, filename, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None, md5=None,\r\n reduced_redundancy=None,\r\n res_upload_handler=None):\r\n fp = open(filename, 'rb')\r\n self.set_contents_from_file(fp, headers, replace, cb, num_cb,\r\n policy, md5, res_upload_handler)\r\n fp.close()", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))", "def open_file_write(paths, s3=None, **kwargs):\n bucket = kwargs.pop('host', '')\n if s3 is None:\n s3 = _get_s3(**kwargs)\n out = [delayed(s3.open)(bucket + path, 'wb') for path in paths]\n return out", "def _upload_file_bytes(self, conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb):\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = 0\r\n cb(total_bytes_uploaded, file_length)\r\n\r\n # Build resumable upload headers for the transfer. 
Don't send a\r\n # Content-Range header if the file is 0 bytes long, because the\r\n # resumable upload protocol uses an *inclusive* end-range (so, sending\r\n # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).\r\n put_headers = {}\r\n if file_length:\r\n range_header = self._build_content_range_header(\r\n '%d-%d' % (total_bytes_uploaded, file_length - 1),\r\n file_length)\r\n put_headers['Content-Range'] = range_header\r\n # Set Content-Length to the total bytes we'll send with this PUT.\r\n put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)\r\n http_request = AWSAuthConnection.build_base_http_request(\r\n conn, 'PUT', path=self.tracker_uri_path, auth_path=None,\r\n headers=put_headers, host=self.tracker_uri_host)\r\n http_conn.putrequest('PUT', http_request.path)\r\n for k in put_headers:\r\n http_conn.putheader(k, put_headers[k])\r\n http_conn.endheaders()\r\n\r\n # Turn off debug on http connection so upload content isn't included\r\n # in debug stream.\r\n http_conn.set_debuglevel(0)\r\n while buf:\r\n http_conn.send(buf)\r\n total_bytes_uploaded += len(buf)\r\n if cb:\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes_uploaded, file_length)\r\n i = 0\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n cb(total_bytes_uploaded, file_length)\r\n if total_bytes_uploaded != file_length:\r\n # Abort (and delete the tracker file) so if the user retries\r\n # they'll start a new resumable upload rather than potentially\r\n # attempting to pick back up later where we left off.\r\n raise ResumableUploadException(\r\n 'File changed during upload: EOF at %d bytes of %d byte file.' %\r\n (total_bytes_uploaded, file_length),\r\n ResumableTransferDisposition.ABORT)\r\n resp = http_conn.getresponse()\r\n body = resp.read()\r\n # Restore http connection debug level.\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n if resp.status == 200:\r\n return resp.getheader('etag') # Success\r\n # Retry timeout (408) and status 500 and 503 errors after a delay.\r\n elif resp.status in [408, 500, 503]:\r\n disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY\r\n else:\r\n # Catch all for any other error codes.\r\n disposition = ResumableTransferDisposition.ABORT\r\n raise ResumableUploadException('Got response code %d while attempting '\r\n 'upload (%s)' %\r\n (resp.status, resp.reason), disposition)", "def upload_to_s3(file_from_machine, bucket, file_to_s3):\n s3.upload_file(file_from_machine, bucket, file_to_s3)\n print(file_to_s3, \" : is upoaded to s3\")", "def upload_file_by_url(s3_file_name, filename):\n full_path = os.path.join(CONFIG_BROKER['path'], \"tests\", \"integration\", \"data\", filename)\n\n if CONFIG_BROKER['local']:\n # If not using AWS, put file submission in location\n # specified by the config file\n broker_file_path = CONFIG_BROKER['broker_files']\n copy(full_path, broker_file_path)\n submitted_file = os.path.join(broker_file_path, filename)\n return {'bytesWritten': os.path.getsize(submitted_file), 's3FileName': full_path}\n else:\n # Use boto to put files on S3\n s3conn = boto.s3.connect_to_region(CONFIG_BROKER[\"aws_region\"])\n bucket_name = CONFIG_BROKER['aws_bucket']\n key = Key(s3conn.get_bucket(bucket_name))\n key.key = s3_file_name\n bytes_written = key.set_contents_from_filename(full_path)\n return {'bytesWritten': bytes_written, 's3FileName': s3_file_name}", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n 
blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\r\n bucket_name = \"my-photos\"\r\n source_file_name = \"./puppy.png\"\r\n estination_blob_name = \"puppy01\"\r\n\r\n storage_client = storage.Client()\r\n bucket = storage_client.bucket(bucket_name)\r\n blob = bucket.blob(destination_blob_name)\r\n\r\n blob.upload_from_filename(source_file_name)\r\n\r\n print(\r\n \"File {} uploaded to {}.\".format(\r\n source_file_name, destination_blob_name\r\n )\r\n )", "async def put_file(object_name: str, file: File, **kwargs) -> str:\n # TODO: Do not read file but rather stream content as it comes\n await file.read()\n # Get the synchronous file interface from the asynchronous file\n file_obj = file.file\n # Store position of cursor (number of bytes read)\n file_size = file_obj.tell()\n # Reset cursor at start of file\n file_obj.seek(0)\n # Trace file upload with its size\n logger.debug(f\"Uploading file: {object_name} with {file_size} bytes\")\n # Time file upload for debug\n start = time.time()\n # Store object on s3 storage\n client.put_object(\n bucket_name=DATASETS_BUCKET,\n object_name=object_name,\n length=file_size,\n data=file_obj,\n )\n end = time.time()\n # Log time spent\n logger.debug(f\"Took {end - start} seconds to upload {file_size} bytes\")", "def upload_to_s3(channel, file):\n s3_resource = boto3.resource('s3')\n data = open(file, \"rb\")\n key = channel + '/' + file\n s3_resource.Bucket(BUCKET).put_object(Key=key, Body=data)", "def test_cmd_PUTSingleNoRemotePath(self):\n content = b'Test\\r\\nContent'\n localPath = self.makeFile(content=content)\n flags = (\n filetransfer.FXF_WRITE |\n filetransfer.FXF_CREAT |\n filetransfer.FXF_TRUNC\n )\n remoteName = os.path.join('/', os.path.basename(localPath))\n remoteFile = InMemoryRemoteFile(remoteName)\n self.fakeFilesystem.put(remoteName, flags, defer.succeed(remoteFile))\n self.client.client.options['buffersize'] = 10\n\n deferred = self.client.cmd_PUT(localPath)\n self.successResultOf(deferred)\n\n self.assertEqual(content, remoteFile.getvalue())\n self.assertTrue(remoteFile._closed)\n self.checkPutMessage(\n [(localPath, remoteName,\n ['76% 10.0B', '100% 13.0B', '100% 13.0B'])])", "def store_file(self, key, local_file):\n\t\t\n\t\ttry:\n\t\t\tdata = open(local_file, 'r').read()\n\t\t\tself.s3.put(\n\t\t\t\tself.bucket,\n\t\t\t\tkey,\n\t\t\t\tS3Object(data), {\n\t\t\t\t\t'Content-Type': 'application/x-bzip2',\n\t\t\t\t\t'x-amz-acl': 'private',\n\t\t\t\t\t'Content-Length': len(data)\n\t\t\t\t}\n\t\t\t)\n\t\texcept:\n\t\t\treturn False" ]
[ "0.6534801", "0.6233578", "0.6144909", "0.6129894", "0.5970255", "0.59591484", "0.59422994", "0.5909662", "0.5902494", "0.58096933", "0.57955354", "0.5794321", "0.57661766", "0.5737979", "0.57304716", "0.5720829", "0.5701128", "0.5676588", "0.56659216", "0.56574786", "0.56344366", "0.56267023", "0.5618739", "0.5609733", "0.5602718", "0.56025225", "0.5596587", "0.559401", "0.5587667", "0.5583829", "0.55564725", "0.5553292", "0.5541828", "0.55386174", "0.55283624", "0.5513457", "0.5504701", "0.5489724", "0.5488502", "0.5471507", "0.54712653", "0.5470534", "0.5462744", "0.54610294", "0.5447789", "0.54170287", "0.5411128", "0.54085916", "0.5405848", "0.54014534", "0.53979445", "0.5391712", "0.5388961", "0.53764796", "0.5370936", "0.5365559", "0.5356252", "0.5355936", "0.535211", "0.53507006", "0.5347875", "0.53343284", "0.5330204", "0.5315898", "0.5315622", "0.5296826", "0.52943945", "0.5287755", "0.52775234", "0.5276915", "0.52673125", "0.5265712", "0.52624047", "0.52602935", "0.5257203", "0.5249224", "0.523539", "0.5233218", "0.5231367", "0.52279305", "0.5216441", "0.52152926", "0.5213391", "0.520211", "0.51996607", "0.5195008", "0.5191709", "0.5180996", "0.5180726", "0.51805085", "0.517495", "0.5170336", "0.5168395", "0.516448", "0.51602626", "0.51581514", "0.51570624", "0.51556885", "0.51502764", "0.51485384" ]
0.7733097
0
Get headers for this type of file. Also, put the correct content encoding.
def _get_headers(content_type):
    headers = {'x-amz-acl': 'public-read',
               'Content-Type': content_type,
               'Cache-Control': 'public,max-age=31536000'}
    return headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_headers(self):\r\n raise NotImplementedError", "def set_headers(self, headers):\n self.headers = headers\n process_headers(self)\n self.character_encoding = self.parsed_headers.get(\n 'content-type', (None, {})\n )[1].get('charset', 'utf-8') # default isn't UTF-8, but oh well", "def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. ' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)", "def get_headers(self):\n return [('Content-Type', self.MULTIPART_HEADER % self.boundary)]", "def getAllHeaders():", "def get_headers(self, file_name):\n with open(file_name) as f:\n self.headers = json.load(f)", "def __headers(content_type='application/json'):\n headers = {\n 'accept': content_type,\n 'content-type': content_type,\n }\n return headers", "def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover", "def parse_headers(self):\n\n logger.debug(f\"parse headers of {self.path}\")\n with open(self.path, 'rb') as f:\n parser = BinaryParser(f)\n magic, version_major, version_minor = parser.unpack(\"<2sBB\")\n if magic != b'RW':\n raise ValueError(\"invalid magic code\")\n self.version = (version_major, version_minor)\n\n if version_major == 1:\n parser.seek(8)\n elif version_major == 2:\n parser.seek(100)\n elif version_major == 3:\n parser.seek(268)\n else:\n raise ValueError(f\"unsupported WAD version: {version_major}.{version_minor}\")\n\n entry_count, = parser.unpack(\"<I\")\n\n if version_major == 1:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIII\")) for _ in range(entry_count)]\n else:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIIBBBBQ\")) for _ in range(entry_count)]", "def get_headers(self):\n # Creating headers.\n headers = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch, br',\n 'accept-language': 'en-GB,en;q=0.8,en-US;q=0.6,ml;q=0.4',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\n return headers", "def read_headers(self):\r\n environ = self.environ\r\n \r\n while True:\r\n line = self.rfile.readline()\r\n if not line:\r\n # No more data--illegal end of headers\r\n raise ValueError(\"Illegal end of headers.\")\r\n \r\n if line == '\\r\\n':\r\n # Normal end of headers\r\n break\r\n \r\n if line[0] in ' \\t':\r\n # It's a continuation line.\r\n v = line.strip()\r\n else:\r\n k, v = line.split(\":\", 1)\r\n k, v = k.strip().upper(), v.strip()\r\n envname = \"HTTP_\" + k.replace(\"-\", \"_\")\r\n \r\n if k in comma_separated_headers:\r\n existing = environ.get(envname)\r\n if existing:\r\n v = \", \".join((existing, v))\r\n environ[envname] = v\r\n \r\n ct = environ.pop(\"HTTP_CONTENT_TYPE\", None)\r\n if ct:\r\n environ[\"CONTENT_TYPE\"] = ct\r\n cl = environ.pop(\"HTTP_CONTENT_LENGTH\", None)\r\n if cl:\r\n environ[\"CONTENT_LENGTH\"] = cl", "def read_headers(filelike):\n return 
reader.Reader.read_headers(filelike).datafile", "def get_headers(self):\n \n return self.headers", "def set_file_content(self, file_path: Path, content_type: str):\n file_stat = file_path.stat()\n self.set_header(\"Content-Length\", str(file_stat.st_size))\n self.set_header(\"Last-Modified\", gmtime_string(file_stat.st_mtime))\n self.set_header(\"Content-Type\", content_type)\n self._content_io = io.open(file_path, 'rb')", "def __get_headers(self):\n\n return {}", "def set_http_headers(self, content_settings, timeout=None, **kwargs): # type: ignore\n #type: (ContentSettings, Optional[int], Optional[Any]) -> Dict[str, Any]\n file_content_length = kwargs.pop('size', None)\n file_http_headers = FileHTTPHeaders(\n file_cache_control=content_settings.cache_control,\n file_content_type=content_settings.content_type,\n file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,\n file_content_encoding=content_settings.content_encoding,\n file_content_language=content_settings.content_language,\n file_content_disposition=content_settings.content_disposition\n )\n try:\n return self._client.file.set_http_headers( # type: ignore\n timeout=timeout,\n file_content_length=file_content_length,\n file_http_headers=file_http_headers,\n cls=return_response_headers,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)", "def get_headers():\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'cfg', 'headers.json'))\n return open_json_file(file_path)", "def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data", "def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers", "def get_headers(self, environ=None):\n return [('Content-Type', 'application/json')]", "def headers(self, v):\n raise NotImplementedError", "def headers(self):\r\n return dict(**self._get_headers())", "def _setHeaders(self):\r\n if not self.headers_set:\r\n self.headers_set = 1\r\n for key in self.headers_out.keys():\r\n self._response.setHeader(key, self.headers_out[key])\r\n self._response.setContentType(self.content_type)", "def headers(self):\n\n return None", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def headers(self):\n return [h for h, _ in self.data]", "def _wsgi_headers(self, media_type=None):\n\n headers = self._headers\n\n # PERF(kgriffs): Using \"in\" like this is faster than using\n # dict.setdefault (tested on py27).\n set_content_type = (media_type is not None and\n 'content-type' not in headers)\n\n if set_content_type:\n headers['content-type'] = media_type\n\n if six.PY2: # pragma: no cover\n # PERF(kgriffs): Don't create an extra list object if\n # it isn't needed.\n return headers.items()\n\n return list(headers.items()) # pragma: no cover", "def generate_headers(self):\n raise NotImplementedError()", "def get_http_headers(self):\n return dict(self.headers)", "def headers(self):\n return Dict(**self._get_headers())", "def get_headers(self):\n\t\t# collect all the non-segment\n\t\t# files into a list (there\n\t\t# should only be one header)\n\t\tfiles = glob.glob(\"%s/*\" % self.segment_path)\n\t\theaders = [f for f in files if os.path.splitext(f)[1] != '.seg']\n\t\tfor path in 
headers:\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='header')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def http_headers(self) -> dict:\n headers = super().http_headers\n headers[\"Accept\"] = \"application/vnd.github.squirrel-girl-preview\"\n return headers", "def peek(self):\n self.fh.seek(0)\n snip = self.fh.read(12)\n if unpack('<i', snip[4:8])[0] <= max(_supported_file_types):\n self.header['byte order'] = '<'\n self._bo = '<'\n elif unpack('>i', snip[4:8])[0] <= max(_supported_file_types):\n self.header['byte order'] = '>'\n self._bo = '>'\n else:\n raise TypeError(\"Cannot determine file endianess.\")\n\n self.header['file version'], self.header['file type'], \\\n self.header['header size'] = \\\n unpack(self._bo + '3i', snip)\n\n if self.header['file type'] not in _supported_file_types:\n msg = \"File of type {} is not supported at the moment.\"\n msg = msg.format(self.header['file type'])\n raise NotImplementedError(msg)", "def headers(self) -> Mapping[str, str]:\n return pulumi.get(self, \"headers\")", "def get_headers(self, environ):\n return [('Content-Type', 'text/html'),\n ('Access-Control-Allow-Origin', '*')]", "def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}", "def headers(self):\n return(self.__response.headers)", "def getheaders(self):\n return self.__headers", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self.headers:\n headers.update(self.headers)\n headers.update(kwargs)\n return headers", "def headers(self):\n return self._header", "def _headers(self) -> Mapping[str, str]:\n return {}", "def headers(self) -> Sequence['outputs.HeaderResponse']:\n return pulumi.get(self, \"headers\")", "def _headers(self):\n\n auth_token = SendbeeAuth(self.client.api_secret).get_auth_token()\n headers = {\n 'X-Auth-Token': auth_token,\n 'X-Api-Key': self.client.api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Sendbee Python API Client'\n }\n self.debug.ok('headers', headers)\n\n return headers", "def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n 
\\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()", "def fusion_api_get_headers(self):\n return self.fusion_client._headers.copy()", "def _read_header(self):\n\n stream = self.stream\n\n self._seek_to_table(tables.header)\n\n # Read header[0 ... 1]\n checksum = stream.read_unsigned_byte4()\n design_font_size = stream.read_fix_word()\n\n # Read header[2 ... 11] if there\n character_info_table_position = self.table_pointers[\n tables.character_info]\n position = stream.tell()\n if position < character_info_table_position:\n character_coding_scheme = stream.read_bcpl()\n else:\n character_coding_scheme = None\n\n # Read header[12 ... 16] if there\n character_coding_scheme_length = 40 # bytes (11 - 2 + 1) * 4 = 10 * 4\n position += character_coding_scheme_length\n if position < character_info_table_position:\n family = stream.read_bcpl(position)\n else:\n family = None\n\n # Read header[12 ... 16] if there\n family_length = 20 # bytes (16 - 12 +1) * 4 = 5 * 4\n position += family_length\n if position < character_info_table_position:\n seven_bit_safe_flag = stream.read_unsigned_byte1(position)\n stream.read_unsigned_byte2()\n face = stream.read_unsigned_byte1()\n # Fixme: complete\n\n # don't read header [18 ... whatever]\n\n self.tfm = Tfm(self.font_name,\n self.filename,\n self.smallest_character_code,\n self.largest_character_code,\n checksum,\n design_font_size,\n character_coding_scheme,\n family)", "def parse_header(self):", "def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n 
hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr", "def _read_headers(self, data):\n do_close = False\n\n try:\n initial_line, data = data.split(CRLF, 1)\n try:\n try:\n http_version, status, status_text = initial_line.split(' ', 2)\n status = int(status)\n except ValueError:\n http_version, status = initial_line.split(' ')\n status = int(status)\n status_text = HTTP.get(status, '')\n except ValueError:\n raise BadRequest('Invalid HTTP status line %r.' % initial_line)\n\n # Parse the headers.\n headers = read_headers(data)\n\n # Construct an HTTPResponse object.\n self.current_response = response = HTTPResponse(self,\n self._requests[0], http_version, status, status_text, headers)\n\n # Do we have a Content-Encoding header?\n if 'Content-Encoding' in headers:\n encoding = headers['Content-Encoding']\n if encoding == 'gzip':\n response._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)\n elif encoding == 'deflate':\n response._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n\n # Do we have a Content-Length header?\n if 'Content-Length' in headers:\n self._stream.on_read = self._read_body\n self._stream.read_delimiter = int(headers['Content-Length'])\n\n elif 'Transfer-Encoding' in headers:\n if headers['Transfer-Encoding'] == 'chunked':\n self._stream.on_read = self._read_chunk_head\n self._stream.read_delimiter = CRLF\n else:\n raise BadRequest(\"Unsupported Transfer-Encoding: %s\" % headers['Transfer-Encoding'])\n\n # Is this a HEAD request? 
If so, then handle the request NOW.\n if response.method == 'HEAD':\n self._on_response()\n\n except BadRequest, e:\n log.info('Bad response from %r: %s',\n self._server, e)\n do_close = True\n\n except Exception:\n log.exception('Error handling HTTP response.')\n do_close = True\n\n # Clear the way for the next request.\n if do_close:\n self._requests.pop(0)\n self.current_response = None\n if self._stream:\n self._stream.close()\n self._stream = None", "def headers(self):\n return self.generator.headers", "def get_headers(self):\n headers = []\n for text, level in self._headers:\n headers.append(text)\n return headers", "def get_file_content(self):\n s = StringIO.StringIO()\n\n s.write(self.get_header())\n s.write(self.get_content())\n\n return s.getvalue()", "def __set_content_type(self):\r\n if self.headers is None:\r\n return\r\n\r\n content_type = self.headers.get(\"content-type\", None)\r\n\r\n if content_type is None:\r\n return\r\n if \";\" in content_type:\r\n content_type_parts = content_type.split(\";\")\r\n\r\n if len(content_type_parts) == 2:\r\n self.__content_type = content_type_parts[0]\r\n else:\r\n self.__content_type = content_type", "def _read_response_header(self):\r\n length = None\r\n encoding = \"identity\"\r\n chunked = False\r\n\r\n hdr = []\r\n while True:\r\n line = self._read_line()\r\n if not line:\r\n break\r\n hdr.append(line)\r\n\r\n for line in hdr:\r\n if \"Content-Length\" in line:\r\n length = int(line[15:])\r\n if \"Content-Encoding\" in line:\r\n encoding = line[17:].strip()\r\n if \"Transfer-Encoding: chunked\" in line:\r\n chunked = True\r\n\r\n return (length, encoding, chunked)", "def __set_content_type(self):\n if self.headers is None:\n return\n\n content_type = self.headers.get(\"content-type\", None)\n\n if content_type is None:\n return\n if \";\" in content_type:\n content_type_parts = content_type.split(\";\")\n\n if len(content_type_parts) == 2:\n self.__content_type = content_type_parts[0]\n else:\n self.__content_type = content_type", "def _parseFileHeader(self):\n self.fileheader = FileHeader()\n self.fileheader.parse(self.f)\n #print('Parsed fileheader')", "def response_headers(self, extra_headers=None):\n headers_copy = self.headers.copy()\n\n if extra_headers:\n headers_copy.update(extra_headers)\n\n headers = \"\"\n\n for h in headers_copy:\n headers += \"%s: %s\\r\\n\" % (h, headers_copy[h])\n\n return headers.encode()", "def _update_management_header(self, request):\n\n if request.method in [\"PUT\", \"POST\", \"MERGE\", \"DELETE\"]:\n request.headers[\"Content-Length\"] = str(len(request.body))\n\n # append additional headers base on the service\n # request.headers.append(('x-ms-version', X_MS_VERSION))\n\n # if it is not GET or HEAD request, must set content-type.\n if request.method not in [\"GET\", \"HEAD\"]:\n for key in request.headers:\n if \"content-type\" == key.lower():\n break\n else:\n request.headers[\"Content-Type\"] = \"application/xml\"\n\n return request.headers", "def __head_or_get(self, path):\n try:\n info = self.get_cont_stat(path)\n if not isinstance(info, types.DictType):\n raise info()\n headers = HeaderKeyDict({\n 'X-Container-Object-Count': info['object_count'],\n 'X-Container-Bytes-Used': info['bytes_used'],\n 'X-Timestamp': info['created_at'],\n 'X-PUT-Timestamp': info['put_timestamp'],\n })\n metadata = info['metadata']\n for key, value in metadata.iteritems():\n if key == 'r-':\n headers.update({'x-container-read' : value})\n elif key == 'w-':\n headers.update({'x-container-write' : value})\n else:\n 
ser_key = key.split('-')[0]\n if ser_key == 'm':\n #Supported a single word key till first '-' \n #in the entire metadata header as X-Container-Meta-A\n #key = 'x-container-meta-' + key.split('-')[1]\n \n #SANCHIT: This supports multi-part key for metadata \n #such as X-Container-Meta-A-B-C\n key = 'x-container-meta-' + key.split('-', 1)[1]\n else:\n #key = 'x-container-sysmeta-' + key.split('-')[1]\n key = 'x-container-sysmeta-' + key.split('-', 1)[1]\n headers.update({key : value})\n return headers\n except HTTPException as error:\n self.logger.exception(error)\n return error.status_int\n except Exception as err:\n self.logger.exception(err)\n return HTTP_INTERNAL_SERVER_ERROR", "def define_headers(self):\n return {}", "def _update_headers(self):\n if not self._header_updated:\n headers = self.head_obj(self._client, self._spec)\n self._headers.update(headers)\n self._header_updated = True", "def generate_generic_headers(self):\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json'\n }", "def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers", "def getHeaders(self):\n hd = {}\n line = self.conn.readline()\n while line != \"\\r\\n\":\n print \":\"+line+\":\"+\" len = \",len(line)\n key,value = line.split(':',1)\n hd[key] = value.rstrip()\n line = self.conn.readline()\n return hd", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def encode_meta_headers(headers):\n ret = {}\n for header, value in headers.items():\n value = encode_utf8(value)\n header = header.lower()\n\n if (isinstance(header, str) and\n header.startswith(USER_METADATA_TYPE)):\n header = encode_utf8(header)\n\n ret[header] = value\n return ret", "def headersFromRawFile(self, rawFile: str, headers: Dict) -> None:\n dFile = open(os.path.join(self.dataPath, rawFile), \"r\", encoding=\"ISO-8859-1\")\n generalHeaderString = dFile.read(1000) # this should be long enough\n generalSplit = generalHeaderString.split()\n # read GENERAL HEADER\n generalHeader = {}\n generalHeader[\"recLength\"] = int(generalSplit[0])\n generalHeader[\"fileType\"] = generalSplit[1]\n generalHeader[\"wordLength\"] = int(generalSplit[2])\n generalHeader[\"version\"] = generalSplit[3]\n generalHeader[\"procId\"] = generalSplit[4]\n generalHeader[\"numCh\"] = int(generalSplit[5])\n generalHeader[\"totalRec\"] = int(generalSplit[6])\n generalHeader[\"firstEvent\"] = int(generalSplit[7])\n generalHeader[\"numEvent\"] = int(generalSplit[8])\n generalHeader[\"extend\"] = int(generalSplit[9])\n\n # read EVENT HEADER - there can be multiple of these, but normally only the one\n # Multiple events are largely deprecated. 
Only a single event is used\n eventHeaders = []\n fileSize = os.path.getsize(os.path.join(self.dataPath, rawFile))\n record = generalHeader[\"firstEvent\"]\n for ir in range(0, generalHeader[\"numEvent\"]):\n seekPt = (record - 1) * generalHeader[\"recLength\"]\n if not seekPt > fileSize:\n # seek from beginning of file\n dFile.seek(seekPt, 0)\n # read extra to make sure\n eventString = dFile.read(1000)\n eventSplit = eventString.split()\n eH = {}\n eH[\"start\"] = int(eventSplit[0])\n eH[\"startms\"] = int(eventSplit[1])\n eH[\"stop\"] = int(eventSplit[2])\n eH[\"stopms\"] = int(eventSplit[3])\n eH[\"cvalue1\"] = float(eventSplit[4])\n eH[\"cvalue2\"] = float(eventSplit[5])\n eH[\"cvalue3\"] = float(eventSplit[6])\n eH[\"EHInfile\"] = int(eventSplit[7])\n eH[\"nextEH\"] = int(eventSplit[8])\n eH[\"previousEH\"] = int(eventSplit[9])\n eH[\"numData\"] = int(eventSplit[10])\n eH[\"startData\"] = int(eventSplit[11])\n eH[\"extended\"] = int(eventSplit[12])\n eventHeaders.append(eH)\n if eH[\"nextEH\"] < generalHeader[\"totalRec\"]:\n record = eH[\"nextEH\"] # set to go to next eH\n else:\n break # otherwise break out of for loops\n # close the data file\n dFile.close()\n # now compare number of samples with that calculated previously\n if eventHeaders[0][\"numData\"] != headers[\"num_samples\"]:\n self.printWarning(\"Data file: {}\".format(dFile))\n self.printWarning(\n \"Number of samples in raw file header {} does not equal that calculated from data {}\".format(\n eventHeaders[0][\"numData\"], headers[\"num_samples\"]\n )\n )\n self.printWarning(\"Number of samples calculated from data will be used\")\n # set the byte offset for the file\n self.dataByteOffset[rawFile] = (\n eventHeaders[0][\"startData\"] - 1\n ) * generalHeader[\"recLength\"]\n self.recChannels[rawFile] = generalHeader[\"numCh\"]", "def get_file_header(self, file_path, size=8) -> str:\n file_header = None\n try:\n with open(file_path, 'rb') as _file:\n file_header = _file.read(size)\n except Exception:\n logger.error('Unable to retrieve the file header')\n return file_header", "def responseheaders(self, flow: mitmproxy.http.HTTPFlow):", "def http_headers(self) -> dict:\n headers = {\"Accept\": \"application/vnd.github.v3+json\"}\n if \"user_agent\" in self.config:\n headers[\"User-Agent\"] = self.config.get(\"user_agent\")\n if \"auth_token\" in self.config:\n headers[\"Authorization\"] = f\"token {self.config['auth_token']}\"\n return headers", "def test_header_encoding(self):\n\n body = r\"\"\"\n {\"response\":{\n \"action\":\"upload\\/simple\",\n \"doupload\":{\"result\":\"0\",\"key\":\"53u05frn7sm\"},\n \"server\":\"live\",\"result\":\"Success\",\"new_key\":\"yes\",\n \"current_api_version\":\"1.1\"}}\n \"\"\"\n\n responses.add(responses.POST, self.url, body=body, status=200,\n content_type=\"application/json\")\n\n fd = io.StringIO(\"I am the payload. 
Hi!\")\n file_size = len(fd.read())\n fd.seek(0)\n\n self.api.upload_simple(\n fd, \"тест.bin\", file_size=file_size, file_hash='0')\n\n request = responses.calls[0].request\n\n x_filename = request.headers['x-filename']\n\n self.assertEqual(x_filename, \"тест.bin\".encode('utf-8'))", "def get_request_headers(self):\n\t\theaders = {\n\t\t\t'Cache-Control': 'no-cache no-store max-age=1',\n\t\t\t'Connection': 'cache-control',\n\t\t}\n\t\tif self.last_modified:\n\t\t\theaders['If-Modified-Since'] = self.last_modified\n\t\tif self.etag:\n\t\t\theaders['If-None-Match'] = self.etag\n\t\treturn headers", "def get_content_type_and_encoding(content_type_header):\n\tif not content_type_header:\n\t\treturn (None, None)\n\t\n\th_parts = content_type_header.split(';')\n\tcontent_type = h_parts[0]\n\tpage_encoding = None\n\tfor h_part in h_parts[1:]:\n\t\th_part = h_part.strip()\n\t\tif h_part.lower().startswith('charset='):\n\t\t\tpage_encoding = h_part[8:]\n\treturn (content_type, page_encoding,)", "def _headers(self) -> dict[str, str]:\n headers = super()._headers()\n headers[\"Authorization\"] = f\"Bearer {self.__token}\"\n return headers", "def _read_header(self, stream):\n return", "def set_header(cls, response):\n head = {key: response[key] for key in (response.keys() & cls.HEAD_KEYS)}\n\n for key_name, key_path in cls.HEAD_EXTRA:\n value = response\n try:\n for key in key_path:\n value = value[key]\n except KeyError:\n continue\n head[key_name] = value\n\n return head", "def make_headers(self):\n return {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US;\\\n rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}", "def get_response_headers(self, *args, **kwargs):\n if self.response_headers:\n return self._unpack_headers(self.response_headers)", "def headers(self):\n headers = BASE_HEADERS\n if self.token:\n headers['X-Plex-Token'] = self.token\n return headers", "def get_upload_key_metadata(self):\n key = self.get_upload_key()\n metadata = key.metadata.copy()\n\n # Some http header properties which are stored on the key need to be\n # copied to the metadata when updating\n headers = {\n # http header name, key attribute name\n 'Cache-Control': 'cache_control',\n 'Content-Type': 'content_type',\n 'Content-Disposition': 'content_disposition',\n 'Content-Encoding': 'content_encoding',\n }\n\n for header_name, attribute_name in headers.items():\n attribute_value = getattr(key, attribute_name, False)\n if attribute_value:\n metadata.update({b'{0}'.format(header_name):\n b'{0}'.format(attribute_value)})\n return metadata", "def get_headers() -> dict:\n\n return {\"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n \"Upgrade-Insecure-Requests\": 1,\n \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64)\"\n \" AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/73.0.3683.86 Safari/537.36\"),\n \"Accept\": (\"text/html,application/xhtml+xml,\"\n \"application/xml;q=0.9,image/webp,\"\n \"image/apng,*/*;q=0.8,\"\n \"application/signed-exchange;v=b3\"),\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\"}", "def GetEncoding(self): \n return self.file.GetEncoding()", "def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': 
line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}", "def getHeaders(self):\n return [\"Temp\"]", "def _chunk_headers(self, chunk_id, data, chunk_checksum_algo=\"blake3\"):\n hash_ = get_hasher(chunk_checksum_algo)\n hash_.update(data)\n return {\n CHUNK_HEADERS[\"content_id\"]: \"0123456789ABCDEF\",\n CHUNK_HEADERS[\"content_version\"]: \"1456938361143740\",\n CHUNK_HEADERS[\"content_path\"]: \"test\",\n CHUNK_HEADERS[\n \"content_chunkmethod\"\n ]: \"ec/algo=liberasurecode_rs_vand,k=6,m=3\",\n CHUNK_HEADERS[\"content_policy\"]: \"TESTPOLICY\",\n CHUNK_HEADERS[\"container_id\"]: \"1\" * 64,\n CHUNK_HEADERS[\"chunk_id\"]: chunk_id,\n CHUNK_HEADERS[\"chunk_size\"]: len(data),\n CHUNK_HEADERS[\"chunk_hash\"]: hash_.hexdigest().upper(),\n CHUNK_HEADERS[\"chunk_pos\"]: 0,\n CHUNK_HEADERS[\"full_path\"]: \"test/test/test,test1/test1/test1\",\n CHUNK_HEADERS[\"oio_version\"]: OIO_VERSION,\n }", "def send_headers(self):\r\n hkeys = [key.lower() for key, value in self.outheaders]\r\n status = int(self.status[:3])\r\n \r\n if status == 413:\r\n # Request Entity Too Large. Close conn to avoid garbage.\r\n self.close_connection = True\r\n elif \"content-length\" not in hkeys:\r\n # \"All 1xx (informational), 204 (no content),\r\n # and 304 (not modified) responses MUST NOT\r\n # include a message-body.\" So no point chunking.\r\n if status < 200 or status in (204, 205, 304):\r\n pass\r\n else:\r\n if self.response_protocol == 'HTTP/1.1':\r\n # Use the chunked transfer-coding\r\n self.chunked_write = True\r\n self.outheaders.append((\"Transfer-Encoding\", \"chunked\"))\r\n else:\r\n # Closing the conn is the only way to determine len.\r\n self.close_connection = True\r\n \r\n if \"connection\" not in hkeys:\r\n if self.response_protocol == 'HTTP/1.1':\r\n if self.close_connection:\r\n self.outheaders.append((\"Connection\", \"close\"))\r\n else:\r\n if not self.close_connection:\r\n self.outheaders.append((\"Connection\", \"Keep-Alive\"))\r\n \r\n if \"date\" not in hkeys:\r\n self.outheaders.append((\"Date\", rfc822.formatdate()))\r\n \r\n if \"server\" not in hkeys:\r\n self.outheaders.append((\"Server\", self.environ['SERVER_SOFTWARE']))\r\n \r\n buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], \" \", self.status, \"\\r\\n\"]\r\n try:\r\n buf += [k + \": \" + v + \"\\r\\n\" for k, v in self.outheaders]\r\n except TypeError:\r\n if not isinstance(k, str):\r\n raise TypeError(\"WSGI response header key %r is not a string.\")\r\n if not isinstance(v, str):\r\n raise TypeError(\"WSGI response header value %r is not a string.\")\r\n else:\r\n raise\r\n buf.append(\"\\r\\n\")\r\n self.sendall(\"\".join(buf))", "def get_headers(self) -> Dict[str, str]:\n header_dict = self.generate_auth_dict()\n\n return {\n \"Authorization\": \"Basic \" + header_dict[\"signature\"],\n \"Content-Type\": 'application/json',\n }", "def getAllHeaders(self, req):\n headers = {}\n for k, v in req.requestHeaders.getAllRawHeaders():\n headers[k.lower()] = v[-1]\n return headers", "def __get_headers(self, passed_headers: Dict) -> Dict:\n\n # User-Agent for HTTP request\n library_details = [\n f\"requests {requests.__version__}\",\n f\"python {platform.python_version()}\",\n 
f\"connector {self.__class__.__name__}\",\n ]\n library_details = \"; \".join(library_details)\n user_agent = f\"Infermedica-API-Python {__version__} ({library_details})\"\n\n headers = {\n \"Accept\": \"application/json\",\n \"User-Agent\": user_agent,\n \"App-Id\": self.app_id,\n \"App-Key\": self.app_key,\n }\n headers.update(self.default_headers)\n headers.update(passed_headers) # Make sure passed headers take precedence\n return headers", "def headers(self) -> Optional[Mapping[str, Any]]:\n if hasattr(self, \"_headers\"):\n return self._headers\n return None", "def _extract_headers(self):\n\n with open(self.file_path, \"rt\", encoding=self._encoding) as csv_file:\n for row in csv.reader(csv_file):\n if self._file_headings:\n return [header if header != \"\" else f\"Untitled_{index + 1}\" for index, header in enumerate(row)]\n\n else:\n return [f\"Untitled_{i + 1}\" for i in range(len(row[0]))]", "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def _read_textual_header(self):\n # The first 3200 byte are the textual header.\n textual_header = self.file.read(3200)\n # The data can either be saved as plain ASCII or EBCDIC. The first\n # character always is mostly 'C' and therefore used to check the\n # encoding. Sometimes is it not C but also cannot be decoded from\n # EBCDIC so it is treated as ASCII and all empty symbols are removed.\n if not self.textual_header_encoding:\n if textual_header[0:1] != b'C':\n try:\n textual_header = \\\n textual_header.decode('EBCDIC-CP-BE').encode('ascii')\n # If this worked, the encoding is EBCDIC.\n self.textual_header_encoding = 'EBCDIC'\n except UnicodeEncodeError:\n textual_header = textual_header\n # Otherwise it is ASCII.\n self.textual_header_encoding = 'ASCII'\n else:\n # Otherwise the encoding will also be ASCII.\n self.textual_header_encoding = 'ASCII'\n elif self.textual_header_encoding.upper() == 'EBCDIC':\n textual_header = \\\n textual_header.decode('EBCDIC-CP-BE').encode('ascii')\n elif self.textual_header_encoding.upper() != 'ASCII':\n msg = \"\"\"\n The textual_header_encoding has to be either ASCII, EBCDIC or None\n for autodetection. 
ASCII, EBCDIC or None for autodetection.\n \"\"\".strip()\n raise SEGYError(msg)\n # Finally set it.\n self.textual_file_header = textual_header", "def headers(self):\n header_list = [(_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems()]\n if hasattr(self, '_cookies'):\n for v in self._cookies.itervalues():\n header_list.append(('Set-Cookie', v))\n header_list.append(_HEADER_X_POWERED_BY)\n return header_list", "def image_header(self):\n\n if not self._image_header:\n path_image_header = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.header\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_header):\n self.logger.warning(\n \"ImageHeader path doesn't exist: %s\", path_image_header\n )\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_header)\n self._image_header = {}\n with open(path_image_header) as f:\n for line in f:\n parts = line.split(\" = \")\n\n if len(parts) < 2:\n parts = line.split(\" : \")\n\n if len(parts) > 1:\n self._image_header[parts[0].strip()] = (\n parts[1].replace(\";\", \"\").replace(\"\\n\", \"\")\n )\n\n return self._image_header" ]
[ "0.65006596", "0.6416231", "0.6409983", "0.62817407", "0.6232245", "0.61517215", "0.6136782", "0.61335945", "0.6095595", "0.60901326", "0.6061071", "0.6038231", "0.602917", "0.59930795", "0.5944638", "0.59071773", "0.5892869", "0.5888837", "0.58837026", "0.58729154", "0.58251846", "0.5817716", "0.5810529", "0.5781698", "0.57783633", "0.57447666", "0.5737349", "0.57345635", "0.57300925", "0.57252425", "0.5712231", "0.57045394", "0.57000947", "0.5686851", "0.5682865", "0.56777906", "0.566475", "0.56603026", "0.56551576", "0.56498754", "0.56481194", "0.56210995", "0.561255", "0.5606241", "0.55938405", "0.5585204", "0.5580673", "0.555391", "0.5543919", "0.55383694", "0.5537122", "0.55207753", "0.5518896", "0.5516502", "0.55155975", "0.5508047", "0.5495765", "0.5492981", "0.548671", "0.54786557", "0.54596514", "0.5455418", "0.5454767", "0.54421073", "0.54378676", "0.5432594", "0.5432594", "0.5432594", "0.54282993", "0.5421297", "0.5416856", "0.5414705", "0.54140353", "0.5399436", "0.53852", "0.53842974", "0.5383417", "0.5382036", "0.53809273", "0.53764045", "0.53740203", "0.53555644", "0.5346288", "0.53435034", "0.5342033", "0.53290087", "0.53290087", "0.53211296", "0.5313017", "0.529847", "0.52977145", "0.52973455", "0.5281698", "0.5280471", "0.5280422", "0.5273432", "0.5269381", "0.5267867", "0.52669543", "0.5265201" ]
0.59786826
14
Guess the content_type, by using its file descriptor
def _get_content_type(file_descriptor):\n    content_type = mimetypes.guess_type(file_descriptor.name)[0]\n    if not content_type:\n        content_type = 'text/plain'\n    return content_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'", "def guess_content_type ( self, path_info ) :\n _type, _enc = guess_type ( path_info )\n return _type", "def guess_content_type(filename):\n return mimetypes.guess_type(filename)[0]", "def guess_type(content):\n global mimeLock\n global mimeInitialized\n\n if not mimeInitialized:\n with mimeLock:\n if not mimeInitialized:\n mimetypes.init()\n mimeInitialized = True\n guessed = mimetypes.guess_type(content)\n\n if guessed[1] is None:\n guessed = (guessed[0], \"\")\n\n return guessed", "def GetContentType(filename):\r\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'", "def GetContentType(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'", "def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type", "def guess_type_from_content(file_obj):\n first_bytes = file_obj.read(2)\n if first_bytes == b\"PK\":\n filetype = \"xlsx\"\n else:\n content = file_obj.read()\n if b\"\\t\" in content:\n filetype = \"tsv\"\n else:\n filetype = \"csv\"\n return filetype", "def guess_mimetype(filename):\n m, encoding = mimetypes.guess_type(filename)\n if encoding:\n m = ENCODING_MIMETYPES.get(encoding, None)\n return m or \"application/octet-stream\"", "def detect_content_type(self, path=None, payload=None):\n\n f = file_path(path, payload)\n switches = [\"-d\", f]\n result = self._command_template(switches).lower()\n return result, path, f", "def get_content_type(file_path):\n\n try:\n magic_obj = magic.Magic(mime=True)\n magic_obj.file = magic_obj.from_file\n except AttributeError as e:\n magic_obj = magic.open(magic.MAGIC_MIME_TYPE)\n magic_obj.load()\n\n content_type = magic_obj.file(file_path)\n return content_type", "def _guess_mimetype(self, file):\n if not is_exe_in_path('file'):\n return self.DEFAULT_MIMETYPE\n\n # The browser didn't know what this was, so we'll need to do\n # some guess work. 
If we have 'file' available, use that to\n # figure it out.\n p = subprocess.Popen(['file', '--mime-type', '-b', '-'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n\n # Write the content from the file until file has enough data to\n # make a determination.\n for chunk in file.chunks():\n try:\n p.stdin.write(chunk)\n except IOError:\n # file closed, so we hopefully have an answer.\n break\n\n p.stdin.close()\n ret = p.wait()\n\n if ret == 0:\n mimetype = p.stdout.read().strip()\n else:\n mimetype = None\n\n # Reset the read position so we can properly save this.\n file.seek(0)\n\n return mimetype or self.DEFAULT_MIMETYPE", "def content_type(self):\n return self.environ.get('CONTENT_TYPE') or 'application/octet-stream'", "def _check_url_file_type(headers: Dict[str, str]) -> Optional[str]:\n content_type = headers.get(\"content-type\", \"\").lower()\n file_type = None\n\n for extension in SUPPORTED_MIME_TYPES.keys():\n for mime_type in SUPPORTED_MIME_TYPES.get(extension, []):\n if mime_type in content_type:\n file_type = extension\n break\n\n return file_type", "def best_match_content_type(self):\n # First lookup http request path\n parts = self.path.rsplit('.', 1)\n if len(parts) > 1:\n _format = parts[1]\n if _format in ['json', 'xml']:\n return 'application/{0}'.format(_format)\n\n #Then look up content header\n type_from_header = self.get_content_type()\n if type_from_header:\n return type_from_header\n ctypes = ['application/json', 'application/xml']\n\n #Finally search in Accept-* headers\n bm = self.accept.best_match(ctypes)\n return bm or 'application/json'", "def content_type(self):\n return self._headers.get(\"content-type\")", "def get_ctype(f):\n return mimetypes.guess_type(f)[0]", "def getContentType(content):\n\n xml = 'application/xml'\n\n if isXML(content):\n return xml\n elif content == '':\n return xml\n elif content is None:\n return xml\n else:\n return 'application/octet-stream'", "def content_type(self) -> str:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_type # type: ignore[return-value]", "def get_mime_type(file):\n initial_pos = file.tell()\n file.seek(0)\n mime_type = magic.from_buffer(file.read(2048), mime=True)\n file.seek(initial_pos)\n return mime_type", "def content_type(self):\n return self.guess_content_type(self.store_key)", "def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"", "def mimetype(self) -> 'Mimetype':\n if self._mimetype:\n # We take the mimetype reported in the dataset as authoritative.\n return Mimetype(self._mimetype)\n # If no mimetype is specified explicitly, we fall back to mimetype detection mechanisms:\n if self.scheme in ['file', 'http', 'https']:\n mt, _ = mimetypes.guess_type(self.parsed_url.path)\n if mt:\n return Mimetype(mt)\n if self.scheme == 'data':\n mt, _, data = self.parsed_url.path.partition(',')\n if mt.endswith(';base64'):\n mt = mt.replace(';base64', '').strip()\n if mt:\n return Mimetype(mt)\n # There's an explicit default mimetype for data URLs!\n return Mimetype('text/plain;charset=US-ASCII')\n if self.scheme in ['http', 'https']:\n res = 
urllib.request.urlopen(urllib.request.Request(self.url, method=\"HEAD\"))\n mt = res.headers.get('Content-Type')\n if mt:\n return Mimetype(mt)\n return Mimetype('application/octet-stream')", "def guess_mimetype(fn, default=\"application/octet-stream\"):\n if \".\" not in fn:\n return default\n bfn, ext = fn.lower().rsplit(\".\", 1)\n if ext == \"jpg\": ext = \"jpeg\"\n return mimetypes.guess_type(bfn + \".\" + ext)[0] or default", "def mime_type(filename):\n mtype, encoding = guess_type(filename, False)\n if encoding is None:\n return mtype or \"application/octet-stream\"\n elif encoding == \"gzip\":\n # application/gzip is defined by RFC 6713\n return \"application/gzip\"\n # Note that there is a \"+gzip\" MIME structured syntax suffix specified\n # in an RFC draft that may one day mean the correct code is:\n # return mtype + '+gzip'\n else:\n return \"application/x-\" + encoding", "def content_type(self):\n return self._headers['CONTENT-TYPE']", "def get_content_type(self):\n if hasattr(self, '_content_type'):\n return self._content_type\n mimetype = None\n querystring_mimetype = self.request.get('mimetype')\n acceptheader = self.request.getHeader('Accept')\n\n if querystring_mimetype and querystring_mimetype in self.content_types:\n mimetype = querystring_mimetype\n else:\n querystring_error = 'No acceptable mimetype in QUERY_STRING: {0}'.format(querystring_mimetype)\n if acceptheader:\n mimetype = self.content_types.negotiate_accept_header(acceptheader)\n if not mimetype:\n acceptheader_error = 'No acceptable mimetype in ACCEPT header: {0}'.format(acceptheader)\n raise CouldNotDetermineContentType(querystring_error=querystring_error,\n acceptheader_error=acceptheader_error,\n acceptable_mimetypes=self.content_types.get_mimetypelist())\n content_type = self.content_types[mimetype]\n self._content_type = content_type\n return content_type", "def file_type(filename, stream=False):\n magic_dict = {\"\\x1f\\x8b\\x08\": \"gz\",\n \"\\x42\\x5a\\x68\": \"bz2\",\n \"\\x50\\x4b\\x03\\x04\": \"zip\",\n b\"\\x50\\x4b\\x03\\x04\": \"zip\",\n \"PK\\x03\\x04\": \"zip\",\n b\"PK\\x03\\x04\": \"zip\",\n }\n\n max_len = max(len(x) for x in magic_dict)\n if not stream:\n with open(filename) as f:\n file_start = f.read(max_len)\n for magic, filetype in magic_dict.items():\n if file_start.startswith(magic):\n return filetype\n else:\n for magic, filetype in magic_dict.items():\n if filename[:len(magic)] == magic:\n return filetype\n\n return None", "def get_file_type(file_str):\n process_list = [\"file\", \"--mime-type\", file_str]\n p = subprocess.Popen(process_list, stdout=subprocess.PIPE)\n file_type, err = p.communicate()\n\n return file_type.decode(\"utf-8\")", "def get_media_type(f):\n tipe = mimetypes.guess_type(f)\n if tipe[0]:\n if \"image\" in tipe[0]:\n return \"image\"\n elif \"video\" in tipe[0]:\n return \"video\"\n elif \"audio\" in tipe[0]:\n return \"audio\"", "def gettype(self, failobj=None):\n missing = []\n value = self.get('content-type', missing)\n if value is missing:\n return failobj\n return re.split(r';\\s*', value.strip())[0].lower()", "def _get_content_type(self):\n return '%s; charset=%s' % (self.content_type, self.charset)", "def _guess_mimetype(self, filename, default=\"application/octet-stream\"):\n if \".\" not in filename:\n return default\n\n prefix, extension = filename.lower().rsplit(\".\", 1)\n\n if extension == \"jpg\":\n extension = \"jpeg\"\n\n return mimetypes.guess_type(prefix + \".\" + extension)[0] or default", "def get_content_type(self):\n if \"Content-Type\" 
not in self.headers:\n return None\n\n content_type = self.content_type\n\n # NOTE(markmc): text/plain is the default for eventlet and\n # other webservers which use mimetools.Message.gettype()\n # whereas twisted defaults to ''.\n if not content_type or content_type == 'text/plain':\n return None\n\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise exception.InvalidContentType(content_type=content_type)\n\n return content_type", "def find_file_type(file_str):\n try:\n #p = subprocess.Popen(\n # 'file --mime-type %s' % file_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n #output, errors = p.communicate()\n #return file_str, output.split(\" \")[-1].strip(), errors\n mime = magic.from_file( file_str, mime=True )\n return file_str, mime, \"\"\n except Exception, e:\n return file_str, \"unknown\", repr( e )", "def mime_type(location):\n try:\n return _detect(location, DETECT_MIME)\n except:\n # TODO: log errors\n return ''", "def _content_type_strategy(self, host, port, environ):\n app = None\n params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]\n if 'version' in params:\n app, app_url = self._match(host, port, '/v' + params['version'])\n if app:\n app = self._set_script_name(app, app_url)\n\n return app", "def best_match_content_type(self):\n if 'nova.best_content_type' not in self.environ:\n # Calculate the best MIME type\n content_type = None\n\n # Check URL path suffix\n parts = self.path.rsplit('.', 1)\n if len(parts) > 1:\n possible_type = 'application/' + parts[1]\n if possible_type in SUPPORTED_CONTENT_TYPES:\n content_type = possible_type\n\n if not content_type:\n content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)\n\n self.environ['nova.best_content_type'] = (content_type or\n 'application/json')\n\n return self.environ['nova.best_content_type']", "def _get_request_body_file_type(self) -> Optional[str]:\n result = None\n for decorator in self._find_decorators(AcceptsFileDecorator):\n if result is not None:\n raise TypeError(\"An endpoint cannot accept files of multiple types\")\n\n result = decorator.mime_type\n\n return result", "def content_type_header(request: Request) -> str:\n return request.content_type", "def CONTENT_TYPE(self):", "def get_mimetype(self):\n if self.resource.get_mimetype():\n return self.resource.get_mimetype()\n # Give best guess at mimetype\n mimetype = mimetypes.guess_type(self.resource.name)\n if mimetype[0]:\n return mimetype[0]\n else:\n # Interpret as binary data\n return \"application/octet-stream\"", "def content_type(self):\r\n return self.__content_type", "def test_guess_content_type(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n\n src = os.path.join(self.upload_path, 'test.jpg')\n id = utils.generate_id('demo.jpg')\n backend.put(src, id, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/demo.jpg'\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(\n Bucket=backend.bucket_name,\n Key=path\n )\n headers = res['ResponseMetadata']['HTTPHeaders']\n self.assertEquals('image/jpeg', headers['content-type'])", "def def_typ(mede):\n\tif mede:\n\t\tmime=mede['mime_type'].split('/')\n\t\ttyp_mime = mime[0]\n\telse:\n\t\t#could be cause by : not a referenced type or hachoir failed.\n\t\ttyp_mime='other'\n\t\t\n\ttyp=0\n\tif typ_mime=='image':\n\t\ttyp=1\n\telif typ_mime=='audio':\n\t\ttyp=2\n\telif typ_mime=='video':\n\t\ttyp=3\n\telif typ_mime=='model':\n\t\ttyp=4\n\telif typ_mime=='text':\n\t\ttyp=5\n\telif 
typ_mime=='application':\n\t\ttyp=6\n\telse:\n\t\ttyp=7\n\n\treturn typ", "def guess_content_type(self, path):\n extention_split = path .split('.')\n\n if self.content_type_table.has_key(extention_split[-1]):\n return self.content_type_table[extention_split[-1]]\n else:\n return self.content_type_table['fallback']", "def get_upload_content_type(self):\n if not hasattr(self, '_upload_content_type'):\n with self.get_storage().open(self.get_upload_path()) as upload:\n content_type = Magic(mime=True).from_buffer(upload.read(1024))\n self._upload_content_type = content_type\n return self._upload_content_type", "def get_content_type(self, type):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/GetContentType/{type}/\"))", "def get_best_mimetype():\n # find out what the client accepts\n return request.accept_mimetypes.best_match(\n current_app.blueprints[request.blueprint].response_mimetypes.keys()\n )", "def determine_file_type(input_file):\r\n file_info, error = subprocess.Popen([settings.FILE, input_file], stdout=subprocess.PIPE).communicate()\r\n\r\n file_type = file_info.decode(\"utf-8\").split()[1]\r\n\r\n if file_type == \"tcpdump\":\r\n return \"pcap\"\r\n elif file_type == \"pcap-ng\":\r\n return \"pcapng\"\r\n elif file_type == \"data\" and (b\"nfdump\" in file_info or b\"nfcapd\" in file_info):\r\n return \"nfdump\"\r\n else:\r\n raise UnsupportedFileTypeError(\"The file type \" + file_type + \" is not supported.\")", "def coerce_content_type(content: types.AnyStr, file_mode: str) -> str | bytes:\n if \"t\" in file_mode:\n return utils.to_unicode(content)\n elif \"b\" in file_mode:\n return utils.to_bytes(content)\n return content", "def file_type(location):\n try:\n return _detect(location, DETECT_TYPE)\n except:\n # TODO: log errors\n return ''", "def get_mime_for_format(self, format):\r\n try:\r\n return self.content_types[format]\r\n except KeyError:\r\n return 'application/json'", "def content_type(self):\n return self._content_type", "def match_mime_type(self, src: str):\n for key in self.keys():\n if Pattern.test(key, src):\n return self[key]\n return \"text/plain\"", "def content_type(self):\n return self.__content_type", "def guess_rdf_format(fname):\n\n if fname is None:\n return None, None\n\n extension = _get_extension(fname)\n\n # if compressed, needs content encoding\n content_encoding = _COMPRESSION_EXTENSIONS.get(extension)\n if content_encoding:\n # remove encoding extension\n extension = _get_extension(fname[: -len(extension) - 1])\n\n # get content type\n content_type = _RDF_EXTENSIONS.get(extension)\n\n return content_encoding, content_type", "def get_stream_type(self) -> str:", "def fetchlocal_original_mimetype_fromcontent(folderhash):\n filename = IMAGE_BASE + folderhash+\"/\"+FIXEDNAME_ORIGINALBINARY\n with imageio.get_reader(filename) as r:\n fileformat_md = r.get_meta_data()\n\n #\n graphics_fileformat = str(r.format.name) # !\n assert type(graphics_fileformat) is str\n\n #print(fileformat_md)\n if 'version' in fileformat_md and fileformat_md['version'] == b'GIF87a':\n #log(\"GIF\")\n \"\"\"\n GIF:\n {'version': b'GIF87a', 'extension': (b'NETSCAPE2.0', 27), 'loop': 0, 'duration': 10}\n \"\"\"\n return MIME_LOOKUP['gif']\n elif 'jfif_version' in fileformat_md or 'jfif' in fileformat_md:\n #log(\"JPEG\")\n \"\"\"\n JPEG:\n {'jfif_version': (1, 1), 'dpi': (72, 72), 'jfif': 257, 'jfif_unit': 1, 'jfif_density': (72, 72)}\n \"\"\"\n return 
MIME_LOOKUP['jpeg']\n elif graphics_fileformat == 'PNG-PIL':\n return MIME_LOOKUP['png']\n else:\n log_err(\"unknown image type. (Could be PNG which is not implemented)\")\n log_err(\"imageio output: \"+json.dumps(fileformat_md)+\" format:\"+graphics_fileformat)\n raise UnknownFileFormat(\n imageid=repr(folderhash),\n comment=\"ImageIO could not detect the original image type. \",\n imageio_metadata=json.dumps(fileformat_md),\n format=graphics_fileformat,\n )\n #return mimetype\n #throw image does not exist", "def validate_content_type(uri: str) -> None:\n try:\n response = requests.head(uri)\n response.raise_for_status()\n except RequestException as e:\n raise ValidationError(f\"groundtruth content type ({uri}) validation failed\") from e\n\n content_type = response.headers.get(\"Content-Type\", \"\")\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise ValidationError(f\"groundtruth entry has unsupported type {content_type}\")", "def content_type(self):\n response = self.response\n\n if response.content_type:\n return response.content_type\n\n return 'application/xhtml+xml' if response.xml_output else 'text/html'", "def get_file_type(self):\n # Save original position\n orig = self.file.tell()\n line = self.file.readline()\n regex_squid = re.compile(settings.SQUID_LOG_RE)\n regex_common = re.compile(settings.APACHE_COMMON_LOG_RE)\n regex_combined = re.compile(settings.APACHE_COMBINED_LOG_RE)\n if regex_squid.match(line):\n # Move cursor back to original\n self.file.seek(orig)\n logging.info(\"File type SQUID detected\")\n return settings.SQUID\n elif regex_common.match(line):\n self.file.seek(orig)\n logging.info(\"File type APACHE COMMON detected\")\n return settings.APACHE_COMMON\n elif regex_combined.match(line):\n self.file.seek(orig)\n logging.info(\"File type APACHE COMBINED detected\")\n return settings.APACHE_COMBINED\n else:\n raise ValueError(\"Unrecognized file format.\\nWe currently support \"\n \"only Apache Web Server Log file and Squid Proxy \"\n \"Server Log file.\")", "def content_type(self):\n return self.content_types[0]", "def mime_type(path):\n cmd = ['/usr/bin/file', '-b', '--mime-type', path]\n return subprocess.check_output(cmd).rstrip()", "def content_type(self) -> pulumi.Input[Union[str, 'FileImportContentType']]:\n return pulumi.get(self, \"content_type\")", "def getMime(filename):\n line = mimeDB.file(filename)\n if line is not None:\n parts = line.split(';')\n mime = parts[0].strip()\n if mime.find('/')==-1:\n mime = 'application/octet-stream'\n elif mime == 'text/html' and \\\n (filename[-5:].lower() == '.xslt' or filename[-4:].lower() == '.xsl'):\n # @notice: workaround for the broken mime detection on debian\n # @todo: fix the real problem and remove this code\n mime = 'application/xml'\n encoding = None\n if len(parts)==2:\n encoding = parts[1][9:]\n return mime, encoding\n return None, None", "def _mime_type_for_path(path):\n # type: (str) -> QMimeType\n db = QMimeDatabase()\n mtype = db.mimeTypeForFile(path, QMimeDatabase.MatchDefault)\n if any(mtype.inherits(t) for t in compression_types):\n # peek contents\n try:\n with _open(path, \"rb\") as f:\n sample = f.read(4096)\n except Exception:\n sample = b''\n mtype = db.mimeTypeForData(sample)\n return mtype", "def get_file_type(filename):\n try:\n with open(filename, 'rb') as file:\n magic = file.read(4)\n #print (magic)\n file.close()\n if magic == b'\\x7fELF':\n return \"elf\"\n elif int.from_bytes(magic[0:2], \"little\") == 0xc2:\n return \"coff\"\n else:\n return \"Unexpected file type\"\n \n 
except FileNotFoundError:\n return \"File <\"+filename+\"> not found.\"", "def content_type(self) -> str:\n return pulumi.get(self, \"content_type\")", "def get_mimetype(\n media_filepath: Path, buffer_size: Optional[int] = None\n) -> Optional[str]:\n\n if not media_filepath.is_file():\n raise FileNotFoundError(f\"no such file {media_filepath!s} exists\")\n\n with media_filepath.open(\"rb\") as media_buffer:\n return magic.from_buffer(\n media_buffer.read(buffer_size or DEFAULT_MAGIC_BUFFER_SIZE), mime=True\n )", "def get_file_type(cls, filename):\n with open(filename) as f:\n file_header = f.read(cls.MAX_FILE_HEADER_LEN)\n for magic, filetype in cls.MAGIC_DICT.items():\n if file_header.startswith(magic):\n return filetype\n return \"uncompressed\"", "def content_type(self, _format=None):\r\n _format = _format or self.format\r\n return \"application/%s\" % (_format)", "def get_type_content(mail):\n type = 1\n if not mail.is_multipart():\n #Contains an attachment\n for key in mail.keys():\n #Check all keys for an attachment file\"name\"\n if key.startswith('Content'):\n if str(mail[key]).find('name') == -1:\n if str(mail[key]).startswith('text/html'):\n if type <= 2:\n type = 2\n else:\n type = 3\n elif str(mail[key]).startswith('text/plain'):\n if type == 2:\n type = 3\n return type\n else:\n for part in mail.get_payload():\n temp = get_type_content(part)\n if (type > 1 and type != temp):\n return 3\n else:\n type = temp\n return type", "def check_content_type(content_type):\n if request.headers[\"Content-Type\"] == content_type:\n return\n app.logger.error(\"Invalid Content-Type: %s\", request.headers[\"Content-Type\"])\n abort(415, \"Content-Type must be {}\".format(content_type))", "def check_content_type(content_type):\n if request.headers['Content-Type'] == content_type:\n return\n app.logger.error('Invalid Content-Type: %s',\n request.headers['Content-Type'])\n abort(415, 'Content-Type must be {}'.format(content_type))", "def CONTENT_TYPE(self):\n return self.content_type", "def mime_type():\r\n return tuple(linecache.getline(\r\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'mimes.csv'),\r\n _random.randrange(0, 647)\r\n ).strip(\"\\n\").split(','))", "def get_type(filepath):\n if os.path.isdir(filepath):\n return MIME_DIRECTORY\n\n if not mimetypes.inited:\n mimetypes.init()\n\n # .gz does not return archive type as expected\n if filepath.endswith('.gz'):\n filepath += '.tgz'\n\n try:\n mime_type = mimetypes.types_map[os.path.splitext(filepath)[-1].lower()]\n except KeyError:\n mime_type = ''\n\n if mime_type.startswith('image'):\n return MIME_IMAGE_GENERIC\n\n elif any(tag in mime_type for tag in ('x-gtar', 'x-tar', 'zip', 'rar', 'x-7z')):\n return MIME_ARCHIVE\n\n elif 'audio' in mime_type:\n return MIME_AUDIO\n\n elif any(tag in mime_type for tag in ('iso9660-image', 'diskimage')):\n return MIME_DISK\n\n elif 'font' in mime_type:\n return MIME_FONT\n\n elif any(tag in mime_type for tag in ('msword', 'wordprocessingml.document', 'opendocument.text')):\n return MIME_DOC\n\n elif any(tag in mime_type for tag in ('powerpoint', 'presentation')):\n return MIME_PRESENT\n\n elif any(tag in mime_type for tag in ('spreadsheet', 'excel', 'text/csv')):\n return MIME_SPREADSHEET\n\n elif 'pdf' in mime_type:\n return MIME_PDF\n\n elif 'python' in mime_type:\n return MIME_PYTHON\n\n elif 'x-sh' in mime_type:\n return MIME_SHELL\n\n elif 'video' in mime_type:\n return MIME_VIDEO\n\n elif 'text' in mime_type:\n return MIME_PLAINTEXT\n\n return MIME_BINARY # Generic file", "def 
_get_name_from_content_type(self, request):\n\n content_type = request.META.get('CONTENT_TYPE', None)\n if content_type:\n # remove the possible charset-encoding info\n return util.strip_charset(content_type)\n return None", "def content_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"content_type\")", "def determine_filetype(self):\n filetype = None\n # Loop through file formats\n for file_format in self._format_registry:\n validator = self._format_registry[file_format](\n self._synapse_client, self.center\n )\n try:\n filenames = [entity.name for entity in self.entitylist]\n filetype = validator.validateFilename(filenames)\n except AssertionError:\n continue\n # If valid filename, return file type.\n if filetype is not None:\n break\n return filetype", "def __set_content_type(self):\n if self.headers is None:\n return\n\n content_type = self.headers.get(\"content-type\", None)\n\n if content_type is None:\n return\n if \";\" in content_type:\n content_type_parts = content_type.split(\";\")\n\n if len(content_type_parts) == 2:\n self.__content_type = content_type_parts[0]\n else:\n self.__content_type = content_type", "def test_content_type(self) -> None:\n issuer = unpaid_redemption()\n treq = treq_for_loopback_ristretto(issuer)\n d = treq.post(\n NOWHERE.child(\"v1\", \"redeem\").to_text().encode(\"ascii\"),\n b\"{}\",\n )\n self.assertThat(\n d,\n succeeded(\n AfterPreprocessing(\n lambda response: response.code,\n Equals(UNSUPPORTED_MEDIA_TYPE),\n ),\n ),\n )", "def get_file_transfer_type(sql: str) -> FileTransferType | None:\n commentless_sql = remove_starting_comments(sql)\n if PUT_SQL_RE.match(commentless_sql):\n return FileTransferType.PUT\n elif GET_SQL_RE.match(commentless_sql):\n return FileTransferType.GET", "def __set_content_type(self):\r\n if self.headers is None:\r\n return\r\n\r\n content_type = self.headers.get(\"content-type\", None)\r\n\r\n if content_type is None:\r\n return\r\n if \";\" in content_type:\r\n content_type_parts = content_type.split(\";\")\r\n\r\n if len(content_type_parts) == 2:\r\n self.__content_type = content_type_parts[0]\r\n else:\r\n self.__content_type = content_type", "def from_buffer(cls, buf, **kwargs):\n cls._initialize(**kwargs)\n # perform processing\n try:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n # should never enter here, but in case of\n filetype = None\n return filetype", "def file_type(self):\n return self.__file_type", "def mimetype(self):\n hcell = self._get_hcell2()\n mimetype = hcell.get(\"mimetype\")\n if mimetype is not None:\n return mimetype\n celltype = hcell[\"celltype\"]\n if celltype == \"code\":\n language = hcell[\"language\"]\n mimetype = language_to_mime(language)\n return mimetype\n if celltype == \"structured\":\n datatype = hcell[\"datatype\"]\n if datatype in (\"mixed\", \"binary\", \"plain\"):\n mimetype = get_mime(datatype)\n elif datatype in (\"float\", \"int\", \"str\", \"bool\"):\n mimetype = get_mime(\"plain\")\n else:\n mimetype = ext_to_mime(datatype)\n else:\n mimetype = get_mime(celltype)\n return mimetype", "def get_content_type_and_encoding(content_type_header):\n\tif not content_type_header:\n\t\treturn (None, None)\n\t\n\th_parts = content_type_header.split(';')\n\tcontent_type = h_parts[0]\n\tpage_encoding = None\n\tfor h_part in h_parts[1:]:\n\t\th_part = h_part.strip()\n\t\tif h_part.lower().startswith('charset='):\n\t\t\tpage_encoding = h_part[8:]\n\treturn (content_type, page_encoding,)", "def 
test_extension_to_content_type(self):\n assert ct.extension_to_content_type(\"jpg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"jpeg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"png\") == \"image/png\"\n ct.extension_to_content_type(\"css\",) == \"text/css\"\n ct.extension_to_content_type(\"html\") == \"text/html\"\n ct.extension_to_content_type(\"json\") == \"application/json\"\n ct.extension_to_content_type(\"xml\") == \"application/xml\"\n ct.extension_to_content_type(\"zip\") == \"application/zip\"", "def get_s3_file_type(file_id):\n file_object = S3_CLIENT.get_object(Bucket=runtime_context.BUCKET_NAME,\n Key=file_id)\n ct = file_object.get('ContentType')\n return ct.split(';')[0] if ct else 'application/octet-stream'", "def get_mime_type(blob: bytes) -> str:\n\n # If this fails to import, you probably need to\n # install libmagic system wide. It should be:\n #\n # OSX : brew install libmagic\n # Windows : pip install python-magic-bin\n # Debian : apt install libmagic1\n # Arch : pacman -S imagemagick\n import magic\n\n # Create some magic\n m = magic.Magic(mime=True)\n\n # Calculate mime from bytes\n return m.from_buffer(blob)", "def content_negotiation(self, request, environ, mtype_list):\n alist = request.sys_query_options.get(core.SystemQueryOption.format,\n None)\n if alist is None:\n if \"HTTP_ACCEPT\" in environ:\n try:\n alist = messages.AcceptList.from_str(\n environ[\"HTTP_ACCEPT\"])\n except grammar.BadSyntax:\n # we'll treat this as a missing Accept header\n alist = self.DefaultAcceptList\n else:\n alist = self.DefaultAcceptList\n return_type = alist.select_type(mtype_list)\n logging.debug(\"Content negotiation request: %s\", str(alist))\n logging.debug(\"Content negotiation result: picked %s from %s\", repr(\n return_type), repr(mtype_list))\n return return_type", "def get_type(self):\n\n if self.type: return self.type\n if str(self.get_ext()) in str(MIMETYPES_VIDEOS_LIST):\n self.type = Video()\n elif str(self.get_ext()) in str(MIMETYPES_IMAGES_LIST):\n self.type = Image()\n else: Settings.warn_print(\"unable to parse file type\")\n return self.type", "def test_content_type(self,url):\n \n p = re.compile('.*pdf*.')\n cookies = self.get_cookies()\n print \"cookies are:\\n \" + str(cookies)\n content_type = requests.get(url, allow_redirects=True, cookies=cookies).headers.get('content-type')\n is_pdf = p.match(content_type)\n print \"content type is pdf: \" + str(is_pdf)\n if is_pdf:\n return(True)\n else:\n return(False)", "def check_media_file_type(media_file_class):\n if media_file_class == 'AudioFile':\n media_file_type = 'Audio file'\n elif media_file_class == 'VideoFile':\n media_file_type = 'Video file'\n elif media_file_class == 'DocumentFile':\n media_file_type = 'Document file'\n elif media_file_class == 'ImageFile':\n media_file_type = 'Image file'\n\n return media_file_type", "def file_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"file_type\")", "def file_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"file_type\")", "def get_request_format():\n # if the user specifies a `format` HTTP parameter, use that\n mimetype = request.args.get('format', '').strip() or \\\n request.accept_mimetypes.best\n if not mimetype:\n return 'html' # default\n mimetype = mimetype.lower()\n choices = {\n 'application/json': 'json',\n 'text/javascript': 'json',\n 'application/twiml': 'twiml',\n 'text/html': 'html',\n 'text/plain': 'text',\n }\n if mimetype in choices:\n return choices[mimetype]\n bits = 
mimetype.split(\"/\")\n if len(bits) == 2:\n return bits[-1]\n return mimetype", "def getType(file):\n\n with open(file, \"r\") as result:\n return result.readline().strip()" ]
[ "0.7407128", "0.74055415", "0.7343658", "0.7118253", "0.70626557", "0.7053132", "0.70111525", "0.6947122", "0.68888956", "0.68175477", "0.6781097", "0.67605287", "0.67187476", "0.6655244", "0.6640195", "0.66329396", "0.660614", "0.66005784", "0.65136355", "0.6447863", "0.6439395", "0.64054847", "0.6404971", "0.64015704", "0.63843954", "0.63714516", "0.63662547", "0.6356395", "0.632482", "0.6318174", "0.6318128", "0.6304941", "0.63040805", "0.628958", "0.62845063", "0.62829846", "0.6280979", "0.62775534", "0.62551224", "0.624775", "0.62018776", "0.61891073", "0.61730486", "0.61704946", "0.61620355", "0.61601514", "0.6151091", "0.61419225", "0.61015373", "0.60982877", "0.60738814", "0.6033818", "0.60239565", "0.60155016", "0.6008047", "0.59949845", "0.5990241", "0.59680927", "0.5966408", "0.5965215", "0.59498566", "0.59490764", "0.5936355", "0.5935286", "0.5882541", "0.5876658", "0.5861971", "0.5846796", "0.5834368", "0.5824365", "0.580317", "0.5799984", "0.5792707", "0.5762553", "0.5762069", "0.57544124", "0.57525337", "0.57499844", "0.5726099", "0.56956416", "0.56955004", "0.56832397", "0.567697", "0.56616175", "0.56591916", "0.5654347", "0.56518537", "0.56460726", "0.5645116", "0.56446254", "0.563019", "0.562418", "0.56215227", "0.5619159", "0.56157374", "0.5604", "0.5596596", "0.5596596", "0.559059", "0.558747" ]
0.7975829
0
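For reference, the document field in the record above describes a small content-type helper built on Python's standard mimetypes module. Below is a minimal runnable sketch of the same logic, assuming plain CPython; the example.css file and the usage lines are hypothetical and included only for demonstration.

import mimetypes

def _get_content_type(file_descriptor):
    # Guess from the name attached to the open file object;
    # fall back to plain text when no type can be determined.
    content_type = mimetypes.guess_type(file_descriptor.name)[0]
    if not content_type:
        content_type = 'text/plain'
    return content_type

# Hypothetical usage: the guess is based only on the file name's extension.
with open('example.css', 'w') as f:
    f.write('body { color: black; }')
with open('example.css', 'rb') as f:
    print(_get_content_type(f))  # -> text/css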
Asserts if a given file (w/ name filename) can be compressed. content_type is optional and can speed up assertion. Should return True if it is a Text Type (CSS/JS)
def _file_can_be_compressed(filename):\n    content_type = ''\n    with open(filename, 'rb') as f:\n        content_type = _get_content_type(f)\n    return content_type in TEXT_TYPES
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isgzip(filename):\n magic_number = b'\\x1f\\x8b\\x08'\n with open(filename, 'rb') as f:\n file_start = f.read(len(magic_number))\n\n if magic_number == file_start:\n return True\n return False", "def is_archive(afile):\n return file_ext(os.path.basename(afile)) in ARCHIVE_COMPRESS_FORMATS", "def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False", "def is_zip(filepath):\n\treturn os.path.splitext(filepath)[1] == '.gz'", "def identify_compression(file_path: str) -> Optional[str]:\r\n sign_dict = {\r\n b\"\\x1f\\x8b\\x08\": \"gz\",\r\n b\"\\x42\\x5a\\x68\": \"bz2\",\r\n b\"\\x50\\x4b\\x03\\x04\": \"zip\",\r\n b\"\\x37\\x7a\\xbc\\xaf\\x27\\x1c\": \"7z\",\r\n b\"\\x75\\x73\\x74\\x61\\x72\": \"tar\",\r\n b\"\\x52\\x61\\x72\\x21\\x1a\\x07\\x00\": \"rar\",\r\n }\r\n\r\n max_len = max(len(x) for x in sign_dict)\r\n with open(file_path, \"rb\") as f:\r\n file_start = f.read(max_len)\r\n for magic, filetype in sign_dict.items():\r\n if file_start.startswith(magic):\r\n return filetype\r\n return None", "def is_gzipped(infile):\n logger = logging.getLogger(__name__)\n\n magic_number = b'\\x1f\\x8b'\n f = open(infile, 'rb')\n with f:\n try:\n assert f.read(2) == magic_number\n except AssertionError as e:\n logger.info(f'{infile} is not gzipped')\n return False\n else:\n logger.debug(f'{infile} is gzipped')\n return True", "def _is_archive(local_path: str) -> bool:\n archive_mimetypes = [\n \"application/zip\",\n \"application/x-tar\",\n \"application/x-gzip\",\n \"application/x-bzip2\",\n \"application/x-7z-compressed\",\n \"application/x-rar-compressed\",\n \"application/x-xz\",\n \"application/x-lzip\",\n \"application/x-lzma\",\n \"application/x-lzop\",\n \"application/x-bzip\",\n \"application/x-bzip2\",\n \"application/x-compress\",\n \"application/x-compressed\",\n ]\n\n return mimetypes.guess_type(local_path)[0] in archive_mimetypes", "def _gz(filename):\n \n with open(filename, 'rb') as f:\n return binascii.hexlify(f.read(2)) == b'1f8b'", "def are_files_gzipped(raw_files):\n files_are_gzipped = None\n for file_name in raw_files:\n if re.search(r\"\\.gz$\", file_name) is not None:\n if files_are_gzipped is False:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = True\n else:\n if files_are_gzipped:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = False\n return files_are_gzipped", "def is_gz_file(f):\n with open(f, \"rb\") as fin:\n return binascii.hexlify(fin.read(2)) == b\"1f8b\"", "def test_compress_file_response(self):\n with open(__file__, \"rb\") as file1:\n\n def get_response(req):\n file_resp = FileResponse(file1)\n file_resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return file_resp\n\n r = GZipMiddleware(get_response)(self.req)\n with open(__file__, \"rb\") 
as file2:\n self.assertEqual(self.decompress(b\"\".join(r)), file2.read())\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertIsNot(r.file_to_stream, file1)", "def is_gzipped(response):\n ctype = response.headers.get('Content-Type', b'').lower()\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))", "def maybe_compress(filename, compress_minsize=config.COMPRESS_MINSIZE):\n size = os.path.getsize(filename)\n if size < compress_minsize:\n return open(filename, 'rb'), False\n\n compressed_size, compressed_fobj = compress_file(filename)\n if compressed_size >= size:\n # Compressed file was larger\n log.info(\"%s was larger when compressed; using uncompressed version\", filename)\n return open(filename, 'rb'), False\n\n return compressed_fobj, True", "def is_jpegxl_recompressed_jpeg_file(filename):\n try:\n with open(filename, 'rb') as h:\n header = h.read(len(JPEGXL_RECOMPRESSED_JPEG_HEADER))\n # Cf. https://arxiv.org/pdf/1908.03565.pdf, section 9.1,\n # on recompressed-JPEG header.\n return header == JPEGXL_RECOMPRESSED_JPEG_HEADER\n except: # pylint:disable=bare-except\n # If anything failed, this means that we cannot establish that the file\n # has the expected header, so we return False.\n return False", "def chk_for_gz(filenm):\n import os\n from os.path import expanduser\n filenm = expanduser(filenm)\n\n # File exist?\n if os.path.lexists(filenm):\n chk=True\n return filenm, chk\n\n # .gz already\n if filenm.find('.gz') > 0:\n chk=0\n return filenm, chk\n\n # Add .gz\n if os.path.lexists(filenm+'.gz'):\n chk=True\n return filenm+'.gz', chk\n else:\n chk=False\n return None, chk", "def compress_content(content_type, content):\n \n command = 'java -jar %s --type=%s' % (yuicompressor_path, content_type)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n p.stdin.write(content)\n p.stdin.close()\n \n compressed = p.stdout.read()\n p.stdout.close()\n \n err = p.stderr.read()\n p.stderr.close()\n \n if p.wait() != 0:\n if not err:\n err = 'Unable to use YUI Compressor'\n \n \n return err, compressed", "def test__decompress(filename):\n with open(filename, mode=\"rb\") as file_handle:\n name, content = Submit._decompress(filename, file_handle)\n assert name.endswith(\"EcoliCore.xml\")\n assert len(content.read()) >= 494226", "def is_gzip(fp):\r\n return open(fp, 'rb').read(2) == '\\x1f\\x8b'", "def isGzippable(self, css=0, js=0, REQUEST=None):\n # force: force http compression even if the browser doesn't send an accept\n # debug: return compression state (0: no, 1: yes, 2: force)\n # css: set this to 1 inside a css file (for later use)\n # js: set this to 1 inside a js file (for later use)\n\n if REQUEST is None:\n REQUEST = self.REQUEST\n use_gzip = self.getGzip()\n if not self.getEnabled():\n use_gzip = 'never'\n\n force = 0\n if use_gzip == 'never':\n enable_compression = 0\n elif use_gzip == 'always':\n enable_compression = 1\n force = 1\n elif use_gzip == 'accept-encoding':\n # compress everything except css and js\n enable_compression = 1\n elif use_gzip == 'accept-encoding+user-agent':\n # gzip compatibility info courtesy of\n # http://httpd.apache.org/docs/2.2/mod/mod_deflate.html\n user_agent = REQUEST.get('HTTP_USER_AGENT', '')\n if user_agent.startswith('Mozilla/4'):\n # Netscape 4.x can't handle gzipped css and js\n enable_compression = (css==0 and js==0)\n # Netscape 4.0.6-4.0.8 has some 
gzip-related bugs\n if user_agent[len('Mozilla/4.')] in ('6','7','8'):\n enable_compression = 0\n # Some versions of MSIE pretend to be Netscape 4.x but are OK with gzipping\n if user_agent.find('MSIE'):\n enable_compression = 1\n\n return (enable_compression, force, REQUEST.get('HTTP_ACCEPT_ENCODING', '').find('gzip') != -1)", "def is_accept_type(file_name):\n bare_name, file_extension = os.path.splitext(file_name)\n for ext in ACCEPTED_FILES:\n if file_extension.lower() == ext:\n return True\n return False", "def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta", "def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False", "def test_IsPackage_files():\n with tempfile.NamedTemporaryFile() as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".tar.bz2\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".dpack.tar.bz2\") as f:\n assert dpack._IsPackage(pathlib.Path(f.name))", "def testCompressedSize(self):\n\n uncompressed_file = tempfile.NamedTemporaryFile(delete=False)\n for line in range(200):\n uncompressed_file.write(\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
'\n 'Sed eleifend')\n uncompressed_file.close()\n compressed_path = uncompressed_file.name + '.compressed'\n compressor_path = os.path.join(DIR_SOURCE_ROOT, 'third_party',\n 'fuchsia-sdk', 'sdk', 'tools', 'x64',\n 'blobfs-compression')\n subprocess.call([compressor_path, uncompressed_file.name, compressed_path])\n self.assertEqual(binary_sizes.CompressedSize(uncompressed_file.name),\n os.path.getsize(compressed_path))\n os.remove(uncompressed_file.name)\n os.remove(compressed_path)", "def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)", "def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)", "def _iszip(self, filename):\n fname, ext = os.path.splitext(filename)\n return ext in _file_openers.keys()", "def test_compress_response(self):\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertEqual(r.get(\"Content-Length\"), str(len(r.content)))", "def can_minimize_file(file_path):\n # If this is not a binary file, we should be able to minimize it in some way.\n if not utils.is_binary_file(file_path):\n return True\n\n # Attempt to minimize IPC dumps.\n if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):\n return supports_ipc_minimization(file_path)\n\n # Other binary file formats are not supported.\n return False", "def test_extension_to_content_type(self):\n assert ct.extension_to_content_type(\"jpg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"jpeg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"png\") == \"image/png\"\n ct.extension_to_content_type(\"css\",) == \"text/css\"\n ct.extension_to_content_type(\"html\") == \"text/html\"\n ct.extension_to_content_type(\"json\") == \"application/json\"\n ct.extension_to_content_type(\"xml\") == \"application/xml\"\n ct.extension_to_content_type(\"zip\") == \"application/zip\"", "def test_no_compress_compressed_response(self):\n self.resp[\"Content-Encoding\"] = \"deflate\"\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"deflate\")", "def check_zlib():\n\n try:\n import zlib\n zlib.compress('Compress this')\n return True\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error('Failed to import zlib module.')\n return False", "def check_file(filename):\n\tfile = open(filename, 'r')\n\tfile_content = file.read()\n\tif len(file_content) < 3 or file_content.isspace():\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')\n\t# First 3 characters should represent the base of the content.\n\tbase = file_content[0:3]\n\tfile_content = file_content[3:]\n\tforbidden_chars = {'BIN': [None], 'HEX': [None]}\n\n\t# Content is claimed to be hexadecimal:\n\tif base == 'HEX':\n\t\tfile_content = ''.join(file_content.split())\n\t\tfile_content = file_content.upper()\n\t\tif len(file_content) < 2:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 2\n\t\tif mod != 
0:\n\t\t\treturn (0, 'File must contain full bytes of data (2 hex digits = 1 byte)!')\n\t\t# Use regular expression for verifying the content.\n\t\tif re.match('[0-9A-F]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 2):\n\t\t\t\tif start + 2 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+2] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\n\t\t\tcontent = content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (1, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid hexadecimal data!')\n\t\t\t\n\t# Content is claimed to be binary:\n\telif base == 'BIN':\n\t\tfile_content = ''.join(file_content.split())\n\t\tif len(file_content) < 8:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 8\n\t\tif mod != 0:\n\t\t\treturn (0, 'File must contain full bytes of data (8 bits = 1 byte)!')\n\t\t\t\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[0-1]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 8):\n\t\t\t\tif start + 8 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+8] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\t\t\n\t\t\tcontent = content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['BIN']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (2, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid binary data!')\n\t\t\t\n\t# Content is claimed to be ASCII:\n\telif base == 'ASC':\n\t\tescape_chars = ['\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v']\n\t\tescape_letters = ['a', 'b', 'f', 'n', 'r', 't', 'v']\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[\\x00-\\x7F]+$', file_content):\t\t# [\\x20-\\x7E]\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor c in file_content:\n\t\t\t\tif binascii.hexlify(c).upper() in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File contains illegal control characters!')\n\t\t\tfor c in escape_chars:\n\t\t\t\tif file_content.count(c) != 0:\n\t\t\t\t\tfile_content = file_content.replace(c, '')\t\t\t\t\t\n\t\t\t# Replace all \"\\\\n\", \"\\\\r\" etc. with \"\\n\", \"\\r\" etc. (i.e. 
remove\n\t\t\t# the extra backslash) so that the control characters are interpreted\n\t\t\t# correctly into hex values.\n\t\t\tfor c in range(0, len(file_content)):\n\t\t\t\tif file_content[c:c+1] == '\\\\':\n\t\t\t\t\tif file_content[c+1:c+2] in escape_letters:\n\t\t\t\t\t\tfor e in escape_letters:\n\t\t\t\t\t\t\tif file_content[c+1:c+2] == e:\n\t\t\t\t\t\t\t\tfile_content = file_content[:c] + escape_chars[escape_letters.index(e)] + file_content[c+2:]\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn (0, 'File contains illegal control characters!\\n\\n' + \n\t\t\t\t\t\t\t\t'Legal characters after a backslash are: a, b, f, n, r, t, and v.')\n\n\t\t\t# Return type indicator and the file content.\n\t\t\tfile.close()\n\t\t\treturn (3, file_content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid ASCII data!')\n\t\t\n\t# Content is invalid:\n\telse:\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')", "def test_no_compress_incompressible_response(self):\n self.resp.content = self.incompressible_string\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.incompressible_string)\n self.assertIsNone(r.get(\"Content-Encoding\"))", "def is_archive_ext(filepath):\n file_extension = os.path.splitext(filepath)[1].lower()\n if file_extension in get_archive_extensions():\n return True\n else:\n return False", "def test_compress_non_200_response(self):\n self.resp.status_code = 404\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")", "def is_archive_file(filepath):\n\n file_extension = os.path.splitext(filepath)[1].lower()\n if file_extension == \".zip\":\n if not zipfile.is_zipfile(filepath):\n raise DeidentificationError(\n \"The ZIP file has the .zip extension but it is not a ZIP file\")\n else:\n return True\n elif file_extension in get_archive_extensions():\n if not tarfile.is_tarfile(filepath):\n raise DeidentificationError(\n \"The file has an archive extension but it is not a TAR file\")\n else:\n return True\n else:\n return False", "def _check_url_file_type(headers: Dict[str, str]) -> Optional[str]:\n content_type = headers.get(\"content-type\", \"\").lower()\n file_type = None\n\n for extension in SUPPORTED_MIME_TYPES.keys():\n for mime_type in SUPPORTED_MIME_TYPES.get(extension, []):\n if mime_type in content_type:\n file_type = extension\n break\n\n return file_type", "def is_binary_file_mime_type(mime_type, cfg):\n if mime_type:\n # We require explicit handling of the web-friendly images.\n # For all other types, pattern-matching is used.\n if is_viewable_image(mime_type):\n return mime_type in cfg.options.binary_mime_types\n for pattern in cfg.options.binary_mime_types:\n if fnmatch.fnmatch(mime_type, pattern):\n return True\n return False", "def test_uncompressed(mode, size, test_file):\n\n with Image.open(test_file) as im:\n assert im.format == \"DDS\"\n assert im.mode == mode\n assert im.size == size\n\n assert_image_equal_tofile(im, test_file.replace(\".dds\", \".png\"))", "def test_guess_content_type(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n\n src = os.path.join(self.upload_path, 'test.jpg')\n id = utils.generate_id('demo.jpg')\n backend.put(src, id, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/demo.jpg'\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(\n 
Bucket=backend.bucket_name,\n Key=path\n )\n headers = res['ResponseMetadata']['HTTPHeaders']\n self.assertEquals('image/jpeg', headers['content-type'])", "def check_file_type(fname):\n ext = path.splitext(fname)[1]\n return ext in allowed_extensions", "def _IsFile(self, file_message):\n message_type = file_message.message_type\n return (message_type == FileMessage.FILE_DOWNLOAD or\n message_type == FileMessage.FILE_UPLOAD or\n message_type == FileMessage.FILE_CLOUD_COPY or\n message_type == FileMessage.FILE_DAISY_COPY or\n message_type == FileMessage.FILE_LOCAL_COPY or\n message_type == FileMessage.FILE_REWRITE or\n message_type == FileMessage.FILE_HASH)", "def _check_zip_file (filename, path_unzip, outfile) :\n assert path_unzip is not None\n file,ext = os.path.splitext (filename)\n ext = ext.lower ()\n if ext == \".gz\" :\n \n import gzip\n \n if outfile is None :\n dest = filename.split (\"!\")\n dest = dest [ len(dest)-1 ]\n ext = os.path.splitext (dest) [1]\n dest = dest.replace (ext, \".txt\")\n path = os.path.split (filename)\n path = \"/\".join (path [:len (path)-1])\n dest = path + \"/\" + dest\n else :\n dest = outfile\n \n if not os.path.exists (dest) :\n file = gzip.GzipFile (filename, \"r\")\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG (\"ungzipping file (already done)\", dest)\n return dest\n \n fLOG (\"ungzipping file\", dest)\n f = open (dest, \"w\")\n data = file.read (2**27)\n size = 0\n while len (data) > 0 :\n size += len (data)\n fLOG (\"ungzipping \", size, \"bytes\")\n if isinstance (data, bytes) : f.write (bytes.decode (data))\n else : f.write (data)\n data = file.read (2**27)\n f.close ()\n file.close ()\n \n return dest\n \n if ext == \".zip\" :\n \n import zipfile\n try :\n file = zipfile.ZipFile (filename, \"r\")\n except Exception as e :\n fLOG (\"problem with \", filename)\n raise e\n \n if len (file.infolist()) != 1:\n if outfile is not None :\n raise PQHException (\"the archive contains %d files and not one as you expected by filling outfile\" % len (file.infolist()))\n fLOG (\"unzip file (multiple) \", filename)\n #message = \"\\n\".join ([ fi.filename for fi in file.infolist() ] )\n #raise Exception.YstException(\"ColumnInfoSet.load_from_file: file %s contains no file or more than one file\\n\" + message)\n folder = os.path.split (filename) [0]\n todo = 0\n _zip7_path = r\"c:\\Program Files\\7-Zip\"\n zip7 = os.path.exists (_zip7_path)\n wait = [ ]\n for info in file.infolist () :\n fileinside = info.filename\n dest = os.path.join (folder, fileinside)\n if not os.path.exists (dest) :\n fol = os.path.split (dest) [0]\n if not os.path.exists (fol) : os.makedirs (fol)\n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n continue\n \n if not sys.platform.startswith(\"win\") or not zip7 :\n data = file.read (fileinside)\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n fLOG (\"unzipping file\", dest)\n wait.append(dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (str (data))\n else :\n f.write (data)\n f.close ()\n else :\n todo += 1\n \n if todo > 0 and zip7 :\n dest = os.path.realpath (path_unzip)\n cmd = '\"' + _zip7_path + 
'\\\\7z.exe\" e -y -o\"%s\" \"%s\"' % (dest, os.path.realpath (filename)) \n out,err = run_cmd (cmd, wait = True)\n if len (err) > 0 : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,err))\n if \"Error\" in out : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,out))\n else :\n dest = path_unzip\n \n file.close ()\n \n ch = False\n while not ch :\n ch = True\n for a in wait :\n if not os.path.exists(a) : \n ch = False\n break\n time.sleep(0.5)\n \n return dest\n \n else :\n for info in file.infolist () :\n fileinside = info.filename\n \n path = os.path.split (filename)\n dest = outfile if outfile is not None else path [0] + \"/\" + fileinside\n if not os.path.exists (dest) :\n data = file.read (fileinside)\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG(\"unzipping one file (already done)\", dest)\n return dest\n \n fLOG(\"unzipping one file\", dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (bytes.decode (data))\n else :\n f.write (data)\n f.close ()\n file.close ()\n return dest\n \n return filename", "def _moov_is_compressed(datastream, moov_atom):\r\n # seek to the beginning of the moov atom contents\r\n datastream.seek(moov_atom.position+8)\r\n \r\n # step through the moov atom childeren to see if a cmov atom is among them\r\n stop = moov_atom.position + moov_atom.size\r\n while datastream.tell() < stop:\r\n child_atom = _read_atom_ex(datastream)\r\n datastream.seek(datastream.tell()+child_atom.size - 8)\r\n \r\n # cmov means compressed moov header!\r\n if child_atom.name == 'cmov':\r\n return True\r\n \r\n return False", "def test_gzip_page(self):\n content = self.unique_gzip()\n self.assertViewBehavior(\n {\"get\": content},\n headers={\"HTTP_ACCEPT_ENCODING\": \"gzip\"},\n status_code=200,\n content=self.compress(content),\n headers_exact={\"Content-Encoding\": \"gzip\"})", "def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and compressed_file.is_file())", "def test_zlib():\n body = b\"test 123\"\n compressed_body = zlib.compress(body)\n\n headers = [(b\"Content-Encoding\", b\"deflate\")]\n response = httpx.Response(\n 200,\n headers=headers,\n content=compressed_body,\n )\n assert response.content == body", "def get_file_info(fname) -> Tuple[str, bool]:\n fname = fname.lower()\n is_compressed = False\n if fname.endswith((\".tgz\", \".tar.gz\")):\n is_compressed = True\n fname = re.sub(r\"\\.(tgz|tar\\.gz)$\", \"\", fname)\n elif fname.endswith(\".gz\"):\n 
is_compressed = True\n fname = fname[:-3]\n elif fname.endswith(\".zip\"):\n is_compressed = True\n fname = fname[:-4]\n split = os.path.splitext(fname)\n return split[1], is_compressed", "def get_file_type(cls, filename):\n with open(filename) as f:\n file_header = f.read(cls.MAX_FILE_HEADER_LEN)\n for magic, filetype in cls.MAGIC_DICT.items():\n if file_header.startswith(magic):\n return filetype\n return \"uncompressed\"", "def secure_filetype(file):\n ext_list = ['png', 'jpg', 'jpeg']\n ext_valid = file.filename.split('.')[-1] in ext_list\n\n mimetype_list = [\"image/jpeg\", \"image/jpg\", \"image/png\"]\n mimetype_valid = file.mimetype in mimetype_list\n\n return ext_valid and mimetype_valid", "def is_image(content_type):\n return content_type == \"image/jpeg\" or content_type == \"image/png\"", "def is_tarfile(filename):\n\n import tarfile\n\n return tarfile.is_tarfile(filename)", "def _should_send_binary(self) -> bool:\n if not self.binary_support:\n return False\n\n content_type = self._get_content_type()\n if not content_type.startswith(self.non_binary_content_type_prefixes):\n return True\n\n content_encoding = self._get_content_encoding()\n # Content type is non-binary but the content encoding might be.\n return \"gzip\" in content_encoding.lower()", "def check_file(self, path):\n import subprocess\n\n with open('/dev/null', 'w') as null:\n ret = subprocess.call(['tar', '-tzf', path],\n stdout=null, stderr=null, stdin=null)\n if ret:\n return False\n else:\n return True", "def is_zipfile(filename):\n\n import zipfile\n\n return zipfile.is_zipfile(filename)", "def test_compress_deterministic(self):\n\n class DeterministicGZipMiddleware(GZipMiddleware):\n max_random_bytes = 0\n\n r1 = DeterministicGZipMiddleware(self.get_response)(self.req)\n r2 = DeterministicGZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r1.content, r2.content)\n self.assertEqual(self.get_mtime(r1.content), 0)\n self.assertEqual(self.get_mtime(r2.content), 0)", "def is_file_type(file_path, file_type):\n\n if not file_exists(file_path):\n return False\n\n if file_path.endswith(file_type):\n return True\n\n return False", "def should_download_file(file_lang: str, file_name: str) -> bool:\n if file_lang == LANG_CHINESE:\n return True\n if file_lang == LANG_PALI:\n return True\n if file_lang == LANG_SANSKRIT:\n return True\n if file_lang == LANG_TIBETAN:\n return True\n else:\n return False", "def valid_package(filename=None, file_type=None):\n f = File(file_path=filename)\n if filename and not file_type:\n file_type = f.get_type()\n\n if not file_type:\n return None\n\n if f.get_size() > FILESIZE_LIMIT:\n return None\n\n if \"DLL\" in file_type and filename.endswith(\".cpl\"):\n return \"cpl\"\n elif \"DLL\" in file_type:\n return \"dll\"\n elif \"PE32\" in file_type or \"MS-DOS\" in file_type:\n return \"exe\"\n elif \"PDF\" in file_type:\n return \"pdf\"\n elif \"Rich Text Format\" in file_type or \"Microsoft Office Word\" in file_type or filename.endswith(\".docx\"):\n return \"doc\"\n elif \"Microsoft Office Excel\" in file_type or \"Microsoft Excel\" in file_type or filename.endswith(\".xlsx\"):\n return \"xls\"\n elif \"Zip archive\" in file_type:\n return \"zip\"\n elif \"HTML\" in file_type:\n return \"html\"\n else:\n return None", "def test_content_type(self,url):\n \n p = re.compile('.*pdf*.')\n cookies = self.get_cookies()\n print \"cookies are:\\n \" + str(cookies)\n content_type = requests.get(url, allow_redirects=True, cookies=cookies).headers.get('content-type')\n is_pdf = 
p.match(content_type)\n print \"content type is pdf: \" + str(is_pdf)\n if is_pdf:\n return(True)\n else:\n return(False)", "def test_file(self, file: CollectedFile):\n\n return file.filename[-3:].upper() == 'TXT'", "def test_content_type_to_extension(self):\n assert ct.content_type_to_extension(\"image/jpg\") == \"jpg\"\n assert ct.content_type_to_extension(\"image/jpeg\") == \"jpg\"\n assert ct.content_type_to_extension(\"image/png\",) == \"png\"\n assert ct.content_type_to_extension(\"text/css\",) == \"css\"\n assert ct.content_type_to_extension(\"text/html\") == \"html\"\n assert ct.content_type_to_extension(\"text/css\") == \"css\"\n assert ct.content_type_to_extension(\"application/json\") == \"json\"\n assert ct.content_type_to_extension(\"application/xml\") == \"xml\"\n assert ct.content_type_to_extension(\"application/zip\") == \"zip\"", "def _should_compress(new_descriptor: Union[FileDescriptor, StreamDescriptor], ingestion_properties: IngestionProperties) -> bool:\n return not new_descriptor.is_compressed and ingestion_properties.format.compressible", "def is_compressed(self):\n return self.instance == 1", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file conetent is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exception.FileFormatError(err_msg)", "def isCfile(path:str) -> bool:\n if not isexist(path):\n return False\n \n name, ext = path.split(\".\")\n \n return ext == \"c\"", "def validate_txtfile(path):\n bFile = True if mimetypes.guess_type(path)[0] == 'text/plain' else False\n return bFile", "def allowed_file_type(file_name):\n\treturn file_name.lower().endswith(ALLOWED_FILE_TYPES)", "def is_valid_content_type(cls, content_type: str) -> bool:\n return content_type in cls.CONTENT_TYPES.value", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def file_is_this_type(cls, path):\n if not os.path.exists(path):\n raise HelperError(2, \"No such file or directory: '{0}'\"\n .format(path))\n if helpers['isoinfo']:\n logger.debug(\"Using 'isoinfo' to check whether %s is an ISO\", path)\n try:\n helpers['isoinfo'].call(['-i', path, '-d'])\n return 100\n except HelperError:\n # Not an ISO\n return 0\n\n # else, try to detect ISO files by file magic number\n with open(path, 'rb') as fileobj:\n for offset in (0x8001, 0x8801, 0x9001):\n fileobj.seek(offset)\n magic = fileobj.read(5).decode('ascii', 'ignore')\n if magic == \"CD001\":\n return 100\n return 0", "def _check_orig(self):\n if self.is_dir():\n self._orig = False\n return\n\n parts = self._path.split('.')\n try:\n if parts[-1] == 'tgz':\n self._orig = True\n elif parts[-2] == 'tar':\n if (parts[-1] in Compressor.Opts or\n parts[-1] in Compressor.Aliases):\n self._orig = True\n except IndexError:\n self._orig = False", "def __soft_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length > compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') <= trusted length ('+str(compressed_file_length)+')')", "def _accept_for_flag (self, filename):\n\t\troot, ext = 
os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions", "def file_type(filename, stream=False):\n magic_dict = {\"\\x1f\\x8b\\x08\": \"gz\",\n \"\\x42\\x5a\\x68\": \"bz2\",\n \"\\x50\\x4b\\x03\\x04\": \"zip\",\n b\"\\x50\\x4b\\x03\\x04\": \"zip\",\n \"PK\\x03\\x04\": \"zip\",\n b\"PK\\x03\\x04\": \"zip\",\n }\n\n max_len = max(len(x) for x in magic_dict)\n if not stream:\n with open(filename) as f:\n file_start = f.read(max_len)\n for magic, filetype in magic_dict.items():\n if file_start.startswith(magic):\n return filetype\n else:\n for magic, filetype in magic_dict.items():\n if filename[:len(magic)] == magic:\n return filetype\n\n return None", "def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False", "def handle_file(self, path):\n\n if path:\n if not matches_patterns(path, self.gzip_patterns):\n return\n\n try:\n original_file = self.open(path, mode=\"rb\")\n except FileNotFoundError:\n pass\n else:\n gzipped_path = \"{0}.gz\".format(path)\n\n if self.exists(gzipped_path):\n self.delete(gzipped_path)\n\n gzipped_file = self._compress(original_file)\n gzipped_path = self.save(gzipped_path, gzipped_file)\n\n return gzipped_path, gzipped_path, True", "def file_test(vcf_file):\n if vcf_file.endswith(\".gz\"):\n return gzip.open, \"rt\"\n else:\n return open, \"rt\"", "def __hard_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length != compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') == trusted length ('+str(compressed_file_length)+')')", "def test_resource_only_content_type(self):\n\n def do_check(path):\n \"\"\"The contents of the .iml file should certain sourceFolder entries:\n\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_and_code\" isTestSource=\"false\" />\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only\" type=\"java-resource\" />\n <sourceFolder url=\".../testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly\" type=\"java-resource\" />\n ...\n \"\"\"\n found = set()\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n for sourceFolder in self._get_sourceFolders(dom):\n url = sourceFolder.getAttribute('url')\n is_test_source = 
sourceFolder.getAttribute('isTestSource')\n type_attr = sourceFolder.getAttribute('type')\n url = re.sub(r'^.*/testprojects/', 'testprojects/', url)\n found.add(url)\n if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code':\n self.assertEquals('', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code':\n self.assertEquals('', type_attr)\n self.assertEquals('True', is_test_source)\n if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n # TODO(Eric Ayers) tests/resources/.../idearesourcesonly : this directory has no\n # junit_tests depending on a target, so it is assumed to be plain resources.\n # Since this is under .../tests, humans know this is supposed to be a test only\n # resource. Right now we don't have a good way of communicating\n # that to the idea goal other than inferring from the presence of junit_tests in\n # source_root, which may not be a reliable indicator.\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly':\n self.assertEquals('java-test-resource', type_attr)\n self.assertEquals('True', is_test_source)\n\n self.assertEquals(set([\n 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly',\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code',\n 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code',\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only'\n ]), found)\n\n self._idea_test([\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly::',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly::'\n ], check_func=do_check)", "def validate_image_type(filename: str) -> bool:\n supported_extensions = (\"png\", \"jpg\", \"jpeg\")\n return (filename not in (None, \"\")) and (get_extension(filename) in supported_extensions)", "def is_min(filename):\r\n return re.search(\"min.js$\", filename)", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def verify_and_get_compress_func(compression_type, compression_level=9):\n # type: (str, int) -> Optional[Callable]\n if compression_type not in SUPPORTED_COMPRESSION_ALGORITHMS:\n return None\n\n try:\n compress_func, decompress_func = get_compress_and_decompress_func(\n compression_type, compression_level=compression_level\n )\n\n # Return early if special \"none\" compression is used\n if compression_type == \"none\":\n return compress_func\n\n # Perform a sanity check that data compresses and that it can be decompressed\n cdata = compress_func(COMPRESSION_TEST_STR)\n\n if (\n len(cdata) < len(COMPRESSION_TEST_STR)\n and decompress_func(cdata) == COMPRESSION_TEST_STR\n 
):\n return compress_func\n except Exception:\n pass\n\n return None", "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']", "def test_archive_wrong_extension(self):\n testfile = 'wrongextension.eml'\n try:\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='wrongext', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = 'recipient-wrongarchextension@unittests.fuglu.org'\n conffile = self.tempdir + \"/%s-archivenames.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny \\.exe$ exe detected in zip with wrong extension\")\n self.rulescache._loadrules()\n suspect = Suspect(\n 'sender@unittests.fuglu.org', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'exe in zip with .gz extension was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)", "def detect_file_format(path):\n with open(path, \"rb\") as f:\n first_bytes = f.read(16)\n if first_bytes.startswith(b\"CRAM\"):\n return \"CRAM\"\n if first_bytes.startswith(b\"##fileformat=VCF\"):\n return \"VCF\"\n\n gzip_header = b\"\\037\\213\"\n if first_bytes.startswith(gzip_header):\n with gzip.GzipFile(path, \"rb\") as f:\n first_bytes = f.read(16)\n if first_bytes.startswith(b\"BAM\\1\"):\n return \"BAM\"\n elif first_bytes.startswith(b\"##fileformat=VCF\"):\n return \"VCF\"\n\n return None", "def is_filetype(filename=None, search_str=None):\n if not search_str:\n return False\n results = puremagic.magic_file(filename)\n for result in results:\n if search_str.lower() in result.name.lower():\n return True\n return False", "def isSupportedContent(cls, fileContent):\n magic = bytearray(fileContent)[:4]\n magics = (\n p('>I', 0xfeedface),\n p('>I', 0xfeedfacf),\n p('>I', 0xcafebabe),\n\n p('<I', 0xfeedface),\n p('<I', 0xfeedfacf),\n p('<I', 0xcafebabe),\n )\n return magic in magics", "def test_decompress_file():\n gz_file = os.path.join(\n tempfile.gettempdir(),\n \"jade-unit-test-file.gz\",\n )\n with gzip.open(gz_file, \"wb\") as f:\n f.write(b\"Hello World\")\n assert os.path.exists(gz_file)\n\n new_file = decompress_file(gz_file)\n assert os.path.exists(new_file)\n with open(new_file, \"r\") as f:\n data = f.read()\n assert data == \"Hello World\"\n\n if os.path.exists(gz_file):\n os.remove(gz_file)\n\n if os.path.exists(new_file):\n os.remove(new_file)", "def detect_content_type(self, path=None, payload=None):\n\n f = file_path(path, payload)\n switches = [\"-d\", f]\n result = self._command_template(switches).lower()\n return result, path, f", "def is_textfile(filename, blocksize=512):\n if any(filename.endswith(ext) for ext in KNOWN_BINARY_FILE_EXTS):\n return False\n return is_text(open(filename, \"rb\").read(blocksize)) # pylint: disable=consider-using-with", "def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. 
See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )", "def is_good_file(filename):\n for e in extensions:\n if filename.endswith(e):\n return True\n return False", "def is_downloadable(url) -> bool:\n content_type = requests.head(url, allow_redirects=True).headers.get('content-type')\n if 'text' in content_type.lower() or 'html' in content_type.lower():\n return False\n return True", "def testXzFile(self):\n try:\n remoteLocator = self.__xzFile\n fn = self.__fileU.getFileName(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n ok = fp.endswith(\".pdb\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def archive_type_from_content_type(content_type, all_impl = False, custom_ctypes_dict = None):\n\n if content_type is None:\n return None\n\n archive_type = None\n if all_impl:\n ctypes2check = Archivehandle.implemented_archive_ctypes\n elif custom_ctypes_dict is not None:\n ctypes2check = custom_ctypes_dict\n else:\n ctypes2check = Archivehandle.avail_archive_ctypes\n\n for regex, atype in iter(ctypes2check.items()):\n if re.match(regex, content_type, re.I):\n archive_type = atype\n break\n\n return archive_type", "def get_compressed_file_content(location, decompressor):\n warnings = []\n with decompressor(location, 'rb') as compressed:\n content = compressed.read()\n if getattr(decompressor, 'has_trailing_garbage', False):\n warnings.append(location + ': Trailing garbage found and ignored.')\n return content, warnings" ]
[ "0.64723974", "0.64621985", "0.6352601", "0.6308476", "0.6298893", "0.62387055", "0.617346", "0.6168313", "0.613196", "0.6075621", "0.6057141", "0.6023388", "0.59639585", "0.59618855", "0.59406054", "0.5932078", "0.5916286", "0.5899778", "0.5892047", "0.5887669", "0.5869888", "0.58216345", "0.5794927", "0.57736546", "0.57636416", "0.57325137", "0.57113236", "0.57090557", "0.569644", "0.5648207", "0.5647328", "0.5626939", "0.56233096", "0.5623104", "0.5615818", "0.55925", "0.55870837", "0.5553818", "0.5551925", "0.5550314", "0.5535831", "0.55048704", "0.54869086", "0.5485934", "0.5475414", "0.5474593", "0.54666036", "0.54542977", "0.54451805", "0.5437323", "0.5425436", "0.5415297", "0.54143965", "0.5411671", "0.54039717", "0.5399272", "0.5386517", "0.53692126", "0.53535664", "0.53439176", "0.53326666", "0.533181", "0.5329631", "0.53248215", "0.53218216", "0.531834", "0.5316137", "0.531463", "0.5311191", "0.5311167", "0.5308893", "0.53071284", "0.52903205", "0.52864057", "0.5280289", "0.5275072", "0.52535766", "0.524302", "0.5241811", "0.5238022", "0.52362347", "0.52266973", "0.5225058", "0.5222188", "0.5215971", "0.52154493", "0.521332", "0.5204819", "0.51950824", "0.5194638", "0.5193266", "0.51885086", "0.518346", "0.5181825", "0.51800936", "0.5176082", "0.5175954", "0.51730627", "0.51631564", "0.51625705" ]
0.8202419
0
Compress the content string passed. Should be called when gzip is enabled to compress text types. There is no real advantage in using this with images, since most are already nicely compressed by some image processing algorithm.
def _compress_string(content): zbuf = StringIO() zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) zfile.write(content) zfile.close() return zbuf.getvalue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(string):", "def compress(string):", "def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content", "def compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content", "def compression(s):", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()", "def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw", "def compress_content(content_type, content):\n \n command = 'java -jar %s --type=%s' % (yuicompressor_path, content_type)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n p.stdin.write(content)\n p.stdin.close()\n \n compressed = p.stdout.read()\n p.stdout.close()\n \n err = p.stderr.read()\n p.stderr.close()\n \n if p.wait() != 0:\n if not err:\n err = 'Unable to use YUI Compressor'\n \n \n return err, compressed", "def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def compression(self) -> str:\n ...", "def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")", "def _gzip_str(string_):\n out = BytesIO()\n\n with gzip.GzipFile(fileobj=out, mode='w') as fo:\n fo.write(string_.encode())\n\n bytes_obj = out.getvalue()\n return bytes_obj", "def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()", "def string_compression(w):\n if len(w) <= 1:\n return w\n\n substrings = []\n prev_char = w[0]\n char_count = 1\n for char in w[1:]:\n if prev_char == char:\n char_count += 1\n else:\n substrings.append('%s%s' % (prev_char, 
char_count))\n char_count = 1\n prev_char = char\n\n substrings.append('%s%s' % (prev_char, char_count))\n\n compression = ''.join(substrings)\n if len(compression) < len(w):\n return compression\n else:\n return w", "def compress(bstr):\n from sphobjinv.re import pb_comments, pb_data\n\n # Preconvert any DOS newlines to Unix\n s = bstr.replace(b\"\\r\\n\", b\"\\n\")\n\n # Pull all of the lines\n m_comments = pb_comments.findall(s)\n m_data = pb_data.finditer(s)\n\n # Assemble the binary header comments and data\n # Comments and data blocks must end in newlines\n hb = b\"\\n\".join(m_comments) + b\"\\n\"\n db = b\"\\n\".join(_.group(0) for _ in m_data) + b\"\\n\"\n\n # Compress the data block\n # Compression level nine is to match that specified in\n # sphinx html builder:\n # https://github.com/sphinx-doc/sphinx/blob/1.4.1/sphinx/\n # builders/html.py#L843\n dbc = zlib.compress(db, 9)\n\n # Return the composited bytestring\n return hb + dbc", "def gzdeflate():\n return zlib.compress(val)", "def postprocess(self, json_string):\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\n for row in json_string.split(\"\\n\"):\n if is_compressing:\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\n (\"\\\"\" if is_hash else \"{\")):\n compressed.append(row.rstrip())\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\n compressed.append(row.rstrip())\n is_compressing = False\n else:\n compressed[-1] += \" \" + row.strip()\n else:\n compressed.append(row.rstrip())\n if any(a in row for a in [\"edges\", \"nodes\"]):\n # Fix to handle issues that arise with empty lists\n if \"[]\" in row:\n continue\n spaces = sum(1 for _ in takewhile(str.isspace, row))\n is_compressing, is_hash = True, \"{\" in row\n return \"\\n\".join(compressed)", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def compress(self, data):\r\n return self.add_chunk(data)", "def compress(string):\n\n compressed = []\n\n curr_char = \"\"\n char_count = 0\n\n for char in string:\n if char != curr_char:\n compressed.append(curr_char)\n\n if char_count > 1:\n compressed.append(str(char_count))\n\n curr_char = char\n char_count = 0\n\n char_count += 1 \n\n compressed.append(curr_char)\n if char_count > 1:\n compressed.append(str(char_count))\n\n return \"\".join(compressed)", "def compress(string):\n \n # Build the dictionary.\n dict_size = 256\n seen = dict((chr(i), i) for i in range(dict_size))\n \n p = \"\"\n output = 0\n for c in string:\n pc = p + c\n if pc in seen:\n p = pc\n else:\n # We have not seen this. 
Output the stuff.\n output += 1\n seen[pc] = dict_size\n dict_size += 1\n p = c\n \n # Output the code for w.\n return output * 12", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def compress(string):\n\n past_chars = [string[0]]\n char_counts = [1]\n\n for i in range(1, len(string)):\n if string[i] == past_chars[-1]:\n char_counts[-1] += 1\n else:\n past_chars.append(string[i])\n char_counts.append(1)\n\n compressed_string = \"\"\n\n # list_of_ones = []\n # for i in range(len(string)):\n # list_of_ones.append(1)\n list_of_ones = [1 for x in range(len(string))]\n\n if char_counts == list_of_ones:\n return string\n else:\n for char, count in zip(past_chars, char_counts):\n compressed_string += char + str(count)\n\n\n return compressed_string", "def compress(self, file):\n\t\t\n\t\ttext = file.read() \n\t\ttext = text.rstrip() #elimina los espacios en blanco del final\n\n\t\t\n\t\tfrequency = self.make_frequency_dict(text)#obtenemos la frencuencia de cada numero en el texto\n\t\tself.make_heap(frequency)\n\t\tself.merge_nodes()\n\t\tself.make_codes()\n\t\tencoded_text = self.get_encoded_text(text)\n\t\tpadded_encoded_text = self.pad_encoded_text(encoded_text)\n\n\t\tb = self.get_byte_array(padded_encoded_text)\n\n\t\treturn b", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def string_compression(input_string):\n compressed_string = ''\n char_count = 1\n prev_char = ''\n for char in input_string:\n if char == prev_char:\n char_count += 1\n else:\n compressed_string = compressed_string + str(char_count) + char\n char_count = 1\n prev_char = char\n\n return compressed_string[1:] + str(char_count)", "def compress():\n run_manage_cmd('compress_assets')", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def isGzippable(self, css=0, js=0, REQUEST=None):\n # force: force http compression even if the browser doesn't send an accept\n # debug: return compression state (0: no, 1: yes, 2: force)\n # css: set this to 1 inside a css file (for later use)\n # js: set this to 1 inside a js file (for later use)\n\n if REQUEST is None:\n 
REQUEST = self.REQUEST\n use_gzip = self.getGzip()\n if not self.getEnabled():\n use_gzip = 'never'\n\n force = 0\n if use_gzip == 'never':\n enable_compression = 0\n elif use_gzip == 'always':\n enable_compression = 1\n force = 1\n elif use_gzip == 'accept-encoding':\n # compress everything except css and js\n enable_compression = 1\n elif use_gzip == 'accept-encoding+user-agent':\n # gzip compatibility info courtesy of\n # http://httpd.apache.org/docs/2.2/mod/mod_deflate.html\n user_agent = REQUEST.get('HTTP_USER_AGENT', '')\n if user_agent.startswith('Mozilla/4'):\n # Netscape 4.x can't handle gzipped css and js\n enable_compression = (css==0 and js==0)\n # Netscape 4.0.6-4.0.8 has some gzip-related bugs\n if user_agent[len('Mozilla/4.')] in ('6','7','8'):\n enable_compression = 0\n # Some versions of MSIE pretend to be Netscape 4.x but are OK with gzipping\n if user_agent.find('MSIE'):\n enable_compression = 1\n\n return (enable_compression, force, REQUEST.get('HTTP_ACCEPT_ENCODING', '').find('gzip') != -1)", "def compress(value):\n # a) removing indentation in the begning of the string.\n value = re.sub(r\"(?m)^[\\t ]+\", \"\", value)\n\n # b) replacing each two whitespaces with a single one and each\n # __three__ newlines with __two__.\n return re_whitespace.sub(\"\\\\1\",\n re_newlines.sub(\"\\\\1\\\\1\", value)).strip()", "def test_compress_response(self):\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertEqual(r.get(\"Content-Length\"), str(len(r.content)))", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(path, path_out, terms, iterations, annotate, silent):\n if terms is None:\n terms = DEFAULT_TERMS\n\n if not silent:\n print(f\"Compressing image...\")\n\n result = compress_image_to_file(path=path, terms=terms,\n iterations=iterations,\n path_out=path_out,\n annotate=annotate)\n\n output_path = result['output_path']\n\n if not silent:\n print(f\"Compressed to:\\n{output_path}\")\n print(f\"Terms in singular value expansion: {terms}\")\n print(f\"Power method iterations: {result['iterations']}\")\n print(f\"Compression ratio: {result['compression_ratio']}\")\n\n return result", "def decompress_gzip(in_str):\n import gzip\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def write_gzip_bytes(self, bytes, compresslevel=5):\n sio = StringIO()\n with gzip.GzipFile(None, 'w', compresslevel, sio) as gz:\n gz.write(bytes)\n with self.open('wb') as f:\n f.write(sio.getvalue())", "def compress(filename, remove=False):\n import gzip\n fin = open(filename, 'rb')\n fout = gzip.open(filename+'.gz', 'wb')\n fout.writelines(fin)\n fout.close()\n fin.close()\n if remove == True:\n os.remove(filename)\n return", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s 
$0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def decompress_gzip(in_str):\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def convert_to_jpg_then_compress(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name).replace('.png', '.jpg')\n\n\t\timage = Image.open(self.full_path)\n\t\timage.save(self._compressed_save_path)\n\n\t\timage = Image.open(self._compressed_save_path)\n\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)", "def setCompressMode(mode):\n libxml2mod.xmlSetCompressMode(mode)", "def apply_compressed_sensing(self, inputs, rng):\n print('using compressed sensing!')\n train_path = os.path.join(\n self.data_dir, 'assist{0}-{1}'.format(self.which_year, 'train'))\n\n if self.which_set == 'test':\n loaded = np.load(train_path + '-compression-matrix.npz')\n self.compress_matrix = loaded['compress_matrix']\n self.compress_dim = self.compress_matrix.shape[1]\n elif self.which_set == 'train':\n self.compress_matrix = self.make_compression_matrix(train_path, rng)\n\n inputs = self.compress_inputs(inputs)\n return inputs", "def test_no_compress_compressed_response(self):\n self.resp[\"Content-Encoding\"] = \"deflate\"\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"deflate\")", "def compress_image(filename,k):", "def zip_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with ZipFile(destination, \"w\") as thezip:\n thezip.write(self.file)", "def compress(dbconfig, target_name):\n fmt = dbconfig.get(\"format\", None)\n if fmt in [\"tarball\", \".tar.gz\", \"tar.gz\"]:\n info(\"zipping and compressing \" + target_name)\n output_name = target_name + \".tar.gz\"\n cmd = [\"tar\", \"zcvf\", output_name, target_name]\n subprocess.call(cmd)\n info(\"removing \" + target_name)\n cmd = [\"rm\", \"-r\", target_name]\n subprocess.call(cmd)\n elif fmt in [\".gz\", \"gz\", \"compress\", \"compressed\", \"gzip\", \"gzipped\"]:\n info(\"compressing \" + target_name)\n cmd = [\"gzip\", \"-r\", \"-q\", target_name]\n output_name = target_name + \".gz\"\n subprocess.call(cmd)\n else:\n error(\"invalid \\\"compress\\\" setting, should 
be tarball or compress, \" + target_name)\n output_name = \"\"\n return output_name", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def test_deflate():\n body = b\"test 123\"\n compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)\n compressed_body = compressor.compress(body) + compressor.flush()\n\n headers = [(b\"Content-Encoding\", b\"deflate\")]\n response = httpx.Response(\n 200,\n headers=headers,\n content=compressed_body,\n )\n assert response.content == body", "def compress(string):\n r = \"\"\n l = len(string)\n\n if l == 0:\n return \"\"\n\n if l == 1:\n return string + \"1\"\n\n count = 1\n i = 1\n\n while i < l:\n\n if string[i] == string[i - 1]:\n count += 1\n else:\n r = r + string[i - 1] + str(count)\n count = 1\n\n i += 1\n\n r = r + string[i - 1] + str(count)\n\n return r", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def js_minify(js):\n log.info(\"Compressing Javascript...\")\n ins, outs = StringIO(js), StringIO()\n JavascriptMinify(ins, outs).minify()\n return force_single_line_js(outs.getvalue())", "def test_zlib():\n body = b\"test 123\"\n compressed_body = zlib.compress(body)\n\n headers = [(b\"Content-Encoding\", b\"deflate\")]\n response = httpx.Response(\n 200,\n headers=headers,\n content=compressed_body,\n )\n assert response.content == body", "def test_compress_1_char(self):\n text = 'a'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'a')\n self.assertEqual(actual, expected)", "def test_no_compress_incompressible_response(self):\n self.resp.content = self.incompressible_string\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.incompressible_string)\n self.assertIsNone(r.get(\"Content-Encoding\"))", "def compressible(f):\n @wraps(f)\n def compressor(*args, **kwargs):\n @flask.after_this_request\n def compress(response):\n if (response.status_code < 200 or\n response.status_code >= 300 or\n 'Content-Encoding' in response.headers):\n # Don't encode anything other than a 2xx response\n # code. Don't encode a response that's\n # already been encoded.\n return response\n\n accept_encoding = flask.request.headers.get('Accept-Encoding', '')\n if not 'gzip' in accept_encoding.lower():\n return response\n\n # At this point we know we're going to be changing the\n # outgoing response.\n\n # TODO: I understand what direct_passthrough does, but am\n # not sure what it has to do with this, and commenting it\n # out doesn't change the results or cause tests to\n # fail. 
This is pure copy-and-paste magic.\n response.direct_passthrough = False\n\n buffer = BytesIO()\n gzipped = gzip.GzipFile(mode='wb', fileobj=buffer)\n gzipped.write(response.data)\n gzipped.close()\n response.data = buffer.getvalue()\n\n response.headers['Content-Encoding'] = 'gzip'\n response.vary.add('Accept-Encoding')\n response.headers['Content-Length'] = len(response.data)\n\n return response\n\n return f(*args, **kwargs)\n return compressor", "def on_format_changed(self):\n\n format = self.format.currentText()\n compressions = lib.list_compressions(format)\n self.compression.clear()\n self.compression.addItems(compressions)", "def generate_compressed(text, codes):\n\n def cut(bits):\n \"\"\"Return a list of strings which represent bytes.\n\n @param str bits: A string representation of bits\n @rtype: list\n\n >>> cut(\"00000000\")\n ['00000000']\n >>> cut(\"101110011\")\n ['10111001', '1']\n \"\"\"\n\n if len(bits) <= 8:\n return [bits]\n else:\n list_ = [bits[:8]]\n list_.extend(cut(bits[8:]))\n return list_\n\n string = \"\"\n comp_byte = bytes([])\n for by in text:\n string += codes[by]\n list_ = cut(string)\n for i in list_:\n comp_byte += bytes([bits_to_byte(i)])\n return comp_byte", "def encode(self, text):", "def compress(cls, img, as_string=False):\n h0, w0 = img.shape\n w = binary_cast([w0], 'H', 'BB')\n h = binary_cast([h0], 'H', 'BB')\n cp = np.concatenate((w, h, img.astype('uint8').flatten()))\n # VLR.cmp: more 2x compression\n scp = VariableLength.compress(cp)\n if as_string:\n return scp\n # translate string into unit8 for storage\n vcp = np.array([ord(d) for d in scp]).astype('uint8')\n return vcp", "def compress(result):\n\treturn string.join((result.split()),' ')", "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def compress_bytes(text: bytes, codes: Dict[int, str]) -> bytes:\n\n if not text:\n return bytes([])\n else:\n bit = \"\"\n lst = []\n for symbols in text:\n bit += codes[symbols]\n\n if len(bit) == 8:\n lst.append(bits_to_byte(bit))\n bit = \"\"\n\n elif len(bit) > 8:\n lst.append(bits_to_byte(bit[:8]))\n bit = bit[8:]\n\n if 0 < len(bit) < 8:\n byte = bits_to_byte(bit)\n lst.append(byte)\n\n return bytes(lst)", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def test_compress_2_idenctical_char(self):\n text = 'aa'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'aa')\n self.assertEqual(actual, expected)", "def compress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = build_frequency_dict(text)\n tree = build_huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (tree.num_nodes_to_bytes() + tree_to_bytes(tree) +\n int32_to_bytes(len(text)))\n result += compress_bytes(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def 
stringCompression(s):\n\n orig_len = len(s)\n t = []\n current_letter = s[0]\n count = 1\n\n for i in range(1, orig_len):\n if s[i] == current_letter:\n count += 1\n if i == orig_len - 1:\n t.append(current_letter + str(count))\n else:\n t.append(current_letter + str(count))\n current_letter = s[i]\n count = 1\n\n t = ''.join(t)\n return t if len(t) < orig_len else s\n\n # Time Complexity: O(len(s))\n # Space Complexity: O(len(s)), worst case is 2*len(s)", "def _get_compressed(self):\n assert self.compression_type != CompressionType.NONE\n tmp_mset = MessageSet(messages=self._messages)\n uncompressed = bytearray(len(tmp_mset))\n tmp_mset.pack_into(uncompressed, 0)\n if self.compression_type == CompressionType.GZIP:\n compressed = compression.encode_gzip(buffer(uncompressed))\n elif self.compression_type == CompressionType.SNAPPY:\n compressed = compression.encode_snappy(buffer(uncompressed))\n else:\n raise TypeError(\"Unknown compression: %s\" % self.compression_type)\n return Message(compressed, compression_type=self.compression_type)", "def compress_javascript_data(data):\n tmp_fname = tempfile.mktemp(\"urfastr-player-min.js\")\n open(tmp_fname, \"w+\").write(data)\n cmdline = [\"yui-compressor\", tmp_fname]\n compressed_data = subprocess.Popen(cmdline, stdout=subprocess.PIPE).communicate()[0]\n os.remove(tmp_fname) \n return compressed_data", "def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False", "def compress(self, tensor, *args, **kwargs):\n pass", "def data_zip(self, data):\n stringio = StringIO.StringIO()\n gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb')\n gzip_file.write(data)\n gzip_file.close()\n return stringio.getvalue()", "def sql_encode(data: str) -> str:\n return base64.urlsafe_b64encode(gzip.compress(data.encode())).decode()", "def compress(file, output, pw):\n try:\n bsc.compress_file(file, output, pw)\n print(Fore.GREEN + \"Compressed!\")\n except bsc.FrequencyOverflowException as err:\n print(err)\n except FileNotFoundError:\n print(Fore.RED + \"File not found!\")", "def save_to_gzip(data,fname):\n with gzip.open(fname + '.gz', 'wb',compresslevel = 9) as f:\n f.write(data.tobytes())", "def compression(binary_sequence:str):\r\n compressed_sequence = \"\"\r\n calcul_byte =(len(binary_sequence) % 8)\r\n if calcul_byte != 0:\r\n binary_sequence = (8 - calcul_byte)*'0' + binary_sequence\r\n \"\"\" \r\n Add the missing 0's at the beginning of the string so that its length \r\n is divisible by 8 without remainder\r\n \"\"\"\r\n for byte in range(0, len(binary_sequence), 8):\r\n compressed_sequence += chr(int(binary_sequence[byte:byte+8], 2))\r\n return (compressed_sequence, calcul_byte)", "def image_compress(img):\n img_mode = img.mode\n img_size = img.size\n img = img.tobytes()\n zlib.compress(img)\n\n return img_mode, img_size, img", "def css_minify(css, wrap=False, comments=False, sort=False):\n log.info(\"Compressing CSS...\")\n css = remove_comments(css) if 
not comments else css\n css = sort_properties(css) if sort else css\n css = unquote_selectors(css)\n css = condense_whitespace(css)\n css = remove_url_quotes(css)\n css = condense_xtra_named_colors(css)\n css = condense_font_weight(css)\n css = remove_unnecessary_whitespace(css)\n css = condense_std_named_colors(css)\n css = remove_unnecessary_semicolons(css)\n css = condense_zero_units(css)\n css = condense_multidimensional_zeros(css)\n css = condense_floating_points(css)\n css = normalize_rgb_colors_to_hex(css)\n css = condense_hex_colors(css)\n css = condense_border_none(css)\n css = wrap_css_lines(css, 80) if wrap else css\n css = condense_semicolons(css)\n css = add_encoding(css)\n css = restore_needed_space(css)\n log.info(\"Finished compressing CSS !.\")\n return css.strip()", "def compress(value):\n\t# type: (Any, ) -> Any\n\n\t# sets are not processed because they cannot contain lists or bytearrays anyway.\n\n\tif isinstance(value, (tuple, list)): # tuple *can* contain mutables\n\t\treturn tuple(compress(x) for x in value)\n\telif isinstance(value, bytearray):\n\t\treturn bytes(value) # bytearray can only be bytes or List[int] right?\n\telif isinstance(value, dict):\n\t\treturn {k: compress(v) for k, v in value.items()}\n\telse:\n\t\treturn value", "def compress(clean):\n if clean is None:\n return None\n clean = re.sub(r'[\\r\\n\\t\\xa0]', ' ', clean)\n clean = re.sub(r'&nbsp;?', ' ', clean)\n clean = re.sub(r'\\s+', ' ', clean)\n return clean.strip()", "def optimize(data):\n try:\n optimized_data = tinify.from_buffer(data).to_buffer()\n return optimized_data\n except tinify.AccountError as e:\n # This exception may rise, since a Free account is being used (only 500 requests/month)\n logger.error(\"There is a problem with the TinyPNG Account: {0}\".format(e))\n except tinify.ServerError as e:\n logger.error(\"There seem to be problems in the compression server: {0}\".format(e))\n except Exception as e:\n logger.error(\"The image could not be compressed: {0}\".format(e))\n finally:\n return data", "def gzipPage(page):\n #if not hasattr(page,\"info\"):\n # return(\"\")\n data = object()\n # Check if content encoding is gzip\n if page.info().get('Content-Encoding') == 'gzip':\n buf = StringIO(page.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\n else :\n data = page.read()\n return(data)", "def set_compression(self, compression):\n converter = geowave_pkg.datastore.redis.config.RedisOptions.CompressionConverter()\n self._java_ref.setCompression(converter.convert(compression))", "def test_compress_4_idenctical_char(self):\n text = 'bbbb'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'bb') + bytearray([0, 16])\n self.assertEqual(actual, expected)", "def do_gzip(fileobj):\r\n sio = cStringIO.StringIO()\r\n gzf = gzip.GzipFile(fileobj = sio, mode = \"wb\")\r\n while True:\r\n data = fileobj.read(buf_size)\r\n if not data:\r\n break\r\n gzf.write(data)\r\n gzf.close()\r\n return sio", "def html_minify(html, comments=False):\n log.info(\"Compressing HTML...\")\n html = remove_html_comments(html) if not comments else html\n html = condense_style(html)\n html = condense_script(html)\n html = clean_unneeded_html_tags(html)\n html = condense_html_whitespace(html)\n html = unquote_html_attributes(html)\n log.info(\"Finished compressing HTML !.\")\n return html.strip()", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed 
= tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def serialize(content):\r\n return unicode(content)", "def compress(condition, a, axis=None, out=None):\n return a.compress(condition, axis, out)", "def compress_stream(src, dst):\n with gzip.GzipFile(fileobj=dst, mode='wb') as gz:\n for block in iterfile(src):\n gz.write(block)", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)" ]
[ "0.727991", "0.727991", "0.7271702", "0.7243655", "0.72023225", "0.7170906", "0.71320665", "0.6685947", "0.6557357", "0.64971095", "0.6466577", "0.63694817", "0.6278639", "0.624268", "0.6192079", "0.61507696", "0.6118266", "0.59070766", "0.57896465", "0.5700065", "0.5692797", "0.56811476", "0.567238", "0.5641472", "0.56245315", "0.5582722", "0.55467457", "0.5535953", "0.5488455", "0.5457179", "0.5437802", "0.5437802", "0.54267704", "0.54044956", "0.5381578", "0.5358276", "0.5321115", "0.5296449", "0.52880806", "0.52758485", "0.52758485", "0.52415735", "0.52314967", "0.52263683", "0.5220648", "0.5208666", "0.52069163", "0.51329106", "0.5130008", "0.51170987", "0.51151776", "0.509434", "0.50850624", "0.5062556", "0.5045321", "0.50254697", "0.5005322", "0.5000827", "0.5000827", "0.4999585", "0.49948516", "0.49843374", "0.49560088", "0.49525595", "0.49510208", "0.49214247", "0.4908577", "0.49065968", "0.4897295", "0.48949757", "0.48930773", "0.4888466", "0.4888466", "0.48814917", "0.4877827", "0.48628482", "0.4848956", "0.48440382", "0.48414856", "0.48406976", "0.4832966", "0.48224646", "0.47969395", "0.47813925", "0.4780147", "0.47737467", "0.47698855", "0.4751519", "0.47421187", "0.47392064", "0.47310704", "0.47238117", "0.47214517", "0.4721026", "0.47093824", "0.47061542", "0.47003734", "0.46990237", "0.46989182", "0.46873617" ]
0.7453164
0
Returns the sha metadata of a local file
def _get_sha_metadata(filename): with open(filename) as f: return hashlib.sha1(f.read()).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_sha(full_path):\n in_file = open(full_path, 'rb')\n try:\n # Bug: why doesn't this use sha_func?\n sha_value = sha1()\n while True:\n bytes = in_file.read(READ_CHUNK_LEN)\n if bytes == \"\":\n break\n sha_value.update(bytes)\n return sha_value.digest()\n finally:\n in_file.close()", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()", "def checksum(file):\n\n cksm = hashlib.sha256()\n f = open(file, 'rb')\n try:\n cksm.update(f.read())\n finally:\n f.close()\n return cksm.hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def file_digest(file):\n # 'rb' file mode reads the file as bytes\n input_file = open(file, 'rb')\n data = input_file.read()\n # getting the digest\n digest = hash_comparing(data).hexdigest()\n input_file.close()\n return digest", "def sha_hash(file_name: str):\n BLOCKSIZE = 65536\n line = '' # format one line for hash\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) # read each line of doc\n while len(buf) > 0:\n line += buf.decode('utf-8')\n buf = afile.read(BLOCKSIZE)\n\n hex = \"0x\" + sha1(line.encode()) # create sha1 hash\n return int(hex, 0)", "def hash_file_native(file_path, tool=\"sha256sum\"):\n output = subprocess.check_output([tool, file_path], shell=False)\n return output.decode(\"utf-8\").partition(\" \")[0].strip()", "def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # 
http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()", "def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())", "def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()", "def file_hash(filepath: Path):\n hsh = hashlib.sha256()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with Path(filepath).open(\"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n hsh.update(mv[:n])\n return hsh.hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def hash_from_file(file_path):\r\n return hash_from_code(open(file_path, 'rb').read())", "def GetFileSha1(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha1=True)['sha1'])", "def computeHash(infile):\n f = open(infile, 'rb')\n buffer = f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()", "def get_checksum(file_path: str) -> str:\n\n # Open the file in binary mode\n with open(file_path, \"rb\") as file:\n # Create a SHA-256 hash object\n hash_object = hashlib.sha256()\n\n # Iterate over the file in chunks\n for chunk in iter(lambda: file.read(4096), b\"\"):\n # Feed the chunk to the hash object\n hash_object.update(chunk)\n\n # Obtain the checksum in hexadecimal format\n checksum = hash_object.hexdigest()\n\n return checksum", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def get_file_hash(fname, hash_length):\n hash_sha = hashlib.sha256()\n with open(fname, 'rb') as infile:\n for chunk in iter(lambda: infile.read(4096), b''):\n hash_sha.update(chunk)\n hash_sha = hash_sha.hexdigest()\n hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))\n return hex_encode(hash_sha, hash_length)", "def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()", "def hashFile(path: str) -> str:\n\tif not os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())", "def get_file_sha256(fname):\n with open(fname, 'rb') as afile:\n return base64.b64encode(get_file_hash(afile, 
hashlib.sha256()))", "def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()", "def _get_file_sha256_hash(file_path):\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(file_path, \"rb\") as f:\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()", "def get_file_checksum(file_path):\n with open(file_path) as f:\n content = f.read()\n return md5(content.encode()).hexdigest()", "def fetch_local_hashcode(self, path):\n\t\treturn hashlib.sha256(open(self.config[\"daemon\"][\"rootdir\"] + path, \"rb\").read()).hexdigest()", "def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()", "def get_file_hash(file_path):\n with open(file_path, 'rb') as f:\n file_name = os.path.basename(file_path)\n to_hash = f.read() + file_name.encode('utf-8')\n new_hash = hashlib.md5(to_hash).hexdigest()\n return new_hash", "def checksumFile(filename):\n return md5File(filename)", "def checksum_of(filepath):\n bfsz = 10240000 # 10 MB buffer\n sum = hashlib.sha256()\n with open(filepath) as fd:\n while True:\n buf = fd.read(bfsz)\n if not buf: break\n sum.update(buf)\n return sum.hexdigest()", "def fast_hash(infile):\n\n m = hashlib.sha256()\n with open(infile, 'rb', 1024 * 1024) as f:\n l = f.read(1024 * 1024)\n while (len(l) > 0):\n m.update(l)\n f.seek(1024 * 1024 * (512 - 1), 1)\n l = f.read(1024 * 1024)\n return m.hexdigest()", "def hash_file(filepath):\n digest = hashlib.sha1()\n with open(filepath, 'rb') as f:\n while True:\n chunk = f.read(1024*1024)\n if not chunk:\n break\n digest.update(chunk)\n return digest.hexdigest()", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()", "def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()", "def hashfile(filename):\n BLOCKSIZE = 65536\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n sha1.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(sha1.hexdigest())", "def hash_file(method, path):\n f = open(path, \"rb\")\n h = method()\n while True:\n buf = f.read(BUFSIZE)\n if not buf:\n break\n h.update(buf)\n return h.hexdigest()", "def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum", "def hash_file(file_to_hash):\n print(\"Hashing \" + file_to_hash + \"...\")\n hash_algorithm = hashlib.sha256()\n file = open(file_to_hash, 'rb')\n while True:\n contents = file.read(65536)\n if not contents:\n break\n hash_algorithm.update(contents)\n hash_str = hash_algorithm.hexdigest()\n return hash_str", "def gerar_hash(nome_arquivo):\n m = hashlib.sha256()\n arquivo = open(nome_arquivo,'rb').read()\n m.update(arquivo)\n hash_votos = m.digest()\n open(\"hash_votos_cifrados.txt\",\"w\").write(hash_votos)", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def sha256(self):\n return sha256file(self.abspath)", "def hash_file(file_name):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n 
hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(hasher.hexdigest())", "def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()", "def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()", "def GetFileSha256(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha256=True)['sha256'])", "def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()", "def GetFileMd5(file_path):\n return binascii.hexlify(GetFileHashes(file_path, do_md5=True)['md5'])", "def hash_file ( filename ):\n sha1 = hashlib.sha1()\n with open( filename, 'rb' ) as f:\n while True:\n buf = f.read(65536) # read by 64kb buffers size\n if not buf:\n break\n sha1.update(buf)\n return sha1", "def hashfile(file: str, block_size: int = 65536) -> str:\n with open(file, 'rb') as message:\n m = hashlib.sha256()\n block = message.read(block_size)\n while len(block) > 0:\n m.update(block)\n block = message.read(block_size)\n digest = m.hexdigest()\n\n return digest", "def file_hash(file_to_hash: Path) -> str:\n sha256_hash = hashlib.sha256()\n with file_to_hash.open(\"rb\") as f:\n for block in iter(lambda: f.read(4096), b\"\"):\n sha256_hash.update(block)\n return sha256_hash.hexdigest()", "def sigfile(fpath):\n sigsha = hashlib.sha1()\n fbj = open(fpath, 'rb')\n try:\n sigsha.update(fbj.read()) # pylint: disable-msg=E1101\n finally:\n fbj.close()\n return sigsha.hexdigest()", "def getHashFile(file):\n try:\n fileContent = open(file, 'rb').read()\n except:\n raise IOError, \"No such file...\"\n return False\n return getHash(fileContent)", "def sha1(fname):\n fh = open(fname, 'rb')\n sha1 = hashlib.sha1()\n block = fh.read(2 ** 16)\n while len(block) > 0:\n sha1.update(block)\n block = fh.read(2 ** 16)\n\n return sha1.hexdigest()", "def sha256sum(filename):\n content = open(filename, 'rb').read()\n sha256_obj = hashlib.sha256(content)\n return sha256_obj.hexdigest()", "def hash_for_file(file_name, block_size=2 ** 20):\n hasher = SHA256.new()\n source_file = open(file_name, \"r\")\n\n while True:\n data = source_file.read(block_size)\n if not data:\n break\n hasher.update(data.encode('utf-8'))\n\n source_file.close()\n return hasher.hexdigest()", "def get_hash(path: Path) -> str:\n m = hashlib.sha256()\n m.update(path.read_bytes())\n return m.hexdigest()", "def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()", "def sha256sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha256()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def hash_file(fname,bs=M):\n h = hashlib.md5()\n with open(fname,'rb') as f:\n chunk = f.read(bs)\n while chunk:\n h.update(chunk)\n chunk = f.read(bs)\n return h.digest()", "def get_hash_from_file(img):\n with open(img, 'rb') as f:\n return hashlib.sha256(f.read()).hexdigest()", "def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()", "def get_checksum(self, u_file: 'UserFile') -> str:\n ...", "def 
file_checksum(file_path, block_size=65536):\n path = Path(file_path)\n h = xxhash.xxh64()\n with path.open(\"rb\") as f:\n for chunk in iter(lambda: f.read(block_size), b\"\"):\n h.update(chunk)\n return h.hexdigest()", "def hash_file(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")", "def hash_file(path, digest=None):\r\n digest = digest or hashlib.sha1()\r\n with open(path, 'rb') as fd:\r\n s = fd.read(8192)\r\n while s:\r\n digest.update(s)\r\n s = fd.read(8192)\r\n return digest.hexdigest()", "def sha512_file(file_name):\n\n hash_func = hashlib.sha256()\n\n with open(file_name, \"rb\") as fd:\n hash_func.update(fd.read())\n\n return hash_func.hexdigest()", "def get_file_sha(repo_dir, filename):\n try:\n sha = subprocess.check_output(['git', 'ls-files', filename], cwd=repo_dir).strip()\n if not sha:\n return \"\"\n sha = subprocess.check_output(['git', 'hash-object', filename], cwd=repo_dir)\n return sha.decode('utf-8').strip()\n except Exception as e:\n print(\"Failed to get sha for '%s/%s': %s\" % (repo_dir, filename, e))\n return \"\"", "def sha256sum(filename):\n with open(filename, 'rb') as f:\n m = hashlib.sha256()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "def semhash(file):\n _hash_helper(file)", "def semhash(file):\n _hash_helper(file)", "def file_checksum(filename):\n hash_md5 = hashlib.md5()\n with tf.gfile.Open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n f.close()\n return hash_md5.hexdigest()", "def file_sha256(file_path, chunk_size=10240):\n\n sha256 = hashlib.sha256()\n if os.path.exists(file_path):\n with open(file_path, \"rb\") as f:\n while True:\n data = f.read(chunk_size)\n if not data:\n break\n else:\n sha256.update(data)\n\n return sha256.hexdigest()", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def get_256_hash_from_file(file_location):\n\n sha256 = hashlib.sha256()\n\n with open(file_location, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n sha256.update(data)\n\n return sha256.hexdigest()", "def sha1HashFile(self, filename: Path):\n bufferSize = 65536\n sha1Hash = hashlib.sha1()\n\n with filename.open('rb') as f:\n while True:\n data = f.read(bufferSize)\n\n if not data:\n break\n\n sha1Hash.update(data)\n\n return str(sha1Hash.hexdigest())", "def quick_hash_file(fname,bs=M):\n size = os.path.getsize(fname)\n if 
size < 3*bs:\n return hash_file(fname,bs)\n h = hashlib.md5()\n with open(fname,'rb') as f:\n h.update(f.read(bs))\n f.seek(size//2,0)\n h.update(f.read(bs))\n f.seek(-bs,2)\n h.update(f.read(bs))\n return h.digest()", "def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"", "def cksum(filename):\n hash, err = Popen([\"cksum\", filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != '':\n raise Exception(\"Error hashing {filename}\".format(**locals()))\n return hash.split(\" \")[0]", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5", "def readfile(self, filename):\n \n f = file(filename,'rb');\n #print \"\\nReading %s \\n\" % f.name;\n m = md5.new();\n readBytes = 1024; # read 1024 bytes per time\n totalBytes = 0;\n while (readBytes):\n readString = f.read(readBytes);\n m.update(readString);\n readBytes = len(readString);\n totalBytes+=readBytes;\n f.close();\n \n return m.hexdigest()", "def get_md5_hash(file_path: str) -> str:\n from hashlib import md5\n\n # local file\n if file_path.startswith('/'):\n return md5(open(file_path, 'rb').read()).hexdigest()\n\n # remote file\n httpresponse = url_is_alive(file_path)\n if not httpresponse:\n error_open_mess(file_path)\n return ''\n\n md5hash = md5()\n max_file_size = 100 * 1024 * 1024\n total_read = 0\n while True:\n data = httpresponse.read(4096)\n total_read += 4096\n\n if not data or total_read > max_file_size:\n break\n\n md5hash.update(data)\n\n httpresponse.close()\n return md5hash.hexdigest()", "def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))", "def getFileMD5(f: java.io.File, monitor: ghidra.util.task.TaskMonitor) -> unicode:\n ...", "def hash_file(path):\n if not os.path.isfile(path):\n raise ValueError(\"The given path `{}` is not a file.\".format(path))\n\n md5 = hashlib.md5()\n\n with open(path, 'rb') as file_:\n while True:\n data = file_.read(65536)\n if not data:\n break\n md5.update(data)\n\n return \"{}\".format(md5.hexdigest())", "def md5_hash(file_path):\n with open(file_path, 'rb') as fp:\n return md5(fp.read()).hexdigest()", "def get_file_hash(afile, hasher, block_size=65536):\n buf = afile.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(block_size)\n return hasher.digest()", "def sha1sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha1()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def CalcMD5(filepath):\n with open(filepath,'rb') as f:\n md5obj = hashlib.md5()\n md5obj.update(f.read())\n return md5obj.hexdigest()", "def hash_file_at_path(file_path, algorithm=\"sha1\"):\n block_size = 64 * 1024\n hasher = getattr(hashlib, algorithm)()\n with open(file_path, 
\"rb\") as file_handler:\n while True:\n data = file_handler.read(block_size)\n if not data:\n break\n hasher.update(data)\n return hasher.hexdigest()", "def file_sha1(file_name, ignore_format=False, max_call_times=None):\r\n _FILE_SLIM = 65536 # read stuff in 64kb chunks!\r\n call_times = 0\r\n my_sha1 = hashlib.sha1()\r\n with open(file_name, \"rb\") as ob:\r\n while True:\r\n data = ob.read(_FILE_SLIM)\r\n if not data:\r\n break\r\n if ignore_format:\r\n data = data.decode(encoding=\"utf-8\")\r\n data = data.replace(\"\\r\", '')\r\n data = data.replace(\"\\n\", '')\r\n data = data.encode(encoding=\"utf-8\")\r\n if max_call_times:\r\n call_times += 1\r\n if call_times > max_call_times:\r\n break\r\n my_sha1.update(data)\r\n return my_sha1.hexdigest()", "def _get_sha1(file_descriptor):\n sha1 = hashlib.sha1()\n for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):\n sha1.update(block)\n file_descriptor.seek(0)\n return sha1.hexdigest()" ]
[ "0.7884196", "0.78605366", "0.7665543", "0.7652545", "0.76196074", "0.7603502", "0.75687635", "0.75554144", "0.75554144", "0.75550866", "0.754885", "0.7488408", "0.74789304", "0.746627", "0.7455596", "0.74492663", "0.74489915", "0.7430027", "0.7418533", "0.7417911", "0.74131805", "0.7408378", "0.73823774", "0.7355488", "0.7342709", "0.7333559", "0.73213816", "0.731423", "0.7300847", "0.72602636", "0.72508204", "0.72215736", "0.72185814", "0.7194642", "0.7192775", "0.7181767", "0.7174142", "0.716611", "0.7157666", "0.71561855", "0.7132321", "0.71300125", "0.7129674", "0.71285254", "0.71223736", "0.711322", "0.71109194", "0.7110712", "0.71044624", "0.709462", "0.7093635", "0.7090482", "0.7081742", "0.70802194", "0.70742863", "0.70696217", "0.70579034", "0.70577765", "0.7040614", "0.70395225", "0.7027299", "0.70205605", "0.7018748", "0.7011957", "0.7009515", "0.70071197", "0.7001646", "0.6999875", "0.6998502", "0.6988425", "0.6981481", "0.69772", "0.6971502", "0.6963481", "0.694162", "0.693451", "0.6926111", "0.6926111", "0.69201267", "0.69188446", "0.6908689", "0.69072694", "0.69007593", "0.6875333", "0.68701404", "0.6866709", "0.68660706", "0.68419135", "0.68168604", "0.6814477", "0.6802069", "0.67833215", "0.677906", "0.677347", "0.67458624", "0.67420805", "0.6734768", "0.67007047", "0.6700236", "0.6695252" ]
0.7672078
2
Build the local metadata file with all sha information about files. File location is computed based on the home keyword argument.
def _build_local_metadata_file(files, home=''): filepaths = [os.path.join(home, f) for f in files] shas = [_get_sha_metadata(f) for f in filepaths] metadata = dict(zip(files, shas)) with open(LOCAL_METADATA_FILE, 'w') as f: f.write(json.dumps(metadata))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }", "def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value", "def metadata_path(self):\n return os.path.join(self.path, 'metadata.txt')", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def generate_metadata(install_req):\n # type: (InstallRequirement) -> str\n assert install_req.pep517_backend is not None\n build_env = install_req.build_env\n backend = install_req.pep517_backend\n\n # NOTE: This needs to be refactored to stop using atexit\n metadata_tmpdir = TempDirectory(kind=\"modern-metadata\")\n atexit.register(metadata_tmpdir.cleanup)\n\n metadata_dir = metadata_tmpdir.path\n\n with build_env:\n # Note that Pep517HookCaller implements a fallback for\n # prepare_metadata_for_build_wheel, so we don't have to\n # consider the possibility that this hook doesn't exist.\n runner = runner_with_spinner_message(\"Preparing wheel metadata\")\n with backend.subprocess_runner(runner):\n distinfo_dir = backend.prepare_metadata_for_build_wheel(\n metadata_dir\n )\n\n return os.path.join(metadata_dir, distinfo_dir)", "def add_file_metadata(self):\n metadata = self.__file.require_group(METADATA)\n self.__write_value(metadata, DATE_CREATED, date.today().strftime(\"%Y-%m-%d\"))\n self.__write_value(metadata, SDK_VERSION, __version__)", "def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')", "def build_data(cmd, rel_new_path, new_md5, founded_path=None):\n data = {'cmd': cmd}\n if cmd == 'copy':\n data['file'] = {'src': founded_path,\n 'dst': rel_new_path,\n 'md5': new_md5,\n }\n else:\n data['file'] = {'filepath': rel_new_path,\n 'md5': new_md5,\n }\n return data", "def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = 
datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def printFileHash(fulltextData, artMeta):\n crawlerName = fulltextData['crawlerName']\n for ext, page in fulltextData.iteritems():\n if ext in ('crawlerName', 'status'):\n continue\n if ext == 'main.pdf':\n checkIfPdf(page, artMeta)\n sha1 = hashlib.sha1(page['data']).hexdigest()\n row = [crawlerName,\n ext,\n page['url'],\n str(len(page['data'])),\n sha1]\n print('\\t'.join(row))", "def gen_file_metadata_summary(self, metadata):\n title = sub('[\\W_]+', '', metadata['title'].lower())\n season = str(metadata['season']).zfill(2)\n episode = str(metadata['episode']).zfill(2)\n\n file_metadata_summary = f'{title}.S{season}E{episode}'\n return file_metadata_summary", "def __build_file_name(self, func, args):\n # Build a unique string to hash\n if self.__log:\n self.__logger.info(f\"Building file name for {func.__name__} with {args}\")\n\n # Hash with the specified algorithm and hexdigest\n # to produce a string\n fname = self.algorithm(\n b\"\".join([func.__name__.encode(\"utf8\"), pickle.dumps(args)])\n ).hexdigest()\n\n pathToFile = os.path.join(self.cacheDir, fname)\n if self.__log:\n self.__logger.info(f\"Built path {pathToFile}\")\n return pathToFile", "def gen_meta(self, filename):\n nf_meta = {}\n nf_meta['filename'] = filename\n nf_meta['deleted'] = 0\n\n # http://stackoverflow.com/a/5297483\n nf_meta['key'] = hashlib.md5(str(filename).encode('utf-8')).hexdigest()\n self.log.debug(\"Note File Meta Key: %s\", nf_meta['key'])\n\n path = self.config.get_config('cfg_nt_path')\n\n # WARNING THIS IS PLATFORM SPECIFIC\n nf_meta['createdate'] = os.stat(path + \"/\" + filename).st_birthtime\n self.log.debug(\"Note File Meta Created: %s [%s]\", nf_meta['createdate'], time.ctime(nf_meta['createdate']))\n\n nf_meta['modifydate'] = os.stat(path + \"/\" + filename).st_mtime\n self.log.debug(\"Note File Meta Modified: %s [%s]\", nf_meta['modifydate'], time.ctime(nf_meta['modifydate']))\n\n return nf_meta", "def create_meta(prefix, dist, info_dir, extra_info):\n # read info/index.json first\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n # add extra info\n meta.update(extra_info)\n # write into 
<prefix>/conda-meta/<dist>.json\n meta_dir = join(prefix, 'conda-meta')\n if not isdir(meta_dir):\n os.makedirs(meta_dir)\n with open(join(meta_dir, 'history'), 'w') as fo:\n fo.write('')\n with open(join(meta_dir, dist + '.json'), 'w') as fo:\n json.dump(meta, fo, indent=2, sort_keys=True)", "def metadata_path(self, fmt: str = \"csv\"):\n if self.options.metadata_as_name:\n save_name = self.dataset_name.lower().replace(\" \", \"_\").replace(\"-\", \"_\") + f\".{fmt}\"\n return os.path.join(self.extracted_path, f\"{save_name}\")\n else:\n return os.path.join(self.extracted_path, f\"metadata.{fmt}\")", "def _store_package_metadata(self):", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 
1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. \n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! 
Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)", "def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()", "def create_version_file(version='unknown', gitmeta=''):\n\tfname = join(dirname(abspath(__file__)), 'MHLogin', '_version.py')\n\tf = open(fname, 'wb')\n\tf.write(VERSION_PY % {'version': version, 'gitmeta': gitmeta, })\n\tf.close()", "def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):\n if format is None:\n format = \"list\"\n with open(hashalgo_md, \"wb\") as f:\n if format==\"table\":\n header = []\n header.append(\"| FileName | FileDate | \" + hashalgo + \" |\\n\")\n header.append(\"|----------|----------|-------------|\\n\")\n if include_local_filename:\n header[0] = \"| LocalFileName \" + header[0]\n header[1] = \"|---------------\" + header[1]\n for header_line in header:\n f.write(bytes(header_line, \"UTF-8\"))\n for fileindex_item in fileindex:\n checksum = fileindex_item[COLUMN_CHECKSUM]\n filename = fileindex_item[COLUMN_FILENAME]\n filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else \"\"\n local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else \"\"\n if format==\"table\":\n row = \"\"\n if include_local_filename:\n row += \"| \" + local_filename + \" \"\n row += \"| [\" + filename + \"](https://github.com/\" + repo_name + \"/releases/download/\" + hashalgo + \"/\" + checksum + \") \"\n row += \"| \" + filedate + \" \"\n row += \"| \" + checksum + \" \"\n f.write(bytes(row + \"|\\n\", \"UTF-8\",))\n else:\n f.write(bytes(\"- [\" + filename + \"](https://github.com/\" + repo_name + \"/releases/download/\" + hashalgo + \"/\" + checksum + \")\\n\", \"UTF-8\",))\n if include_local_filename:\n f.write(bytes(\" - LocalFileName: \" + local_filename + \"\\n\", \"UTF-8\",))\n if filedate:\n f.write(bytes(\" - FileDate: \" + filedate + \"\\n\", \"UTF-8\",))\n f.write(bytes(\" - \" + hashalgo +\": \" + checksum + \"\\n\", \"UTF-8\",))", "def _gen_metadata_for_list(self, filename, **extra_data):\n file_size = os.stat(filename).st_size\n with open(filename, 'rb') as f:\n md5sum = compute_md5_hash(f)\n core_metadata = {\n 'filename': os.path.abspath(filename),\n 'md5sum': md5sum,\n 'file_size_bytes': file_size\n }\n return {**core_metadata, **extra_data}", "def build_metadata_from_file(tmp_crawler_folder, abs_data_path, commitTime):\n m = MetaData()\n\n post_path = abs_data_path[len(tmp_crawler_folder):]\n if post_path[0] == '/':\n post_path = post_path[1:]\n\n m['mimeType'] = mime.get_mime(abs_data_path)\n m['url'] = MetaData.get_url_from_path(post_path)\n m['domain'] = MetaData.get_domain_from_path(post_path)\n m['path'] = MetaData.get_content_path_from_tmp(abs_data_path, m['domain'])\n m['tmpPath'] = abs_data_path\n m['createTime'] = utl.get_ctime(abs_data_path)\n m['commitTime'] = commitTime\n m['title'] = extractor.get_title(abs_data_path, m['mimeType'])\n\n if not m['url'] in m['path']:\n print('WARNING, url not in path!')\n return m", "def run(self, info):\n\n # Write the metadata to the file's xattrs\n self._downloader.to_screen('[metadata] Writing metadata to file\\'s xattrs')\n\n filename = 
info['filepath']\n\n try:\n xattr_mapping = {\n 'user.xdg.referrer.url': 'webpage_url',\n # 'user.xdg.comment': 'description',\n 'user.dublincore.title': 'title',\n 'user.dublincore.date': 'upload_date',\n 'user.dublincore.description': 'description',\n 'user.dublincore.contributor': 'uploader',\n 'user.dublincore.format': 'format',\n }\n\n num_written = 0\n for xattrname, infoname in xattr_mapping.items():\n\n value = info.get(infoname)\n\n if value:\n if infoname == 'upload_date':\n value = hyphenate_date(value)\n\n byte_value = value.encode('utf-8')\n write_xattr(filename, xattrname, byte_value)\n num_written += 1\n\n return [], info\n\n except XAttrUnavailableError as e:\n self._downloader.report_error(str(e))\n return [], info\n\n except XAttrMetadataError as e:\n if e.reason == 'NO_SPACE':\n self._downloader.report_warning(\n 'There\\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '\n + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())\n elif e.reason == 'VALUE_TOO_LONG':\n self._downloader.report_warning(\n 'Unable to write extended attributes due to too long values.')\n else:\n msg = 'This filesystem doesn\\'t support extended attributes. '\n if compat_os_name == 'nt':\n msg += 'You need to use NTFS.'\n else:\n msg += '(You may have to enable them in your /etc/fstab)'\n self._downloader.report_error(msg)\n return [], info", "def build_content(self) -> None:\n logger.info(__('writing content.opf file...'))\n metadata = self.content_metadata()\n\n # files\n self.files: list[str] = []\n self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',\n 'toc.ncx', 'META-INF/container.xml',\n 'Thumbs.db', 'ehthumbs.db', '.DS_Store',\n 'nav.xhtml', self.config.epub_basename + '.epub'] + \\\n self.config.epub_exclude_files\n if not self.use_index:\n self.ignored_files.append('genindex' + self.out_suffix)\n for root, dirs, files in os.walk(self.outdir):\n dirs.sort()\n for fn in sorted(files):\n filename = relpath(path.join(root, fn), self.outdir)\n if filename in self.ignored_files:\n continue\n ext = path.splitext(filename)[-1]\n if ext not in self.media_types:\n # we always have JS and potentially OpenSearch files, don't\n # always warn about them\n if ext not in ('.js', '.xml'):\n logger.warning(__('unknown mimetype for %s, ignoring'), filename,\n type='epub', subtype='unknown_project_files')\n continue\n filename = filename.replace(os.sep, '/')\n item = ManifestItem(html.escape(quote(filename)),\n html.escape(self.make_id(filename)),\n html.escape(self.media_types[ext]))\n metadata['manifest_items'].append(item)\n self.files.append(filename)\n\n # spine\n spinefiles = set()\n for refnode in self.refnodes:\n if '#' in refnode['refuri']:\n continue\n if refnode['refuri'] in self.ignored_files:\n continue\n spine = Spine(html.escape(self.make_id(refnode['refuri'])), True)\n metadata['spines'].append(spine)\n spinefiles.add(refnode['refuri'])\n for info in self.domain_indices:\n spine = Spine(html.escape(self.make_id(info[0] + self.out_suffix)), True)\n metadata['spines'].append(spine)\n spinefiles.add(info[0] + self.out_suffix)\n if self.use_index:\n spine = Spine(html.escape(self.make_id('genindex' + self.out_suffix)), True)\n metadata['spines'].append(spine)\n spinefiles.add('genindex' + self.out_suffix)\n # add auto generated files\n for name in self.files:\n if name not in spinefiles and name.endswith(self.out_suffix):\n spine = Spine(html.escape(self.make_id(name)), False)\n metadata['spines'].append(spine)\n\n # 
add the optional cover\n html_tmpl = None\n if self.config.epub_cover:\n image, html_tmpl = self.config.epub_cover\n image = image.replace(os.sep, '/')\n metadata['cover'] = html.escape(self.make_id(image))\n if html_tmpl:\n spine = Spine(html.escape(self.make_id(self.coverpage_name)), True)\n metadata['spines'].insert(0, spine)\n if self.coverpage_name not in self.files:\n ext = path.splitext(self.coverpage_name)[-1]\n self.files.append(self.coverpage_name)\n item = ManifestItem(html.escape(self.coverpage_name),\n html.escape(self.make_id(self.coverpage_name)),\n html.escape(self.media_types[ext]))\n metadata['manifest_items'].append(item)\n ctx = {'image': html.escape(image), 'title': self.config.project}\n self.handle_page(\n path.splitext(self.coverpage_name)[0], ctx, html_tmpl)\n spinefiles.add(self.coverpage_name)\n\n auto_add_cover = True\n auto_add_toc = True\n if self.config.epub_guide:\n for type, uri, title in self.config.epub_guide:\n file = uri.split('#')[0]\n if file not in self.files:\n self.files.append(file)\n if type == 'cover':\n auto_add_cover = False\n if type == 'toc':\n auto_add_toc = False\n metadata['guides'].append(Guide(html.escape(type),\n html.escape(title),\n html.escape(uri)))\n if auto_add_cover and html_tmpl:\n metadata['guides'].append(Guide('cover',\n self.guide_titles['cover'],\n html.escape(self.coverpage_name)))\n if auto_add_toc and self.refnodes:\n metadata['guides'].append(Guide('toc',\n self.guide_titles['toc'],\n html.escape(self.refnodes[0]['refuri'])))\n\n # write the project file\n copy_asset_file(path.join(self.template_dir, 'content.opf_t'), self.outdir, metadata)", "def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()", "def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def bundle_metadata(self, metadata):\n\n metadata_file = None\n try:\n metadata_file = tempfile.NamedTemporaryFile(delete=False)\n except IOError:\n task_error('Cannot create metadata file in working directory')\n\n metadata_file.write(metadata)\n fname = metadata_file.name\n metadata_file.close()\n\n metadata_file = open(fname, mode='rb')\n\n # metadata_file.seek(0)\n\n if self.empty_tar:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='w')\n self.empty_tar = False\n else:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='a')\n\n try:\n tar_info = tarfile.TarInfo('metadata.txt')\n tar_info.size = len(metadata)\n tar_info.mtime 
= time.time()\n tarball.addfile(tar_info, metadata_file)\n metadata_file.close()\n tarball.close()\n os.remove(fname)\n except Exception, ex:\n print ex\n traceback.print_exc(file=sys.stdout)\n raise ex", "def populate_hash_list(self):\n checkout = 'tmp/repo/tmp/keylime-checkout'\n\n import_ostree_commit(\n os.getcwd(),\n self._metadata.build_dir,\n self._metadata)\n subprocess.check_call([\n 'ostree', 'checkout',\n '--repo=tmp/repo', '-U',\n self._metadata['ostree-commit'], checkout])\n self.hash_from_path(checkout)\n\n # Extract initramfs contents\n initramfs_path = ensure_glob(\n os.path.join(\n checkout, 'usr/lib/modules/*/initramfs.img'))[0]\n initramfs_path = os.path.realpath(initramfs_path)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n skipcpio = subprocess.Popen(\n ['/usr/lib/dracut/skipcpio', initramfs_path],\n stdout=subprocess.PIPE)\n gunzip = subprocess.Popen(\n ['gunzip', '-c'],\n stdin=skipcpio.stdout,\n stdout=subprocess.PIPE)\n cpio = subprocess.Popen(\n ['cpio', '-idmv'],\n stdin=gunzip.stdout,\n cwd=tmpdir)\n cpio.wait(timeout=300) # timeout of 5 minutes\n self.hash_from_path(tmpdir)", "def _find_file_meta(metadata, bucket_name, saltenv, path):\n env_meta = metadata[saltenv] if saltenv in metadata else {}\n bucket_meta = {}\n for bucket in env_meta:\n if bucket_name in bucket:\n bucket_meta = bucket[bucket_name]\n files_meta = list(list(filter((lambda k: \"Key\" in k), bucket_meta)))\n\n for item_meta in files_meta:\n if \"Key\" in item_meta and item_meta[\"Key\"] == path:\n try:\n # Get rid of quotes surrounding md5\n item_meta[\"ETag\"] = item_meta[\"ETag\"].strip('\"')\n except KeyError:\n pass\n return item_meta", "def gen_metadata(args):\n with open(args.bibfile) as bibfile:\n bib_db = BibTexParser(common_strings=True).parse_file(bibfile)\n entries = sorted(list(bib_db.entries),\n key=lambda x: x['year'], reverse=True)\n list([update_file(entry) for entry in entries])\n annotations = [entry_to_annotation(entry, args.PI) for entry in entries]\n stream = open(args.metadata, 'w')\n yaml.dump(annotations, stream, width=192, default_flow_style=False)\n stream.close()", "def masterPath(self):\n\t\treturn fl.File( self._path + '/master.data' )", "def getFileInfoFromMetadata(thisfile, guid, replicas_dic, region, sitemover, error):\n\n ec = 0\n pilotErrorDiag = \"\"\n\n # create a dictionary for the metadata tags (which includes the md5sum/adler32 value)\n dic = {}\n dic['md5sum'] = \"\"\n dic['adler32'] = \"\"\n dic['fsize'] = \"\"\n csumtype = \"unknown\"\n if not os.environ.has_key('Nordugrid_pilot'):\n # extract the filesize and checksum\n try:\n # always use the first replica (they are all supposed to have the same file sizes and checksums)\n _fsize = replicas_dic[guid][0].filesize\n _fchecksum = replicas_dic[guid][0].csumvalue\n except Exception, e:\n pilotErrorDiag = \"filesize/checksum could not be extracted for guid: %s, %s\" % (guid, str(e))\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n tolog(\"Mover get_data finished (failed) [getFileInfoFromMetadata]\")\n return error.ERR_FAILEDLFCGETREPS, pilotErrorDiag, None, None\n else:\n tolog(\"Extracted fsize: %s fchecksum: %s for guid: %s\" % (str(_fsize), _fchecksum, guid))\n\n csumtype = sitemover.getChecksumType(_fchecksum)\n if _fchecksum == \"\":\n dic['md5sum'] = 0\n dic['adler32'] = 0\n else:\n if csumtype == \"adler32\":\n dic['adler32'] = _fchecksum\n dic['md5sum'] = 0\n else:\n dic['adler32'] = 0\n dic['md5sum'] = _fchecksum\n \n dic['fsize'] = str(_fsize)\n else:\n for i in range(len(thisfile.getElementsByTagName(\"metadata\"))):\n key = str(thisfile.getElementsByTagName(\"metadata\")[i].getAttribute(\"att_name\"))\n dic[key] = str(thisfile.getElementsByTagName(\"metadata\")[i].getAttribute(\"att_value\"))\n # eg. dic = {'lastmodified': '1178904328', 'md5sum': 'fa035fc0a92066a5373ff9580e3d9862',\n # 'fsize': '33200853', 'archival': 'P'}\n # Note: md5sum/adler32 can assume values <32/8strings>, \"NULL\", \"\", 0\n tolog(\"dic = %s\" % str(dic)) \n if dic['adler32'] != 0 and dic['adler32'] != \"\" and dic['adler32'] != \"NULL\":\n csumtype = \"adler32\"\n elif dic['md5sum'] != 0 and dic['md5sum'] != \"\" and dic['md5sum'] != \"NULL\":\n csumtype = \"md5sum\"\n else:\n csumtype = CMD_CHECKSUM\n\n if csumtype == \"adler32\":\n fchecksum = dic['adler32']\n else: # pass a 0 if md5sum was actually not set\n fchecksum = dic['md5sum']\n fsize = dic['fsize']\n tolog(\"csumtype: %s, checksum: %s, fsize: %s\" % (csumtype, str(fchecksum), str(fsize)))\n\n return ec, pilotErrorDiag, fsize, fchecksum", "def full_file_path_f(self, *args, **kwargs):\n return '%s/%s' % (self.location_f(*args, **kwargs), self.key_f(*args, **kwargs))", "def _get_sha_metadata(filename):\n with open(filename) as f:\n return hashlib.sha1(f.read()).hexdigest()", "def meta_info(self):\n\n if not self.meta_was_built:\n self.__meta_info = self.build_meta(self.dataset_path, self.file_types)\n self.meta_was_built = True\n\n return self.__meta_info", "def gen_fileinfo(filepath, temproot, sourceroot, resultroot):\n sourceroot = sourceroot.rstrip(os.sep) + os.sep\n resultroot = resultroot.rstrip(os.sep) + os.sep\n fileinfo = {\n 'origfilename': os.path.basename(filepath),\n 'origdir': os.path.dirname(filepath),\n 'tempfilehash': hashlib.md5(filepath).hexdigest(),\n 'pgcount': 0,\n 'errors': ''}\n fileinfo['workingdir'] = joinp(temproot, fileinfo['tempfilehash'])\n fileinfo['subpath'] = filepath.replace(sourceroot, '')\n fileinfo['resultpath'] = joinp(resultroot, fileinfo['subpath'])\n fileinfo['resultdir'] = os.path.dirname(fileinfo['resultpath'])\n return fileinfo", "def make_checksum_file(self, project):\n return None", "def build_extra_vars_file(self, instance, private_data_dir):", "def calculate_hash(self, include_md: bool = True) -> str:\n # sourcery skip: reintroduce-else, swap-if-else-branches, use-named-expression\n # BUF_SIZE is totally arbitrary,\n BUF_SIZE = 65536 * 16 # lets read stuff in 16 x 64kb chunks!\n\n file_hash = hashlib.sha1()\n # Stubs Only\n files = list((self.package_path).rglob(\"**/*.pyi\"))\n if include_md:\n files += (\n [self.package_path / \"LICENSE.md\"]\n + [self.package_path / \"README.md\"]\n # do not include [self.toml_file]\n )\n for file in sorted(files):\n # TODO: Extract function to allow for retry on file not found\n try:\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n file_hash.update(data)\n except FileNotFoundError:\n log.warning(f\"File not found 
{file}\")\n # ignore file not found errors to allow the hash to be created WHILE GIT / VIRUS SCANNERS HOLD LINGERING FILES\n return file_hash.hexdigest()", "def snapshot_info(self) -> MetaFile:\n raise NotImplementedError", "def build_ns_file_metadata(file_type):\r\n file_description = f\"Your Nightscout {file_type} data, last updated at {datetime.utcnow()} UTC.\"\r\n file_tags = [\"open-aps\", \"Nightscout\", file_type, \"json\"]\r\n file_updated = str(datetime.now())\r\n return {\"tags\": file_tags, \"description\": file_description, \"updated_at\": file_updated}", "def create_readme(histfile, vb):\n\tme = \"Utils.create_readme: \"\n\treadmefile = os.path.dirname(histfile)+\"/README.txt\"\n\ttry:\n\t\tassert os.path.isfile(readmefile)\n\texcept AssertionError:\n\t\tnow = str(datetime.now().strftime(\"%Y-%m-%d %H.%M\"))\n\t\tcommit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\n\t\theader = \"Time:\\t\"+now+\"\\nCommit hash:\\t\"+commit+\"\\n\\n\"\n\t\twith open(readmefile,\"w\") as f:\n\t\t\tf.write(header)\n\t\tif vb: print me+\"Created readme file \"+readmefile\n\treturn", "def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)", "def get_hash_curr_files(self):\n temp = None\n for f in self.file_list:\n if not os.stat(f).st_size:\n self.print_to_log('Skipping Zero Length File: ' + f)\n else:\n try:\n\n batch_file = open(f,'U')\n time_stamp = self.get_timestamp()\n temp = ['pass',\n time_stamp,\n self.get_hash(batch_file),\n '1',\n time_stamp,\n batch_file.name[batch_file.name.rfind('\\\\') + 1 :]]\n\n batch_file.close()\n self.hash_curr_files[temp[2]] = temp\n self.print_to_log(\"successfully hashed file: \" + temp[5])\n except IOError:\n self.print_to_log('Cannot Open File: ' + f)\n except:\n self.print_to_log('Unknown Error, Exiting')\n raise", "def prepareFluidinfo(runTests):\n if runTests:\n local('make build-clean build', capture=False)\n local('make check-all', capture=False)\n\n local('git archive --prefix=fluidinfo/ -v --format tar HEAD | '\n 'bzip2 > fluidinfo.tar.bz2')\n return datetime.utcnow().strftime('%Y%m%d-%H%M')", "def __init__(self, fullpath, config, magic):\n if not fullpath:\n pass\n else:\n try:\n self.fullpath = str(fullpath)\n self.name = str(os.path.basename(fullpath))\n self.size = str(os.path.getsize(fullpath))\n except:\n raise IOError(\"\"\"Cannot read basic file information.\n Permissions problem?\"\"\")\n try:\n self.owner = str(os.stat(fullpath).st_uid)\n self.group = str(os.stat(fullpath).st_gid)\n except:\n self.owner = -1\n self.group = -1\n if config.DEBUG:\n print(\"\"\"Cannot read owner/group id.\n File system might not support ownerships.\"\"\")\n try:\n self.perm = oct(os.stat(fullpath).st_mode)\n except:\n self.perm = 'UFORIA_NO_PERM'\n if config.DEBUG:\n print(\"\"\"Cannot read permissions.\n File system might not support permissions.\"\"\")\n try:\n timestamp = os.path.getmtime(fullpath)\n self.mtime = datetime.datetime.fromtimestamp(timestamp).isoformat()\n except:\n self.mtime = Null\n if config.DEBUG:\n print('File system might not support MACtimes.')\n try:\n timestamp = os.path.getatime(fullpath)\n self.atime = 
datetime.datetime.fromtimestamp(timestamp).isoformat()\n except:\n self.atime = Null\n if config.DEBUG:\n print('File system might not support MACtimes.')\n try:\n timestamp = os.path.getctime(fullpath)\n self.ctime = datetime.datetime.fromtimestamp(timestamp).isoformat()\n except:\n self.ctime = Null\n if config.DEBUG:\n print('File system might not support MACtimes.')\n try:\n self.md5 = hashlib.md5()\n self.sha1 = hashlib.sha1()\n self.sha256 = hashlib.sha256()\n with open(fullpath, 'rb') as f:\n for chunk in iter(lambda: f.read(config.CHUNKSIZE), b''):\n self.md5.update(chunk)\n self.sha1.update(chunk)\n self.sha256.update(chunk)\n self.md5 = str(self.md5.hexdigest())\n self.sha1 = str(self.sha1.hexdigest())\n self.sha256 = str(self.sha256.hexdigest())\n except:\n traceback.print_exc(file=sys.stderr)\n try:\n magic_default = magic.Magic(magic_file=config.MAGICFILE)\n magic_mime = magic.Magic(mime=True,\n magic_file=config.MAGICFILE)\n except:\n traceback.print_exc(file=sys.stderr)\n try: \n self.ftype = str(magic_default.from_file(fullpath))\n self.mtype = str(magic_mime.from_file(fullpath))\n self.btype = str(magic_default.from_buffer(open(fullpath).read(65536)))\n except:\n traceback.print_exc(file=sys.stderr)\n if config.DEBUG:\n print \"Filename:\\t\", self.name\n print \"UID/GID:\\t\", self.owner + \":\" + self.group\n print \"Permissions:\\t\", self.perm\n print (\"Magic:\\t\\tF:\", self.ftype, \"\\n\\t\\tM:\",\n self.mtype, \"\\n\\t\\tB:\", self.btype)\n print (\"Modified:\\t\", self.mtime, \"\\nAccessed:\\t\",\n self.atime, \"\\nChanged:\\t\", self.ctime)\n print (\"MD5:\\t\\t\", self.md5, \"\\nSHA1:\\t\\t\",\n self.sha1, \"\\nSHA256:\\t\\t\", self.sha256)", "def _jobfile(self):\n job = self.job.format(fnum=self.fnum)\n with open(job, 'w') as f:\n f.write('#!/bin/sh\\n' + self.phast_cmmd + self.cleanup_cmmd)", "def make_readme(digest):\n o = 'SHA1 digest: %s\\n\\n'%digest[:10]\n print '...build readme file for GitHub' \n open('README.md','w').write(o + make_readme.__doc__)", "def prepopulate_memo(self):\n existing = self.gi.libraries.show_library(self.library_id, contents=True)\n\n uploading_to = [x for x in existing if x['id'] == self.folder_id]\n if len(uploading_to) == 0:\n raise Exception(\"Unknown folder [%s] in library [%s]\" %\n (self.folder_id, self.library_id))\n else:\n uploading_to = uploading_to[0]\n\n for x in existing:\n # We only care if it's a subdirectory of where we're uploading to\n if not x['name'].startswith(uploading_to['name']):\n continue\n\n name_part = x['name'].split(uploading_to['name'], 1)[-1]\n if name_part.startswith('/'):\n name_part = name_part[1:]\n self.memo_path[name_part] = x['id']", "def _create_releaseinfo_file(projname, relinfo_str):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print 'updating releaseinfo.py for %s' % projname\n with open('releaseinfo.py', 'w') as f:\n f.write(relinfo_str)", "def main():\n \n root = Folder(name=os.getcwd(), file='meta.json',\n collection='.github/jekyll')\n root.update()\n root.export_folders(True)", "def full_info(files: List[str], args, dir_: str ='.') -> List[str]:\n temp_info = []\n for item in files:\n f_info = {}\n f_st = os.stat(os.path.join(CURRENT_DIR, dir_, item))\n f_info['mpde'] = f'{stat.filemode(f_st.st_mode):10}'\n f_info['nlink'] = f'{f_st.st_nlink:>3}'\n f_info['uid'] = f'{f_st.st_uid:>3}'\n size = f_st.st_size\n if args.block_size:\n size = ceil(size / args.block_size)\n f_info['size'] = f'{size:>8}'\n date = dt.datetime.fromtimestamp(f_st.st_mtime)\n if 
(dt.datetime.now() - date).days / 30 > 6:\n date_format = '%b %d %Y'\n else:\n date_format = '%b %d %I:%M'\n f_info['time'] = f'{date.strftime(date_format)} '\n f_info['name'] = f'{item:<}'\n temp_info.append(\n ' '.join([f_info['mpde'], f_info['nlink'], f_info['uid'],\n f_info['size'], f_info['time'], f_info['name']])\n )\n temp_info.append('\\n')\n return temp_info", "def generate_hash(self, fname, args):\n fobj = self._open_file(fname, args.binary)\n hash_value = self._calculate_hash(fobj)\n\n line = '{0} {1}{2}\\n'.format(hash_value, '*' if args.binary else ' ',\n fname)\n\n if '//' in line:\n line = '//' + line.replace('//', '////')\n self.app.stdout.write(line)", "def write_metadata(dir_path, fs, *metas, global_metadata=True):\n assert metas\n md = metas[0]\n with fs.open(\"/\".join([dir_path, \"_common_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)\n if global_metadata:\n for meta in metas[1:]:\n md.append_row_groups(meta)\n with fs.open(\"/\".join([dir_path, \"_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)", "def test_history_import_abspath_in_metadata():\n with HistoryArchive() as history_archive:\n history_archive.write_metafiles(\n dataset_file_name=os.path.join(history_archive.temp_directory, 'outside.txt'))\n history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')\n history_archive.write_outside()\n _run_jihaw_cleanup_check_secure(history_archive, 'Absolute path in datasets_attrs.txt allowed')", "def task_generate_virtual_metadata():\n script = Path(__file__).parents[0] / \"generate_metadata_virtual_experiment.py\"\n metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script],\n \"verbosity\": 2, # show stdout\n \"targets\": [*metadata_files],\n 'uptodate': [len(list(metadata_files)) > 0],\n }", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def metadata(self, truncate: bool = False) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\tfilename = self.filename\n\t\t\tmd_filename = \"%s.file_md.json.gzip\" % (self.file_path)\n\t\t\tmd_mod_filename = \"%s.file_md.lastmod.gzip\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding metada (stored as %s.file_md.json.gzip)\", filename)\n\n\t\t\tlast_mod = self.last_modified()\n\t\t\tif os.path.isfile(md_filename):\n\t\t\t\tlogging.debug(\" Found previously extracted JSON file\")\n\t\t\t\tif truncate:\n\t\t\t\t\tself.clear_metadata()\n\t\t\t\telse:\n\t\t\t\t\tmd_json = load_gzipped_json_string(md_filename)\n\t\t\t\t\tmd_mod = load_gzipped_json_string(md_mod_filename)\n\t\t\t\t\tmd_parsed = json.loads(md_json)\n\t\t\t\t\t# check if cached metadata is up to date and\n\t\t\t\t\t# points to correct project folder and filename\n\t\t\t\t\t# if so return cache, otherwise clear it\n\t\t\t\t\tlogging.debug(\" md_mod: %s\", md_mod)\n\t\t\t\t\tlogging.debug(\" last_mod: %s\", last_mod)\n\t\t\t\t\tif md_mod != last_mod or md_parsed.project != self.project or md_parsed.filename != filename:\n\t\t\t\t\t\tself.clear_metadata()\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogging.debug(\" Cache up to date\")\n\t\t\t\t\t\treturn (md_json, last_mod)\n\n\t\t\tds = self.ds\n\t\t\tattrs = ds.attrs.keys()\n\t\t\ttitle = 
filename if \"title\" not in attrs else ds.attrs.title\n\t\t\tdescr = \"\" if \"description\" not in attrs else ds.attrs.description\n\t\t\turl = \"\" if \"url\" not in attrs else ds.attrs.url\n\t\t\tdoi = \"\" if \"doi\" not in attrs else ds.attrs.doi\n\t\t\t# converts compact ISO timestamps to human-readable ones.\n\t\t\t# Example: \"20180130T155028.262458Z\" becomes \"2018/01/13 15:50\"\n\t\t\tlast_mod_humanreadable = \"{}/{}/{} {}:{}:{}\".format(last_mod[0:4], last_mod[4:6], last_mod[6:8], last_mod[9:11], last_mod[11:13], last_mod[13:15])\n\t\t\t# default to last_modified for older files that do\n\t\t\t# not have a creation_date field\n\t\t\tcreation_date = last_mod_humanreadable if \"creation_date\" not in attrs else ds.attrs.creation_date\n\t\t\t# get arbitrary col/row attribute, they are all lists\n\t\t\t# of equal size. The length equals total cells/genes\n\t\t\ttotal_cells = ds.shape[1]\n\t\t\ttotal_genes = ds.shape[0]\n\n\t\t\tmd_data = {\n\t\t\t\t\"project\": self.project,\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"dataset\": filename,\n\t\t\t\t\"title\": title,\n\t\t\t\t\"description\": descr,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"doi\": doi,\n\t\t\t\t\"creationDate\": creation_date,\n\t\t\t\t\"lastModified\": last_mod_humanreadable,\n\t\t\t\t\"totalCells\": total_cells,\n\t\t\t\t\"totalGenes\": total_genes,\n\t\t\t}\n\t\t\tlogging.debug(\" Saving extracted metadata as JSON file\")\n\t\t\tmd_json = json.dumps(md_data)\n\t\t\tsave_gzipped_json_string(md_filename, md_json)\n\t\t\tsave_gzipped_json_string(md_mod_filename, json.dumps(last_mod))\n\t\t\treturn (md_json, last_mod)\n\t\treturn None", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.parentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def setup(outpath):\n time = datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\n temp = os.path.join(outpath, \"data\", \"temp\")\n result = os.path.join(outpath, \"results\")\n logs = os.path.join(outpath, \"logs\")\n download = os.path.join(outpath, \"data\", \"download\")\n chromsizes = os.path.join(outpath,\n \"data\", \"chromsizes\")\n if not os.path.exists(download):\n os.makedirs(download)\n if not os.path.exists(temp):\n os.makedirs(temp)\n if not os.path.exists(result):\n os.makedirs(result)\n if not os.path.exists(logs):\n os.makedirs(logs)\n if not os.path.exists(chromsizes):\n os.makedirs(chromsizes)\n\n logname = time + \"_tfanalyzer.log\"\n logfile = os.path.join(logs, logname)\n logging.basicConfig(filename=logfile, level=logging.INFO)\n return logfile", "def test_history_import_relpath_in_metadata():\n with HistoryArchive() as history_archive:\n history_archive.write_metafiles(dataset_file_name='../outside.txt')\n history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')\n history_archive.write_outside()\n _run_jihaw_cleanup_check_secure(history_archive, 'Relative parent path in datasets_attrs.txt allowed')", "def storeFilesNoZip(pmid, metaData, fulltextData, outDir):\n warnMsgs = []\n fileDir = join(outDir, 'files')\n if not isdir(fileDir):\n os.makedirs(fileDir)\n suppFnames = []\n suppUrls = []\n pdfFound = False\n for suffix, pageDict in fulltextData.iteritems():\n if suffix in ('status', 'crawlerName'):\n continue\n if suffix == 'landingPage':\n metaData['landingUrl'] = pageDict['url']\n continue\n filename = pmid + '.' 
+ suffix\n warnMinSize = 5000\n if len(pageDict['data']) < warnMinSize:\n warnMsgs.append('%s is smaller than %d bytes' % (suffix, warnMinSize))\n if suffix == 'main.html':\n if '<html' in pageDict['data']:\n warnMsgs.append('main.html contains html tag')\n metaData['mainHtmlFile'] = filename\n metaData['mainHtmlUrl'] = pageDict['url']\n elif suffix == 'main.pdf':\n pdfFound = True\n checkIfPdf(pageDict, metaData)\n metaData['mainPdfFile'] = filename\n metaData['mainPdfUrl'] = pageDict['url']\n elif suffix.startswith('S'):\n suppFnames.append(filename)\n suppUrls.append(pageDict['url'])\n fileData = pageDict['data']\n filePath = join(fileDir, filename)\n logging.debug('Writing file %s' % filePath)\n fh = open(filePath, 'wb')\n fh.write(fileData)\n fh.close()\n\n if not pdfFound:\n warnMsgs.append('No PDF file')\n suppFnames = [ s.replace(',', '') for s in suppFnames ]\n suppFnames = [ s.replace('\\t', '') for s in suppFnames ]\n suppUrls = [ s.replace(',', '') for s in suppUrls ]\n suppUrls = [ s.replace('\\t', '') for s in suppUrls ]\n metaData['suppFiles'] = ','.join(suppFnames)\n metaData['suppUrls'] = ','.join(suppUrls)\n return (metaData, warnMsgs)", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def cache_file_metadata(self, filenames):\n file_metadata = {}\n for fn in filenames:\n metadata = parse(fn)\n metadata['fn'] = fn[:-4]\n file_metadata_summary = self.gen_file_metadata_summary(metadata)\n file_metadata[file_metadata_summary] = metadata\n return file_metadata", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.grandparentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def make_deps_sha_file(self, deps_sha):\n return _DEPS_SHA_PATCH % {'deps_sha': deps_sha}", "def assemble_file(names):\n md5 = hashlib.md5()\n filename = ''.join([name.split('-')[-1] for name in names])\n fpath = os.path.join(FILES_DIR, filename)\n with open(fpath, \"wb\") as dst:\n for name in names:\n for chunk in chunked_reader(os.path.join(DATA_DIR, name)):\n md5.update(chunk)\n dst.write(chunk)\n\n return fpath, md5.digest().hex()", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'Movies' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'Movies' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def create_file_hash_dict(cls, file, file_path):\n\n file_info = {}\n file_info['path'] = file_path\n file_info['hash'] = cls.get_256_hash_from_file(file_path)\n file_info['type'] = 'file'\n file_info['name'] = file\n file_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return file_info", "def _build_system_home(self, directory):\n\t\treturn directory", "def cache_file(self, repo):\n token = blake2b(repo.location.encode()).hexdigest()[:10]\n dirname = f\"{repo.repo_id.lstrip(os.sep)}-{token}\"\n return pjoin(self.options.cache_dir, \"repos\", dirname, self.cache.file)", "def info_filename(directory, install_path, cache_path):\n path = directory\n if path.startswith(install_path):\n path = path[len(install_path):]\n return os.path.join(cache_path, 'info%s.json' % path.replace('/', '-'))", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def _file_storage_path(self, sha1, filename):\n 
# pylint: disable=no-member\n path = (\n '{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'\n '{student_id}/{sha1}{ext}'.format(\n\t\tstudent_id = self.xmodule_runtime.anonymous_student_id,\n loc=self.location,\n sha1=sha1,\n ext=os.path.splitext(filename)[1]\n )\n )\n return path", "def __init__(self, debug=False, output_writer=None):\n super(RecycleBinMetadataFile, self).__init__(\n debug=debug, output_writer=output_writer)\n self.deletion_time = None\n self.format_version = None\n self.original_filename = None\n self.original_file_size = None", "def report_metadata(self) -> dict:\n metadata = super().report_metadata()\n metadata[\"sensor_file\"] = os.path.splitext(os.path.basename(__file__))[0]\n return metadata", "def find_metadata_dir(cube, image):\n return find_subdir(cube, image, 'metadata')", "def _assert_build_info(self):\n if not self.path.exists():\n from zensols.pybuild import SetupUtil\n self.path.parent.mkdir(parents=True, exist_ok=True)\n if not self.rel_setup_path.exists():\n raise OSError('configuration file does not ' +\n f'exist: {self.rel_setup_path}')\n su = SetupUtil.source(rel_setup_path=self.rel_setup_path)\n logger.info(f'saving build info to {self.path}')\n with open(self.path, 'w') as f:\n su.to_json(writer=f)", "def app_metadata_url():\n return \"https://raw.githubusercontent.com/aiidalab/aiidalab-hello-world/master/metadata.json\"", "def main(args):\n metafiles = []\n verbose = args.verbose\n\n if (args.metalist is not None):\n for listfile in args.metalist:\n metafiles.extend(addmeta.list_from_file(listfile))\n\n if (args.metafiles is not None):\n metafiles.extend(args.metafiles)\n\n if verbose: print(\"metafiles: \",\" \".join([str(f) for f in metafiles]))\n\n addmeta.find_and_add_meta(args.files, metafiles)", "def build_epub(self) -> None:\n outname = self.config.epub_basename + '.epub'\n logger.info(__('writing %s file...'), outname)\n epub_filename = path.join(self.outdir, outname)\n with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:\n epub.write(path.join(self.outdir, 'mimetype'), 'mimetype', ZIP_STORED)\n for filename in ('META-INF/container.xml', 'content.opf', 'toc.ncx'):\n epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)\n for filename in self.files:\n epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)", "def _get_log_file(self, _action):\n prefix = \"work/{mapper}.{{library_name}}/log/{mapper}.{{library_name}}\".format(\n mapper=self.__class__.name\n )\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n yield key + \"_md5\", prefix + ext + \".md5\"", "def getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, workdir, dbh, DBReleaseIsAvailable, \\\n scope_dict, pfc_name=\"PoolFileCatalog.xml\", filesizeIn=[], checksumIn=[], thisExperiment=None):\n\n fileInfoDic = {} # FORMAT: fileInfoDic[file_nr] = (guid, pfn, size, checksum, filetype, copytool) - note: copytool not necessarily the same for all file (e.g. FAX case)\n replicas_dic = {} # FORMAT: { guid1: [replica1, .. ], .. } where replica1 is of type replica\n surl_filetype_dictionary = {} # FORMAT: { sfn1: filetype1, .. } (sfn = surl, filetype = DISK/TAPE)\n copytool_dictionary = {} # FORMAT: { surl1: copytool1, .. 
}\n totalFileSize = 0L\n ec = 0\n pilotErrorDiag = \"\"\n\n tolog(\"Preparing to build paths for input files\")\n\n # Get the site information object\n si = getSiteInformation(thisExperiment.getExperiment())\n\n # In case we are staging in files from an object store, we can do a short cut and skip the catalog lookups below\n copytool, dummy = getCopytool(mode=\"get\")\n if \"objectstore\" in copytool:\n tolog(\"Objectstore stage-in: cutting a few corners\")\n\n # Format: fileInfoDic[file_nr] = (guid, gpfn, size, checksum, filetype, copytool)\n # replicas_dic[guid1] = [replica1, ..]\n\n espath = si.getObjectstorePath(\"eventservice\") #getFilePathForObjectStore(filetype=\"eventservice\")\n logpath = si.getObjectstorePath(\"logs\") #getFilePathForObjectStore(filetype=\"logs\")\n\n i = 0\n try:\n for lfn in lfns:\n if \".log.\" in lfn:\n fullpath = os.path.join(logpath, lfns[i])\n else:\n fullpath = os.path.join(espath, lfns[i])\n fileInfoDic[i] = (guids[i], fullpath, filesizeIn[i], checksumIn[i], 'DISK', copytool) # filetype is always DISK on objectstores\n replicas_dic[guids[i]] = [fullpath]\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on objectstores\n i += 1\n except Exception, e:\n tolog(\"!!WARNING!!2233!! Failed to create replica and file dictionaries: %s\" % (e))\n ec = -1\n tolog(\"fileInfoDic=%s\" % str(fileInfoDic))\n tolog(\"replicas_dic=%s\" % str(replicas_dic))\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # If the pilot is running on a Tier 3 site, then neither LFC nor PFC should be used\n if si.isTier3():\n tolog(\"Getting file info on a Tier 3 site\")\n\n # Create file path to local SE (not used for scope based paths)\n path = sitemover.getTier3Path(dsname, DN) # note: dsname will only be correct for lib files, otherwise fix dsdict, currently empty for single lib file input?\n file_nr = -1\n for lfn in lfns:\n file_nr += 1\n\n # Use scope based path if possible\n# #if scope_dict and readpar('useruciopaths').lower() == \"true\":\n# if scope_dict and (\"/rucio\" in readpar('seprodpath') or \"/rucio\" in readpar('sepath')):\n# se_path = sitemover.getRucioPath(file_nr, tokens, scope_dict, lfn, path, analysisJob)\n# else:\n# se_path = os.path.join(path, lfn)\n se_path = os.path.join(path, lfn)\n\n # Get the file info\n ec, pilotErrorDiag, fsize, fchecksum = sitemover.getLocalFileInfo(se_path, csumtype=\"default\")\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Fill the dictionaries\n fileInfoDic[file_nr] = (guids[file_nr], se_path, fsize, fchecksum, 'DISK', copytool) # no tape on T3s, so filetype is always DISK\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on T3s\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n else:\n # Get the PFC from the proper source\n ec, pilotErrorDiag, xml_from_PFC, xml_source, replicas_dic, surl_filetype_dictionary, copytool_dictionary = \\\n getPoolFileCatalog(ub, guids, lfns, pinitdir, analysisJob, tokens, workdir, dbh,\\\n DBReleaseIsAvailable, scope_dict, filesizeIn, checksumIn,\\\n sitemover, pfc_name=pfc_name, thisExperiment=thisExperiment)\n\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n tolog(\"Using XML source %s\" % (xml_source))\n if xml_from_PFC == '':\n pilotErrorDiag = \"Failed to get PoolFileCatalog\"\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n tolog(\"Mover get_data finished (failed)\")\n return error.ERR_NOPFC, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n xmldoc = minidom.parseString(xml_from_PFC) \n fileList = xmldoc.getElementsByTagName(\"File\")\n\n # Extracts the guids from the file list\n guids_filelist = getGuids(fileList)\n fileInfoDictionaryFromDispatcher = getFileInfoDictionaryFromDispatcher(lfns, filesizeIn, checksumIn) \n file_nr = -1\n for thisfile in fileList:\n file_nr += 1\n # Get the SURL and GUID from the XML\n gpfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n guid = guids_filelist[file_nr]\n\n # Get the filesize and checksum from the primary location (the dispatcher)\n _lfn = getLFN(gpfn, lfns) #os.path.basename(gpfn)\n\n # Remove any __DQ2 substring from the LFN if necessary\n if \"__DQ2\" in _lfn:\n _lfn = stripDQ2FromLFN(_lfn)\n fsize, fchecksum = getFileInfoFromDispatcher(_lfn, fileInfoDictionaryFromDispatcher)\n\n # Get the file info from the metadata [from LFC]\n if not fsize or not fchecksum:\n ec, pilotErrorDiag, fsize, fchecksum = getFileInfoFromMetadata(thisfile, guid, replicas_dic, region, sitemover, error)\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Even though checksum and file size is most likely already known from LFC, more reliable file\n # info is stored in Rucio. Try to get it from there unless the dispatcher has already sent it to the pilot\n if dsdict == {}:\n _dataset = dsname\n else:\n _dataset = getDataset(os.path.basename(gpfn), dsdict)\n _filesize, _checksum = sitemover.getFileInfoFromRucio(scope_dict[_lfn], _dataset, guid)\n if _filesize != \"\" and _checksum != \"\":\n if _filesize != fsize:\n tolog(\"!!WARNING!!1001!! Catalog file size (%s) not the same as Rucio file size (%s) (using Rucio value)\" % (fsize, _filesize))\n if _checksum != fchecksum:\n tolog(\"!!WARNING!!1001!! 
Catalog checksum (%s) not the same as Rucio checksum (%s) (using Rucio value)\" % (fchecksum, _checksum))\n fsize = _filesize\n fchecksum = _checksum\n\n # Get the filetype for this surl\n filetype = getFiletypeFromDictionary(gpfn, surl_filetype_dictionary)\n\n # Extract the copytool for this PFN\n _copytool = extractCopytoolForPFN(gpfn, copytool_dictionary)\n\n # Store in the file info dictionary\n fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, _copytool)\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def build(self, file_number, data):\n pass", "def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n if self.suffix_time != \"\":\n index = self.baseFilename.find(\".\" + self.suffix_time)\n if index == -1:\n index = self.baseFilename.rfind(\".\")\n self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self.baseFilename + \".\" + self.suffix_time\n self.mode = 'a'\n\n # create soft links\n index = self.baseFilename.rfind(\".\")\n os.unlink(self.baseFilename[:index])\n os.symlink(self.baseFilename, self.baseFilename[:index])\n\n if not self.delay:\n self.stream = self._open()", "def make_readme_txt(self, args):\n with open(self.readme_txt, 'w') as writer:\n log.info(\"args=%s\\n\", args)\n writer.write(\"# Created by pbtranscript-internal-validation.ValidationRunner.make_readme_txt()\\n\")\n writer.write(\"args=%s\\n\\n\" % args)\n\n files = self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files\n for desc, fn in files:\n if op.exists(fn):\n writer.write(\"%s=%s\\n\" % (desc, fn))", "def read_metadata():\n subdirs = next(os.walk(os.getcwd()))[1]\n\n for subdir in subdirs:\n if '__init__.py' in os.listdir(subdir):\n print('Found package:', subdir)\n break\n else:\n raise SetupError('No package found! Did you forget an __init__.py?')\n\n metadata = {'name': subdir, 'packages': [subdir]}\n relevant_keys = {'__version__': 'version',\n '__author__': 'author',\n '__email__': 'author_email',\n '__license__': 'license'}\n\n m = open(os.path.join(subdir), '__init__.py')\n first_line = next(m)\n metadata['description'] = first_line.strip(). 
strip('\\n \"')\n for line in m:\n if len(relevant_keys) == 0:\n break\n for key in relevant_keys:\n if line.startswith(key):\n break\n else:\n continue\n\n metadatum_name = relevant_keys.pop(key)\n metadata[metadatum_name] = line.split('=', 1)[1].strip('\\n\\'\\\" ')\n\n if relevant_keys:\n print('FYI; You didn\\'t put the following info in your __init__.py:')\n print(' ', ', '.join(relevant_keys))\n return metadata", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def add_git_info(run, scriptpath):\n try:\n repo = Repo(scriptpath, search_parent_directories=True)\n run[\"gitrepo\"] = repo.working_dir\n run[\"gitcommit\"] = repo.head.commit.hexsha\n run[\"gitorigin\"] = get_origin(repo)\n\n if not option_set('ignored metadata', 'diff'):\n whole_diff = ''\n diffs = repo.index.diff(None, create_patch=True)\n for diff in diffs:\n whole_diff += \"\\n\\n\\n\" + \"--- {}\\n+++ {}\\n\".format(\n diff.a_path, diff.b_path) + diff.diff.decode(\"utf-8\")\n\n run['diff'] = whole_diff\n except (InvalidGitRepositoryError, ValueError):\n # We can't store git info for some reason, so just skip it\n pass", "def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)", "def compose_metadata(cmd, solo_flags, arg_flags, infile, outfile, full_cmd):\n return {\n 'outfile': os.path.split(outfile)[-1],\n 'cmd': cmd,\n 'infile': infile,\n 'solo_flags': solo_flags,\n 'arg_flags': {k.lstrip('-'):v for k, v in arg_flags},\n 'full_cmd': full_cmd\n }", "def _update_fileinfo(self, metadata_filename):\n \n # In case we delayed loading the metadata and didn't do it in\n # __init__ (such as with delegated metadata), then get the file\n # info now.\n \n # Save the path to the current metadata file for 'metadata_filename'.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n # If the path is invalid, simply return and leave fileinfo unset.\n if not os.path.exists(current_filepath):\n self.fileinfo[current_filepath] = None\n return\n \n # Extract the file information from the actual file and save it\n # to the fileinfo store.\n file_length, hashes = tuf.util.get_file_details(current_filepath)\n metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)\n self.fileinfo[metadata_filename] = metadata_fileinfo", "def new_values(basenames):\n result = {}\n for basename in basenames:\n home = os.environ['HOME']\n p = os.path.join(home, basename)\n if not os.path.isfile(p):\n continue\n size = '%d' % p.size\n mtime = '%0.8f' % p.mtime\n result[basename] = Signature(mtime, size, text_digest(p.text()))\n return pad_keys(result, basenames)", "def get_setup_file():\n repo_fs()\n return SETUP_FILES" ]
[ "0.61526465", "0.6049966", "0.5853711", "0.5817837", "0.5641126", "0.5638058", "0.5613152", "0.5524916", "0.5511576", "0.5498614", "0.54815716", "0.54704076", "0.5461879", "0.5444722", "0.54036105", "0.53994805", "0.53729725", "0.5360554", "0.53537804", "0.53431183", "0.5313954", "0.5287763", "0.528125", "0.52692026", "0.5260768", "0.52528274", "0.524233", "0.5220996", "0.5190659", "0.519015", "0.5189001", "0.5185949", "0.5165679", "0.5161132", "0.5159279", "0.51513344", "0.51485145", "0.51423097", "0.5142025", "0.5132259", "0.51285076", "0.5114136", "0.5110122", "0.5100133", "0.5092475", "0.5088172", "0.5087882", "0.50775003", "0.5075088", "0.5063711", "0.5059989", "0.5056022", "0.50492257", "0.50435334", "0.5029062", "0.50208145", "0.5020709", "0.50150836", "0.50132626", "0.5012133", "0.4995389", "0.49853238", "0.49784994", "0.49750373", "0.49702322", "0.49661025", "0.49588794", "0.49588194", "0.4958459", "0.49584517", "0.49561295", "0.4952451", "0.4952451", "0.49461177", "0.49429837", "0.49308777", "0.49287704", "0.491996", "0.4914374", "0.48965427", "0.4894629", "0.48935124", "0.4891647", "0.4887769", "0.4887574", "0.48875687", "0.48846626", "0.4878024", "0.4877275", "0.48712942", "0.48617712", "0.48612636", "0.485455", "0.48520547", "0.48465914", "0.4843844", "0.4841194", "0.48400655", "0.48398766", "0.48396707" ]
0.7596386
0
Uploads a file to an S3 bucket. If gzip=True, compress and upload the gzipped version of the file instead of the original one. If gzip=True and the file cannot be compressed, the upload is skipped entirely (nothing is uploaded). So you should always pass the correct gzip flag into this function in order to get an upload.
def upload_file(conn, filename_local, filename_s3, gzip=False):
    filename_s3 = filename_s3.lstrip('./')
    file_descriptor = open(filename_local, 'rb')
    content = file_descriptor.read()
    content_type = _get_content_type(file_descriptor)
    headers = _get_headers(content_type)

    #should compress if the file is compressable and gzip is enabled
    can_be_gzipped = _file_can_be_compressed(filename_local)
    if gzip and can_be_gzipped:
        content = _compress_string(content)
        headers['Content-Length'] = str(len(content))
        headers['Content-Encoding'] = 'gzip'
        extension = mimetypes.guess_extension(content_type)
        #we should not overwrite the original file in the server.
        #We change extensions: style.css --> style.gz.css, for instance
        filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension
    #if gzip is enabled and it is not compressable, don't upload nothing at all
    elif gzip and not can_be_gzipped:
        return

    #upload
    print 'Uploading %s to %s' % (filename_local, filename_s3)
    _put(conn, filename_s3, content, headers=headers)
    file_descriptor.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):\n upload_args = {\n 'filename': filename,\n 'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it\n 'upload_type': 'standard' # This is multipart form data\n }\n if metadata:\n upload_args['metadata'] = metadata\n\n if tags:\n upload_args['tags'] = tags\n\n try:\n upload_info = samples_resource.init_upload(upload_args)\n\n except requests.exceptions.HTTPError as e:\n error_object = e[0]\n process_api_error(error_object)\n\n upload_url = upload_info['upload_url']\n\n # Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)\n multipart_fields = OrderedDict()\n for k, v in upload_info['additional_fields'].items():\n multipart_fields[str(k)] = str(v)\n\n # First validate the file if a FASTXTranslator\n if isinstance(file_obj, FASTXTranslator):\n file_obj.validate()\n\n # If it isn't being modified and is already compressed, don't bother re-parsing it\n if not file_obj.modified and file_obj.is_gzipped:\n file_obj = FASTXReader(file_obj.reads.file_obj.fileobj,\n progress_callback=file_obj.progress_callback)\n\n multipart_fields['file'] = (filename, file_obj, 'application/x-gzip')\n encoder = MultipartEncoder(multipart_fields)\n content_type = encoder.content_type\n\n # try to upload the file, retrying as necessary\n max_retries = 3\n n_retries = 0\n while n_retries < max_retries:\n try:\n upload_request = session.post(upload_url, data=encoder,\n headers={'Content-Type': content_type}, auth={})\n if upload_request.status_code not in [200, 201]:\n msg = 'Upload failed. Please contact help@onecodex.com for assistance.'\n if upload_request.status_code >= 400 and upload_request.status_code < 500:\n try:\n msg = '{}. Please ensure your file is valid and then try again.'.format(\n upload_request.json()['message']\n )\n except Exception:\n pass\n raise UploadException(msg)\n\n file_obj.close()\n break\n except requests.exceptions.ConnectionError as e:\n # For proxy, try special route to check the errors\n # in case Python is just dropping the Connection due to validation issues\n if multipart_fields.get('sample_id'):\n error_url = '/'.join(upload_url.split('/')[:-1]) + '/errors'\n try:\n e_resp = session.post(error_url, json={'sample_id': multipart_fields.get('sample_id')})\n if e_resp.status_code == 200:\n msg = '{}. Please ensure your file is valid and then try again.'.format(\n e_resp.json()['message']\n )\n raise UploadException(msg)\n except requests.exceptions.RequestException:\n pass\n\n n_retries += 1\n # reset the file_obj back to the start; we may need to rebuild the encoder too?\n file_obj.seek(0)\n if n_retries == max_retries:\n raise UploadException(\n \"The command line client is experiencing connectivity issues and \"\n \"cannot complete the upload of %s at this time. Please try again \"\n \"later. 
If the problem persists, contact us at help@onecodex.com \"\n \"for assistance.\" % filename\n )\n\n # Finally, issue a callback\n try:\n if not multipart_fields.get('callback_url'):\n samples_resource.confirm_upload({\n 'sample_id': upload_info['sample_id'],\n 'upload_type': 'standard'\n })\n except requests.exceptions.HTTPError:\n raise UploadException('Failed to upload: %s' % filename)\n\n if log_to is not None:\n log_to.write('\\rUploading: {} finished as sample {}.\\n'.format(\n filename, upload_info['sample_id']\n ))\n log_to.flush()\n return upload_info['sample_id']", "def _upload_file(file_name, bucket, object_name):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n s3.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(self, file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key)\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.exception(e)\n return False\n logging.info(\"Upload to S3 bucket complete!\")\n\n return True", "def upload_file_s3(file_name, bucket):\n\n # If S3 object_name was not specified, use file_name \n try:\n response = s3_client.upload_file(file_name,\n bucket, \n file_name.replace('../',''))\n print(\"Uploaded \" + file_name)\n except ClientError as e:\n print(\"Failed to upload \" + file_name)\n logging.error(e)\n return False\n return True", "def upload_to_s3(file_name, bucket, key): \n s3 = boto3.resource('s3') \n try:\n s3.meta.client.upload_file(file_name, bucket, key)\n print(\"s3 upload success -- uploaded \" + file_name + \" to the bucket: \" + bucket)\n except ClientError as e:\n logging.error(e)\n return False\n print(\"s3 upload error occurs\", e)\n return True", "def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id='', aws_secret_access_key='')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, ExtraArgs={'ACL':'public-read'})\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def save_to_s3(bucket, path, data, compress=False):\n bucket = get_bucket(bucket)\n\n key = Key(bucket)\n key.key = path\n logger.debug(\"Uploading to %s\", key.key)\n\n if compress:\n mock_file = BytesIO()\n gzip_obj = gzip.GzipFile(filename='gzipped_file', mode='wb', fileobj=mock_file)\n if isinstance(data, str):\n data = data.encode('utf-8')\n gzip_obj.write(data)\n gzip_obj.close()\n data = mock_file.getvalue()\n\n key.set_contents_from_string(data)", "def 
upload_file(file_name, object_name=None, bucket = BUCKET_NAME):\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name) if type(file_name) == str else s3_client.upload_fileobj(file_name, BUCKET_NAME, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def upload(self, bucket, obj, s3_client=None):\n\n s3_client = s3_client or self.s3_client\n transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=1024, use_threads=True, max_concurrency=10)\n s3_transfer = boto3.s3.transfer.S3Transfer(client=s3_client, config=transfer_config)\n\n try:\n logging.debug(\"Uploading {} to {}\".format(obj, bucket))\n s3_transfer.upload_file(obj, bucket, helpers.strip_path(obj)[1])\n\n return True\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. 
If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n return False\n except Exception as e:\n logging.error(\"Error uploading: {}\".format(e))\n return False", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload(\n bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None\n) -> None:\n s3_client = _get_client(session)\n LOGGER.info(\"uploading %s to s3://%s/%s...\", filename, bucket, key)\n s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key)", "def upload_to_s3(bucket, file_path, prefix, timestamp):\n upload_name = f'{prefix}_{timestamp or \"\"}{basename(file_path)}'\n\n try:\n bucket.upload_file(file_path, upload_name)\n syslog.syslog(syslog.LOG_INFO,\n f'Uploaded {file_path} to S3 Bucket - {bucket.name}')\n return True\n except S3UploadFailedError as s3ex:\n syslog.syslog(\n syslog.LOG_ERR, f'Failed to upload {file_path} to S3 Bucket - {bucket_name} - {s3ex}')\n return False\n finally:\n rm(file_path)", "def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if 
not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)", "def upload_s3_file(key, bucket, filename):\n s3_client = boto3.client('s3')\n s3_client.upload_file(filename, bucket, key)\n return True", "def upload_file(file_name, bucket_name, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Upload the file\n response = s3.upload_file(file_name, bucket_name, object_name)\n # Get list of files in bucket to confirm\n describe_objects(bucket_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)", "def upload_file(s3_client, file_name, object_name=None):\n\n # read bucket name from cfg file\n bucket = config.get('S3', 'LANDING_ZONE')\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name.split('\\\\')[-1]\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, Callback=ProgressPercentage(file_name))\n# logger.debug(f\"Got response from s3 client for uploading file: {response}\")\n except Exception as e:\n logger.error(f\"Error occurred while upload {file_name} : {e}\")\n return False\n return True", "def upload_file(file_name: str, bucket: str, object_name: str =None) -> None:\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)", "def upload_file_to_bucket(s3_client, file_obj, bucket, folder, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_obj\n\n # Upload the file\n try:\n response = s3_client.upload_fileobj(file_obj, bucket, f\"{folder}/{object_name}\")\n print(response)\n except ClientError:\n return False\n return True", "def upload_to_s3(file_path, config):\n logging.info(\"Uploading file to S3 bucket: %s\", config['s3_bucket_name'])\n s3 = boto3.resource('s3')\n s3_filename = config['s3_bucket_path'] + config['rendered_filename']\n s3.Bucket(config['s3_bucket_name']).upload_file(\n file_path, s3_filename, ExtraArgs={\n 'ContentType': 'text/html', 'ACL': 'public-read'})", "def upload_file(file_name, bucket, object_name='patients.log'):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_object(self, file_path, s3_path):\n logging.info(\"Uploading file to \\\"{}\\\" to S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).upload_file(file_path, key)", "def upload(self, file_path, bucket_name, 
file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)", "def upload(iid, file_obj, content_type):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n s3.Bucket(BUCKET_NAME).put_object(Key=iid,\n Body=file_obj,\n ContentType=content_type)\n return StorageType.S3\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # store locally in temp dir (tests, local development)\n store_temp_file(iid, file_obj)\n return StorageType.TMP\n return None", "def upload_file(file, bucket_path, bucket=S3_BUCKET):\n # Bucket path should be somedir/name_of_file.ext\n try:\n if isinstance(file, str):\n resource.upload_file(file, bucket, bucket_path)\n else:\n resource.upload_fileobj(file, bucket, bucket_path)\n except:\n raise ChildProcessError('Something broke, Cap\\'n')", "def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None", "def upload_file_to_s3(file_path, bucket, object_name=None, access_key_id=None,\n secret_access_key=None):\n if object_name is None:\n object_name = Path(file_path).name\n\n try:\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n\n s3_client.upload_file(file_path, bucket, object_name)\n except ClientError:\n raise", "def upload(file_path, aws_path, access_key, secret_key) -> None:\n # bucket = \"dev-com-courtlistener-storage\"\n bucket = \"seals.free.law\"\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n )\n transfer = S3Transfer(client)\n if \".png\" in file_path:\n content_type = \"image/png\"\n else:\n content_type = \"image/svg+xml\"\n transfer.upload_file(\n file_path,\n bucket,\n aws_path,\n extra_args={\"ContentType\": content_type, \"ACL\": \"public-read\"},\n )\n print(f\"http://{bucket}.s3-us-west-2.amazonaws.com/{aws_path}\")", "def upload_file_to_s3(bucket_name, input_filepath, output_filename):\n s3 = boto3.client(\"s3\")\n with open(input_filepath, \"rb\") as f:\n s3.upload_fileobj(f, bucket_name, output_filename)", "def upload_file(Filename=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def upload_to_aws(local_file, bucket, s3_file, access_key, secret_key):\n s3 = boto3.client('s3', aws_access_key_id=access_key,\n aws_secret_access_key=secret_key)\n \n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(f'Upload of {local_file} to {bucket} as {s3_file} successful.')\n return True\n except FileNotFoundError:\n print(f'File {local_file} not found.')\n return False\n except NoCredentialsError:\n print('Credentials invalid or not available.')\n return False", "async def put_file(object_name: str, file: File, **kwargs) -> str:\n # TODO: Do not read file but rather stream content as it comes\n await file.read()\n # Get the synchronous file interface from the asynchronous file\n file_obj = file.file\n # Store position of cursor (number of bytes read)\n file_size = file_obj.tell()\n # Reset cursor at start of file\n file_obj.seek(0)\n # Trace file upload with its size\n logger.debug(f\"Uploading file: {object_name} with {file_size} bytes\")\n # Time file 
upload for debug\n start = time.time()\n # Store object on s3 storage\n client.put_object(\n bucket_name=DATASETS_BUCKET,\n object_name=object_name,\n length=file_size,\n data=file_obj,\n )\n end = time.time()\n # Log time spent\n logger.debug(f\"Took {end - start} seconds to upload {file_size} bytes\")", "def test_gzip_string_write_to_s3(s3, bucket):\n file_text = \"test-text\"\n file_key = \"test-key.txt.gz\"\n s3_path = f\"s3://{bucket_name}/{file_key}\"\n gzip_string_write_to_s3(file_text, s3_path)\n file_object = io.BytesIO()\n s3.Object(bucket_name, file_key).download_fileobj(file_object)\n assert gzip.decompress(file_object.getvalue()).decode(\"utf-8\") == file_text", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, 'keypoints_descriptor/'+file_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass", "def _upload_to_bucket(self, filename, ext_filename):\n if ext_filename is None:\n return\n\n if self.s3:\n self.bucket.upload_file(filename, ext_filename)\n logging.info('Uploaded {} to S3 with name {}'.format(filename, ext_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.upload_from_filename(filename)\n logging.info('Uploaded to {}'.format(ext_filename))\n except:\n logging.warning('Uploading file to bucket failed')", "def upload_to_s3(file_from_machine, bucket, file_to_s3):\n s3.upload_file(file_from_machine, bucket, file_to_s3)\n print(file_to_s3, \" : is upoaded to s3\")", "def _upload(self, errors):\n if self.backup_bucket is None:\n return\n\n try:\n with open(\"%s/%s.tar.gz\"%(self.backup_path, self.name), 'r+') as f:\n s3upload.upload_to_s3(f,\n self.backup_bucket,\n \"%s/%s.tar.gz\"%(self.backup_id, self.name))\n\n # Cleaning up resources, since the upload was successful\n run(\"rm -f %s/%s.tar.gz\"%(self.backup_path, self.name))\n except Exception as e:\n logging.exception(e)\n errors.put(Exception(\"Error uploading %s server backup to S3\" % self.name))\n traceback.print_exc()", "def put(self, file_path, key=None):\n try:\n key_name = key if key else os.path.basename(file_path)\n size = os.stat(file_path).st_size\n if size < 104857600: #100 mb\n k = Key(self.bucket)\n k.key = key_name\n sent = k.set_contents_from_filename(file_path)\n log.info('Uploading %s to S3 (%s)' % (key_name, self.bucket_name))\n return sent == size\n else:\n log.info('Multipart Uploading %s to S3 (%s)' % (key_name, self.bucket_name))\n mp = self.bucket.initiate_multipart_upload(key_name)\n chunk_size = 52428800\n chunk_count = int(math.ceil(size / float(chunk_size)))\n #Send the file parts, using FileChunkIO to create a file-like object\n # that points to a certain byte range within the original file. 
We\n # set bytes to never exceed the original file size.\n for i in range(chunk_count):\n offset = chunk_size * i\n bytes = min(chunk_size, size - offset)\n with FileChunkIO(file_path, 'r', offset=offset, bytes=bytes) as fp:\n mp.upload_part_from_file(fp, part_num=i + 1)\n # Finish the upload\n mp.complete_upload()\n return True\n except Exception, e:\n log.error('Failed to upload to S3 (%s)' % (self.bucket_name), exc_info=True)\n return False", "def upload_to_s3(file_contents: bytes, path: str, content_type: str = None, tags: dict = None):\n client = S3BucketClient()\n client.upload(\n file_contents=file_contents,\n path=path,\n content_type=content_type,\n tags=tags,\n )", "def upload_file(local_path, s3_path):\n with open(local_path, 'rb') as binary_data:\n s3.Bucket(bucket_name).put_object(Key=s3_path, Body=binary_data)", "def upload_file(self, keyUrl='', body='', ContentType='', bucket=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n #Verificamos si existe body\n if body is None:\n body=''\n \n try:\n self.get_s3_client().put_object(Bucket=bucket, Key=keyUrl, Body=body, ACL='public-read', ContentType=ContentType)\n return True\n \n except ClientError as e:\n return False", "def upload_fileobj(self, bucket_name, file_obj, key):\n self._client.upload_fileobj(Fileobj=file_obj, Bucket=bucket_name, Key=key)", "def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def upload_file(bucket_name, filename, file):\n client = get_client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(filename)\n blob.upload_from_file(file)", "def upload_file_to_s3(bucket, artefact, bucket_key):\n try:\n client = boto3.client('s3')\n\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n\n try:\n kwargs = {\n \"Body\": open(artefact, 'rb'),\n \"Bucket\": bucket,\n \"Key\": bucket_key\n }\n\n mime_type, encoding = mimetypes.guess_type(artefact)\n\n if mime_type is None:\n file_name, file_ext = os.path.splitext(artefact)\n\n if file_ext == \".icon\" :\n kwargs[\"ContentType\"] = \"image/vnd.microsoft.icon\"\n\n elif file_ext == \".woff2\" :\n kwargs[\"ContentType\"] = \"application/font-woff\"\n \n else:\n kwargs[\"ContentType\"] = mime_type\n\n client.put_object(**kwargs)\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True", "def put(self, path: str, filename: str) -> None:\n\n payload_hash, content_md5, length = _hash(path)\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%Y%m%dT%H%M%SZ')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', str(length)),\n ('Content-MD5', content_md5),\n ('Content-Type', 'application/zip'),\n ('Date', now.strftime('%a, %d %b %Y %H:%M:%S GMT')),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', payload_hash),\n ('x-amz-date', timestamp),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 
'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, payload_hash)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n with open(path, 'rb') as file_stream:\n if not self.conn:\n self.conn = http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT',\n filename,\n file_stream,\n headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))", "def upload_file(self, file_name, bucket, destination_name):\n try:\n not self.client.upload_file(file_name, bucket, destination_name)\n except Exception as ex:\n raise ex", "def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):\n import boto3\n from boto3.s3.transfer import TransferConfig\n from boto3.exceptions import S3UploadFailedError\n\n # actually do the upload\n client = boto3.client(\n \"s3\",\n aws_access_key_id=fields[\"upload_aws_access_key_id\"],\n aws_secret_access_key=fields[\"upload_aws_secret_access_key\"],\n )\n\n multipart_chunksize = _choose_boto3_chunksize(file_obj)\n\n # if boto uses threads, ctrl+c won't work\n config = TransferConfig(use_threads=False, multipart_chunksize=multipart_chunksize)\n\n # let boto3 update our progressbar rather than our FASTX wrappers, if applicable\n boto_kwargs = {}\n\n if hasattr(file_obj, \"progressbar\"):\n boto_kwargs[\"Callback\"] = file_obj.progressbar.update\n file_obj._progressbar = file_obj.progressbar\n file_obj.progressbar = None\n\n for attempt in range(1, 4):\n try:\n client.upload_fileobj(\n file_obj,\n fields[\"s3_bucket\"],\n fields[\"file_id\"],\n ExtraArgs={\"ServerSideEncryption\": \"AES256\"},\n Config=config,\n **boto_kwargs\n )\n break\n except S3UploadFailedError as e:\n logging.debug(\"Caught S3UploadFailedError on attempt {}/3: {}\".format(attempt, str(e)))\n logging.error(\n \"{}: Connectivity issue, retrying upload via intermediary ({}/3)...\".format(\n file_name, attempt\n )\n )\n\n # rewind the progressbar if possible, then remove so boto3 can update the bar directly\n if hasattr(file_obj, \"_progressbar\"):\n file_obj.progressbar = file_obj._progressbar\n file_obj.seek(0)\n file_obj.progressbar = None\n else:\n file_obj.seek(0)\n else:\n logging.debug(\"{}: exhausted all retries via intermediary\")\n raise_connectivity_error(file_name)\n\n # issue a callback\n try:\n resp = session.post(\n callback_url,\n json={\n \"s3_path\": \"s3://{}/{}\".format(fields[\"s3_bucket\"], fields[\"file_id\"]),\n \"filename\": file_name,\n \"import_as_document\": fields.get(\"import_as_document\", False),\n },\n )\n except requests.exceptions.ConnectionError:\n raise_connectivity_error(file_name)\n\n if resp.status_code != 200:\n raise_connectivity_error(file_name)\n\n try:\n return resp.json()\n except 
ValueError:\n return {}", "def save_file_aws(obj, file_path, aws_credentials):\n bucket_engine = S3Bucket(*aws_credentials)\n data = gzip.compress(json.dumps(obj).encode('utf-8'))\n bucket_engine.write(file_path, data)", "def up(self, args):\n try:\n assert len(args) > 0\n path = args[0]\n compress = should('Compress file?')\n self.prepare_upload(path, compress)\n except AssertionError:\n print(\"I need a file name!\")", "def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url", "def file_upload(self, bucket_id, file_path, tmp_file_path):\n\n self.__logger.debug('Upload %s in bucket %s', file_path, bucket_id)\n self.__logger.debug('Temp folder %s', tmp_file_path)\n\n bname = os.path.split(file_path)[1] # File name\n\n file_mime_type = 'text/plain'\n\n # Encrypt file\n self.__logger.debug('Encrypting file...')\n\n file_crypto_tools = FileCrypto()\n\n # File name of encrypted file\n file_name_ready_to_shard_upload = '%s.encrypted' % bname\n # Path where to save the encrypted file in temp dir\n file_path_ready = os.path.join(tmp_file_path,\n file_name_ready_to_shard_upload)\n self.__logger.debug('file_path_ready: %s', file_path_ready)\n\n # Begin file encryption\n file_crypto_tools.encrypt_file(\n 'AES',\n file_path,\n file_path_ready,\n self.client.password)\n\n self.fileisdecrypted_str = ''\n\n file_size = os.stat(file_path).st_size\n self.__logger.info('File encrypted')\n\n # Get the PUSH token from Storj Bridge\n self.__logger.debug('Get PUSH Token')\n\n push_token = None\n try:\n push_token = self.client.token_create(bucket_id, 'PUSH')\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('PUSH token create exception')\n self.__logger.error('File not uploaded')\n return\n\n self.__logger.info('PUSH Token ID %s', push_token.id)\n\n # Get a frame\n self.__logger.debug('Frame')\n frame = None\n\n try:\n frame = self.client.frame_create()\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('Unhandled exception while creating file \\\nstaging frame')\n self.__logger.error('File not uploaded')\n return\n\n self.__logger.info('frame.id = %s', frame.id)\n\n # Now generate shards\n self.__logger.debug('Sharding started...')\n shards_manager = model.ShardManager(filepath=file_path_ready,\n tmp_path=tmp_file_path)\n self.all_shards_count = len(shards_manager.shards)\n\n self.__logger.debug('Sharding ended...')\n\n self.__logger.info('There are %s shards', self.all_shards_count)\n\n # Calculate timeout\n self._calculate_timeout(shard_size=shards_manager.shards[0].size,\n mbps=1)\n\n # Upload shards\n mp = ThreadPool()\n res = mp.map(lambda n_s: self.upload_shard(\n n_s[1], n_s[0], frame, file_name_ready_to_shard_upload, tmp_file_path),\n enumerate(shards_manager.shards))\n\n self.__logger.debug('===== RESULTS =====')\n self.__logger.debug(res)\n if False in res or None in res:\n self.__logger.error('File not uploaded: shard %s not uploaded' %\n res.index(False))\n self.__logger.error('Exiting with errors')\n exit(1)\n # finish_upload\n self.__logger.debug('Generating HMAC...')\n\n # create file hash\n hash_sha512_hmac_b64 = self._prepare_bucket_entry_hmac(\n shards_manager.shards)\n hash_sha512_hmac = hashlib.sha224(str(\n hash_sha512_hmac_b64['SHA-512'])).hexdigest()\n\n self.__logger.debug('Now upload file')\n data = {\n 'x-token': 
push_token.id,\n 'x-filesize': str(file_size),\n 'frame': frame.id,\n 'mimetype': file_mime_type,\n 'filename': str(bname) + str(self.fileisdecrypted_str),\n 'hmac': {\n 'type': 'sha512',\n 'value': hash_sha512_hmac\n },\n }\n\n self.__logger.debug('Finishing upload')\n self.__logger.debug('Adding file %s to bucket...', bname)\n\n success = False\n try:\n # Post an upload_file request\n response = self.client._request(\n method='POST',\n path='/buckets/%s/files' % bucket_id,\n headers={\n 'x-token': push_token.id,\n 'x-filesize': str(file_size),\n },\n json=data,\n )\n success = True\n\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('Unhandled bridge exception')\n\n if success:\n self.__logger.info('File uploaded successfully!')\n\n # Remove temp files\n try:\n # Remove shards\n file_shards = map(lambda i: '%s-%s' % (file_path_ready, i),\n range(1, self.all_shards_count + 1))\n self.__logger.debug('Remove shards %s' % file_shards)\n map(os.remove, file_shards)\n # Remove encrypted file\n self.__logger.debug('Remove encrypted file %s' % file_path_ready)\n os.remove(file_path_ready)\n except OSError as e:\n self.__logger.error(e)", "def upload_data_to_s3(data: dict, bucket_name: str, object_key: str) -> None:\n uploader = S3Uploader(bucket_name)\n with tempfile.NamedTemporaryFile(mode=\"w+\") as local_file:\n json.dump(data, local_file, cls=FancyJsonEncoder, indent=\" \", sort_keys=True)\n local_file.write(\"\\n\")\n local_file.flush()\n uploader(local_file.name, object_key)", "def upload_fileobj(Fileobj=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def _put(conn, remote_file, contents, bucket_name=BUCKET_NAME, headers=None):\n error_msg = 'Failed to upload to %s' % remote_file\n try:\n reply = conn.put(bucket_name, remote_file,\n S3.S3Object(contents), headers)\n if reply.http_response.status != 200:\n print error_msg\n except:\n print error_msg", "def upload_file(self, bucket_name, file_path, key):\n self._client.upload_file(Filename=file_path, Bucket=bucket_name, Key=key)", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. 
\\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def upload_file(file_name, s3_key):\n # Upload the file\n s3_connection = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\n bucket = s3_connection.get_bucket(predator_bucket_name)\n try:\n key = boto.s3.key.Key(bucket, s3_key)\n key.set_contents_from_filename(file_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(self, bucket_name, object_name, filepath, metadata=None):\n try:\n _metadata = {}\n if metadata is not None:\n for key in metadata.keys():\n _metadata[self.__metadata_prefix+key] = metadata[key]\n self.client.fput_object(bucket_name, object_name, filepath,\n metadata=_metadata)\n except ResponseError as _err:\n logging.exception(\"Could not upload file\")\n return False\n return True", "def upload_file(self, bucket_name, object_name, filepath, metadata=None):\n try:\n _metadata = {}\n if metadata is not None:\n for key in metadata.keys():\n _metadata[self.__metadata_prefix + key] = metadata[key]\n self.client.fput_object(\n bucket_name, object_name, filepath, metadata=_metadata\n )\n except ResponseError as _err:\n logging.exception(\"Could not upload file\")\n return False\n return True", "def upload_to_gcs(file_name, tmp_obj_name, google_cloud_storage_conn_id, gcs_bucket):\n\n gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=google_cloud_storage_conn_id)\n gcs_hook.upload(bucket=gcs_bucket,\n object=file_name,\n filename=tmp_obj_name,\n gzip=True)\n logging.info(f'new file created {file_name}')", "def _upload_file_by_url(self, url, upload_file_path, compress_upload_file, timeout_in_milliseconds=None):\n\n _, ext = path.splitext(upload_file_path)\n if compress_upload_file and ext != '.zip':\n should_compress = True\n else:\n should_compress = False\n\n try:\n if should_compress:\n name, ext = path.splitext(upload_file_path)\n zip_file_path = os.path.join(self.working_directory, '{0}_{1}.zip'.format(name, uuid.uuid1()))\n\n with contextlib.closing(zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)) as f:\n f.write(upload_file_path)\n upload_file_path = zip_file_path\n headers = {\n 'DeveloperToken': self._authorization_data.developer_token,\n 'CustomerId': str(self._authorization_data.customer_id),\n 'AccountId': str(self._authorization_data.account_id),\n 'User-Agent': USER_AGENT,\n }\n self._authorization_data.authentication.enrich_headers(headers)\n\n with open(upload_file_path, 'rb') as f:\n name, ext = path.splitext(upload_file_path)\n\n filename = '{0}{1}'.format(uuid.uuid1(), ext)\n s = requests.Session()\n s.mount('https://', TlsHttpAdapter())\n timeout_seconds = None if timeout_in_milliseconds is None else timeout_in_milliseconds / 1000.0\n try:\n r = s.post(url, files={'file': (filename, f)}, verify=True, headers=headers, timeout=timeout_seconds)\n except requests.Timeout as ex:\n raise FileUploadException(ex)\n r.raise_for_status()\n except Exception as ex:\n raise ex\n finally:\n if should_compress:\n name, ext = path.splitext(upload_file_path)\n zip_file_path = name + '.zip'\n if 
path.exists(zip_file_path):\n os.remove(zip_file_path)", "def upload_to_s3(channel, file):\n s3_resource = boto3.resource('s3')\n data = open(file, \"rb\")\n key = channel + '/' + file\n s3_resource.Bucket(BUCKET).put_object(Key=key, Body=data)", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def test_put_file_variant(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src, id, 'variant.tar.gz')\n path = '/'.join(backend.id_to_path(id)) + '/variant.tar.gz'\n self.assertTrue(backend.exists(path))", "def s3(ctx, bucket_name, data_file, region):\n ctx.obj['BUCKET_NAME'] = bucket_name\n ctx.obj['DATA_FILE'] = data_file\n ctx.obj['TYPE'] = 's3'\n ctx.obj['REGION'] = region", "def upload_package(self, filename=None):\n logger.info(\"Uploading the package to S3\")\n s3f = S3FunctionUploader(self.function_config['Code']['S3Bucket'])\n self.s3_filename = path.join(\n self.function_config['Code']['S3KeyPath'],\n path.basename(filename or self.local_filename)\n )\n s3f.upload(filename or self.local_filename,\n self.s3_filename)", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def _upload_to_s3(s3_uploader, relative_path, file_path, filename):\n try:\n key = os.path.join(s3_uploader[\"key_prefix\"], relative_path, filename)\n s3_uploader[\"transfer\"].upload_file(file_path, s3_uploader[\"bucket\"], key)\n except FileNotFoundError: # noqa ignore=F821\n # Broken link or deleted\n pass\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to upload file to s3.\")\n finally:\n # delete the original file\n if os.path.exists(file_path):\n os.remove(file_path)", "def upload_file(self, key, filepath, access, keep_original=True,\n verbose=False):\n\n # file entry\n try:\n file_entry = self.bucket.new_key(key)\n file_entry.set_metadata('filepath', filepath)\n file_entry.set_contents_from_filename(filepath)\n file_entry.set_acl(access) # access control\n except Exception as error:\n print str(error)\n return False\n else:\n if verbose:\n print \"{} uploaded to amazon s3.\".format(key)\n\n # original file removal\n if not keep_original and os.access(filepath, os.W_OK):\n try:\n os.remove(filepath)\n except (IOError, OSError):\n print \"I/O error, could not remove file.\"\n else:\n if verbose:\n print \"{} (original) removed\".format(filepath)\n\n return True", "def test_upload_file_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_file(\n file_path=\"test/test_resources/test_file\", file_name=\"foofile\", bucket_name=\"foobucket\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', 
[]):\n contents.append(content.get('Key'))\n\n self.assertEqual(contents, [\"foofile\"])", "def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n file_name, bucket_name, file_name\n )\n )", "def store_to_s3():\n\n try:\n # establish aws/s3 connection\n s3 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n )\n logger.info(\"S3 connection established!\")\n except Exception as e:\n logger.error('Fail to connect to aws s3. Please check your credentials!')\n logger.error(e)\n else:\n try:\n # upload local file to S3 bucket\n logger.info(\"Uploading {} to {} bucket as {}\".format(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename))\n s3.upload_file(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename)\n logger.info('File successfully uploaded to S3 bucket!')\n except FileNotFoundError:\n logger.error('File not found, pleas check the file path.')\n except Exception as e:\n logger.error(e)", "def upload_file_to_bucket(self, bucket_id, filename):\n url = self.upload_endpoint + \"/\" + bucket_id + \"/files\"\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n files = {\"file\": open(filename, \"rb\")}\n\n r = requests.post(url, headers=headers, files=files)\n\n if r.status_code == 200:\n logging.info(\"Successfully uploaded file to the bucket\")\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def upload_to_s3(bucket_name, sourceDir):\n try:\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n try:\n # clean the bucket\n bucket = resource.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n\n # upload the new files\n uploadFileNames = getFiles(sourceDir)\n print(\"Found \" + len(uploadFileNames).__str__() + ' files')\n\n for filename in uploadFileNames:\n destName = os.path.join(*(filename.split('/')[1:]))\n print(\"Uploading file \" + filename + ' to ' + destName)\n resource.Object(bucket_name, destName).put(Body=open(filename, 'rb'),\n ContentType=get_contenttype_from_filename(filename))\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True", "def upload_object(object_location: ObjectLocation, stream: io.BytesIO) -> None:\n s3 = boto3.client(\"s3\")\n result = s3.upload_fileobj(stream, object_location.bucket.name, object_location.key)\n log.debug(f\"Result of upload to {object_location}: {result}\")", "def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: 
headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content", "def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)", "def upload_to_s3(self, file: str, force_upload: bool = False) -> str:\n if self.aws_access_key_id is None:\n raise Exception(\n 'To use `upload_to_s3` you need to pass '\n '`aws_access_key_id` and '\n '`aws_secret_access_key`'\n )\n\n filename = file.split('/')[-1]\n\n s3 = boto3.client(\n 's3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key\n )\n # Check if exists\n if not force_upload:\n try:\n session = boto3.Session(\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key\n )\n\n session.resource('s3').Object(self.bucket_name, filename).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != \"404\":\n raise e\n else:\n # The object does exist\n return filename\n\n # Progress bar\n size = os.stat(file).st_size\n progress_bar = self._progress(size)\n\n # Uploading file\n s3.upload_file(file, self.bucket_name, filename, Callback=progress_bar)\n\n return filename", "def upload_bucket_file(\n self, organization_id: str, bucket_id: str, file_obj: IO,\n file_location: str, content_type: str,\n metadata: dict=None, lifetime: str=None) -> dict:\n path = '/organizations/{}/buckets/{}/files'.format(\n organization_id, bucket_id)\n\n if str(convert_to_valid_path(file_location)) != file_location:\n error_message = \"'file_location' must not start with '/' and must not include \" \\\n \"a relative path of '..' and '.'. 
'{}'\".format(file_location)\n raise BadRequest(\n error=error_message,\n error_description=error_message,\n status_code=400)\n\n if not metadata:\n metadata = {}\n encoded_metadata = encode_metadata(metadata)\n headers = dict()\n headers.update(encoded_metadata)\n\n params = {}\n if lifetime:\n params['lifetime'] = lifetime\n params = BytesIO(json.dumps(params).encode())\n\n files = {\n 'file': (file_location, file_obj, content_type),\n 'parameters': ('params.json', params, 'application/json')\n }\n res = self._connection.api_request(\n method='POST', headers=headers, path=path, files=files)\n return decode_file_metadata_if_exist(res)", "def upload(self, path, key, extra_args={}):\n if key.endswith(\"/\"):\n key += os.path.basename(path)\n if key.startswith(\"/\"):\n key = key[1:]\n remote_path = self.base.full_cell + \"/\" + key\n self.s3.meta.client.upload_file(path, self.bucket, remote_path, ExtraArgs=extra_args)\n print \"UPLOADED {} to s3://{}/{}\".format(path, self.bucket, remote_path)", "def upload_file_to_s3(local_path: Path, s3_path: Path, overwrite: bool = False) -> None:\n import warnings\n\n warnings.filterwarnings(\n action=\"ignore\", message=\"unclosed\", category=ResourceWarning\n )\n\n s3_args, unknown = get_s3_args().parse_known_args()\n s3_client = get_s3_client(s3_args)\n log = get_logger(\"upload_file_to_s3\")\n\n try:\n # only write files to s3 that don't already exist unless overwrite is passed\n if s3_object_exists(s3_path) and not overwrite:\n log.debug(\n f\"s3://{s3_args.s3_bucket}/{s3_path} already exists in s3, not overwriting\"\n )\n return\n\n s3_client.put_object(\n Body=local_path.read_bytes(), Bucket=s3_args.s3_bucket, Key=str(s3_path)\n )\n log.debug(f\"uploaded s3://{s3_args.s3_bucket}/{s3_path}\")\n\n except s3_client.exceptions.ClientError as exc:\n # catch and raise any errors generated while attempting to communicate with s3\n s3_client_attributes = {\n attr: getattr(s3_client, attr) for attr in s3_client.__dict__.keys()\n }\n s3_client_attributes.update(\n {\"bucket\": s3_args.s3_bucket, \"object_path\": s3_path,}\n )\n raise S3Error(f\"{s3_client_attributes} S3 ClientError\") from exc", "def upload(self, bucket_name, key_name, fname):\n bucket = self.s3_.get_bucket(bucket_name)\n key = boto.s3.key.Key(bucket)\n with open(fname, 'rb') as infile:\n key.key = key_name\n return key.set_contents_from_file(infile)", "def upload(bucket_name, source_file, destination_blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n try:\n blob.upload_from_file(source_file)\n except:\n return None\n else:\n return('File {} uploaded to {}.'.format(\n source_file,\n destination_blob_name))", "def _upload_to_gcs(self, file_to_upload):\n hook = GCSHook(\n gcp_conn_id=self.gcp_conn_id,\n impersonation_chain=self.impersonation_chain,\n )\n is_data_file = file_to_upload.get(\"file_name\") != self.schema_filename\n metadata = None\n if is_data_file and self.upload_metadata:\n metadata = {\"row_count\": file_to_upload[\"file_row_count\"]}\n\n object_name = file_to_upload.get(\"file_name\")\n if is_data_file and self.partition_columns:\n # Add partition column values to object_name\n partition_values = file_to_upload.get(\"partition_values\")\n head_path, tail_path = os.path.split(object_name)\n partition_subprefix = [\n f\"{col}={val}\" for col, val in zip(self.partition_columns, partition_values)\n ]\n object_name = os.path.join(head_path, *partition_subprefix, tail_path)\n\n 
hook.upload(\n self.bucket,\n object_name,\n file_to_upload.get(\"file_handle\").name,\n mime_type=file_to_upload.get(\"file_mime_type\"),\n gzip=self.gzip if is_data_file else False,\n metadata=metadata,\n )", "def _upload_file_to_aws(aws_upload_details):\n # Step 1: get the request signed\n sig_uri = aws_upload_details['signature']\n\n now = dt.datetime.utcnow()\n expires = now + dt.timedelta(hours=1)\n now_ts = timegm(now.timetuple())\n key = 'data_imports/%s.%s' % (filename, now_ts)\n\n payload = {}\n payload['expiration'] = expires.isoformat() + 'Z'\n payload['conditions'] = [\n {'bucket': aws_upload_details['aws_bucket_name']},\n {'Content-Type': 'text/csv'},\n {'acl': 'private'},\n {'success_action_status': '200'},\n {'key': key}\n ]\n\n sig_result = requests.post(main_url + sig_uri,\n headers=upload_header,\n data=json.dumps(payload))\n if sig_result.status_code != 200:\n msg = \"Something went wrong with signing document.\"\n raise RuntimeError(msg)\n else:\n sig_result = sig_result.json()\n\n # Step 2: upload the file to S3\n upload_url = \"http://%s.s3.amazonaws.com/\" % (aws_upload_details['aws_bucket_name'])\n\n # s3 expects multipart form encoding with files at the end, so this\n # payload needs to be a list of tuples; the requests library will encode\n # it property if sent as the 'files' parameter.\n s3_payload = [\n ('key', key),\n ('AWSAccessKeyId', aws_upload_details['aws_client_key']),\n ('Content-Type', 'text/csv'),\n ('success_action_status', '200'),\n ('acl', 'private'),\n ('policy', sig_result['policy']),\n ('signature', sig_result['signature']),\n ('file', (filename, open(upload_filepath, 'rb')))\n ]\n\n result = requests.post(upload_url,\n files=s3_payload)\n\n if result.status_code != 200:\n msg = \"Something went wrong with the S3 upload: %s \" % result.reason\n raise RuntimeError(msg)\n\n # Step 3: Notify SEED about the upload\n completion_uri = aws_upload_details['upload_complete']\n completion_payload = {\n 'import_record': upload_dataset_id,\n 'key': key,\n 'source_type': upload_datatype\n }\n return requests.post(main_url + completion_uri,\n headers=upload_header,\n data=completion_payload)", "def upload_obj(bucketname, dateiname, zielname=None):\n pass", "def upload_to_s3(self, name, file_name):\n f = open(file_name, \"r\")\n path = \"cluster/\" + name + \"/\" + os.path.basename(file_name)\n bucket = self.s3.get_bucket(self.__secrets_bucket__)\n k = Key(bucket)\n k.name = path\n\n try:\n k.set_contents_from_file(f)\n except Exception as e:\n print \"[-] Error uploading file to s3\"\n print e\n\n print \"[+] Uploaded {0}\".format(\"s3://\" + self.__secrets_bucket__ + \"/\" + path)\n\n return", "def upload_to_storage_client(self, **kwargs):\n if 'source_path' in kwargs:\n source_path = kwargs.get('source_path')\n else:\n raise ValueError(\"Must provide the \\'source_path\\' parameter for local storage client to find the file!\")\n\n if 'destination_path' in kwargs:\n destination_path = kwargs.get('destination_path')\n else:\n raise ValueError(\n \"Must provide the \\'destination_path\\' parameter for local storage client to find the destination!\")\n\n compression = kwargs.get('compression')\n intended_stored_file_name = kwargs.get('intended_stored_file_name', None)\n\n if not os.path.isdir(source_path) and compression:\n raise ValueError(\"Only directories can be zipped. 
Single files cannot be zipped.\")\n\n self.__check_dir(destination_path)\n\n upload_parameters = {'source_path': source_path, 'compression': compression,\n 'destination_path': destination_path}\n\n if compression:\n # TODO\n # if no name is supplied, name of the zipfile will be the destination; currently funky at move() due to source =/= location\n if not intended_stored_file_name:\n file_name = os.path.split(source_path)[-1]\n intended_stored_file_name = 'archive_' + file_name + \"_\" + datetime.now().strftime(\"%A_%d_%B_%Y_%I_%M%p\")\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n\n # compression can only happen on DIRECTORIES, and not on single files\n # compress2 takes a name from the kwargs, and source from parameter of save_local\n compress(intended_stored_file_name, source_path)\n\n # TODO: Perhaps zipfile dumping location can be found by getting a parent/ child from source\n\n # To find where compress2 dumps zipfile, currently: working directory path\n location = self.__prj_root_dir + \"\\\\\" + intended_stored_file_name + \".zip\"\n shutil.move(location, destination_path)\n\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.generate_json_upload_parameters(**upload_parameters)\n else:\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n shutil.move(source_path, destination_path)\n self.generate_json_upload_parameters(**upload_parameters)", "def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)", "def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")", "def aws_s3_upload(self):\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertTrue(\n aws_s3_upload(\n csv_file,\n self.region_name,\n self.aws_access_key_id,\n self.aws_secret_access_key,\n self.bucket_name,\n )\n )\n\n # Test for non existent bucket\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n \"useast\",\n self.aws_access_key_id,\n self.aws_secret_access_key,\n self.bucket_name,\n )\n )\n\n # Test for non existent region\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n self.region_name,\n self.aws_access_key_id,\n self.aws_secret_access_key,\n \"nonexistentbucketname\",\n )\n )\n\n # Test for incorrect keys\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n self.region_name,\n \"xjvachiahvlchabo;jvbo\",\n \"khkc vah haaih aih ika\",\n self.bucket_name,\n )\n )" ]
[ "0.695896", "0.68229353", "0.68038195", "0.6781243", "0.67778784", "0.6743604", "0.6743087", "0.67419064", "0.6726084", "0.67161316", "0.6695823", "0.6695823", "0.66937494", "0.66827035", "0.667724", "0.6662698", "0.66604984", "0.6652789", "0.66431695", "0.66219", "0.6605921", "0.65906703", "0.65893906", "0.65613395", "0.6542626", "0.64855653", "0.6418108", "0.63972515", "0.63959455", "0.6392514", "0.63831264", "0.63216484", "0.6281864", "0.627787", "0.6266243", "0.62596196", "0.62519395", "0.62380904", "0.6235762", "0.6224165", "0.6193104", "0.6192044", "0.6176318", "0.61475676", "0.61445385", "0.61371595", "0.6129973", "0.6122635", "0.6118666", "0.6113273", "0.6108023", "0.61018825", "0.60931444", "0.6092522", "0.6088016", "0.60695446", "0.60620165", "0.6051164", "0.6046822", "0.60465205", "0.60353076", "0.60338867", "0.60229504", "0.60104895", "0.59976345", "0.59721047", "0.5972071", "0.59544146", "0.5939658", "0.59056956", "0.5902278", "0.59010154", "0.5882613", "0.5867015", "0.5851078", "0.5846975", "0.5828802", "0.5793883", "0.57909405", "0.5778851", "0.5760598", "0.57536453", "0.57405174", "0.57395405", "0.57373655", "0.57262164", "0.5695901", "0.5694551", "0.5671783", "0.5652604", "0.5635917", "0.5625256", "0.56179464", "0.5613899", "0.5612108", "0.5609027", "0.56040984", "0.5592583", "0.55877507", "0.55819327" ]
0.78993046
0
Returns a recursive list of all files inside folder. The list element is a string w/ file path relative to folder. If any file is found with the same name as LOCAL_METADATA_FILE, then do not append it to the list.
def _get_file_list(folder): tree = [x for x in os.walk(folder)] files = [os.path.join(t[0], y) for t in tree for y in t[2]] return [os.path.relpath(x, start=folder) for x in files if x != LOCAL_METADATA_FILE]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def files_in_folder(folder):\n files = []\n for f in glob.glob(folder):\n if os.path.isdir(f):\n files.extend(files_in_folder(f + os.sep + \"**\"))\n else:\n files.append(f)\n return files", "def ReadFolder(folder: str) -> List[str]:\n\n onlyfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n \n return onlyfiles", "def list_file(self, path: str) -> List[FileName]:\n current_file = self.file_exists(path)\n if current_file is None:\n raise FileNotExistException(\n \"Path doesn't exist. Can't list non-existent path\"\n )\n if current_file.file_type != GOOGLE_FOLDER_TYPE:\n raise NotAFolderException(\"file_nod has to be a folder to list contents\")\n if current_file.children is None:\n raise NotAFolderException(\"file_nod has to be a folder to list contents\")\n if len(current_file.children) == 0:\n return []\n return [\n child_file.file_name for child_file in list(current_file.children.values())\n ]", "def list_files_in_given_folder(path_to_folder):\r\n file_names_list = []\r\n for file_name in glob.glob(path_to_folder+\"/*\"):\r\n file_names_list.append(file_name)\r\n assert file_names_list != [], \"failed to populate folder\"+path_to_folder\r\n return file_names_list", "def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files", "def list_all_files(root):\n local_files = []\n for path, dirs, files in os.walk(os_path(root), followlinks=False):\n if len(files) > 0:\n path_wo_root = path[(len(root) + len(slash)):] # remove root part\n local_files.extend([os.path.join(path_wo_root, f) for f in files])\n return local_files", "def list_files_recursively(\n api,\n query,\n parent,\n files=[],\n folder_name=\"\",\n):\n # type checking\n if isinstance(parent, sbg.models.project.Project):\n parent_id = parent.root_folder\n elif isinstance(parent, sbg.models.file.File):\n parent_id = parent.id\n\n if not folder_name:\n folder_name = Path(folder_name)\n for file in query.all():\n if not file.is_folder():\n file.metadata[\"parent_file_name\"] = folder_name\n files.append(file)\n else:\n folder_name = folder_name / file.name\n res = list_files_recursively(\n api,\n api.files.query(parent=file),\n folder_name=folder_name,\n parent=file,\n )\n folder_name = folder_name.parents[0]\n return files", "def listFilesInDir(self, path, recursive=False, fileNameOnly=True, filter=None):\n self._checkActive()\n def _process(args, path, ttype, moddate=0, size=0, md5hash=\"\"):\n fileNameOnly, filter, pathsreturn = args \n if ttype == \"F\":\n if (filter is None) or fnmatch.fnmatch(path, filter):\n #fullpath=q.system.fs.joinPaths(path, fileNameOnly)\n if fileNameOnly:\n pathsreturn.append(q.system.fs.getBaseName(path))\n else:\n pathsreturn.append(path)\n pathsreturn=[]\n self.walk(_process, (fileNameOnly, filter, pathsreturn) , path, recursive=recursive) \n return pathsreturn", "def get_files_by_folder(path):\n\n f = []\n for (dirpath, dirnames, filenames) in walk(path):\n f.extend(filenames)\n break\n return f", "def get_files(folder_name: str) -> list:\r\n files = [f for f in os.listdir(os.path.join(os.getcwd(), folder_name))\r\n if os.path.isfile(os.path.join(os.getcwd(), folder_name, f))]\r\n return files", "def get_file_list(file_or_folder, with_subfolders):\n if 
os.path.isfile(file_or_folder):\n return [file_or_folder] if is_processable_file(file_or_folder) else []\n elif os.path.isdir(file_or_folder):\n file_list = []\n if with_subfolders:\n for path, _, files in os.walk(file_or_folder):\n for name in files:\n if is_processable_file(name):\n file_list.append(os.path.join(path, name))\n else:\n for item in os.listdir(file_or_folder):\n if is_processable_file(item):\n candidate = os.path.join(file_or_folder, item)\n if os.path.isfile(candidate):\n file_list.append(candidate)\n return file_list\n else:\n return []", "def get_files_from_directory(self, folder):\n return ['{}/{}'.format(folder, each) for each in os.listdir(folder) if each.endswith('.vm')]", "def getFileList(folder_path):\n file_path_list = []\n if os.path.exists(folder_path):\n for path, _, files in os.walk(folder_path):\n if not files:\n continue\n for file in files:\n file_path_list.append(os.path.join(path, file))\n return file_path_list", "def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)", "def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)", "def get_file_paths_recursive(folder=None, file_ext=None):\n file_list = []\n if folder is None:\n return file_list\n\n # for dir_path, dir_names, file_names in os.walk(folder):\n # for file_name in file_names:\n # if file_ext is None:\n # file_list.append(os.path.join(dir_path, file_name))\n # continue\n # if file_name.endswith(file_ext):\n # file_list.append(os.path.join(dir_path, file_name))\n file_list = [os.path.join(folder, f) for f in sorted(os.listdir(folder)) if f.endswith(file_ext)]\n\n return file_list", "def get_files_in_dir(dir_path: str) -> List[FileInfo]:\n dir_walk_items = os.walk(dir_path)\n\n all_files = []\n for dir_walk_item in dir_walk_items:\n path_to_dir = dir_walk_item[0]\n file_names = dir_walk_item[2]\n for file_name in file_names:\n if file_name not in IGNORED_FILES:\n all_files.append(\n FileInfo.create(path_to_dir, file_name)\n )\n\n return all_files", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def get_all_files_walk(folder):\n files = []\n for root, dirs, filenames in os.walk(folder):\n files.extend(os.path.join(root, f) for f in filenames)\n return files", "def file_list(folder_path: List[str]) -> list:\n drive = _drive_gen()\n return _list_file(folder_path, drive)[1]", "def _listdir(folder):\n\tfilePattern = r\"^\\d{4}\\-(0?[1-9]|1[012])\\-(0?[1-9]|[12][0-9]|3[01])\\-clipping\\-[\\d]*\\.json$\"\n\tfilenames = [f for f in os.listdir(folder) if re.match(filePattern, f)]\n\treturn filenames", "def get_file_list(folder):\n\tfilelist = []\n\tfor file in os.listdir(folder):\n\t\tif file.endswith('.png'):\n\t\t\tfilelist.append(file)\n\treturn filelist", "def folder(fpath):\n file_paths = glob.glob(fpath + '/*.dat')\n return list(file_paths)", "def get_list_of_files_in_folder(\n self, folder_name: str, limit: int = 1\n ) -> List[str]:\n\n files = []\n if os.path.isdir(folder_name):\n # Get list of only html files from folder:\n files = [file for file in os.listdir(folder_name) if file.endswith(\".html\")]\n\n if 
len(files) < limit: # short dialogs\n return []\n\n # Descending sort to consider message order:\n files = sorted(\n files,\n key=lambda x: int(re.search(r\"messages(\\d+)\\.html\", x).group(1)),\n reverse=True,\n )\n else:\n print(f\"No such directory: {folder_name}\")\n return files", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def list_files(file, folder, extension = '*.evtx'):\r\n if file:\r\n return [file]\r\n elif folder:\r\n return [ y for x in os.walk(folder) for y in glob(os.path.join(x[0], extension))]\r\n else:\r\n return []", "def json_files_from_folder(folder: str) -> list:\n\n files = []\n for file_name in os.listdir(folder):\n splitted_filename = file_name.split(\".\")\n if splitted_filename[-1] == \"json\":\n files.append(file_name)\n return files", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result", "def parse_folder(self, path):\n\n data = []\n for filename in os.listdir(path):\n data.append(self.parse_file(os.path.join(path, filename), filename))\n return data", "def list_of_files(sourcedir, recursive):\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result", "def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def file_paths_in_folder(folder_path: str) -> typing.List[str]:\n file_paths = []\n for root, _, file_names in os.walk(folder_path):\n for name in file_names:\n file_paths.append(os.path.join(root, name))\n return sorted(file_paths)", "def list_dir_recursively(dir: str) -> list:\n all_files = []\n for root, dirs, files in os.walk(dir):\n for name in files:\n file_path = os.path.join(root, name)\n file_path = os.path.relpath(file_path, dir)\n all_files.append(file_path)\n return all_files", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def get_file_list(service, folder_id):\n result = []\n\n page_token = None\n while True:\n try:\n param = {}\n if page_token:\n param['pageToken'] = page_token\n children = service.children().list(\n folderId=folder_id, **param).execute()\n\n for child in children.get('items', []):\n result.append(child['id'])\n page_token = children.get('nextPageToken')\n if not page_token:\n break\n except errors.HttpError as error:\n print('Could not retrieve file list: {}'.format(error))\n break\n\n 
return result", "def list_files(folder_path):\n try:\n for name in os.listdir(folder_path):\n base, ext = os.path.splitext(name)\n if ext != '.rst':\n continue\n yield os.path.join(folder_path, name)\n except OSError as ex:\n log.error('Exception occured in list_files: {0}'.format(ex))", "def list_files_local(path: str) -> List[str]:\n files = os.listdir(path)\n files = [f.split('.')[0] for f in files if 'pkl' in f]\n return files", "def get_files(self, dir):\n path = os.path.join(self.loc, dir)\n return [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def walkdir(self, folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))", "def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def get_files_in_folder(self, folder_id: str) -> list:\n if folder_id:\n response = self.service.files().list(\n q=f\"parents = '{folder_id}'\",\n spaces='drive',\n fields='nextPageToken, files(id, name, kind, mimeType, trashed, createdTime, owners)',\n pageToken=None).execute()\n else:\n response = self.service.files().list(\n # q=f\"parents = '{folder_id}'\",\n spaces='drive',\n fields='nextPageToken, files(id, name, kind, mimeType, trashed, createdTime, owners)',\n pageToken=None).execute()\n items = response.get('files', [])\n\n folder_list = []\n for item in items:\n zen = item['owners']\n folder_details = {\n 'owner_name': zen[0]['displayName'],\n 'owner_kind': zen[0]['kind'],\n 'fileid': item['id'],\n 'filename': item['name'],\n 'file_kind': item['kind'],\n 'mime_type': item['mimeType'],\n 'trashed': item['trashed'],\n 'created_time': item['createdTime']\n }\n folder_list.append(folder_details)\n return folder_list", "def list_local(paths, prefix=None):\n results = []\n for path in paths:\n if os.path.isdir(path):\n for filename in os.listdir(path):\n fullpath = os.path.join(path, filename)\n if os.path.isdir(fullpath):\n results += list_local([fullpath], prefix)\n else:\n fullname = fullpath\n if prefix and fullname.startswith(prefix):\n fullname = fullname[len(prefix):]\n mtime = datetime.datetime.fromtimestamp(\n os.path.getmtime(fullpath), tz=utc)\n results += [{\"Key\": fullname,\n \"LastModified\": mtime.strftime(\n '%a, %d %b %Y %H:%M:%S %Z')}]\n else:\n fullpath = path\n fullname = fullpath\n if prefix and fullname.startswith(prefix):\n fullname = fullname[len(prefix):]\n mtime = datetime.datetime.fromtimestamp(\n os.path.getmtime(fullpath), tz=utc)\n results += [{\"Key\": fullname,\n \"LastModified\": mtime.strftime(\n '%a, %d %b %Y %H:%M:%S %Z')}]\n return results", "def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def getImmediateFiles(aDir):\n return [name for name in 
os.listdir(aDir)\n if os.path.isfile(os.path.join(aDir,name))]", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def list_raw_file_directory(\n raw_file_dir: Union[OtherPath, pathlib.Path, str, None] = None,\n project_dir: Union[OtherPath, pathlib.Path, str, None] = None,\n extension: Optional[str] = None,\n levels: Optional[int] = 1,\n only_filename: Optional[bool] = False,\n with_prefix: Optional[bool] = True,\n):\n\n file_list = []\n\n if raw_file_dir is None:\n raw_file_dir = prms.Paths.rawdatadir\n\n # 'dressing' the raw_file_dir in a list in case we want to\n # search in several folders (not implemented yet):\n if not isinstance(raw_file_dir, (list, tuple)):\n raw_file_dir = [OtherPath(raw_file_dir)]\n else:\n raw_file_dir = [OtherPath(d) for d in raw_file_dir]\n\n if project_dir is not None:\n raw_file_dir = [r / project_dir for r in raw_file_dir]\n\n for d in raw_file_dir:\n _file_list = d.listdir(levels=levels)\n if extension is not None:\n logging.debug(f\"filtering for extension: {extension}\")\n _file_list = fnmatch.filter(_file_list, f\"*.{extension}\")\n if only_filename:\n logging.debug(\"only returning the file names\")\n _file_list = [f.name for f in _file_list]\n elif with_prefix:\n logging.debug(\"adding prefix to file names\")\n logging.debug(f\"{d.pathlike_location=}\")\n _file_list = [d.pathlike_location / f for f in _file_list]\n\n file_list.extend(_file_list)\n\n return file_list", "def list_filenames(folder):\n full = glob.glob(os.path.join(_get_path(folder), '*'))\n ignore = ['Icon',]\n return [os.path.basename(f.strip()) for f in full if os.path.basename(f.strip()) not in ignore]", "def list_all_files(dir):\n\n result = []\n for root, _, filenames in os.walk(dir):\n for name in filenames:\n filename, ext = os.path.splitext(name)\n if ext == '.cs' or ext == '.xaml':\n result.append(os.path.join(root, name))\n return result", "def get_child_file_names(folder_path):\n file_names_in_folder = []\n try:\n for f in listdir(folder_path):\n if '.pyc' not in f and '.swp' not in f and isfile(\"%s/%s\" %(folder_path,f)):\n file_names_in_folder.append(f)\n except OSError as e:\n # error\n print(\"ERROR IN get_child_file_names\")\n\n return file_names_in_folder", "def read_files(folder):\n print_header(\"READING FILES FROM FOLDER (RECURSIVE)\", \"=\")\n files = []\n for dirpath, dirnames, filenames in os.walk(folder):\n if not dirpath.endswith(\"updates\"):\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext.lower() == \".sql\":\n full_path = os.path.join(dirpath, filename)\n with open(full_path, \"r\") as f:\n sql = f.read()\n sql = sql.decode(\"latin-1\")\n\n files.append((filename, sql))\n return files", "def _filter_file_list(files, local_metadata, remote_metadata):\n def _is_tracked(filename, 
metadata):\n \"\"\"\n Is the filename tracked in the remote metadata dict.\n The file may be not even locally tracked yet\n \"\"\"\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha\n\n def _is_inside_ignored_dir(filename):\n \"\"\" Is the filename inside any of the IGNORE_DIRS list \"\"\"\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])\n\n def _has_ignored_extension(filename):\n return any([ext in IGNORE_EXTENSIONS\n for ext in filename.split('.')[1:]])\n\n files = [f for f in files\n if not _is_inside_ignored_dir(f)\n and not _has_ignored_extension(f)\n and not _is_tracked(f, remote_metadata)]\n return files", "def get_files_list(tree):\n result = list()\n for (dir_path, _, file_names) in walk(tree):\n if file_names:\n for file in file_names:\n if file.lower().endswith(('.png', '.jpg', '.jpeg')):\n result.append(path.join(dir_path, file))\n\n return result", "def ls_files(self, path, recursive=False):\n if path != \"\" and not path.endswith(\"/\"):\n path += \"/\"\n\n blob_iter = self.client.list_blobs(name_starts_with=path)\n files = []\n for blob in blob_iter:\n relative_path = os.path.relpath(blob.name, path)\n if recursive or \"/\" not in relative_path:\n files.append(relative_path)\n return files", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def file_list(start_dir):\n file_list = []\n for root, dirs, files in os.walk(start_dir):\n for f in files:\n if f[0] != '.':\n file_list.append(f)\n return file_list", "def getfiles(path): \n global picture_list\n try:\n # dir_list has all files and directories in path\n # any directory is WITHOUT ending '/'\n dir_list = os.listdir(path)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getfiles, picture_list:\", picture_list\n picture_list = None\n return\n \n for line in dir_list:\n file = path + \"/\" + line\n if os.path.isdir(file):\n getfiles( file) # dig into subdirectory\n elif isPicture(file):\n picture_list.append(file)\n else: \n # neither picture file nor directory; ignore \n pass\n return", "def _listFiles(files, path):\n\n for item in os.listdir(path):\n item = os.path.join(path, item)\n if os.path.isdir(item):\n _listFiles(files, item)\n else:\n files.append(item)", "def files_list(directory: str) -> list:\n files = os.listdir(directory)\n\n return files", "def getFilesList(data):\n\n filesList = []\n\n if os.path.isdir(data):\n logging.info(\"Using files from \" + data)\n #Create a list containing the file names\n for root, dirs, files in os.walk(data):\n for filename in files:\n filesList.append(os.path.join(root,filename))\n\n else:\n logging.info(\"Using file \" + data)\n filesList.append(os.path.abspath(data))\n\n return sorted(filesList)", "def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude", "def 
get_list_of_files_in_dir(file_list_path=None):\n return os.listdir(file_list_path)", "def filesInDir(self, path=None, pattern=None):\n if path is None:\n path = self.myDir\n if os.path.isfile(path):\n fileList = [path]\n else:\n fileList = os.listdir(path)\n if pattern is None:\n return fileList\n results = []\n for fileName in fileList:\n if pattern in fileName:\n results.append(fileName)\n return results", "def ListFolder(self, path): # real signature unknown; restored from __doc__\n pass", "def contents(self):\n entries = []\n walk = next(os.walk(self.path))\n entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])\n entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])\n return entries", "def getFileList(*args, filespec: AnyStr=\"\", folder: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def getListOfFiles(directory):\n listOfFiles = []\n for path, dirs, files in os.walk(directory):\n for eachFile in files:\n filePath = os.path.join(path, eachFile)\n listOfFiles.append(filePath)\n return listOfFiles", "def get_file_list(dir_path):\n onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]\n return onlyfiles", "def get_files(recursive, path):\n if not recursive:\n # this makes a list of just filenames (no paths)\n filenames = [e for e in os.listdir(path)]\n # but we wants full paths, use an os path join\n file_list = [os.path.join(path, e) for e in filenames]\n else:\n # this will decend into all subdirs\n file_list = [os.path.join(dir_path, x)\n for dir_path, dirs, files in os.walk(path)\n for x in files]\n return file_list", "def _list_dir(self):\n return [os.path.join(self._path, fn) for fn in os.listdir(self._path)\n if not fn.endswith(self._fs_transaction_suffix)]", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def _get_files(file_path: str, file_name=None) -> List[File]:\n files = []\n items = os.scandir(file_path)\n for item in items:\n if item.is_file():\n if item.name.startswith('.'):\n continue # Ignore hidden files\n if item.name == FileSystemService.WF_JSON_FILE:\n continue # Ignore the json files.\n if file_name is not None and item.name != file_name:\n continue\n file = FileSystemService.to_file_object_from_dir_entry(item)\n files.append(file)\n return files", "def util_build_file_list(dirname, IGNORE_CREGEX):\n outlist = []\n logging.info('Scanning directory: %s', dirname)\n try:\n with os.scandir(dirname) as filelist:\n filelist_filt = [a for a in filelist if a.is_file() and not any(list(map(lambda rg: True if rg.match(a.name) else False, IGNORE_CREGEX)))]\n outlist = [ {'dir': dirname, 'filename': a.name, 'ctime': a.stat().st_ctime, 'mtime': a.stat().st_mtime} for a in filelist_filt ]\n dirlist = [ a for a in filelist if a.is_dir() ]\n if len(dirlist) > 0:\n outlist.append(list(map(util_build_file_list, dirlist)))\n except FileNotFoundError:\n logging.error('Directory not found: %s' % dirname)\n pass\n except Exception as e:\n logging.error('Error due to %s' % e) \n logging.debug('Filelist generated for %s as %s' % (dirname, outlist))\n return outlist", "def list_files(path, file_prototype, subfolders = True):\n files =[]\n if subfolders:\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in [f for f in filenames if f.find(file_prototype)>=0]:\n files.append(os.path.join(dirpath, filename))\n else:\n files = [f for f in os.listdir(path) if os.path.isfile(path+f) and f.find(file_prototype)>=0] \n 
return(files)", "def _get_all_entries(entry_list: List[str], keep_top_dir: bool) -> List[Path]:\n all_files = []\n\n entry_list = [Path(entry) for entry in entry_list]\n\n if keep_top_dir:\n return entry_list\n\n for entry in entry_list:\n if entry.is_dir():\n all_files.extend(entry.iterdir())\n else:\n all_files.append(entry)\n return all_files", "def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict", "def getFiles(directory):\n # os.listdir only for locally downloaded files\n _files=[]\n for item in os.listdir(directory):\n path = os.path.join(directory, item)\n if not os.path.isdir(path) and \".lhe.gz\" in path:\n _files.append(path)\n elif os.path.isdir(path):\n getFiles(path)\n return _files", "def get_list_of_files(directory: str, file_type: str) -> list:\n ret = []\n for (root, subdirectories, files) in os.walk(directory):\n for file in files:\n if file.endswith(file_type):\n ret.append(os.path.abspath(os.path.join(root, file)))\n return ret", "def GetAllFiles(self):\r\n\t\tdir_list = []\r\n\t\tdir_list.append(self.path) \r\n\t\tfor dir in dir_list: \r\n\t\t\tfiles = os.listdir(dir)\r\n\t\t\tfor file in files:\r\n\t\t\t\tfull_name = dir + \"\\\\\\\\\" + file\r\n\t\t\t\tif(os.path.isdir(full_name)): \r\n\t\t\t\t\tif(file[0] == '.'):\t# 排除隐藏文件夹\r\n\t\t\t\t\t\tpass \r\n\t\t\t\t\telse:\t# 添加非隐藏文件夹 \r\n\t\t\t\t\t\tdir_list.append(full_name) \r\n\t\t\t\tif(os.path.isfile(full_name)):\r\n\t\t\t\t\tfor type in self.types.split('|'):\r\n\t\t\t\t\t\tif file.find(type) != -1:\r\n\t\t\t\t\t\t\t# 添加文件 \r\n\t\t\t\t\t\t\tself.file_list.append(full_name)\r\n\t\t\t\t\t\t\t#print \"Add file \" + full_name\r", "def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']", "def get_files(root_dir, recursive=True):\n\n ret_files = []\n\n for root, _, files in os.walk(root_dir, topdown=True):\n\n for name in files:\n ret_files.append(os.path.join(root, name))\n\n if not recursive:\n break\n\n return ret_files", "def _findFilesEntriesInFolderByExtension(self, path, extension, subFolders=True, pathList=[]):\n try:\n for entry in os.scandir(path):\n if entry.is_file() and entry.path.endswith(extension):\n fileStats = os.stat(entry.path)\n # TODO: Revisar que siempre devuleve specialfolder type 0 aunque se trate de public o my folder etc.\n # Esto estaba asi en .net pero es para revisar\n fileEntry = FileEntry(\n text=str(entry.path[entry.path.rfind(os.path.sep)+1:]),\n type=eFileTypes.NOTHING,\n data=FileEntryData(\n # fullPath=entry.path,\n fullPath=str(entry.path[entry.path.rfind(os.path.sep)+1:]),\n fileSize=fileStats.st_size,\n lastUpdateTime=datetime.datetime.fromtimestamp(\n fileStats.st_mtime).isoformat()\n )\n )\n pathList.append(fileEntry)\n elif entry.is_dir() and subFolders: # if its a directory, then repeat process as a nested function\n pathList = self._findFilesEntriesInFolderByExtension(\n entry.path, extension, subFolders, pathList)\n except OSError:\n print('Cannot access ' + path + '. 
Probably a permissions error')\n\n return pathList", "def _find_file_list(cloud_bucket_name, file_name_prefix=None, sub_folder_name=None, file_suffix_name='.csv'):\n bucket_stat_list = list_blobs(\"/\" + cloud_bucket_name)\n if not bucket_stat_list:\n raise FileNotFoundError(\"No files in cloud bucket %r.\" % cloud_bucket_name)\n\n # GCS does not really have the concept of directories (it's just a filename convention), so all\n # directory listings are recursive and we must filter out subdirectory contents.\n bucket_stat_list = [\n s\n for s in bucket_stat_list\n if s.name.lower().endswith(file_suffix_name) and (sub_folder_name is None or sub_folder_name in s.name)\n ]\n if not bucket_stat_list:\n raise FileNotFoundError(\"No {} files in cloud bucket {} (sub-folder: {}).\".format(file_suffix_name,\n cloud_bucket_name,\n sub_folder_name))\n file_list = []\n bucket_stat_list.sort(key=lambda s: s.updated)\n for s in bucket_stat_list:\n if _is_fresh_file(s.updated):\n blob_name = s.name\n file_name = os.path.basename(blob_name)\n cloud_path = os.path.normpath(cloud_bucket_name + '/' + blob_name)\n if file_name_prefix and file_name_prefix in blob_name:\n file_list.append((cloud_path, file_name))\n elif file_name_prefix is None:\n file_list.append((cloud_path, file_name))\n\n return file_list", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def list_my_folders_by_searching_files() -> list:\n page_token = None\n getting_files = True\n my_folders = [] # all the folders i have access to\n\n while getting_files:\n if not page_token:\n response = drive_service().files().list(q=\"mimeType = 'application/vnd.google-apps.folder'\",\n fields=\"*\",\n spaces='drive').execute()\n else:\n response = drive_service().files().list(q=\"mimeType = 'application/vnd.google-apps.folder'\",\n fields=\"*\",\n spaces='drive',\n pageToken=page_token).execute()\n\n key_list = list(response.keys())\n if \"nextPageToken\" not in key_list:\n getting_files = False\n else:\n page_token = response[\"nextPageToken\"]\n\n folders = response['files'] # Drive api refers to files and folders as files.\n for folder in folders:\n my_folders.append(folder)\n\n return my_folders", "def files(self, extension: str = None) -> Optional[list]:\n if self._folder is None:\n raise ValueError(f\"{self.__class__.__name__}.folder not found\")\n if extension is not None:\n self.extension = extension\n if self.extension:\n return [join(self._folder, file) for file in listdir(self._folder) if file.endswith(self.extension)]\n else:\n return [join(self._folder, file) for file in listdir(self._folder)]", "def get_bibfiles(folder: str) -> t.List[str]:\n full_pathname = os.path.normpath(os.path.abspath(folder))\n bib_files = []\n for f in os.listdir(full_pathname):\n fullname = os.path.join(full_pathname, f)\n if f.endswith(\".bib\") and os.path.isfile(fullname):\n logging.debug(f'get bibfile \"{f}\" from directory \"{full_pathname}\"')\n bib_files.append(fullname)\n return bib_files", "def get_files(path: str) -> List[str]:\n if not isdir(path):\n return [path] # its expected to return a list each time even if its a single element\n return [file for fileOrDir in listdir(path) for file in get_files(path + '/' + fileOrDir)]\n # return list of each file returned by the recursive call getFiles(fileOrDir) on\n # each 
fileOrDir in listdir(path)", "def list_files_in_subfolders(fpath, pattern=r\"*\"):\n return tuple(pathlib.Path(fpath).rglob(pattern))", "def get_package_data_files(package, data, package_dir=None):\n if package_dir is None:\n package_dir = os.path.join(*package.split('.'))\n all_files = []\n for f in data:\n path = os.path.join(package_dir, f)\n if os.path.isfile(path):\n all_files.append(f)\n continue\n for root, _dirs, files in os.walk(path, followlinks=True):\n root = os.path.relpath(root, package_dir)\n for file in files:\n file = os.path.join(root, file)\n if file not in all_files:\n all_files.append(file)\n return all_files", "def get_file_list(work_dir, match_flag='*.*'):\n matches = []\n for root, dir, files in os.walk(work_dir):\n for items in fnmatch.filter(files, match_flag):\n matches.append(os.path.realpath(os.path.join(root, items)))\n\n return matches", "def get_files(path='.', file_mask=['*'], recursive=False):\n \n def process_directory(dir_path, items):\n \"\"\"\n Processes files in 1 directory.\n\n \"\"\"\n result = []\n for item in items:\n name = os.path.join(dir_path, item)\n if os.path.isfile(name) and not os.path.islink(name):\n for mask in masks:\n if fnmatch.fnmatch(name, mask):\n result.append(os.path.abspath(name))\n break\n return result\n\n masks = [file_mask] if isinstance(file_mask, str) else file_mask\n assert isinstance(masks, list)\n\n # final list to be returned, contains all files\n res_list = []\n if recursive:\n for root, dirs, files in os.walk(path):\n files_checked = process_directory(root, files)\n res_list.extend(files_checked)\n else:\n res_list = process_directory(path, os.listdir(path))\n return res_list", "def _add_all_files(folder: Folder):\n file_list = [\n position\n for position in os.listdir(str(folder))\n if os.path.isfile(os.path.join(str(folder), position))\n and not position.startswith(\".\")\n ]\n for position in file_list:\n temp_file = File(position)\n folder.files.append(temp_file)" ]
[ "0.66013783", "0.6589332", "0.6403268", "0.63590515", "0.6349886", "0.6340192", "0.6325134", "0.62352633", "0.61990255", "0.61880994", "0.618564", "0.61627126", "0.61410564", "0.6138895", "0.6138895", "0.6132723", "0.6122742", "0.610428", "0.60967124", "0.6089745", "0.6074586", "0.6074562", "0.60325915", "0.60208607", "0.6015542", "0.6005542", "0.6000987", "0.5985674", "0.5985037", "0.5977303", "0.59627694", "0.5948342", "0.5901803", "0.58972245", "0.58971226", "0.58628786", "0.58569896", "0.5846488", "0.58464855", "0.5844832", "0.5844683", "0.5835326", "0.5833899", "0.58248997", "0.5824223", "0.5823131", "0.5822239", "0.5822239", "0.58137226", "0.5798687", "0.5792545", "0.5791162", "0.57822746", "0.5780758", "0.5776017", "0.576746", "0.57608837", "0.57563424", "0.5751388", "0.5749244", "0.57491815", "0.57490754", "0.57390016", "0.5738204", "0.5733737", "0.57315415", "0.5725001", "0.5711337", "0.5707847", "0.5705158", "0.5704949", "0.5695967", "0.5688936", "0.56886524", "0.5687719", "0.5687033", "0.56825763", "0.5671247", "0.56712234", "0.5664071", "0.56596", "0.56593245", "0.56495786", "0.5649266", "0.5638445", "0.56288564", "0.56272364", "0.562599", "0.56256825", "0.562302", "0.5614221", "0.5610836", "0.5609917", "0.56097466", "0.5608659", "0.5599731", "0.5598886", "0.5594742", "0.55915225", "0.55909234" ]
0.81068176
0
Fetches the remote metadata file REMOTE_METADATA_FILE and returns the equivalent metadata dict.
def _fetch_current_remote_metadata(conn): content = _get(conn, REMOTE_METADATA_FILE) metadata = json.loads(content) if content else {} return metadata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents", "def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())", "def fetchPRIDEProject(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:\n \n parsedInputURL = parse.urlparse(remote_file)\n projectId = parsedInputURL.path\n metadata_url = parse.urljoin(PRIDE_PROJECTS_REST, projectId)\n \n metadata_array = [\n URIWithMetadata(remote_file, {'fetched': metadata_url})\n ]\n metadata = None\n try:\n metaio = io.BytesIO()\n _ , metametaio = fetchClassicURL(metadata_url, metaio)\n metadata = json.loads(metaio.getvalue().decode('utf-8'))\n metadata_array.extend(metametaio)\n except urllib.error.HTTPError as he:\n raise WFException(\"Error fetching PRIDE metadata for {} : {} {}\".format(projectId, he.code, he.reason))\n \n try:\n pride_project_url = metadata['_links']['datasetFtpUrl']['href']\n except Exception as e:\n raise WFException(\"Error processing PRIDE project metadata for {} : {}\".format(remote_file, e))\n \n return pride_project_url, metadata_array", "def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"text/xml\"\n ):\n try:\n content = openURL(metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth)\n doc = etree.fromstring(content.read())\n\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] in [\"TC211\", \"19115\", \"19139\"]:\n mdelem = doc.find(\n \".//\" + nspath_eval(\"gmd:MD_Metadata\", namespaces)\n ) or doc.find(\n \".//\" + nspath_eval(\"gmi:MI_Metadata\", namespaces)\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None", "def get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata", "def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret", "def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA", "def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if 
self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}", "def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()", "async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):", "def read_remote_file(org, repo, filename):\n import fsspec\n fs = fsspec.filesystem('github', org=org, repo=repo)\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data", "def get_metadata(self):\n try:\n r = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n r.raise_for_status()\n output = r.json()\n if 'error' in output:\n raise requests.exceptions.RequestException(output['error'])\n return output", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)", "def get_metadata(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_metadata(body)", "def fetchZenodo(\n remote_file: \"URIType\",\n cachedFilename: \"AbsPath\",\n secContext: \"Optional[SecurityContextConfig]\" = None,\n) -> \"ProtocolFetcherReturn\":\n\n # TODO: implement support for access_token through security context\n\n # Dealing with an odd behaviour from urlparse\n for det in (\"/\", \"?\", \"#\"):\n if det in remote_file:\n parsedInputURL = urllib.parse.urlparse(remote_file)\n break\n else:\n parsedInputURL = urllib.parse.urlparse(remote_file + \"#\")\n parsed_steps = parsedInputURL.path.split(\"/\")\n\n if len(parsed_steps) < 1 or parsed_steps[0] == \"\":\n raise FetcherException(\n f\"{remote_file} is not a valid {ZENODO_SCHEME} CURIE. 
It should start with something like {ZENODO_SCHEME}:record_id\"\n )\n\n zenodo_id = parsed_steps[0]\n\n metadata_url = cast(\"URIType\", parse.urljoin(ZENODO_RECORD_REST, zenodo_id))\n\n gathered_meta = {\"fetched\": metadata_url}\n metadata_array = [URIWithMetadata(remote_file, gathered_meta)]\n try:\n metaio = io.BytesIO()\n _, metametaio, _ = fetchClassicURL(metadata_url, metaio)\n metadata = json.loads(metaio.getvalue().decode(\"utf-8\"))\n gathered_meta[\"payload\"] = metadata\n metadata_array.extend(metametaio)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo metadata for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n if not isinstance(metadata, dict) or (metadata.get(\"conceptdoi\") is None):\n raise FetcherException(\n f\"Zenodo metadata for {zenodo_id} is inconsistent: {metadata}\"\n )\n\n zenodo_lic_id = metadata.get(\"metadata\", {}).get(\"license\", {}).get(\"id\")\n if zenodo_lic_id is None:\n raise FetcherException(\n f\"Zenodo metadata for {zenodo_id} is inconsistent: {metadata}\"\n )\n\n # Let's identify the licence of the contents\n licence_meta_url = cast(\n \"URIType\", parse.urljoin(ZENODO_LICENSE_REST, zenodo_lic_id)\n )\n\n gathered_l_meta = {\"fetched\": licence_meta_url}\n metadata_array.append(URIWithMetadata(remote_file, gathered_l_meta))\n try:\n metaio = io.BytesIO()\n _, metametalicio, _ = fetchClassicURL(licence_meta_url, metaio)\n l_metadata = json.loads(metaio.getvalue().decode(\"utf-8\"))\n gathered_l_meta[\"payload\"] = l_metadata\n metadata_array.extend(metametalicio)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo licence metadata {zenodo_lic_id} for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n licence_url = l_metadata.get(\"metadata\", {}).get(\"url\")\n if licence_url is None:\n raise FetcherException(\n f\"Zenodo licence metadata {zenodo_lic_id} needed to describe {zenodo_id} is inconsistent: {l_metadata}\"\n )\n\n # When no URL, then the text should suffice\n if licence_url == \"\":\n licence_url = l_metadata[\"metadata\"].get(\"title\", zenodo_lic_id)\n\n # Let's select the contents\n kind: \"Optional[ContentKind]\" = None\n the_possible_files = metadata.get(\"files\", [])\n if len(parsed_steps) == 1:\n the_files = the_possible_files\n kind = ContentKind.Directory\n else:\n the_files = []\n prefix = \"/\".join(parsed_steps[1:])\n # Adjusting this properly\n if prefix[-1] == \"/\":\n prefix_slash = prefix\n prefix = prefix[0:-1]\n else:\n prefix_slash = prefix + \"/\"\n\n for the_file in the_possible_files:\n key = the_file.get(\"key\")\n if key is None:\n continue\n\n the_link = the_file.get(\"links\", {}).get(\"self\")\n if the_link is None:\n continue\n\n if key == prefix:\n the_files.append(the_file)\n kind = ContentKind.File\n break\n elif key.startswith(prefix_slash):\n the_files.append(the_file)\n kind = ContentKind.Directory\n\n if kind is None:\n raise FetcherException(\n f\"{remote_file} does not match contents from Zenodo entry {zenodo_id} (or entry has no associated file)\"\n )\n\n # Now, let's materialize the files\n try:\n if kind == ContentKind.Directory:\n os.makedirs(cachedFilename, exist_ok=True)\n for the_file in the_files:\n relpath = the_file[\"key\"]\n last_slash = relpath.rfind(\"/\")\n if last_slash != -1:\n the_file_local_dir = os.path.join(\n cachedFilename, relpath[0:last_slash]\n )\n os.makedirs(the_file_local_dir, exist_ok=True)\n\n the_file_local_path = cast(\n \"AbsPath\", os.path.join(cachedFilename, relpath)\n )\n _, metacont, _ = 
fetchClassicURL(\n the_file[\"links\"][\"self\"], the_file_local_path\n )\n metadata_array.extend(metacont)\n else:\n _, metacont, _ = fetchClassicURL(\n the_files[0][\"links\"][\"self\"], cachedFilename\n )\n metadata_array.extend(metacont)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo entry contents for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n return ProtocolFetcherReturn(\n kind_or_resolved=kind,\n metadata_array=metadata_array,\n licences=(cast(\"URIType\", licence_url),),\n )", "def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata", "def getMetadata(samweb, filenameorid, locations=False):\n params = {}\n if locations: params['locations'] = True\n response = samweb.getURL(_make_file_path(filenameorid) + '/metadata', params=params)\n return convert_from_unicode(response.json())", "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def metadata(self):\n return parse_metadata(self.metadata_path())", "async def fetch_metadata(self, route: str):\n data = await self.http.get_metadata(route)\n return data", "async def get_remote_media_info(self, server_name: str, media_id: str) -> dict:\n if (\n self.federation_domain_whitelist is not None\n and server_name not in self.federation_domain_whitelist\n ):\n raise FederationDeniedError(server_name)\n\n # We linearize here to ensure that we don't try and download remote\n # media multiple times concurrently\n key = (server_name, media_id)\n async with self.remote_media_linearizer.queue(key):\n responder, media_info = await self._get_remote_media_impl(\n server_name, media_id\n )\n\n # Ensure we actually use the responder so that it releases resources\n if responder:\n with responder:\n pass\n\n return media_info", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def getFileMetadata( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileMetadata: Attempting to obtain metadata for %s files.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n pfn = url\n if url.find( self.url ) == 0:\n pfn = url[ ( len( self.url ) ):]\n 
res = serviceClient.getMetadata( pfn )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'File':\n gLogger.debug( \"DIPStorage.getFileMetadata: Successfully obtained metadata for %s.\" % url )\n successful[url] = res['Value']\n else:\n failed[url] = 'Supplied path is not a file'\n else:\n failed[url] = 'File does not exist'\n else:\n gLogger.error( \"DIPStorage.getFileMetadata: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def metadata_file(self):\n return self._metadata_file", "async def _download_remote_file(\n self,\n server_name: str,\n media_id: str,\n ) -> dict:\n\n file_id = random_string(24)\n\n file_info = FileInfo(server_name=server_name, file_id=file_id)\n\n with self.media_storage.store_into_file(file_info) as (f, fname, finish):\n request_path = \"/\".join(\n (\"/_matrix/media/r0/download\", server_name, media_id)\n )\n try:\n length, headers = await self.client.get_file(\n server_name,\n request_path,\n output_stream=f,\n max_size=self.max_upload_size,\n args={\n # tell the remote server to 404 if it doesn't\n # recognise the server_name, to make sure we don't\n # end up with a routing loop.\n \"allow_remote\": \"false\"\n },\n )\n except RequestSendFailed as e:\n logger.warning(\n \"Request failed fetching remote media %s/%s: %r\",\n server_name,\n media_id,\n e,\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except HttpResponseException as e:\n logger.warning(\n \"HTTP error fetching remote media %s/%s: %s\",\n server_name,\n media_id,\n e.response,\n )\n if e.code == twisted.web.http.NOT_FOUND:\n raise e.to_synapse_error()\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except SynapseError:\n logger.warning(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise\n except NotRetryingDestination:\n logger.warning(\"Not retrying destination %r\", server_name)\n raise SynapseError(502, \"Failed to fetch remote media\")\n except Exception:\n logger.exception(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n await finish()\n\n if b\"Content-Type\" in headers:\n media_type = headers[b\"Content-Type\"][0].decode(\"ascii\")\n else:\n media_type = \"application/octet-stream\"\n upload_name = get_filename_from_headers(headers)\n time_now_ms = self.clock.time_msec()\n\n # Multiple remote media download requests can race (when using\n # multiple media repos), so this may throw a violation constraint\n # exception. If it does we'll delete the newly downloaded file from\n # disk (as we're in the ctx manager).\n #\n # However: we've already called `finish()` so we may have also\n # written to the storage providers. 
This is preferable to the\n # alternative where we call `finish()` *after* this, where we could\n # end up having an entry in the DB but fail to write the files to\n # the storage providers.\n await self.store.store_cached_remote_media(\n origin=server_name,\n media_id=media_id,\n media_type=media_type,\n time_now_ms=self.clock.time_msec(),\n upload_name=upload_name,\n media_length=length,\n filesystem_id=file_id,\n )\n\n logger.info(\"Stored remote media in file %r\", fname)\n\n media_info = {\n \"media_type\": media_type,\n \"media_length\": length,\n \"upload_name\": upload_name,\n \"created_ts\": time_now_ms,\n \"filesystem_id\": file_id,\n }\n\n return media_info", "def get_metadata(self, file_id):\n pass", "def get_metadata(session, url, filelist):\n metadata = {}\n # Loop over the Clowder dataset image ID list\n for clowder_img in filelist.json():\n # Get metadata for the image from Clowder\n response = clowder_file_metadata(session, url, clowder_img['id'])\n # Metadata from multiple extractors may be present\n for extractor in response.json():\n # Find the extractor called \"deprecatedapi\" which refers to the API used to upload metadata\n if \"user_id\" in extractor['agent']:\n # Save a few metadata elements for convenience\n camera_type = extractor['content']['camera_type']\n perspective = extractor['content']['perspective']\n rotation_angle = extractor['content']['rotation_angle']\n # Store the image ID for later use\n extractor['img_id'] = clowder_img['id']\n if camera_type not in metadata:\n metadata[camera_type] = {}\n if perspective not in metadata[camera_type]:\n metadata[camera_type][perspective] = {}\n metadata[camera_type][perspective][rotation_angle] = extractor\n\n return metadata", "def get_metadata(key=''):\n response, content = httplib2.Http().request(\n '%s/%s' % (METADATA_BASE_URL, key),\n headers={'Metadata-Flavor': 'Google'},\n method='GET',\n )\n if response['status'] == '404':\n raise NotFoundError(response, content)\n return content", "def metadata_get(self, endpoint_name=None, key=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/metadata/%s' % key, 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/metadata/%s' % (endpoint_name, key), 'GET')\n return body", "def safely_get_metadata_file(self, metadata_role, metadata_filepath,\n compressed_file_length,\n uncompressed_file_hashes, compression):\n\n def safely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__hard_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__check_hashes(metadata_file_object, uncompressed_file_hashes)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n safely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=True,\n compression=compression)", "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def get_metadata(\n self, scope, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.get_metadata.metadata['url']\n path_format_arguments = {\n 'scope': self._serialize.url(\"scope\", scope, 'str', skip_quote=True)\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n 
header_parameters['Accept'] = 'application/xml'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters, header_parameters)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n raise models.QueryFailureException(self._deserialize, response)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('str', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def read_metadata(\n filename: Union[Path, str], marker: str = \"---\", **kwargs: Any\n) -> Dict[str, Any]:\n return read_header(filename, marker, **kwargs)[0]", "def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)", "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)", "def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def download_meta(self):\n for f in self._manager.remote.list_contents(\".yml\"):\n self._manager.remote.download(f)", "async def _get_remote_media_impl(\n self, server_name: str, media_id: str\n ) -> Tuple[Optional[Responder], dict]:\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n\n # file_id is the ID we use to track the file locally. 
If we've already\n # seen the file then reuse the existing ID, otherwise generate a new\n # one.\n\n # If we have an entry in the DB, try and look for it\n if media_info:\n file_id = media_info[\"filesystem_id\"]\n file_info = FileInfo(server_name, file_id)\n\n if media_info[\"quarantined_by\"]:\n logger.info(\"Media is quarantined\")\n raise NotFoundError()\n\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n\n responder = await self.media_storage.fetch_media(file_info)\n if responder:\n return responder, media_info\n\n # Failed to find the file anywhere, lets download it.\n\n try:\n media_info = await self._download_remote_file(\n server_name,\n media_id,\n )\n except SynapseError:\n raise\n except Exception as e:\n # An exception may be because we downloaded media in another\n # process, so let's check if we magically have the media.\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n if not media_info:\n raise e\n\n file_id = media_info[\"filesystem_id\"]\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n file_info = FileInfo(server_name, file_id)\n\n # We generate thumbnails even if another process downloaded the media\n # as a) it's conceivable that the other download request dies before it\n # generates thumbnails, but mainly b) we want to be sure the thumbnails\n # have finished being generated before responding to the client,\n # otherwise they'll request thumbnails and get a 404 if they're not\n # ready yet.\n await self._generate_thumbnails(\n server_name, media_id, file_id, media_info[\"media_type\"]\n )\n\n responder = await self.media_storage.fetch_media(file_info)\n return responder, media_info", "def _metadata_get(self, path):\n fd = self.fs.open(path, \"r\")\n # TODO iterate instead of assuming file < 4MB\n read_bytes = self.fs.read(fd, 0, 4096 * 1024)\n self.fs.close(fd)\n if read_bytes:\n return json.loads(read_bytes.decode())\n else:\n return None", "def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata", "def load(self) -> dict:\n if not os.path.exists(self.file_path):\n logger.error('Could not find meta file {}'.format(self.file_path))\n raise Exception()\n with open(self.file_path, encoding='utf-8') as meta_file:\n return json.loads(meta_file.read())", "def sync_get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "def get_resource(remote, token_response=None):\n cached_resource = session.pop(\"cern_resource\", None)\n if cached_resource:\n return cached_resource\n\n url = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_USERINFO_URL\",\n OAUTHCLIENT_CERN_OPENID_USERINFO_URL,\n )\n response = remote.get(url)\n dict_response = get_dict_from_response(response)\n if token_response:\n decoding_params = current_app.config.get(\n 
\"OAUTHCLIENT_CERN_OPENID_JWT_TOKEN_DECODE_PARAMS\",\n OAUTHCLIENT_CERN_OPENID_JWT_TOKEN_DECODE_PARAMS,\n )\n token_data = decode(token_response[\"access_token\"], **decoding_params)\n dict_response.update(token_data)\n session[\"cern_resource\"] = dict_response\n return dict_response", "def metadata(self, path, list=True, file_limit=25000, hash=None,\n rev=None, include_deleted=False):\n path = \"/metadata/%s%s\" % (self.session.root, format_path(path))\n\n params = {'file_limit': file_limit,\n 'list': 'true',\n 'include_deleted': include_deleted,\n }\n\n if not list:\n params['list'] = 'false'\n if hash is not None:\n params['hash'] = hash\n if rev:\n params['rev'] = rev\n\n url, params, headers = self.request(path, params, method='GET')\n\n return self.rest_client.GET(url, headers)", "def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)", "def _get_remote_files(config):\n if \"cache\" in config:\n return config[\"cache\"]\n out = {}\n for project, folder in _remote_folders(config):\n out.update(_project_files(project, folder))\n return out", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def metadata(self) -> dict:\n\n try:\n with zipfile.ZipFile(self.file_path).open(\"metadata.json\") as meta:\n metadata = json.load(meta)\n\n if \"hit\" in metadata:\n threats = metadata[\"hit\"].get(\"threats\", [{}])\n\n display_name = threats[0].get(\"display_name\")\n uri_name = threats[0].get(\"uri_name\")\n alert_name = display_name or uri_name or \"Unknown Alert Name\"\n else:\n alert_name = \"No Alert\"\n\n return {\n \"hostname\": metadata[\"agent\"].get(\"sysinfo\", {\"hostname\": \"Unknown\"})[\n \"hostname\"\n ],\n \"agent_id\": metadata[\"agent\"][\"_id\"],\n \"alert_name\": alert_name,\n \"platform\": metadata[\"agent\"].get(\"sysinfo\", {\"platform\": \"Unknown\"})[\n \"platform\"\n ],\n \"domain\": metadata[\"agent\"].get(\"sysinfo\", {\"domain\": \"Unknown\"})[\"domain\"],\n \"controller_link\": f'{metadata[\"appliance_uri\"]}/hx/hosts/{metadata[\"agent\"][\"_id\"]}',\n }\n except Exception as e:\n logger.error(f\"Could not parse triage's metadata due to {e}\")\n return {}", "def get_metadata(self, source, graph):\n return self.server.get_metadata(source, self.graphs.get(graph))", "def get_remote(path, meta=None):\n if meta: # Don't look up the path, just use what's provided.\n if isinstance(meta, dropbox.files.FileMetadata):\n return RemoteFile(None, meta=meta)\n if isinstance(meta, dropbox.files.FolderMetadata):\n return RemoteFolder(None, meta=meta)\n\n path = normpath(path)\n if path == \"/\": # get_metadata on the root is not supported.\n return RemoteFolder(path)\n try:\n meta = execute(pdbox.dbx.files_get_metadata, path)\n except DropboxError:\n raise ValueError(\"%s could not be found\" % dbx_uri(path))\n if isinstance(meta, dropbox.files.DeletedMetadata):\n pdbox.debug(\"%s was recently 
deleted\" % dbx_uri(path))\n raise ValueError(\"%s could not be found\" % dbx_uri(path))\n if isinstance(meta, dropbox.files.FolderMetadata):\n return RemoteFolder(None, meta=meta)\n else:\n # This doesn't account for types other than FileMetadata but I don't\n # think that they can be returned here.\n return RemoteFile(None, meta=meta)", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def get_metadata(self, req):\n try:\n new_meta = {}\n metadata = {}\n # get metadata from request headers\n metadata.update(\n (key.lower(), value)\n for key, value in req.headers.iteritems()\n if key.lower() in HEADERS or\n is_sys_or_user_meta('container', key))\n for key, value in metadata.iteritems():\n if key == 'x-container-read':\n new_meta.update({'r-' : value})\n elif key == 'x-container-write':\n new_meta.update({'w-' : value})\n else:\n ser_key = key.split('-')[2]\n if ser_key == 'meta':\n\n #Supported a single word key till first '-' \n #in the entire metadata header as X-Container-Meta-A\n #new_key = '%s-%s' % ('m', key.split('-')[3])\n \n #SANCHIT: This supports multi-part key for metadata \n #such as X-Container-Meta-A-B-C\n new_key = '%s-%s' % ('m', key.split('-', 3)[-1])\n new_meta.update({new_key : value})\n elif ser_key == 'sysmeta':\n #new_key = '%s-%s' % ('sm', key.split('-')[3])\n new_key = '%s-%s' % ('sm', key.split('-', 3)[-1])\n new_meta.update({new_key : value})\n else:\n self.logger.debug('Expected metadata not found')\n return new_meta\n except Exception as err:\n self.logger.error(('get_metadata failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def gdrive_metadata(url: str, fetch_all=False) -> object:\n payload = _get_endpoint_payload()\n route = payload['route'] + '/metadata'\n params = dict(url=url)\n\n response = requests.get(\n route,\n headers=payload['headers'],\n params=params\n )\n\n metadata = response.json()\n # metadata = {meta[0]: meta[1] for meta in response.json()}\n\n meta_fields = [\n 'mimeType',\n 'fileExtension',\n 'lastModifyingUser',\n 'title',\n 'parents',\n 'fileSize',\n 'alternateLink',\n ]\n\n try:\n metadata['folder_id'] = (\n None if not metadata['parents']\n else metadata['parents'][0]['id']\n )\n except IndexError:\n raise('The file must reside in a folder that is shared with '\n '<my-bot>@<my-domain>.com.')\n\n if 'lastModifyingUser' in metadata:\n metadata['last_mod_by_email'] = (\n metadata['lastModifyingUser']['emailAddress']\n )\n del metadata['lastModifyingUser']\n\n if not fetch_all:\n metadata = {\n k: v\n for k, v in metadata.items()\n if k in meta_fields +\n ['folder_id', 'last_mod_by_email']\n }\n del metadata['parents']\n Metadata = namedtuple('MetaData', metadata.keys())\n return Metadata(**metadata)\n\n return metadata", "def get_metadata (self, name):\n return self.metadata.get(name)", "def metadata(self):\n if self._open is not None:\n 
self._init_metadata()\n return self._metadata[self._metadata_root]\n else:\n return None", "def METADATA(self) -> Dict[str, Any]:\n return self._metadata", "def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break", "def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def get_file_and_metadata(self, from_path, rev=None):\n file_res = self.get_file(from_path, rev)\n metadata = DropboxClient.__parse_metadata_as_dict(file_res)\n\n return file_res, metadata", "def read_remote_file(remote_command_executor, file_path):\n logging.info(f\"Retrieving remote file {file_path}\")\n result = remote_command_executor.run_remote_command(f\"cat {file_path}\")\n assert_that(result.failed).is_false()\n return result.stdout.strip()", "def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):\n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../../../nomad-meta-info/meta_info/nomad_meta_info/{}\".format(filename)))\n return loadJsonFile(path, dependencyLoader, extraArgsHandling, uri)", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def GetMetadata(self):\n return self.dict['meta']", "def get_metadata(self):\n return self._metadata", "def fetch_metadata (self, id):\n payload = {\n 'movieid': id,\n 'imageformat': 'jpg',\n '_': int(time())\n }\n response = self._session_get(component='metadata', params=payload, type='api')\n return self._process_response(response=response, component=self._get_api_url_for(component='metadata'))", "def _metadata(self) -> Dict[str, Any]:\n return self.__metadata", "def get_metadata(self):\n result = defaultdict(str)\n result.update(self.metadata)\n result['file_name'] = self.file_name\n return result", "def unsafely_get_metadata_file(self, metadata_role, metadata_filepath,\n 
compressed_file_length):\n\n def unsafely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__soft_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n unsafely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=False,\n compression=None)", "def _query(self, remote_filename):\n\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n return {'size': -1}\n response = self.http_client.get(self.metadata_url + 'nodes/' + file_id)\n response.raise_for_status()\n\n return {'size': response.json()['contentProperties']['size']}", "def get_metadata(self):\n # currently there is no metadata to send\n return {}", "def get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "def fetch(self, remote_id: str) -> dict:\n self._check_connection()\n return self._dispatch_json(\"post\", self._fetch_url(remote_id))", "def get_metadata(self) -> DeepDict:\n metadata = get_default_nwbfile_metadata()\n for interface in self.data_interface_objects.values():\n interface_metadata = interface.get_metadata()\n metadata = dict_deep_update(metadata, interface_metadata)\n return metadata", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def _remote_or_local(fn, branch='master', remote=False):\n if remote:\n url = (\n 'https://raw.githubusercontent.com/bioconda/bioconda-recipes/'\n '{branch}/{path}'.format(branch=branch, path=fn)\n )\n print('Using config file {}'.format(url))\n with conda.fetch.TmpDownload(url) as f:\n cfg = yaml.load(open(f))\n else:\n cfg = yaml.load(open(os.path.join(HERE, fn)))\n return cfg", "def get_metadata_by_id(book_id, meta_fp):\n book_details = utility.get_book_metadata(book_id, meta_fp)\n #print(book_details)\n title = book_details['title_lat']\n title_ar = book_details['title_ar']\n author_date = book_details['date']\n author_ar = book_details['author_ar']\n author_eng = book_details['author_lat']\n #print(title)\n\n return book_details", "def metadata(self, server_id):\n\n server = self.compute.servers.get(server_id)\n\n if server.tenant_id not in Scope.projects():\n pecan.abort(403, 'unauthorized access a resource outside of your domain')\n\n # Required by Fog, but oddly not in novaclient.v2.servers\n return {u'metadata': server.metadata}", "def get_ckan_metadata(self, force_download=False):\n # Simplify the metadata structure to insulate from CKAN API changes? Only need resource name or dataset title?\n # No - more explicit if done in accessor methods instead, e.g. 
`self.get_resource_metadata`\n if not self._metadata and force_download is False:\n self.load_user_metadata()\n\n if not self._metadata or \\\n force_download or \\\n (self._metadata_last_updated + datetime.timedelta(seconds=self.check_for_updates_every) <\n datetime.datetime.utcnow()):\n try:\n # This returns a list of datasets, and within each there is a 'resources' key with a list of resources.\n metadata = self.api.action.package_search(include_private=True)['results']\n # `api.current_package_list_with_resources` gets public resources only, not private ones.\n except requests.exceptions.ConnectionError as e:\n error = \\\n 'Unable to reach CKAN and no local copy of CKAN metadata found at %s' % self.metadata_cache_filename\n logging.error(error)\n raise RuntimeError('%s\\n%s' % (error, str(e)))\n\n self._metadata_last_updated = datetime.datetime.utcnow()\n\n self._metadata = dict()\n for dataset in metadata:\n for resource in dataset['resources']:\n # After unpickling, `(meta['resource_a']['dataset'] is meta['resource_b']['dataset'])`\n resource['dataset'] = dataset\n self._metadata[resource['id']] = resource\n\n # self._metadata = {resource_id: {resource}} where resource['dataset'] = {dataset} for all CKAN resources\n\n if not self._in_context_block:\n self.save_user_metadata()\n return self._metadata", "def test_get_server_metadata_item(self):\n metadata_response = self.servers_client.get_server_metadata_item(\n self.server.id, 'meta_key_1')\n metadata = metadata_response.entity\n self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')", "def metadata(self) -> dict:\n meta = {}\n meta['filename'] = self.filename\n meta['label'] = self.label\n meta['url'] = self.url\n\n return meta", "def get_metadata_file(self, file_in_cache):\n return re.sub(r'\\.tar$', '.json', file_in_cache)", "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "def download_metadata(\n url='http://www.ipea.gov.br/geobr/metadata/metadata_gpkg.csv'):\n\n try:\n return pd.read_csv(url)\n\n except HTTPError:\n raise Exception('Metadata file not found. 
\\\n Please report to https://github.com/ipeaGIT/geobr/issues')", "def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }", "def cache_file_metadata(self, filenames):\n file_metadata = {}\n for fn in filenames:\n metadata = parse(fn)\n metadata['fn'] = fn[:-4]\n file_metadata_summary = self.gen_file_metadata_summary(metadata)\n file_metadata[file_metadata_summary] = metadata\n return file_metadata", "def _get_file_info(self, photo_id):\n params = {\n 'method': 'flickr.photos.getInfo',\n 'photo_id': photo_id\n }\n response = self.oauth_session.get(self.API_ENDPOINT,\n params=params).json()\n if response['stat'] == 'fail':\n raise FileNotFound(\"Can't find '%s'\" % photo_id)\n return response", "def metadata(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"metadata\")", "def get_metadata(self, keys, value, version=None):\n path = make_metadata_path(keys, value, version)\n url = '{root}/{path}'.format(root=self._root, path=path)\n\n try:\n r = requests.get(url)\n text = r.text\n\n self._write_cache(path, text)\n except (requests.ConnectionError, requests.Timeout):\n text = self._read_cache(path)\n\n try:\n data = yaml.load(text)\n except yaml.YAMLError:\n raise ValueError('Failed to read or parse YAML at %s' % url)\n\n return data", "def get_image_data (file_path, metadata_required):\n lookup = ImageLookup()\n return lookup.lookup_by_filename(file_path, metadata_required=False)", "def get_metadata(self, loadbalancer, node=None, raw=False):\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata\" % (\n utils.get_id(loadbalancer), utils.get_id(node))\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n meta = body.get(\"metadata\", [])\n if raw:\n return meta\n ret = dict([(itm[\"key\"], itm[\"value\"]) for itm in meta])\n return ret", "def download(self):\n #the link has some meta data in it that we need to get a hold of so we cant use metaData.getLink()\n data = None\n\n for link in self.metaData.jsonObj['links']:\n if link.get('rel') == \"content\":\n data = link\n\n assert data is not None\n\n response = self._adapter.getRequest(data['href'], self._baseHeader)\n return {\"filename\": data['title'], \"mime\": data['type'], \"binary\": response['Body'] }", "def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n 
return True", "def get_metadata(self):\n self.log = jsonLogs()\n log_filename = JSON_DIR + '/' + MEASUREMENTS_REPO + '/' + self.filename\n \n # keeping the first metadata read in the file\n # TODO : handling metadata changes during experiment ?\n meta = self.log.read_metadata(log_filename)\n return(meta[0])", "def get_metadata():\n meta_data = {}\n keys = ['ami-id', 'placement/availability-zone', 'instance-id',\n 'instance-type', 'local-hostname', 'local-ipv4',\n 'public-hostname', 'public-ipv4', 'security-groups', 'user-data']\n for key in keys:\n url = \"http://169.254.169.254/latest/meta-data/\" + key\n meta_data[key] = urllib.urlopen(url).read()\n meta_data['security-groups'] = meta_data['security-groups'].split('\\n')\n return meta_data" ]
[ "0.743946", "0.6923274", "0.638175", "0.6271817", "0.6228844", "0.62113637", "0.6208526", "0.6199531", "0.6125181", "0.61230326", "0.6048329", "0.5846718", "0.5833795", "0.58308524", "0.5800759", "0.57391906", "0.5727758", "0.5712721", "0.5698382", "0.5687096", "0.5684791", "0.56756747", "0.5666228", "0.5661433", "0.5652427", "0.56100446", "0.55873746", "0.5584609", "0.55752695", "0.5550801", "0.5533375", "0.55057037", "0.55052763", "0.55041367", "0.55017215", "0.54907745", "0.5480078", "0.5478003", "0.547565", "0.54672647", "0.5458347", "0.5432208", "0.54129267", "0.5412355", "0.5407058", "0.5397158", "0.5389746", "0.5385931", "0.5381769", "0.53691834", "0.5365826", "0.53614473", "0.53498006", "0.5341544", "0.53398734", "0.532733", "0.53230405", "0.53221256", "0.5318997", "0.53152496", "0.52997553", "0.5293531", "0.528885", "0.52792555", "0.52757794", "0.52674043", "0.5260278", "0.52535087", "0.52528006", "0.52482927", "0.5220902", "0.5218003", "0.52072424", "0.5193928", "0.51828104", "0.5162431", "0.51623183", "0.51581967", "0.5156631", "0.5156568", "0.5153896", "0.51211387", "0.51139796", "0.5112638", "0.5109408", "0.51093554", "0.51084435", "0.50999945", "0.5098454", "0.5096163", "0.5095949", "0.5088166", "0.50867355", "0.5085953", "0.50842744", "0.50791466", "0.5078765", "0.50784385", "0.5072136", "0.505405" ]
0.79775
0
Fetches the local metadata file LOCAL_METADATA_FILE and returns the equivalent metadata dict.
def _fetch_current_local_metadata():
    if not os.path.exists(LOCAL_METADATA_FILE):
        return {}

    with open(LOCAL_METADATA_FILE) as f:
        return json.loads(f.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata", "def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA", "def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}", "def get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def read_local_metadata(self, fld: str) -> Optional[str]:\n return self.read_metadata(self.get_obj_label(), fld)", "def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata", "async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):", "def read_metadata(\n filename: Union[Path, str], marker: str = \"---\", **kwargs: Any\n) -> Dict[str, Any]:\n return read_header(filename, marker, **kwargs)[0]", "def metadata(self):\n return parse_metadata(self.metadata_path())", "def read_data_from_file(self, local_lookml_project_path: str) -> dict:\n logger.info(\n \"Parsing data from local LookML file {}\".format(\n self.lookml_file_name_and_path\n )\n )\n with open(\n utils.assemble_path(\n local_lookml_project_path, self.lookml_file_name_and_path\n ),\n \"r\",\n ) as lookml_file:\n return lkml.load(lookml_file)", "def _build_local_metadata_file(files, home=''):\n filepaths = [os.path.join(home, f) for f in files]\n shas = [_get_sha_metadata(f) for f in filepaths]\n metadata = dict(zip(files, shas))\n\n with open(LOCAL_METADATA_FILE, 'w') as f:\n f.write(json.dumps(metadata))", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. 
format(pkg_name, url, e.msg))\n exit(-1)", "def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()", "def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):\n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../../../nomad-meta-info/meta_info/nomad_meta_info/{}\".format(filename)))\n return loadJsonFile(path, dependencyLoader, extraArgsHandling, uri)", "def metadata_file(self):\n return self._metadata_file", "def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents", "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def _metadata_get(self, path):\n fd = self.fs.open(path, \"r\")\n # TODO iterate instead of assuming file < 4MB\n read_bytes = self.fs.read(fd, 0, 4096 * 1024)\n self.fs.close(fd)\n if read_bytes:\n return json.loads(read_bytes.decode())\n else:\n return None", "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def get_metadata (self, name):\n return self.metadata.get(name)", "def metadata(self):\n if self._open is not None:\n self._init_metadata()\n return self._metadata[self._metadata_root]\n else:\n return None", "def getlocalconfig(projroot: Path) -> Dict[str, Any]:\n localconfig: Dict[str, Any]\n try:\n with open(Path(projroot, 'config/localconfig.json')) as infile:\n localconfig = json.loads(infile.read())\n except FileNotFoundError:\n localconfig = {}\n return localconfig", "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def read_local_file(filename):\n import fsspec\n fs = fsspec.filesystem('file')\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data", "def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = json.load(metadata_file)\n return metadata", "def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = json.load(metadata_file)\n return metadata", "def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = 
json.load(metadata_file)\n return metadata", "def load(self) -> dict:\n if not os.path.exists(self.file_path):\n logger.error('Could not find meta file {}'.format(self.file_path))\n raise Exception()\n with open(self.file_path, encoding='utf-8') as meta_file:\n return json.loads(meta_file.read())", "def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret", "def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()", "def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata", "def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata", "def getMetadata(samweb, filenameorid, locations=False):\n params = {}\n if locations: params['locations'] = True\n response = samweb.getURL(_make_file_path(filenameorid) + '/metadata', params=params)\n return convert_from_unicode(response.json())", "def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))", "def sync_get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "async def fetch_metadata(self, route: str):\n data = await 
self.http.get_metadata(route)\n return data", "def read_local_file(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def get_metadata(self, file_id):\n pass", "def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)", "def _load_metadata(self, datapath):\n try:\n metadata = Metadata(datapath)\n return metadata\n except RuntimeError:\n print('Metadata does not exist. Please double check your datapath.')\n return None", "def get_metadata(self):\n result = defaultdict(str)\n result.update(self.metadata)\n result['file_name'] = self.file_name\n return result", "def _load_spec_filename_additional_info(spec_filename):\n import json\n\n try:\n additional_info_filename = _get_additional_info_filename(spec_filename)\n\n with open(additional_info_filename, \"r\") as stream:\n source_to_mtime = json.load(stream)\n return source_to_mtime\n except:\n log.exception(\"Unable to load source mtimes from: %s\", additional_info_filename)\n return {}", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta", "def get_metadata(\n self, scope, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.get_metadata.metadata['url']\n path_format_arguments = {\n 'scope': self._serialize.url(\"scope\", scope, 'str', skip_quote=True)\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/xml'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters, header_parameters)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n raise models.QueryFailureException(self._deserialize, response)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('str', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def getArticleMeta(docId):\n artMeta = None\n 
haveMedline = pubConf.mayResolveTextDir('medline')\n if haveMedline and not SKIPLOCALMEDLINE:\n artMeta = readLocalMedline(docId)\n if artMeta == None:\n artMeta = downloadPubmedMeta(docId)\n return artMeta", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "def read_file(file_path):\n raw_metadata = \"\"\n content = \"\"\n try:\n with open(file_path, 'rb') as f:\n for line in f:\n if line.strip() == '---':\n break\n raw_metadata += line\n for line in f:\n content += line\n except IOError as ex:\n log.error('Open file failed: {0}'.format(ex))\n try:\n metadata = json.loads(raw_metadata)\n except ValueError:\n log.error('No JSON object found in file {0}'.format(file_path))\n metadata = {}\n return metadata, content", "def load_metadata(lines):\n if lines is not None:\n return MetadataMap.from_file(lines)\n\n return None", "def _file_dict(self, fn_):\n if not os.path.isfile(fn_):\n err = \"The referenced file, {} is not available.\".format(fn_)\n sys.stderr.write(err + \"\\n\")\n sys.exit(42)\n with salt.utils.files.fopen(fn_, \"r\") as fp_:\n data = fp_.read()\n return {fn_: data}", "def cache_file_metadata(self, filenames):\n file_metadata = {}\n for fn in filenames:\n metadata = parse(fn)\n metadata['fn'] = fn[:-4]\n file_metadata_summary = self.gen_file_metadata_summary(metadata)\n file_metadata[file_metadata_summary] = metadata\n return file_metadata", "def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def get_metadata():\n\n module = __name__.split('.', 1)\n\n pkg = pkg_resources.get_distribution(module[0])\n meta = {\n 'Name': None,\n 'Version': None,\n 'Summary': None,\n 'Home-page': None,\n 'Author': None,\n 'Author-email': None,\n 'License': None,\n }\n\n for line in pkg.get_metadata_lines(\"PKG-INFO\"):\n for par in meta:\n if line.startswith(par + \":\"):\n _, value = line.split(\": \", 1)\n meta[par] = value\n\n return meta", "def _get_metadata(self, request):\n path = self._get_filesystem_path(request)\n try:\n with open(path, \"rb\") as f:\n yield from read_script_metadata(f, js_meta_re)\n except OSError:\n raise HTTPException(404)", "def meta(self):\n if not hasattr(self, 
'_meta'):\n self._meta = {}\n meta_fn = os.path.join(self.path, 'meta.json')\n if os.path.exists(meta_fn):\n meta_file = open(meta_fn)\n try:\n self._meta.update(json.load(meta_file))\n finally:\n meta_file.close()\n return self._meta", "def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)", "def metadata(self, truncate: bool = False) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\tfilename = self.filename\n\t\t\tmd_filename = \"%s.file_md.json.gzip\" % (self.file_path)\n\t\t\tmd_mod_filename = \"%s.file_md.lastmod.gzip\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding metada (stored as %s.file_md.json.gzip)\", filename)\n\n\t\t\tlast_mod = self.last_modified()\n\t\t\tif os.path.isfile(md_filename):\n\t\t\t\tlogging.debug(\" Found previously extracted JSON file\")\n\t\t\t\tif truncate:\n\t\t\t\t\tself.clear_metadata()\n\t\t\t\telse:\n\t\t\t\t\tmd_json = load_gzipped_json_string(md_filename)\n\t\t\t\t\tmd_mod = load_gzipped_json_string(md_mod_filename)\n\t\t\t\t\tmd_parsed = json.loads(md_json)\n\t\t\t\t\t# check if cached metadata is up to date and\n\t\t\t\t\t# points to correct project folder and filename\n\t\t\t\t\t# if so return cache, otherwise clear it\n\t\t\t\t\tlogging.debug(\" md_mod: %s\", md_mod)\n\t\t\t\t\tlogging.debug(\" last_mod: %s\", last_mod)\n\t\t\t\t\tif md_mod != last_mod or md_parsed.project != self.project or md_parsed.filename != filename:\n\t\t\t\t\t\tself.clear_metadata()\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogging.debug(\" Cache up to date\")\n\t\t\t\t\t\treturn (md_json, last_mod)\n\n\t\t\tds = self.ds\n\t\t\tattrs = ds.attrs.keys()\n\t\t\ttitle = filename if \"title\" not in attrs else ds.attrs.title\n\t\t\tdescr = \"\" if \"description\" not in attrs else ds.attrs.description\n\t\t\turl = \"\" if \"url\" not in attrs else ds.attrs.url\n\t\t\tdoi = \"\" if \"doi\" not in attrs else ds.attrs.doi\n\t\t\t# converts compact ISO timestamps to human-readable ones.\n\t\t\t# Example: \"20180130T155028.262458Z\" becomes \"2018/01/13 15:50\"\n\t\t\tlast_mod_humanreadable = \"{}/{}/{} {}:{}:{}\".format(last_mod[0:4], last_mod[4:6], last_mod[6:8], last_mod[9:11], last_mod[11:13], last_mod[13:15])\n\t\t\t# default to last_modified 
for older files that do\n\t\t\t# not have a creation_date field\n\t\t\tcreation_date = last_mod_humanreadable if \"creation_date\" not in attrs else ds.attrs.creation_date\n\t\t\t# get arbitrary col/row attribute, they are all lists\n\t\t\t# of equal size. The length equals total cells/genes\n\t\t\ttotal_cells = ds.shape[1]\n\t\t\ttotal_genes = ds.shape[0]\n\n\t\t\tmd_data = {\n\t\t\t\t\"project\": self.project,\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"dataset\": filename,\n\t\t\t\t\"title\": title,\n\t\t\t\t\"description\": descr,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"doi\": doi,\n\t\t\t\t\"creationDate\": creation_date,\n\t\t\t\t\"lastModified\": last_mod_humanreadable,\n\t\t\t\t\"totalCells\": total_cells,\n\t\t\t\t\"totalGenes\": total_genes,\n\t\t\t}\n\t\t\tlogging.debug(\" Saving extracted metadata as JSON file\")\n\t\t\tmd_json = json.dumps(md_data)\n\t\t\tsave_gzipped_json_string(md_filename, md_json)\n\t\t\tsave_gzipped_json_string(md_mod_filename, json.dumps(last_mod))\n\t\t\treturn (md_json, last_mod)\n\t\treturn None", "def metadata(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"metadata\")", "def _read_metadata(self):\n self._wait_for_read_with_timeout(self.metadata_read_fd)\n flat_json = os.read(self.metadata_read_fd, MAX_METADATA_SIZE)\n os.close(self.metadata_read_fd)\n try:\n return json.loads(flat_json)\n except ValueError:\n self.logger.exception('Failed to load metadata from json')\n raise StorletRuntimeException('Got invalid format about metadata')", "def get_metadata_from_json(sample_metadata_path):\n\n try:\n return normalize_metadata(json.loads(open(sample_metadata_path).read()))\n except IOError:\n logging.exception('get_metadata')\n return {}", "def gdrive_metadata(url: str, fetch_all=False) -> object:\n payload = _get_endpoint_payload()\n route = payload['route'] + '/metadata'\n params = dict(url=url)\n\n response = requests.get(\n route,\n headers=payload['headers'],\n params=params\n )\n\n metadata = response.json()\n # metadata = {meta[0]: meta[1] for meta in response.json()}\n\n meta_fields = [\n 'mimeType',\n 'fileExtension',\n 'lastModifyingUser',\n 'title',\n 'parents',\n 'fileSize',\n 'alternateLink',\n ]\n\n try:\n metadata['folder_id'] = (\n None if not metadata['parents']\n else metadata['parents'][0]['id']\n )\n except IndexError:\n raise('The file must reside in a folder that is shared with '\n '<my-bot>@<my-domain>.com.')\n\n if 'lastModifyingUser' in metadata:\n metadata['last_mod_by_email'] = (\n metadata['lastModifyingUser']['emailAddress']\n )\n del metadata['lastModifyingUser']\n\n if not fetch_all:\n metadata = {\n k: v\n for k, v in metadata.items()\n if k in meta_fields +\n ['folder_id', 'last_mod_by_email']\n }\n del metadata['parents']\n Metadata = namedtuple('MetaData', metadata.keys())\n return Metadata(**metadata)\n\n return metadata", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == 
remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break", "def METADATA(self) -> Dict[str, Any]:\n return self._metadata", "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "def getFileMetadata( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileMetadata: Attempting to obtain metadata for %s files.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n pfn = url\n if url.find( self.url ) == 0:\n pfn = url[ ( len( self.url ) ):]\n res = serviceClient.getMetadata( pfn )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'File':\n gLogger.debug( \"DIPStorage.getFileMetadata: Successfully obtained metadata for %s.\" % url )\n successful[url] = res['Value']\n else:\n failed[url] = 'Supplied path is not a file'\n else:\n failed[url] = 'File does not exist'\n else:\n gLogger.error( \"DIPStorage.getFileMetadata: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def metadata_path(self):\n return os.path.join(self.path, 'metadata.txt')", "def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }", "def get_metadata(self):\n self.log = jsonLogs()\n log_filename = JSON_DIR + '/' + MEASUREMENTS_REPO + '/' + self.filename\n \n # keeping the first metadata read in the file\n # TODO : handling metadata changes during experiment ?\n meta = self.log.read_metadata(log_filename)\n return(meta[0])", "def _LocalDataPath(local_file):\n return data.ResourcePath(local_file)", "def GetMetadata(self):\n return self.dict['meta']", "def fetchPRIDEProject(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:\n \n parsedInputURL = parse.urlparse(remote_file)\n projectId = parsedInputURL.path\n metadata_url = parse.urljoin(PRIDE_PROJECTS_REST, projectId)\n \n metadata_array = [\n URIWithMetadata(remote_file, {'fetched': metadata_url})\n ]\n metadata = None\n try:\n metaio = io.BytesIO()\n _ , metametaio = fetchClassicURL(metadata_url, metaio)\n metadata = json.loads(metaio.getvalue().decode('utf-8'))\n metadata_array.extend(metametaio)\n except urllib.error.HTTPError as he:\n raise WFException(\"Error fetching PRIDE metadata for {} : {} {}\".format(projectId, he.code, he.reason))\n \n try:\n pride_project_url = metadata['_links']['datasetFtpUrl']['href']\n except Exception as e:\n raise WFException(\"Error processing PRIDE project metadata for {} : {}\".format(remote_file, e))\n \n return pride_project_url, metadata_array", "def get_metadata_by_id(book_id, meta_fp):\n book_details = utility.get_book_metadata(book_id, meta_fp)\n #print(book_details)\n title = book_details['title_lat']\n title_ar = book_details['title_ar']\n author_date = book_details['date']\n author_ar = book_details['author_ar']\n 
author_eng = book_details['author_lat']\n #print(title)\n\n return book_details", "def _metadata(self) -> Dict[str, Any]:\n return self.__metadata", "def metadata(self) -> Optional[pulumi.Input['SyntheticsPrivateLocationMetadataArgs']]:\n return pulumi.get(self, \"metadata\")", "def metadata(self) -> Optional[pulumi.Input['SyntheticsPrivateLocationMetadataArgs']]:\n return pulumi.get(self, \"metadata\")", "def _load_metadata(self, result_dir: Path) -> str:\n id_path = result_dir / SerializationAttributes.ID_FILENAME\n with open(id_path, 'r') as f:\n self.id = json.load(f)[SerializationAttributes.ID_KEY]\n\n version_path = result_dir / SerializationAttributes.VERSION_FILENAME\n with open(version_path, 'r') as f:\n self.version = json.load(f)[SerializationAttributes.VERSION_KEY]", "def retrieve_metadata(self, _vx):\n\t\tif (_vx):\n\t\t\tvx_files = _vx.get_files()\n\t\t\tif (len(vx_files) == 0):\n\t\t\t\treturn {}\n\t\t\telif (len(vx_files) == 1):\n\t\t\t\treturn self.retrieve_metadata_single_file(vx_files[0])\n\t\t\telse:\n\t\t\t\treturn self.retrieve_metadata_multiple_files(vx_files)\n\t\telse:\n\t\t\traise NullOrEmptyArgumentException()", "def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata", "def readLocalMedline(pmid):\n logging.debug('Trying PMID lookup with local medline copy')\n medlineDb = pubStore.getArtDbPath('medline')\n if not isfile(medlineDb):\n logging.debug('%s does not exist, no local medline lookups' % medlineDb)\n return\n else:\n con, cur = maxTables.openSqlite(medlineDb)\n con.row_factory = sqlite3.Row\n cur = con.cursor()\n rows = None\n tryCount = 60\n while rows == None and tryCount > 0:\n try:\n rows = list(cur.execute('SELECT * from articles where pmid=?', (pmid,)))\n except sqlite3.OperationalError:\n logging.info('Database is locked, waiting for 60 secs')\n time.sleep(60)\n tryCount -= 1\n\n if rows == None:\n raise Exception('Medline database was locked for more than 60 minutes')\n if len(rows) == 0:\n logging.info('No info in local medline for PMID %s' % pmid)\n return\n lastRow = rows[-1]\n result = {}\n for key, val in zip(lastRow.keys(), lastRow):\n result[key] = unicode(val)\n\n result['source'] = ''\n result['origFile'] = ''\n return result", "def safely_get_metadata_file(self, metadata_role, metadata_filepath,\n compressed_file_length,\n uncompressed_file_hashes, compression):\n\n def safely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__hard_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__check_hashes(metadata_file_object, uncompressed_file_hashes)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n safely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=True,\n compression=compression)", "def test_create_local_metadata(self):\n local_media = {\n 'path': 'test_mp4_short.mp4',\n 'title': 'Test media title',\n 'description': 'Test media description',\n }\n\n media_filename = \"%s/%s\" % 
(settings.get('base', 'path.local.media'), local_media['path'])\n\n self.model = Media.create(\n client=self.client,\n media_filename=media_filename,\n title=local_media['title'],\n description=local_media['description'],\n )\n\n media_item = Media.get(client=self.client, uuid=self.model.uuid)\n assert media_item.title == local_media['title']\n assert media_item.description == local_media['description']\n #TODO: assert creator is owner", "def _fake_meta(self):\n resp = tju.load_file(UPLOADED_FILE, self.adpt)\n return vf.File.wrap(resp)", "def get_metadata(self, source, graph):\n return self.server.get_metadata(source, self.graphs.get(graph))", "def get_metadata(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_metadata(body)", "def get_metadata(self):\n return self._metadata", "def read_metadata():\n subdirs = next(os.walk(os.getcwd()))[1]\n\n for subdir in subdirs:\n if '__init__.py' in os.listdir(subdir):\n print('Found package:', subdir)\n break\n else:\n raise SetupError('No package found! Did you forget an __init__.py?')\n\n metadata = {'name': subdir, 'packages': [subdir]}\n relevant_keys = {'__version__': 'version',\n '__author__': 'author',\n '__email__': 'author_email',\n '__license__': 'license'}\n\n m = open(os.path.join(subdir), '__init__.py')\n first_line = next(m)\n metadata['description'] = first_line.strip(). strip('\\n \"')\n for line in m:\n if len(relevant_keys) == 0:\n break\n for key in relevant_keys:\n if line.startswith(key):\n break\n else:\n continue\n\n metadatum_name = relevant_keys.pop(key)\n metadata[metadatum_name] = line.split('=', 1)[1].strip('\\n\\'\\\" ')\n\n if relevant_keys:\n print('FYI; You didn\\'t put the following info in your __init__.py:')\n print(' ', ', '.join(relevant_keys))\n return metadata", "def get_metadata_file(self, file_in_cache):\n return re.sub(r'\\.tar$', '.json', file_in_cache)", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def get_metadata_from(directory):\n for filename in ['metadata.yml', 'metadata.json']:\n try:\n f = open(os.path.join(directory, filename))\n break\n except IOError:\n pass\n else:\n # tried all files, give up and return empty\n return {}\n return yaml.load(f.read())", "def _localfile(name):\n return os.path.abspath(resource_filename(__name__, name))", "def get_metadata(key=''):\n response, content = httplib2.Http().request(\n '%s/%s' % (METADATA_BASE_URL, key),\n headers={'Metadata-Flavor': 'Google'},\n method='GET',\n )\n if response['status'] == '404':\n raise NotFoundError(response, content)\n return content", "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def load(self):\n if not path.exists('service.json'):\n raise UserError('service.json not found')\n with open('service.json') as f:\n try:\n metadata = json.loads(f.read())\n except Exception as e:\n raise UserError('malformed service.json - ' + str(e))\n return metadata", "def get_file(self, key, local_file):\n\t\t\n\t\ttry:\n\t\t\tfh = open(local_file, 'wb')\n\t\t\tfh.write(self.s3.get(self.bucket, key).object.data)\n\t\t\tfh.close()\n\t\texcept:\n\t\t\treturn False", "def GetFile(localFilename):\n\tif os.path.isabs(localFilename):\n\t\tabsFilename = localFilename\n\t\tif os.path.isfile(absFilename):\n\t\t\treturn absFilename\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\tglobal 
resourcePaths\n\t\tfor resourceDir in resourcePaths:\n\t\t\tabsFilename = os.path.join(resourceDir, localFilename)\n\t\t\tif os.path.isfile(absFilename):\n\t\t\t\treturn absFilename\n\t\treturn None" ]
[ "0.68818545", "0.6417708", "0.63122565", "0.62646693", "0.62405384", "0.62042487", "0.6080648", "0.6028128", "0.6001734", "0.59823257", "0.5974274", "0.5929091", "0.59028894", "0.5869242", "0.5845099", "0.5817974", "0.5808917", "0.57907844", "0.5776373", "0.5760592", "0.57372004", "0.5726373", "0.5669434", "0.56670403", "0.56494546", "0.56494546", "0.56494546", "0.56370044", "0.5634967", "0.5623329", "0.56156355", "0.56090015", "0.5581827", "0.5580535", "0.55677295", "0.5540902", "0.55371916", "0.55331117", "0.55145985", "0.55061394", "0.54802734", "0.5467634", "0.54578424", "0.54517984", "0.5446821", "0.54373586", "0.54319996", "0.5429833", "0.538989", "0.5388361", "0.5386461", "0.5373551", "0.53652745", "0.53649616", "0.5340621", "0.53406155", "0.5336637", "0.53330076", "0.5328842", "0.5326326", "0.5326305", "0.53204346", "0.5315117", "0.53142816", "0.53139526", "0.5312575", "0.53100884", "0.53069884", "0.5304242", "0.52959836", "0.5290984", "0.52710885", "0.5243239", "0.523383", "0.5232941", "0.5232644", "0.5218908", "0.52172667", "0.51996714", "0.51996714", "0.5198958", "0.5197788", "0.51887524", "0.5186825", "0.5185999", "0.5184036", "0.51787287", "0.517702", "0.5170628", "0.51701736", "0.51701504", "0.5164518", "0.5162933", "0.5147718", "0.51396143", "0.51374847", "0.5137073", "0.513536", "0.51321036", "0.51262236" ]
0.84643173
0
Based on a comparison of the local and remote metadata dictionaries, filter files to retain only those that do not exist in the remote metadata dict or that have the same filename but different content. Also filter the resulting file list based on IGNORE_DIRS and IGNORE_EXTENSIONS.
def _filter_file_list(files, local_metadata, remote_metadata):
    def _is_tracked(filename, metadata):
        """
        Is the filename tracked in the remote metadata dict. The file may be
        not even locally tracked yet
        """
        current_local_sha = local_metadata.get(filename, None)
        current_remote_sha = metadata.get(filename, None)
        return current_local_sha is not None \
            and current_remote_sha is not None \
            and current_local_sha == current_remote_sha

    def _is_inside_ignored_dir(filename):
        """
        Is the filename inside any of the IGNORE_DIRS list
        """
        ignore_dirs = ['./' + x for x in IGNORE_DIRS]
        return any([filename.startswith(x) for x in ignore_dirs])

    def _has_ignored_extension(filename):
        return any([ext in IGNORE_EXTENSIONS for ext in filename.split('.')[1:]])

    files = [f for f in files
             if not _is_inside_ignored_dir(f)
             and not _has_ignored_extension(f)
             and not _is_tracked(f, remote_metadata)]
    return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_raw_remote_files(self, raw_remote_files):\n return [xfile for xfile in raw_remote_files if not xfile.exists()]", "def find_remote_files(product, date, channel, fs, mesoregion=None):\n if 'L1' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*M[36]C'.replace(\"{mesoregion}\", mesoregion) + str(channel) + '*.nc')]\n elif 'L2' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*'.replace(\"{mesoregion}\", mesoregion) + str(product) + '*M[36]' + '*.nc')]\n\n files = [y for x in files for y in x]\n\n return files", "def _get_remote_files(config):\n if \"cache\" in config:\n return config[\"cache\"]\n out = {}\n for project, folder in _remote_folders(config):\n out.update(_project_files(project, folder))\n return out", "def pre_combine_inventory(self, target, src_files):\n config = self.config\n\n self.stderr.write(f\"Layers detected: {self.layer_names_all}\\n\")\n if self.layer_names_all != self.layer_names_used:\n self.stderr.write(f\"Layers after filter: {self.layer_names_used}\\n\")\n\n # Convert src_files to a set to speed up\n src_files = set(src_files)\n self.target_extra_files = set()\n for (root, dirs, files) in relwalk(target, followlinks=config.follow_symlink):\n for fn in files:\n tgt_file = os.path.join(root, fn)\n if tgt_file not in src_files:\n if fn == CONTROLLED_DIR_MARKER or config.block_files.search(fn):\n continue # pragma: no cover (peephole optimization)\n self.target_extra_files.add(tgt_file)\n return src_files", "def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):\n\n if not allow_missing_files:\n return file_names\n\n if split_by_client:\n # filter out missing files and empty clients\n existing_files = [\n [f for f in client_files if os.path.exists(f)] for client_files in file_names]\n existing_files = [\n client_files for client_files in existing_files if client_files]\n else:\n # filter out missing files\n existing_files = [f for f in file_names if os.path.exists(f)]\n return existing_files", "def checkArchFiles(self, key = None, archName = None, verbose = False):\n\n # Set archive from passed args.\n if key is not None and archName is None:\n archName = self.nbDetails[key]['archName']\n elif key is None and archName is None:\n print('Skipping archive checks, no archive supplied.')\n return None\n\n # Check if file exists on remote\n # Note this returns a list\n archExists = self.checkFiles(archName)\n\n if archExists[0]:\n # Get arch contents from remote via Fabric.\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"python -m zipfile -l {archName}\", hide = True)\n\n # Compare with local lsit\n # archFiles = result.stdout.splitlines()\n # localList = self.nbDetails[key]['pkgFileList'][5:]\n # fileComp = list(set(localList) - set(archFiles)) # Compare lists as sets\n archFiles = [(line.split()[0]) for line in result.stdout.splitlines()[1:]] # Keep file names only (drop header, and file properties)\n localList = self.nbDetails[key]['pkgFileList']\n\n # Test & set relative paths for local files in archive\n localListRel = []\n for fileIn in localList:\n try:\n localListRel.append(Path(fileIn).relative_to(self.hostDefn[self.host]['nbProcDir']).as_posix())\n except ValueError:\n 
localListRel.append(Path(fileIn).name) # In this case just take file name, will go in archive root\n\n fileComp = list(set(localListRel) - set(archFiles)) # Compare lists as sets\n\n # Results\n print(f\"\\n***Checking archive: {archName}\")\n print(f\"Found {len(archFiles)} on remote. Local list length {len(localList)}.\")\n\n # This will run if fileComp is not an empty list\n if fileComp:\n print(f\"Difference: {len(archFiles) - len(localList)}\")\n print(\"File differences:\")\n print(*fileComp, sep = '\\n')\n\n else:\n print(\"Local and remote file lists match.\")\n\n\n else:\n print(f\"***Missing archive: {archName}\")\n fileComp = None\n\n # Set fileComp\n # Either empty, None or list of differences.\n self.nbDetails[key]['archFileCheck'] = fileComp\n if fileComp:\n self.nbDetails[key]['archFilesOK'] = False\n elif fileComp is None:\n self.nbDetails[key]['archFilesOK'] = False\n else:\n self.nbDetails[key]['archFilesOK'] = True\n\n if verbose:\n print(\"\\n***Local file list:\")\n print(*localListRel, sep='\\n')\n print(\"\\n***Archive file list:\")\n print(*archFiles, sep='\\n')\n\n return localListRel, archFiles, fileComp, result", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def org_diff(lst_dicts, media_type, main_server):\n diff_dict = {}\n # todo-me pull posters from connected servers\n\n for mtype in media_type:\n meta_lst = []\n seen = {}\n missing = []\n unique = []\n print('...combining {}s'.format(mtype))\n for server_lst in lst_dicts:\n for item in server_lst[mtype]:\n if mtype == 'movie':\n title = u'{} ({})'.format(item.title, item.year)\n else:\n title = item.title\n\n # Look for duplicate titles\n if title not in seen:\n seen[title] = 1\n meta_lst.append(get_meta(item))\n else:\n # Duplicate found\n if seen[title] >= 1:\n # Go back through list to find original\n for meta in meta_lst:\n if meta['title'] == title:\n # Append the duplicate server's name\n meta['server'].append(item._server.friendlyName)\n thumb_url = '{}{}?X-Plex-Token={}'.format(\n item._server._baseurl, item.thumb, item._server._token)\n meta['thumb'].append(thumb_url)\n seen[title] += 1\n # Sort item list by Plex rating\n # Duplicates will use originals rating\n meta_lst = sorted(meta_lst, key=lambda d: d['rating'], reverse=True)\n diff_dict[mtype] = {'combined': {\n 'count': len(meta_lst),\n 'list': meta_lst}}\n\n print('...finding {}s missing from {}'.format(\n mtype, main_server))\n for item in meta_lst:\n # Main Server name is alone in items server list\n if main_server not in item['server']:\n missing.append(item)\n # Main Server name is absent in items server list\n elif main_server in item['server'] and len(item['server']) == 1:\n unique.append(item)\n diff_dict[mtype].update({'missing': {\n 'count': len(missing),\n 'list': missing}})\n\n print('...finding {}s unique to {}'.format(\n mtype, main_server))\n diff_dict[mtype].update({'unique': {\n 'count': len(unique),\n 'list': unique}})\n\n return diff_dict", "def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have 
the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file 
specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}", "def default_filter(files):\n\n if '1.mkv' in files and '2.mkv' in files and 'Labels.json' in files:\n return True\n\n return False", "def filter_filelist(files: list, hour_mod: int = 12, min_mod: int = 60) -> list:\n files_restricted = []\n if hour_mod == 0 and min_mod == 0:\n files_restricted.append(sorted(files)[-1])\n else:\n for file in files:\n hour = int(file.split(\"_\")[3][8:10])\n minute = int(file.split(\"_\")[3][10:12])\n if hour % hour_mod == 0 and minute % min_mod == 0:\n files_restricted.append(file)\n logging.debug(f'Remote file added: {file}')\n else:\n logging.debug(f'Remote file ignored: {file}')\n logging.info('Files to be downloaded has been reduced from {} to {}'.format(len(files), len(files_restricted)))\n return files_restricted", "def search_local_files(filename, data_type, train_or_val, path, data_json):\n files = []\n\n if os.path.exists(path):\n all_files = os.listdir(path)\n for f in all_files:\n if f not in data_json.keys():\n continue\n if filename in f and data_type in f and train_or_val in f:\n dataset_filepath = os.path.join(path, f)\n local_file_md5 = get_file_md5(dataset_filepath)\n dataset_md5 = data_json[f][\"md5\"]\n if 
local_file_md5 == dataset_md5:\n files.append(f)\n else:\n print(f\"{f} is broken so that cannot partition from it.\")\n return files", "def test_mirror_filter_packages_nomatch_package_with_spec(tmpdir):\n test_configuration = \"\"\"\\\n[blacklist]\npackages =\n example3>2.0.0\n\"\"\"\n Singleton._instances = {}\n with open(\"test.conf\", \"w\") as testconfig_handle:\n testconfig_handle.write(test_configuration)\n BandersnatchConfig(\"test.conf\")\n for plugin in filter_project_plugins():\n plugin.initialize_plugin()\n m = Mirror(str(tmpdir), mock.Mock())\n m.packages_to_sync = {\"example1\": None, \"example3\": None}\n m._filter_packages()\n assert \"example3\" in m.packages_to_sync.keys()", "def test_syncer_sync_exclude(temp_data_dirs, syncer):\n tmp_source, tmp_target = temp_data_dirs\n\n syncer.sync_up(\n local_dir=tmp_source,\n remote_dir=\"/test/test_syncer_sync_exclude\",\n exclude=[\"*_exclude*\"],\n )\n syncer.wait()\n\n _download_from_fs_path(\n syncer.storage_filesystem, \"/test/test_syncer_sync_exclude\", tmp_target\n )\n\n # Excluded files should not be found in target\n assert_file(True, tmp_target, \"level0.txt\")\n assert_file(False, tmp_target, \"level0_exclude.txt\")\n assert_file(True, tmp_target, \"subdir/level1.txt\")\n assert_file(False, tmp_target, \"subdir/level1_exclude.txt\")\n assert_file(True, tmp_target, \"subdir/nested/level2.txt\")\n assert_file(False, tmp_target, \"subdir_nested_level2_exclude.txt\")\n assert_file(False, tmp_target, \"subdir_exclude/something/somewhere.txt\")", "def filterFiles(groupDict, fileList):\n for fl in fileList:\n cleanFile = cleanUpPath(fl)\n dirsList = PurePath(fl).parts\n try:\n # Find the first libs directory.\n index = dirsList.index(\"libs\")\n # Any child of libs directory is a group.\n grp = dirsList[index + 1]\n groupDict[grp].append(cleanFile)\n except ValueError:\n groupDict[GRP_UNFILTERED].append(cleanFile)", "def test_mirror_filter_packages_match(tmpdir):\n test_configuration = \"\"\"\\\n[blacklist]\nplugins = blacklist_project\npackages =\n example1\n\"\"\"\n Singleton._instances = {}\n with open(\"test.conf\", \"w\") as testconfig_handle:\n testconfig_handle.write(test_configuration)\n BandersnatchConfig(\"test.conf\")\n for plugin in filter_project_plugins():\n plugin.initialize_plugin()\n m = Mirror(str(tmpdir), mock.Mock())\n m.packages_to_sync = {\"example1\": None, \"example2\": None}\n m._filter_packages()\n assert \"example1\" not in m.packages_to_sync.keys()", "def filter_captured_urls(urls_files, url_list_file):\n captured_urls = load_captured_urls(url_list_file)\n \n to_capture = list(filter(lambda d: d['url'] not in captured_urls, urls_files))\n \n return to_capture", "def checkMissingFiles(inDir, jsonUrl):\n\n file_list = []\n remote = False\n try:\n file_list = os.listdir(inDir)\n except OSError:\n remote = True\n file_list = eos_ls(inDir)\n\n if file_list == []:\n print \"Directory does not exist or is empty!\"\n return []\n\n total_expected = 0\n missing_files = []\n suspicious_files = []\n recovered_files = []\n\n print 'Found %d files in input directory' % len(file_list)\n print 20*'-'\n\n jsonFile = open(jsonUrl,'r')\n procList = json.load(jsonFile,encoding = 'utf-8').items()\n\n for proc in procList:\n for desc in proc[1]:\n data = desc['data']\n isData = desc.get('isdata',False)\n mctruthmode = desc.get('mctruthmode')\n for d in data:\n dtag = d.get('dtag','')\n split = d.get('split',1)\n\n for segment in range(0,split):\n eventsFile = dtag\n if split > 1:\n eventsFile = dtag + '_' + str(segment)\n 
if mctruthmode:\n eventsFile += '_filt%d' % mctruthmode\n filename = eventsFile+'.root'\n\n sys.stdout.write('... checking %s' % filename)\n sys.stdout.flush()\n\n total_expected += 1\n\n if not filename in file_list:\n missing_files.append(filename)\n sys.stdout.write('\\033[91m MISSING \\033[0m \\n')\n # sys.stdout.flush()\n continue\n\n rootFileUrl = os.path.join(inDir, filename)\n if remote:\n rootFileUrl = ('root://eoscms//eos/cms/store' +\n rootFileUrl.split('store',1)[1])\n\n recovered, suspicious = False, False\n tfile = TFile.Open(rootFileUrl)\n try:\n if tfile.TestBit(TFile.kRecovered):\n recovered = True\n if tfile.IsZombie():\n suspicious = True\n tfile.Close()\n except AttributeError, ReferenceError:\n suspicious = True\n\n if recovered:\n sys.stdout.write('\\033[93m Recovered \\033[0m \\n')\n recovered_files.append(filename)\n if suspicious:\n sys.stdout.write('\\033[93m Failed to open \\033[0m \\n')\n suspicious_files.append(filename)\n\n sys.stdout.write('\\033[92m OK \\033[0m \\n')\n sys.stdout.flush()\n\n print 20*'-'\n if len(missing_files):\n print \"Missing the following files:\"\n print \"(%d out of %d expected)\"% (len(missing_files), total_expected)\n for filename in missing_files:\n print filename\n else:\n print \"NO MISSING FILES!\"\n print 20*'-'\n if len(suspicious_files):\n print \"Failed to open the following files:\"\n print \"(%d out of %d expected)\"% (len(suspicious_files), total_expected)\n for filename in suspicious_files:\n print filename\n print 20*'-'\n if len(recovered_files):\n print \"The following files are recovered:\"\n print \"(%d out of %d expected)\"% (len(recovered_files), total_expected)\n for filename in recovered_files:\n print filename\n print 20*'-'\n\n return missing_files+suspicious_files+recovered_files", "def getFileListLocal(dataset,blacklist=[ ],tag=\"\"):\n if '/pnfs/' in dataset:\n tag += \"_pnfs\"\n dataset = '__'.join(dataset.split('/')[-3:])\n filename = \"filelist/filelist_%s%s.txt\"%(dataset.lstrip('/').replace('/','__'),tag)\n filelist = [ ]\n if os.path.exists(filename):\n with open(filename,'r') as file:\n for line in file:\n line = line.rstrip('\\n')\n if line and '#' not in line and line not in blacklist:\n filelist.append(line.rstrip('\\n'))\n return filelist", "def filter_list(to_process_list):\n log_file_list = [file for file in to_process_list if \"tar\" not in file]\n tar_file_list = [file for file in to_process_list if \"tar\" in file]\n return log_file_list, tar_file_list", "def diffInLocalFiles():\n\taddedFiles = listdir(globals.LOCAL_SHARED_FILE_SPACE)\t#aka current files\n\tremovedFiles = globals.LOCAL_FILE_LIST\t\t\t\t\t#aka previously recorded files\n\t#TODO: this can be a lot more efficient\n\t\n\t#record files that appear in both lists\n\tcommonFiles = []\n\tfor file in removedFiles:\n\t\tif file in addedFiles:\n\t\t\tcommonFiles.append(file)\n\t\t\t\n\t#remove files that appear in both lists\n\tfor file in commonFiles:\n\t\taddedFiles.remove(file)\n\t\tremovedFiles.remove(file)\n\t\t\n\t#The files remaining in the respective list were either recently added or removed\n\tmessages = []\n\tfor file in removedFiles:\n\t\tmessages.append((globals.REMOVE_FILE, file))\t#these files not longer exist\n\tfor file in addedFiles:\n\t\tmessages.append((globals.ADD_FILE, file))\t\t#these files have been recently added\n\n\t#redefine list of local files\n\tglobals.LOCAL_FILE_LIST = listdir(globals.LOCAL_SHARED_FILE_SPACE)\n\treturn messages", "def filterImages(files, cfg):\r\n regex = \"\\.(\" + 
\"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def filter_target_extensions(self, files_dict):\n files_filtered = defaultdict(list)\n supported_formats = self.sox_get_supported_formats()\n logging.info('Filtering audio files ...')\n paths = list(files_dict.keys())\n\n for path in paths:\n if not path.endswith('letmehear'):\n files = sorted(files_dict[path])\n for f in files:\n if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:\n files_filtered[path].append(f)\n return files_filtered", "def list_backupable_files(files, config, file_filter):\n # For each file used by the application\n backupable_files = []\n for _filename in list(files):\n for filename in glob.glob(_filename):\n # print(filename)\n\n # ignore the user defined files\n if any(re.match(ignore, filename) for ignore in config.ignores):\n continue\n\n status = None\n # check for backuped files given from pipe:\n if filename in config.backuped_files:\n status = Status.EXISTS\n\n # If the file exists and is not already a link pointing to Original file\n if status is None:\n status = file_filter.get_status(filename)\n\n if status is None:\n status = Status.NOT_EXISTS\n\n backupable_files.append([status, filename])\n return backupable_files", "def collect():\n\n # Get database.\n with open(local_directory(path='file_diffs/packages.json'), 'r') as f:\n store = json.load(f)\n\n # UI.\n print('Checking files for differences...\\n')\n\n # Iterate database.\n for package_name in store:\n # Package variables.\n package_dir = os.path.join(package_directory, package_name)\n package = store[package_name]\n\n # Recursive (lazy) package searching.\n if type(package) == str:\n package = os.path.expanduser(package)\n for dirpath, dirnames, filenames in os.walk(package):\n for filename in filenames:\n sub_package_dir = package_dir + dirpath.replace(package, '')\n if not os.path.exists(sub_package_dir):\n os.makedirs(sub_package_dir)\n\n fp_local = os.path.join(dirpath, filename)\n fp_remote = os.path.join(sub_package_dir, filename)\n\n cs_local = file_checksum(fp=fp_local)\n cs_remote = file_checksum(fp=fp_remote)\n\n if cs_remote != cs_local:\n print('Found: {}/{}'.format(package_name, filename))\n shutil.copyfile(src=fp_local, dst=fp_remote)\n\n # Manual package searching.\n if type(package) == list:\n for fp in package:\n fn_local = fp['local']\n fn_remote = fp['remote']\n\n fp_local = os.path.expanduser(fn_local)\n fp_remote = os.path.join(package_dir, fn_remote)\n\n cs_local = file_checksum(fp=fp_local)\n cs_remote = file_checksum(fp=fp_remote)\n\n if cs_remote != cs_local:\n print('Found: {}/{}'.format(package_name, fn_remote))\n\n remote_dir_path = '/'.join(fp_remote.split('/')[:-1])\n if not os.path.exists(remote_dir_path):\n os.makedirs(remote_dir_path)\n shutil.copyfile(src=fp_local, dst=fp_remote)", "def compare_remote_elements(manifest_e1: Element, manifest_e2: Element,\n ignored_attrs: Set[str]) -> ChangeMap:\n return xml_diff.compare_subelements(\n tag='remote',\n p1=manifest_e1,\n p2=manifest_e2,\n ignored_attrs=ignored_attrs,\n key_fn=lambda x: x.get('name'),\n diff_fn=xml_diff.attribute_changes)", "def _filter_diff(diff, include_list, exclude_list=()):\n filtered = []\n for d in diff:\n if (d.status != 'D' and\n _match_regex_list(d.file, include_list) and\n not _match_regex_list(d.file, exclude_list)):\n # We've got a match!\n filtered.append(d)\n return filtered", "def localfiles_for_update(self, localfiles, 
obsfiles):\n upload_local_files = []\n obs_dict = {}\n for key, mtime, size in obsfiles:\n obs_dict[key.strip('/')] = mtime\n\n for localfile in localfiles:\n filepath, key = localfile\n fullkey = key + '/' + os.path.basename(filepath)\n fullkey = fullkey.strip('/')\n if fullkey in obs_dict.keys():\n localfile_timestamp = os.path.getmtime(filepath)\n obsfile_timestamp = time.mktime(time.strptime(obs_dict[fullkey], \"%Y/%m/%d %H:%M:%S\"))\n\n if localfile_timestamp > obsfile_timestamp:\n upload_local_files.append(localfile)\n else:\n upload_local_files.append(localfile)\n return upload_local_files", "def _is_remote_reusable(inputs, calculation):\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote", "def prefilter_json_files_then_compare(args):\n\n logging.info(\"prefilter_json_files_then_compare: starting!\")\n with open(args.initialFile) as f:\n json_initial = file.read(f)\n with open(args.finalFile) as f2:\n json_final = file.read(f2)\n\n patch = jsonpatch.JsonPatch.from_diff(json_initial, json_final)\n logging.info(\n \"prefilter_json_files_then_compare:differences before patching: %d\",\n len(list(patch)),\n )\n\n json_initial_filtered = prefilter(json_initial, args.initial_prefilter)\n json_final_filtered = prefilter(json_final, args.finalPreFilter)\n\n patch_after_filtering = jsonpatch.JsonPatch.from_diff(\n json_initial_filtered, json_final_filtered\n )\n differences_after_patching = list(patch_after_filtering)\n logging.info(\n \"prefilter_json_files_then_compare: differences after patching: %d\",\n len(differences_after_patching),\n )\n\n if args.printDifferences:\n for patchline in differences_after_patching:\n print(json.dumps(patchline))\n\n print(len(differences_after_patching))\n return len(differences_after_patching)", "def _detect_files(data):\n return any(attr[\"extra\"].get(\"files\")\n for attr in data[\"attributes\"] if attr[\"extra\"])", "def find_URLs(directory, options):\n\n files = os.listdir(directory)\n filtered_files = []\n files_for_download = []\n for item in files:\n if item.endswith(\".json\"):\n filtered_files.append(item)\n\n for item in filtered_files:\n file_path = os.path.join(directory, item)\n\n with open(file_path, \"r\") as json_file:\n payload = json.load(json_file)\n for message in payload:\n if (\"subtype\" in message\n and message.get(\"subtype\") == \"file_share\"):\n\n download_URL = message.get(\"file\").get(\"url_download\")\n\n if options.remote_name:\n download_filename = message.get(\"file\").get(\"id\")\n else:\n download_filename = message.get(\"file\").get(\"name\")\n if download_filename.startswith(\"-.\"):\n download_filename = download_filename.lstrip(\"-\")\n download_filename = \"{}{}\".format(\n message.get(\"file\").get(\"id\"),\n download_filename)\n\n files_for_download.append(\n 
(download_filename, download_URL))\n\n download_URLs(files_for_download, directory)", "def look_for_interesting_files(self, interesting_files):\n self.valid_interesting_files = []\n for i, f in interesting_files.items():\n if f.hash in self.files:\n self.logger.info(\"New interesting file : %s\", f.name)\n self.valid_interesting_files.append((f, self.files[f.hash]))", "def _remove_remote_files_dirs(self):\n self.remote.remove_files_dirs()", "def filelist_cleaner(lista, dataset=''):\n if dataset == 'ncar':\n cleaned = [ l for l in lista if '.nc' not in l ]\n if dataset == 'bufr':\n cleaned = [ l for l in lista if '.bfr' in l ]\n if 'era5' in dataset:\n cleaned = [ l for l in lista if '.nc' not in l and '.conv.' in l ]\n else:\n cleaned = lista\n \n return cleaned", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def walk_files():\n\n # TODO: not check twice the same dir or file\n for path in config.targets:\n abs_path = os.path.join(cwd, path)\n\n if not os.path.islink(abs_path) and os.path.isfile(abs_path):\n walked.append(abs_path)\n yield abs_path\n #process_file(abs_path)\n\n if os.path.isdir(abs_path):\n walked.append(abs_path)\n for root, dirs, files in os.walk(abs_path):\n for fname in files:\n if isbackup(fname):\n continue\n abs_path = os.path.join(root, fname)\n walked.append(abs_path)\n if not os.path.islink(abs_path) and\\\n os.path.isfile(abs_path):\n base, name = os.path.split(abs_path)\n XXX, ext = os.path.splitext(name)\n\n ignored = False\n for pattern in IGNORE_FILES:\n if pattern.search(fname):\n ignored = True\n break\n\n # maybe should be merged with IGNORE_FILES?\n for regexp in config.exclude_list:\n if regexp.search(fname):\n ignored = True\n break\n\n if not ignored:\n for test_ext in config.disallow_exts:\n if test_ext == ext:\n ignored = True\n break\n\n if not ignored:\n if config.allow_exts:\n ignored = True\n for test_ext in config.allow_exts:\n if test_ext == ext:\n ignored = False\n break\n\n if not ignored:\n yield abs_path\n #process_file(abs_path)\n\n for dir in dirs[:]:\n if dir in IGNORE_DIRS:\n dirs.remove(dir)\n if dir in dirs:\n dirs.remove(dir)\n # mayb be should be merged with IGNORE_DIRS?\n else:\n for regexp in config.exclude_list:\n if regexp.search(dir):\n # This check is required\n # because several different patterns\n # could match one file name\n if dir in dirs:\n dirs.remove(dir)\n\n for dir in dirs:\n abs_path = os.path.join(root, dir)\n walked.append(abs_path)", "def _negate_filter(session,\n query,\n urls=None,\n acl=None,\n file_name=None,\n version=None,\n metadata=None,\n urls_metadata=None):\n if file_name is not None:\n query = query.filter(IndexRecord.file_name != file_name)\n\n if version is not None:\n query = query.filter(IndexRecord.version != version)\n\n if urls is not None and urls:\n query = query.join(IndexRecord.urls)\n for u in urls:\n query = query.filter(~IndexRecord.urls.any(IndexRecordUrl.url == u))\n\n if acl is not None and acl:\n query = query.join(IndexRecord.acl)\n for u in acl:\n query = query.filter(~IndexRecord.acl.any(IndexRecordACE.ace == 
u))\n\n if metadata is not None and metadata:\n for k, v in metadata.items():\n if not v:\n query = query.filter(~IndexRecord.index_metadata.any(IndexRecordMetadata.key == k))\n else:\n sub = session.query(IndexRecordMetadata.did)\n sub = sub.filter(\n and_(\n IndexRecordMetadata.key == k,\n IndexRecordMetadata.value == v\n )\n )\n query = query.filter(~IndexRecord.did.in_(sub.subquery()))\n\n if urls_metadata is not None and urls_metadata:\n query = query.join(IndexRecord.urls).join(IndexRecordUrl.url_metadata)\n for url_key, url_dict in urls_metadata.items():\n if not url_dict:\n query = query.filter(~IndexRecordUrlMetadata.url.contains(url_key))\n else:\n for k, v in url_dict.items():\n if not v:\n query = query.filter(~IndexRecordUrl.url_metadata.any(\n and_(IndexRecordUrlMetadata.key == k,\n IndexRecordUrlMetadata.url.contains(url_key)\n ))\n )\n else:\n sub = session.query(IndexRecordUrlMetadata.did)\n sub = sub.filter(\n and_(\n IndexRecordUrlMetadata.url.contains(url_key),\n IndexRecordUrlMetadata.key == k,\n IndexRecordUrlMetadata.value == v\n )\n )\n query = query.filter(~IndexRecord.did.in_(sub.subquery()))\n return query", "def UpdateConfigSetInfos(imported_infos, remote_infos):\n info_map = dict((info.url, info) for info in imported_infos)\n\n for info in remote_infos:\n existing_info = info_map.get(info.url)\n if existing_info:\n if existing_info.hash != info.hash:\n # Imported file is different\n existing_info.status = ndb_models.ConfigSetStatus.UPDATABLE\n info_map[info.url] = existing_info\n # If imported file is same, keep as imported=True and update=False\n else:\n # Not imported\n info_map[info.url] = info\n\n # Sort for consistent ordering\n info_list = []\n for key in sorted(info_map):\n info_list.append(info_map.get(key))\n return info_list", "def get_untracked_files():\n untracked_files = set()\n for _, dirs, files in os.walk(os.getcwd()):\n for d in dirs:\n if d not in staging_obj_names:\n file_path = get_path_outside_wit(filename=d.strip())\n if file_path:\n untracked_files.add(file_path)\n for f in files:\n if f not in staging_obj_names:\n file_path = get_path_outside_wit(filename=f.strip())\n if file_path:\n untracked_files.add(file_path)\n return untracked_files", "def test_find_with_excluded_hidden_dirs_relative(self):\n tdir1 = self._make_test_dir('.test1')\n tdir2 = self._make_test_dir('test_2')\n tdir3 = self._make_test_dir('test.3')\n files = [\n os.path.join(tdir1, 'testfile1.py'),\n os.path.join(tdir2, 'testfile2.py'),\n os.path.join(tdir3, 'testfile3.py'),\n ]\n _touch_files(files)\n\n # We must temporarily change the current directory, so that we test against\n # patterns like ./.test1/file instead of /tmp/foo/.test1/file\n with _restore_working_dir():\n\n os.chdir(self.test_tmpdir)\n actual = file_resources.GetCommandLineFiles(\n [os.path.relpath(self.test_tmpdir)],\n recursive=True,\n exclude=['*.test1*'])\n\n self.assertEqual(\n sorted(actual),\n sorted([\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir2),\n 'testfile2.py'),\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir3),\n 'testfile3.py'),\n ]))", "def _find_files(metadata):\n\n ret = []\n found = {}\n\n for bucket_dict in metadata:\n for bucket_name, data in bucket_dict.items():\n filepaths = [k[\"Key\"] for k in data]\n filepaths = [k for k in filepaths if not k.endswith(\"/\")]\n if bucket_name not in found:\n found[bucket_name] = True\n ret.append({bucket_name: filepaths})\n else:\n for bucket in ret:\n if bucket_name in bucket:\n 
bucket[bucket_name] += filepaths\n break\n return ret", "def exclude_filter(path):\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file", "def ignore_certain_metainf_files(filename):\n ignore = (\"META-INF/manifest.mf\",\n \"META-INF/*.sf\",\n \"META-INF/*.rsa\",\n \"META-INF/*.dsa\",\n \"META-INF/ids.json\")\n\n for glob in ignore:\n # Explicitly match against all upper case to prevent the kind of\n # runtime errors that lead to https://bugzil.la/1169574\n if fnmatch.fnmatchcase(filename.upper(), glob.upper()):\n return True\n return False", "def getFileListPNFS(dataset,blacklist=[ ]):\n dataset = dataset.replace('__','/')\n tmpList = glob.glob(dataset+'/*.root')\n director = \"dcap://t3se01.psi.ch:22125/\"\n filelist = [ ]\n for file in tmpList:\n if '.root' in file and file not in blacklist:\n filelist.append(director+file.rstrip())\n if \"SingleMuon/Run2018C-Nano14Dec2018-v1/NANOAOD\" in dataset: # temporary solution\n file = director+\"/pnfs/psi.ch/cms/trivcat/store/user/ineuteli/samples/NANOAOD_2018/SingleMuon/Run2018C-Nano14Dec2018-v1/NANOAOD/DA99EFB9-860F-F648-8D46-5AD80205F53B_skimmed.root\"\n if file in filelist:\n print bcolors.BOLD + bcolors.WARNING + \"FOUND %s !\"%file + bcolors.ENDC\n else:\n print bcolors.BOLD + bcolors.WARNING + \"ADDED %s !\"%file + bcolors.ENDC\n filelist.append(file)\n filelist.sort()\n return filelist", "def filter_images(history, whitelist):\n docker_client = docker.client.APIClient()\n local_images = common.get_local_images(docker_client)\n approved_images = set(local_images) - set(whitelist)\n return {image: timestamp for image, timestamp in history.items() if image in approved_images}", "def _GetMissingAndStaleFiles(file_pairs):\n\n missing_files = []\n stale_files = []\n\n for pair in file_pairs:\n if not os.path.isfile(pair.target):\n missing_files.append(pair)\n continue\n\n with open(pair.generated) as g, open(pair.target) as t:\n if g.read() != t.read():\n stale_files.append(pair)\n\n return missing_files, stale_files", "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in 
self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n 
first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")", "def filter_overlapping_files(files):\n keys = list(files.keys())\n base = min([key.replace(\"M\", \"\") for key in files.keys()])\n base = str(base) + \"M\"\n keys.remove(base)\n base_files = files[base]\n\n dict_files_all = {}\n for key in keys:\n file_keys = files[key]\n for file_key in file_keys:\n for file_base in base_files:\n dates_overlapping = filter_overlapping_dates(file_base, file_key)\n if len(dates_overlapping) > 0:\n list_files = [file_base, file_key]\n combination = base + \"_\" + key\n if combination in dict_files_all.keys():\n dict_files_all[combination].append(list_files)\n else:\n dict_files_all[combination] = [list_files]\n return dict_files_all", "def vpn_common_files(data, treshold=0.7):\n part_one = {}\n part_two = {}\n\n for extension in data:\n for webpage in data[extension]:\n if webpage not in result:\n part_one[webpage] = {}\n part_two[webpage] = {}\n for file in data[extension][webpage]:\n if file not in part_one[webpage]:\n part_one[webpage][file] = [\n extension,\n ]\n else:\n part_one[webpage][file].append(extension)\n\n # Keep common files in part one\n\n for wp_key, wp_value in list(part_one.items()):\n for f_key, f_value in list(wp_value.items()):\n if len(f_value) < len(data) * treshold:\n # Its not common in as many vpns as we want\n part_two[wp_key][f_key] = part_one[wp_key].pop(f_key)\n\n return part_one, part_two", "def filterAll(media_list_file, in_movie_dir, out_movie_dir):\n with open(media_list_file) as f:\n names = json.load(f)\n\n count_tweets = []\n count_kept_tweets = []\n for name in names:\n with open(\"{}/{}.json\".format(in_movie_dir, name)) as f:\n tweets = json.load(f)\n kept_tweets = [t for t in tweets.values() if keep(t)]\n print(\"total = {}\\tkeep = {}\\tName = {}\".format(\n len(tweets), len(kept_tweets), name))\n count_tweets.append(len(tweets))\n count_kept_tweets.append(len(kept_tweets))\n with open(\"{}/{}.json\".format(out_movie_dir, name), \"w\") as f:\n json.dump(kept_tweets, f)\n print(\"Mean tweets = {}\".format(np.mean(count_tweets)))\n print(\"Mean kept tweets = {}\".format(np.mean(count_kept_tweets)))\n print(\"Mean fraction kept tweets = {}\".format(\n np.mean(count_kept_tweets) / np.mean(count_tweets)))", "def test_calculate_indicates_removal_of_unrelated_files(self, m_free):\n # files are unrelated to backup\n walk_paths = {'/dst': [('/dst', ['/a'], ['x0.txt']),\n ('/dst/a', [], ['x1.txt'])]}\n copied_indexes = []\n reconciler = keepfilesreconciler.KeepFilesReconciler(self.resolver, self.options)\n with filesystemhelpers.mock_walk(walk_paths):\n filepaths = reconciler.calculate(self.copyfiles, copied_indexes)\n assert filepaths == {'/dst/a/x1.txt', '/dst/x0.txt'}", "def discard_resources(data, presence_treshold=1.0):\n\n # we will discard stuff from this copy\n res = copy.deepcopy(data)\n\n # to compare easier\n good_one = copy.deepcopy(data[\"no_vpn\"])\n del data[\"no_vpn\"]\n\n to_del = []\n\n for extension in data:\n for webpage in data[extension]:\n for f in data[extension][webpage]:\n if f in good_one[webpage]:\n del res[extension][webpage][f]\n # this will repeat, but it shouldn't be a problem\n to_del.append((webpage, f))\n\n for webpage, file in to_del:\n try:\n del data[\"no_vpn\"][webpage][file]\n except KeyError:\n pass\n\n return res", "def compare_config_files(config_filepath):\n tracked_config = 
get_config(config_filepath, True)\n local_config = get_config(config_filepath)\n\n unique_values = {}\n\n if not path.isfile(get_local_config_filepath(config_filepath)): #pragma: no cover\n #pytest.skip('no local .cfg found, skipping')\n return None\n\n local_unique_sections, local_unique_keys = find_unique_keys(\n local_config,\n tracked_config,\n 'local'\n )\n tracked_unique_sections, tracked_unique_keys = find_unique_keys(\n tracked_config,\n local_config,\n 'tracked'\n )\n\n ## vv TODO vv: TEST ME ##\n if any([\n local_unique_keys,\n tracked_unique_keys\n ]):\n unique_values['unique_keys'] = {}\n unique_values['unique_keys']['local'] = local_unique_keys\n unique_values['unique_keys']['tracked'] = tracked_unique_keys\n if any([\n local_unique_sections,\n tracked_unique_sections\n ]):\n unique_values['unique_sections'] = [local_unique_sections, tracked_unique_sections]\n ## ^^ TODO ^^ ##\n\n return unique_values", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def checkOverCopy(filelist, path, over):\n\n rawfiles = []\n missingRaw = []\n\n raw = '/net/mko-nfs/sci/dataflo'\n\n\n for entry in filelist:\n if glob.glob(path+'/'+entry):\n rawfiles.append(glob.glob(path+'/'+entry))\n else:\n missingRaw.append(entry)\n\n if rawfiles:\n if over:\n for entry in rawfiles:\n if os.path.exists(entry[0]):\n os.remove(entry[0])\n # copy all science images from a given night into ./Raw/\n for entry in filelist:\n if os.path.exists(raw+'/'+entry):\n shutil.copy(raw+'/'+entry, path)\n else:\n logging.info('SKIPPED ', entry)\n else:\n for entry in missingRaw:\n if os.path.exists(raw+'/'+entry):\n shutil.copy(raw+'/'+entry, path)\n else:\n logging.info('SKIPPED ', entry)\n\n else:\n for entry in filelist:\n if os.path.exists(raw+'/'+entry):\n shutil.copy(raw+'/'+entry, path)\n else:\n logging.info('SKIPPED ', entry)\n\n return", "def filter_none(data, split_by_client=False):\n\n if split_by_client:\n # filter out missing files and empty clients\n existing_data = [[d for d in client_data if d is not None]\n for client_data in data]\n existing_data = [\n client_data for client_data in existing_data if client_data]\n else:\n # filter out missing files\n existing_data = [d for d in data if d is not None]\n return existing_data", "def parse_url_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'urls' in f:\n URL_FILES.append(f)\n PY_FILES.remove(f)", "def get_files_not_staged():\n unstaged_files = []\n current_staging_hashes = get_all_path_hashes(staging_path)\n for root, _, files in os.walk(os.getcwd()):\n for f in files:\n file_path = get_path_outside_wit(filename=f)\n if 'staging_area' in root and file_path:\n file_hash = get_file_hash(file_path=file_path)\n if file_hash not in current_staging_hashes:\n unstaged_files.append(file_path)\n return unstaged_files", "def update_files(self):\n try:\n db_files = self.dbc.get_file_list(self.remote_directory)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) \\\n + \": Could 
not get remote file list.\"\n print e.reason\n return False\n new_files = set(db_files) - self.file_set\n old_files = self.file_set - set(db_files)\n if new_files != set() or old_files != set():\n self.file_set = set(db_files)\n for filename in new_files:\n try:\n self.dbc.get_file(filename)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n for filename in old_files:\n try:\n os.remove(self.local_directory + \"/\" + filename)\n except OSError:\n pass\n print str(datetime.datetime.now()) + \": Fileset changed:\"\n print self.file_set\n email_changes(new_files, old_files)\n print str(datetime.datetime.now()) \\\n + \": Email sent from update_files().\"\n return True\n return False", "def pullnlink(self,config):\n \n pull = []; link = []\n \n # choose files to pull and link\n for key,value in self.FILES.iteritems():\n \n # link big files\n if key == 'MESH':\n # mesh (merged or partitioned)\n value = expand_part(value,config)\n link.extend(value)\n elif key == 'DIRECT':\n # direct solution\n value = expand_time(value,config)\n link.extend(value)\n elif 'ADJOINT_' in key:\n # adjoint solution\n value = expand_time(value,config)\n link.extend(value)\n #elif key == 'STABILITY':\n #pass\n # copy all other files\n else:\n pull.append(value)\n \n #: for each filename\n \n return pull,link", "def __ignore_files_with_suffix(self, files):\n if self.__config.suffix().strip():\n files = [file for file in files if self.__config.suffix() not in file]\n return files", "def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )", "def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def _sift(self, fileslist, **arguments):\n\n def sort(reverse, arg, fileslist=fileslist):\n tdict = {fileslist[i][arg] : i for i in xrange(len(fileslist))}\n keys = tdict.keys()\n keys.sort(reverse=reverse)\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n return fileslist\n\n # for time\n if arguments.get('name'):\n reverse = None\n if arguments['name'] == 'reverse':\n 
reverse = True\n elif arguments['name'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'server_filename')\n\n # for size\n if arguments.get('size'):\n reverse = None\n if arguments['size'] == 'reverse':\n reverse = True\n elif arguments['size'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'size')\n\n # for size\n if arguments.get('time'):\n reverse = None\n if arguments['time'] == 'reverse':\n reverse = True\n elif arguments['time'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'local_mtime')\n\n # for head, tail, include, exclude\n head = args.head\n tail = args.tail\n include = args.include\n exclude = args.exclude\n if head or tail or include or exclude:\n tdict = {fileslist[i]['server_filename'] : i for i in xrange(len(fileslist))}\n keys1 = [i for i in tdict.keys() if i.lower().startswith(head.encode('utf8').lower())] \\\n if head else []\n keys2 = [i for i in tdict.keys() if i.lower().endswith(tail.decode('utf8').lower())] \\\n if tail else []\n keys3 = [i for i in tdict.keys() if re.search(include, i.encode('utf8'), flags=re.I)] \\\n if include else []\n keys4 = [i for i in tdict.keys() if not re.search(exclude, i.encode('utf8'), flags=re.I)] \\\n if exclude else []\n\n # intersection\n keys = [i for i in [keys1, keys2, keys3, keys4] if i]\n if len(keys) > 1:\n tkeys = keys[0]\n for i in keys:\n tkeys &= i\n keys = tkeys\n elif len(keys) == 1:\n keys = keys[0]\n elif len(keys) == 0:\n keys = []\n\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n\n dirs = [i for i in fileslist if i['isdir']]\n files = [i for i in fileslist if not i['isdir']]\n if arguments.get('desc') == 1:\n dirs.reverse()\n files.reverse()\n fileslist = dirs + files\n\n return fileslist", "def get_files(metadata_dir, images_dir, image_format, metadata_format):\n all_metadata_files = [x for x in set(os.listdir(metadata_dir)) if x.endswith(metadata_format)]\n all_image_files = [x for x in set(os.listdir(images_dir)) if x.endswith(image_format)]\n images_and_metadata = {}\n for metadata, image in itertools.product(all_metadata_files, all_image_files):\n if image.split('.')[0] in metadata:\n images_and_metadata[metadata] = image\n return images_and_metadata", "def local_images(self, images):\n registries = self.known_docker_registries()\n found_images = []\n for image in images:\n # docker could have the image name as-is or prefixed with any registry\n imglist = [image] + [reg + \"/\" + image for reg in registries]\n if self.is_image_local(imglist):\n found_images.append(image)\n return found_images", "def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. 
\n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide", "def find_remotes(self):\n\n attrs = ['name', 'fetch', 'review']\n remotes = dict()\n\n for remote in self.tree.findall('remote'):\n values = [remote.get(attr) for attr in attrs]\n remote_dict = dict(zip(attrs, values))\n remote_name = remote_dict.pop('name')\n\n if remote_name is None or remote_dict['fetch'] is None:\n if self.fail_on_invalid:\n raise InvalidManifest(\n 'Remote entry missing \"name\" or \"fetch\" attribute'\n )\n else:\n continue\n\n if remote_name in remotes:\n raise InvalidManifest(\n 'Remote entry duplicates previous remote entry'\n )\n\n remotes[remote_name] = self.generate_data_dict(remote_dict)\n\n self.remotes = remotes", "def orphan_files(site, acting=False):\n return _get_files('orphans', site, acting)", "def getLocalFiles(self):\r\n\r\n for dirpath, dirnames, filenames in os.walk(self.dlLocation):\r\n for name in filenames:\r\n currentPath = os.path.join(dirpath, name)\r\n currentPath = re.sub('^\\w:', '', currentPath)\r\n currentPath = re.sub(r\"\\\\\",'/', currentPath)\r\n self.localStore.append(currentPath)\r\n\r\n return self.localStore", "def compare_FBS_results(fbs1, fbs2, ignore_metasources=False,\n compare_to_remote=False):\n import flowsa\n\n # load first file\n df1 = flowsa.getFlowBySector(fbs1,\n download_FBS_if_missing=compare_to_remote\n ).rename(columns={'FlowAmount': 'FlowAmount_fbs1'})\n df1 = replace_strings_with_NoneType(df1)\n # load second file\n if compare_to_remote:\n # Generate the FBS locally and then immediately load\n flowsa.flowbysector.main(method=fbs2,\n download_FBAs_if_missing=True)\n df2 = flowsa.getFlowBySector(fbs2).rename(\n columns={'FlowAmount': 'FlowAmount_fbs2'})\n df2 = replace_strings_with_NoneType(df2)\n # compare df\n merge_cols = list(df2.select_dtypes(include=[\n 'object', 'int']).columns)\n if ignore_metasources:\n for e in ['MetaSources', 'AttributionSources']:\n try:\n merge_cols.remove(e)\n except ValueError:\n pass\n # todo: remove merge_col edit once the added columns from DataVis branch\n # are pulled into master 12/1/22\n # ignore additional columns on merge if they do not exist in first\n # dataframe (version on Data commons if comparing to remote)\n merge_cols = [e for e in merge_cols if e in df1.columns]\n\n # aggregate dfs before merge - might have duplicate sectors due to\n # dropping metasources/attribution sources\n df1 = aggregator(df1[merge_cols + ['FlowAmount_fbs1']],\n groupbycols=list(df1.select_dtypes(include=[\n 'object', 'int']).columns),\n flowcolname='FlowAmount_fbs1')\n df2 = aggregator(df2[merge_cols + ['FlowAmount_fbs2']],\n groupbycols=list(df2.select_dtypes(include=[\n 'object', 'int']).columns),\n flowcolname='FlowAmount_fbs2')\n # check units\n compare_df_units(df1, df2)\n df_m = pd.merge(df1[merge_cols + ['FlowAmount_fbs1']],\n df2[merge_cols + ['FlowAmount_fbs2']],\n how='outer')\n df_m = df_m.assign(FlowAmount_diff=df_m['FlowAmount_fbs2']\n .fillna(0) - 
df_m['FlowAmount_fbs1'].fillna(0))\n df_m = df_m.assign(\n Percent_Diff=(df_m['FlowAmount_diff']/df_m['FlowAmount_fbs1']) * 100)\n df_m = df_m[df_m['FlowAmount_diff'].apply(\n lambda x: round(abs(x), 2) != 0)].reset_index(drop=True)\n # if no differences, print, if differences, provide df subset\n if len(df_m) == 0:\n vLog.debug('No differences between dataframes')\n else:\n vLog.debug('Differences exist between dataframes')\n df_m = df_m.sort_values(['Location', 'SectorProducedBy',\n 'SectorConsumedBy', 'Flowable',\n 'Context', ]).reset_index(drop=True)\n\n return df_m", "def list_of_medias_ext(args, sourcedir):\n result = list()\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n fullname = os.path.join(sourcedir, basename)\n if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):\n result.append(fullname)\n else:\n if is_media_within_dates(fullname, args.dates):\n result.append(fullname)\n return result", "def ExcludeFiles(filters, files):\n if not filters:\n return files\n match = set()\n for file_filter in filters:\n excludes = set(fnmatch.filter(files, file_filter))\n match |= excludes\n return [name for name in files if name not in match]", "def resolve_remote_files(config, local_dir, storage_client):\n\n with monitor_activity() as monitor:\n\n def _map_fn(value):\n if not isinstance(value, str) or not storage_client.is_managed_path(value):\n return value\n storage_id, remote_path = storage_client.parse_managed_path(value)\n if remote_path and remote_path[0] == \"/\":\n remote_path = remote_path[1:]\n local_path = os.path.join(local_dir, storage_id, remote_path)\n # can be a file or a directory\n storage_client.get(remote_path, local_path, storage_id=storage_id)\n monitor.notify()\n return local_path\n\n return _map_config_fn(config, _map_fn)", "def missing_files(site, acting=False):\n return _get_files('invalid', site, acting)", "def synch_present(cls, filter=None, *args):\n for dir in os.listdir():\n if os.path.isdir(dir):\n if filter and not re.search(filter, dir):\n logging.info('{} ignored; filtered by `{}`'.format(dir,\n filter))\n else:\n logging.info('{} exists; pulling changes'.format(dir))\n subprocess.run(['git', 'pull'], cwd=dir)", "def remote_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is not None:\r\n results.extend([node.text for node in filter(is_import_node, self.imports_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n if self.folders_node is not None:\r\n results.extend([node.text for node in filter(is_folder_node, self.folders_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n return results", "def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = 
\"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def remove_dup(files1, dict_3, files2):\n l1 = files1[:]\n for i in l1:\n if '/' not in i:\n if i in files2:\n files1.remove(i)\n del dict_3[i]\n return files1", "def _warn_about_dataset_files(files, dataset_gateway: IDatasetGateway):\n found = []\n for dataset in dataset_gateway.get_all_active_datasets():\n for src, dst in files.items():\n relative_src = get_relative_path(src, project_context.path)\n if not relative_src:\n continue\n\n found_file = dataset.find_file(relative_src)\n if not found_file:\n continue\n if not found_file.is_external and not is_subpath(dst, project_context.path / dataset.get_datadir()):\n found.append(str(src))\n\n if not found:\n return\n\n found_str = \"\\n\\t\".join(found)\n communication.confirm(\n msg=\"You are trying to move dataset files out of a datasets data directory. \"\n f\"These files will be removed from the source dataset:\\n\\t{found_str}\",\n abort=True,\n warning=True,\n )", "def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def getFilesOnly(self,files):\n filesOnly = []\n for f in files:\n if not f['is_dir']:\n filesOnly.append(f)\n return filesOnly", "def test_find_many_files_zipped_no_allow(self):\n\n these_file_names = satellite_io.find_many_files(\n top_directory_name=TOP_DIRECTORY_NAME,\n first_date_string=FIRST_DATE_STRING,\n last_date_string=LAST_DATE_STRING,\n prefer_zipped=True, allow_other_format=False, test_mode=True\n )\n\n self.assertTrue(these_file_names == FILE_NAMES_ZIPPED)", "def _filter_files(file_dir: Union[str, Path], is_viya4: Optional[bool] = False) -> list:\n file_names = []\n file_names.extend(sorted(Path(file_dir).glob(\"*.json\")))\n if is_viya4:\n file_names.extend(sorted(Path(file_dir).glob(\"score_*.py\")))\n file_names.extend(sorted(Path(file_dir).glob(\"*.pickle\")))\n # Include H2O.ai MOJO files\n file_names.extend(sorted(Path(file_dir).glob(\"*.mojo\")))\n if file_names:\n return file_names\n else:\n raise FileNotFoundError(\n \"No valid model files were found in the provided file directory.\"\n )", "def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])", "def get_files(target_files, config):\n out = []\n find_fn = _find_file(config)\n for fname_in in target_files.keys():\n if isinstance(fname_in, (list, tuple)):\n fnames = fname_in\n else:\n fnames = fname_in.split(\";\")\n for fname in fnames:\n remote_fname = find_fn(fname)\n if remote_fname:\n if isinstance(remote_fname, (list, tuple)):\n out.extend(remote_fname)\n else:\n out.append(remote_fname)\n return out", "def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder", "def build_client_snapshot(self):\n 
self.client_snapshot = {}\n for dirpath, dirs, files in os.walk(self.cfg['sharing_path']):\n for filename in files:\n filepath = os.path.join(dirpath, filename)\n unwanted_file = False\n for r in Daemon.IGNORED_REGEX:\n if re.match(r, filepath) is not None:\n unwanted_file = True\n print 'Ignored Path:', filepath\n break\n if not unwanted_file:\n relative_path = self.relativize_path(filepath)\n with open(filepath, 'rb') as f:\n self.client_snapshot[relative_path] = ['', hashlib.md5(f.read()).hexdigest()]", "def load_remote_files(self,\n local_dir,\n remote_dir,\n files):\n # determine where the json files are\n s3_bucket = os.environ.get('S3_BUCKET', None)\n if not s3_bucket:\n self.logger.critical('handle: No S3_BUCKET environment variable found')\n return False\n \n bucket_name = s3_bucket.split('.')[0]\n self.logger.info('handle: Found S3_BUCKET environment variable {}'.format(bucket_name))\n local_dir = \"data\"\n remote_dir = \"\"\n s3_success = self.handle_s3(bucket_name,\n local_dir=local_dir,\n remote_dir=remote_dir,\n files=files)\n return s3_success", "def find_media_files(dirs, exclude_dirs=None):\n def condition(file_):\n return file_.isreg() and file_.check_regex(MEDIA_EXTENSIONS_REGEX)\n\n def precondition(file_):\n for dir_ in exclude_dirs:\n if dir_ in file_.path:\n return False\n return True\n\n if exclude_dirs is None:\n exclude_dirs = []\n result = []\n for dir_ in dirs:\n for file_ in pd.find.find(dir_, condition, precondition):\n m = re.match(TV_SHOW_REGEX, file_.path)\n if not m:\n print \"Skipped %s\" % file_.path\n continue\n tv_show, season, episode = m.groups()\n tv_show = tv_show.replace('.', ' ')\n result.append((tv_show, int(season), int(episode), file_.path))\n\n return result", "def monitor_check(monitor, config, file_list, event_list, display_event):\n global job_sets\n global active_transfers\n global transfer_list\n # if there are already three or more transfers in progress\n # hold off on starting any new ones until they complete\n if active_transfers >= 2:\n return\n event_list = push_event(event_list, \"Running check for remote files\")\n monitor.check()\n new_files = monitor.new_files\n patterns = config.get('global').get('output_patterns')\n for file_info in new_files:\n for folder, file_type in patterns.items():\n if file_type in file_info['filename']:\n file_info['type'] = folder\n break\n\n checked_new_files = []\n\n for new_file in new_files:\n file_type = new_file.get('type')\n if not file_type:\n event_list = push_event(event_list, \"Failed accessing remote directory, do you have access permissions?\")\n continue\n file_key = \"\"\n if file_type in ['ATM', 'MPAS_AM', 'MPAS_CICE', 'MPAS_RST']:\n file_key = filename_to_file_list_key(new_file['filename'])\n elif file_type == 'MPAS_CICE_IN':\n file_key = 'mpas-cice_in'\n elif file_type == 'MPAS_O_IN':\n file_key = 'mpas-o_in'\n elif file_type == 'STREAMS':\n file_key = 'streams.cice' if 'cice' in new_file['filename'] else 'streams.ocean'\n elif file_type == 'RPT':\n if 'ocn' in new_file['filename']:\n file_key = 'rpointer.ocn'\n elif 'atm' in new_file['filename']:\n file_key = 'rpointer.atm'\n else:\n continue\n try:\n status = file_list[file_type][file_key]\n except KeyError:\n continue\n if not status:\n continue\n if status == SetStatus.DATA_READY:\n local_path = os.path.join(\n config.get('global').get('data_cache_path'),\n new_file['type'],\n new_file['filename'].split('/')[-1])\n if not os.path.exists(local_path):\n checked_new_files.append(new_file)\n continue\n if not 
int(os.path.getsize(local_path)) == int(new_file['size']):\n os.remove(local_path)\n checked_new_files.append(new_file)\n if status == SetStatus.NO_DATA:\n checked_new_files.append(new_file)\n\n # if there are any new files\n if not checked_new_files:\n # print 'no new files'\n return\n else:\n # print pformat(checked_new_files)\n pass\n\n # find which year set the data belongs to\n frequencies = config.get('global').get('set_frequency')\n for file_info in checked_new_files:\n if file_info['type'] != 'ATM':\n continue\n for freq in frequencies:\n year_set = filename_to_year_set(file_info['filename'], freq)\n for job_set in job_sets:\n if job_set.set_number == year_set and job_set.status == SetStatus.NO_DATA:\n job_set.status = SetStatus.PARTIAL_DATA\n # Spawn jobs for that yearset\n job_set = add_jobs(job_set)\n\n t_config = config.get('transfer')\n g_config = config.get('global')\n m_config = config.get('monitor')\n\n transfer_config = {\n 'size': t_config.get('size'),\n 'file_list': checked_new_files,\n 'globus_username': t_config.get('globus_username'),\n 'globus_password': t_config.get('globus_password'),\n 'source_username': m_config.get('compute_username'),\n 'source_password': m_config.get('compute_password'),\n 'destination_username': t_config.get('processing_username'),\n 'destination_password': t_config.get('processing_password'),\n 'source_endpoint': t_config.get('source_endpoint'),\n 'destination_endpoint': t_config.get('destination_endpoint'),\n 'source_path': t_config.get('source_path'),\n 'destination_path': g_config.get('data_cache_path') + '/',\n 'recursive': 'False',\n 'pattern': config.get('global').get('output_patterns'),\n 'ncclimo_path': config.get('ncclimo').get('ncclimo_path')\n }\n\n # Check if the user is logged in, and all endpoints are active\n endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]\n client = get_client()\n for endpoint in endpoints:\n r = client.endpoint_autoactivate(endpoint, if_expires_in=3600)\n if r[\"code\"] == \"AutoActivationFailed\":\n display_event.set()\n sleep(3)\n while not setup_globus(endpoints):\n sleep(1)\n display_event.clear()\n diaplay_thread = threading.Thread(target=start_display, args=(config, display_event))\n diaplay_thread.start()\n \n transfer = Transfer(transfer_config, event_list)\n\n for item in transfer.config.get('file_list'):\n item_name = item['filename'].split('/').pop()\n item_type = item['type']\n if item_type in ['ATM', 'MPAS_AM']:\n file_key = filename_to_file_list_key(item_name)\n elif item_type == 'MPAS_CICE':\n file_key = 'mpas-cice_in'\n elif item_type == 'MPAS_O':\n file_key = 'mpas-o_in'\n elif item_type == 'MPAS_RST':\n file_key = '0002-01-01'\n elif item_type == 'RPT':\n file_key = 'rpointer.ocn' if 'ocn' in item_name else 'rpointer.atm'\n elif item_type == 'STREAMS':\n file_key == 'streams.cice' if 'cice' in item_name else 'streams.ocean'\n file_list[item_type][file_key] = SetStatus.IN_TRANSIT\n\n start_file = transfer.config.get('file_list')[0]['filename']\n end_file = transfer.config.get('file_list')[-1]['filename']\n index = start_file.find('-')\n start_readable = start_file[index - 4: index + 3]\n index = end_file.find('-')\n end_readable = end_file[index - 4: index + 3]\n message = 'Found {0} new remote files, creating transfer job from {1} to {2}'.format(\n len(checked_new_files),\n start_readable,\n end_readable)\n event_list = push_event(event_list, message)\n logging.info('## ' + message)\n\n if not config.get('global').get('dry_run', False):\n 
while True:\n try:\n thread = threading.Thread(target=handle_transfer, args=(transfer, checked_new_files, thread_kill_event, event_list))\n except:\n sleep(1)\n else:\n thread_list.append(thread)\n thread.start()\n break", "def _find_matching_unfinished_file(\n self,\n bucket_id,\n file_name,\n file_info,\n emerge_parts_dict,\n encryption: EncryptionSetting,\n file_retention: FileRetentionSetting | None = None,\n legal_hold: LegalHold | None = None,\n custom_upload_timestamp: int | None = None,\n cache_control: str | None = None,\n check_file_info_without_large_file_sha1: bool | None = False,\n eager_mode: bool | None = False,\n ):\n\n file_retention = file_retention or NO_RETENTION_FILE_SETTING\n best_match_file = None\n best_match_parts = {}\n best_match_parts_len = 0\n\n for file_ in self.services.large_file.list_unfinished_large_files(\n bucket_id, prefix=file_name\n ):\n if file_.file_name != file_name:\n logger.debug('Rejecting %s: file name mismatch', file_.file_id)\n continue\n\n if file_.file_info != file_info:\n if check_file_info_without_large_file_sha1:\n file_info_without_large_file_sha1 = self._get_file_info_without_large_file_sha1(\n file_info\n )\n if file_info_without_large_file_sha1 != self._get_file_info_without_large_file_sha1(\n file_.file_info\n ):\n logger.debug(\n 'Rejecting %s: file info mismatch after dropping `large_file_sha1`',\n file_.file_id\n )\n continue\n else:\n logger.debug('Rejecting %s: file info mismatch', file_.file_id)\n continue\n\n if encryption is not None and encryption != file_.encryption:\n logger.debug('Rejecting %s: encryption mismatch', file_.file_id)\n continue\n\n if cache_control is not None and cache_control != file_.cache_control:\n logger.debug('Rejecting %s: cacheControl mismatch', file_.file_id)\n continue\n\n if legal_hold is None:\n if LegalHold.UNSET != file_.legal_hold:\n logger.debug('Rejecting %s: legal hold mismatch (not unset)', file_.file_id)\n continue\n elif legal_hold != file_.legal_hold:\n logger.debug('Rejecting %s: legal hold mismatch', file_.file_id)\n continue\n\n if file_retention != file_.file_retention:\n logger.debug('Rejecting %s: retention mismatch', file_.file_id)\n continue\n\n if custom_upload_timestamp is not None and file_.upload_timestamp != custom_upload_timestamp:\n logger.debug('Rejecting %s: custom_upload_timestamp mismatch', file_.file_id)\n continue\n\n finished_parts = {}\n\n for part in self.services.large_file.list_parts(file_.file_id):\n\n emerge_part = emerge_parts_dict.get(part.part_number)\n\n if emerge_part is None:\n # something is wrong - we have a part that we don't know about\n # so we can't resume this upload\n logger.debug(\n 'Rejecting %s: part %s not found in emerge parts, giving up.',\n file_.file_id, part.part_number\n )\n finished_parts = None\n break\n\n # Compare part sizes\n if emerge_part.get_length() != part.content_length:\n logger.debug(\n 'Rejecting %s: part %s size mismatch', file_.file_id, part.part_number\n )\n continue # part size doesn't match - so we reupload\n\n # Compare part hashes\n if emerge_part.is_hashable() and emerge_part.get_sha1() != part.content_sha1:\n logger.debug(\n 'Rejecting %s: part %s sha1 mismatch', file_.file_id, part.part_number\n )\n continue # part.sha1 doesn't match - so we reupload\n\n finished_parts[part.part_number] = part\n\n if finished_parts is None:\n continue\n\n finished_parts_len = len(finished_parts)\n\n if finished_parts and (\n best_match_file is None or finished_parts_len > best_match_parts_len\n ):\n best_match_file = file_\n 
best_match_parts = finished_parts\n best_match_parts_len = finished_parts_len\n\n if eager_mode and best_match_file is not None:\n break\n\n return best_match_file, best_match_parts", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def find_remote_files(remote_path, type, ssh):\n (ssh_in, ssh_out, ssh_err) = ssh.exec_command(\"find %s -name \\\"*\\\" -type %s\" % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files", "def local_images(self, images, task_vars):\n return [\n image for image in images\n if self.is_image_local(image, task_vars)\n ]" ]
[ "0.6446468", "0.59767133", "0.5730664", "0.5725296", "0.5656467", "0.5532549", "0.55086225", "0.55084115", "0.5492199", "0.5476868", "0.54651487", "0.5445001", "0.5409476", "0.54082286", "0.53627706", "0.53226095", "0.5322206", "0.53167725", "0.5312311", "0.52992505", "0.52837795", "0.52400154", "0.5230779", "0.52141875", "0.52138233", "0.52003235", "0.5195438", "0.51881725", "0.5178879", "0.51775926", "0.5140811", "0.51281816", "0.5127684", "0.5115935", "0.5111049", "0.51080793", "0.50831467", "0.50811666", "0.50599575", "0.50558704", "0.50401413", "0.5012482", "0.50063664", "0.49945682", "0.49939317", "0.49894705", "0.4987397", "0.49836314", "0.49815875", "0.49803016", "0.49737445", "0.49485344", "0.4930201", "0.4925224", "0.49154964", "0.4914237", "0.49040043", "0.489386", "0.48740676", "0.48686704", "0.48646078", "0.48619413", "0.48512655", "0.48492187", "0.48310316", "0.48282725", "0.48257798", "0.48207307", "0.48173505", "0.4815537", "0.48092428", "0.4807557", "0.4806719", "0.48042518", "0.48028105", "0.48018393", "0.48011655", "0.48008418", "0.48007324", "0.47999278", "0.47995168", "0.4797797", "0.4788066", "0.47853675", "0.47792476", "0.4777926", "0.4776856", "0.47761586", "0.47760335", "0.47705916", "0.47695836", "0.47682682", "0.47676322", "0.47673613", "0.4760099", "0.4748139", "0.47464395", "0.4739428", "0.47385755", "0.4736908" ]
0.74489295
0
Is the filename tracked in the remote metadata dict. The file may not even be locally tracked yet
def _is_tracked(filename, metadata):
    current_local_sha = local_metadata.get(filename, None)
    current_remote_sha = metadata.get(filename, None)
    return current_local_sha is not None \
        and current_remote_sha is not None \
        and current_local_sha == current_remote_sha
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote_cached(cls, target_filename):\n is_cached = None\n cache = cls.CACHE_BACKEND()\n for file_name, file_id in cache.search():\n if file_name == os.path.basename(target_filename):\n is_cached = file_id\n logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)\n break\n return is_cached", "def is_local(self):\n try:\n return os.path.isfile(self.get_absolute_path())\n except ValueError:\n logger.error(\"'%s' is not a file\", self.get_absolute_path())\n except TypeError: # no datafile available or file does not exist\n pass\n return False", "def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True", "def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False", "def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )", "def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True", "def has_local_tails_file(self) -> bool:\n tails_file_path = Path(self.get_receiving_tails_local_path())\n return tails_file_path.is_file()", "def exists(self):\n\n return os.path.exists(self[\"~filename\"])", "def local(self):\r\n return self._url.scheme in ('', 'file')", "def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)", "def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')", "def is_local_file(string):\n assert isinstance(string, basestring)\n return os.path.isfile(string)", "def 
is_new_file(self):\n return self.filename is None", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def is_local(self):\n if not \"COLLABORATIVE\" in self._file.upper():\n LOGGER.debug(['AIE4606', 'match_false'], {'file': self._file})\n return True\n else:\n LOGGER.debug(['AIE4607', 'match_true'], {'file': self._file})\n return False\n return self._is_local", "def has_file(self) -> bool:\n return self._file is not None", "def is_present(self):\n return self.file_is_present()", "def _dist_has_meta_data(dist: pkg_resources.Distribution) -> bool:\n return dist.has_metadata('direct_url.json')", "def is_file_exists(self):\n pass", "def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))", "def isfile(self):\n return os.path.isfile(self.path)", "def names_singleton(self):\r\n if self.stream:\r\n return True\r\n else:\r\n return os.path.isfile(self.object_name)", "def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.name\n elif hasattr(fileInstance,\"url\"): name=fileInstance.url\n if name in self.emptyFileFlag: return self.emptyFileFlag[name]\n else: return False", "def in_file(self):\n return self.on_disk and not self.in_cached_file", "def exists(self):\n result = super().exists()\n if result:\n logger.debug(\"Found local file or directory %s\", self.path)\n else:\n logger.warning(\"Cannot find local file or directory %s\", self.path)\n return result", "def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return response['Metadata']['status'] == 'uploaded'\n return False", "def has_file(self, name):\n return name in self.files", "def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def has_checksum_file(self):\n return self.checksum_file_path.is_file()", "def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False", "def isCached(filename, hash):\r\n path = cachePath(filename)\r\n if not os.path.exists(path):\r\n return False\r\n \r\n return hash == hashlib.sha1(open(path, 'rb').read()).hexdigest()", "def exists(self):\n return self.path.is_file()", "def DoesModifiedExist(name):\n if os.path.exists(GetModifiedFilename(name)):\n return True\n else:\n return False", "def exists(self, _uri):\n #print(\"%s %s\"%(_uri))\n\n\n #-------------------- \n # Query logged files before checking\n #-------------------- \n if (os.path.basename(_uri) in self.fileDict):\n return True\n\n\n\n #-------------------- \n # Clean string\n #-------------------- \n xnatUrl = 
Xnat.path.makeXnatUrl(self.host, _uri)\n parentDir = Xnat.path.getUriAt(xnatUrl, 'files')\n for i in self.__getJson(parentDir):\n if os.path.basename(xnatUrl) in i['Name']:\n return True \n return False", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False", "def exist(self):\n return self.file_path.exists()", "def file_is_modified(filename, lastupdate):\n now = datetime.datetime.utcnow()\n update = file_get_mdatetime(filename)\n return now >= update and update >= lastupdate", "def has_file_key(self, key):\n return self.fileList.has_key( key )", "def _is_downloaded(self):\n return self._system.file_exists(self._tar_name)", "def is_discord_file(obj):\n return (obj.__class__.__name__) == \"File\"", "def file_is_present(self, key=None):\n return os.path.isfile(self.file_path(key))", "def object_exists(self, fname):\n return True", "def exists(self, filename):\n return filename in self.files", "def exists(self, name):\n # django 判断文件名是否可用\n return False # 代表就是可用的新文件", "def is_prepared(self):\n return os.path.exists(os.path.join(self.location, INFO_NM))", "def is_declaring_file(self, address, file_path):", "def is_hash_locally_cached(self, ipfs_hash: str, ipfs_refs_local=None) -> bool:\n output = run([\"ipfs\", \"files\", \"stat\", \"--with-local\", \"--size\", f\"/ipfs/{ipfs_hash}\"])\n if \"(100.00%)\" in output:\n log(\"already fully cached\", \"green\")\n log(output)\n return True\n else:\n log(\"not fully cached\", \"red\")\n log(output)\n return False", "def is_file(self):\n return self.type == \"file\"", "def object_exists(self, fname):\n return False", "def is_data_by_filename(fname):\n return \"Run2017\" in fname", "def _is_remote_file_different(local_file, remote_file, ftp_connection, fatal_if_nonexistant=False, local_must_be_newer=False):\n # Check for an error, if the error is that the file does not exist. By default, if the remote file does not exist,\n # assume that means that it needs to be uploaded. 
However, if fatal_if_nonexistant is True, then raise an exception.\n try:\n remote_size, remote_mtime = _remote_file_size_modtime(ftp_connection, remote_file)\n except error_perm: # I'm assuming that error_perm is only raised if the file doesn't exist, which is probably incorrect, but I have no way to test if you don't have permission to access the file\n if not fatal_if_nonexistant:\n return False\n else:\n raise\n\n local_size, local_mtime = _local_file_size_modtime(local_file)\n # We need to remove the sub-second components of the local mtime, because it is not required of the FTP MDTM command\n # that we use to get the remote time that it include smaller time resolution than seconds.\n local_mtime = local_mtime.replace(microsecond=0)\n \n if local_must_be_newer:\n return local_mtime > remote_mtime or local_size != remote_size\n else:\n return local_mtime != remote_mtime or local_size != remote_size", "def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False", "def needs_update(self, *path):\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n try:\n linfo = self.info(*path)\n dt_local = datetime.datetime.strptime(\n linfo[\"datetime\"][:19], dt_fmt)\n dt_server = datetime.datetime.strptime(\n self.serverfiles.info(*path)[\"datetime\"][:19], dt_fmt)\n return dt_server > dt_local\n except FileNotFoundError:\n return True\n except KeyError:\n return True", "def exists(self):\n log.warning('Could not determine whether %s exists due to unhandled scheme.', self.file_name)", "def isFile( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.isFile: Attempting to determine whether %s paths are files.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n res = serviceClient.getMetadata( url )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'File':\n gLogger.debug( \"DIPStorage.isFile: Successfully obtained metadata for %s.\" % url )\n successful[url] = True\n else:\n successful[url] = False\n else:\n failed[url] = 'File does not exist'\n else:\n gLogger.error( \"DIPStorage.isFile: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash", "def file_exists(msl_data_path, filename):\n return os.path.isfile(msl_data_path + filename)", "def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def save_file(self, username, filename) -> bool:\r\n file_id = self.get_file_id(username, filename)\r\n file_path = self.users_dir / username / filename\r\n return (file_id in self.patch_history) and self.try_save_file(\r\n file_path, self.patch_history[file_id])", "def file_exists(self):\n return os.path.exists(self._fileName)", "def valid(self):\n return (self.get(\"~#mtime\", 0) and\n 
self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def _update_fileinfo(self, metadata_filename):\n \n # In case we delayed loading the metadata and didn't do it in\n # __init__ (such as with delegated metadata), then get the file\n # info now.\n \n # Save the path to the current metadata file for 'metadata_filename'.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n # If the path is invalid, simply return and leave fileinfo unset.\n if not os.path.exists(current_filepath):\n self.fileinfo[current_filepath] = None\n return\n \n # Extract the file information from the actual file and save it\n # to the fileinfo store.\n file_length, hashes = tuf.util.get_file_details(current_filepath)\n metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)\n self.fileinfo[metadata_filename] = metadata_fileinfo", "def exists_and_is_more_recent(cont, filename, mtime):\n if not os.path.exists(filename):\n return False\n\n mtimes_dir = cont.named_cache_dir(\"mtimes\")\n digest = os.path.join(mtimes_dir,\n hashlib.md5(filename.encode(\"utf-8\")).hexdigest())\n fetched_mtime = fetch_mtime_from(digest)\n\n if fetched_mtime > mtime:\n return True\n elif fetched_mtime == 0:\n # No mtime was stored on disk, get filesystem mtime and store\n # it for caching later. We don't usually use the filesystem\n # mtime since it isn't tar safe.\n store_file_mtime_in(filename, digest)\n if os.stat(filename).st_mtime > mtime:\n return True\n\n return False", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def test_files_present(self, changes_file):\n for filename in changes_file.get_files():\n log.debug('Looking whether %s was actually uploaded' % filename)\n if os.path.isfile(os.path.join(pylons.config['debexpo.upload.incoming'], filename)):\n log.debug('%s is present' % filename)\n else:\n log.critical('%s is not present; importing cannot continue' % filename)\n raise OSError(\"Missing file %s in incoming\" % (filename))\n\n return True", "def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False", "def checkFileInObject(self, obj, fp):\n\n containted = False\n ## Haso of all files\n filehash = self.getObjectFilesHash(obj)\n\n ## Local hash\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n\n with fp.open('rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n localhash = hasher.hexdigest()\n\n if localhash.upper() in filehash:\n containted = True\n\n return containted", "def get_mock_status(filename):\n return filename", "def _is_always_unsatisfied(self):\n # If this is a github sha tarball, then it is always unsatisfied\n # because the url has a commit sha in it and not the version\n # number.\n url = self._req.url\n if url:\n filename = filename_from_url(url)\n if filename.endswith(ARCHIVE_EXTENSIONS):\n filename, ext = splitext(filename)\n if is_git_sha(filename):\n return True\n return False", "def efile_exists(self):\n return os.path.isfile(self.efile)", "def object_exists(self, fname):\n return self.object_exists", "def file_downloaded(filename):\n fc = pathlib.Path(filename)\n if fc.is_file():\n return True\n else:\n return False", "def is_file(filename):\n return os.path.isfile(filename)", "def has_source_file( self ):\n return self._source_file is not None", "def has(self, key):\n return os.path.isfile(self._filename(key))", "def get_metadata_file(self, file_in_cache):\n return re.sub(r'\\.tar$', '.json', 
file_in_cache)", "def isfile (self, path):\r\n pass", "def log_file_exist(self, file_path_name):\n return os.path.isfile(file_path_name)", "async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False", "def test_file_managed_keep_source_false_http(\n file, tmp_path, remote_grail_scene33, modules\n):\n name = str(tmp_path / \"testfile\")\n # Run the state\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n keep_source=False,\n )\n assert ret.result is True\n\n # Now make sure that the file is not cached\n ret = modules.cp.is_cached(remote_grail_scene33.url)\n assert not ret, \"File is still cached at {}\".format(ret)", "def verify_fields(msg: dict, data_path: str) -> bool:\n if \"metadata\" in msg and \"filename\" in msg:\n if os.path.isfile(data_path + msg[\"filename\"]):\n return True\n return False", "def valid_tpkg_file(self, path):\n\n\t\tprint(self.config[\"daemon\"][\"rootdir\"] + path)\n\t\tif os.path.exists(self.config[\"daemon\"][\"rootdir\"] + \"/\" + path):\n\t\t\treturn self.fetch_remote_hashcode(path) == self.fetch_local_hashcode(path)\n\t\telse:\n\t\t\tprint(\"Package: \" + path + \" has not been downloaded.\");\n\t\treturn False", "def download_needed(self, response, outfile, quiet=True):\r\n try:\r\n remote_date = datetime.strptime(response.headers['Last-Modified'],\r\n '%a, %d %b %Y %X %Z')\r\n if isfile(outfile):\r\n local_date = datetime.fromtimestamp(os.path.getmtime(outfile))\r\n if remote_date <= local_date:\r\n if not quiet:\r\n print(\r\n os.path.basename(outfile) +\r\n ': Skipping, found more recently modified local '\r\n 'copy (use --force to force download)')\r\n return False\r\n except:\r\n pass\r\n return True", "def is_already_ignored(file_rel, ignore_filename):\n with open(ignore_filename) as ignore_file:\n for line in ignore_file:\n l = line.strip()\n if l == file_rel:\n return True\n return False", "def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject 
in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True", "def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False", "def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag", "def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False", "def is_local(path: str) -> str:\n import os\n\n URL = path\n if os.path.exists(path) or path.startswith(\"file\"):\n if not URL.startswith(\"file\"):\n URL = f\"file://{URL}\"\n return URL", "def is_multi_file(self):\n return 'files' in self.torrent['info']", "def check_file_exist(self):\n return False", "def _verfiy_upload(self, file_local_path, file_id):\n local_sha1 = hash_utils.calc_file_sha1_hex_str(file_local_path)\n metadata = self._get_file_metadata(file_id)\n\n if 'sha1Hash' in metadata['file']['hashes']:\n if metadata['file']['hashes']['sha1Hash'].lower() != local_sha1:\n # Hashes don't match, delete the file on the server\n self.delete_item_by_id(file_id)\n logger.error('Checksums after upload of file {} to OneDrive didn\\'t match, '\n 'deleted the file on the server.'.format(file_local_path))\n return False\n\n return True", "def is_file_ingested(self, original_name, tablename):\n prep_stmt = self.session.prepare(\n 'SELECT * FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])\n )\n bound = prep_stmt.bind([original_name])\n results = self.session.execute(bound)\n return True if len(results.current_rows) > 0 else False", "def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())", "def file_exist() -> bool:\n pass", "def is_remote(self):\n return False" ]
[ "0.7122534", "0.6666199", "0.6614642", "0.6477976", "0.6251149", "0.6231352", "0.61951226", "0.6183894", "0.6110177", "0.6092684", "0.6037549", "0.6031686", "0.60248667", "0.6003988", "0.598465", "0.59665877", "0.59620124", "0.594347", "0.5927977", "0.58981687", "0.58882433", "0.5848515", "0.5846766", "0.5826577", "0.582648", "0.5804992", "0.5803461", "0.58014953", "0.57973117", "0.57946914", "0.57750154", "0.57611865", "0.5756652", "0.5740863", "0.5737669", "0.57277083", "0.5720412", "0.57026947", "0.5686912", "0.5685688", "0.56608725", "0.5659844", "0.5657703", "0.56538993", "0.5653157", "0.56408554", "0.5627712", "0.56178796", "0.5613505", "0.5602196", "0.55974245", "0.55972505", "0.55901974", "0.55856013", "0.5585467", "0.55822134", "0.556803", "0.55639243", "0.5550187", "0.55489033", "0.55457824", "0.55382735", "0.5514633", "0.5510226", "0.55062014", "0.55030966", "0.5491364", "0.54902303", "0.54884726", "0.5481123", "0.5478452", "0.5476163", "0.54714656", "0.54694325", "0.54632205", "0.54619807", "0.54442704", "0.5443928", "0.5443086", "0.54301554", "0.54111266", "0.5410291", "0.540877", "0.5405634", "0.53976214", "0.53926975", "0.5392491", "0.53911996", "0.53886616", "0.53800607", "0.53726536", "0.5370419", "0.5366592", "0.5356712", "0.5356214", "0.53560174", "0.53545606", "0.5353288", "0.53519875", "0.53504443" ]
0.811574
0
Is the filename inside any of the IGNORE_DIRS list
def _is_inside_ignored_dir(filename):
    ignore_dirs = ['./' + x for x in IGNORE_DIRS]
    return any([filename.startswith(x) for x in ignore_dirs])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ignore(ignored_dirs, path):\n return any([normpath(path).startswith(ignore_dir) for ignore_dir in ignored_dirs])", "def ignore(directory):\n for pattern in ignore_patterns:\n if pattern in directory:\n return True\n\n return False", "def _is_ignored(self, full_path):\n for ignor in self._ignored:\n if fnmatch.fnmatch(full_path, \"*/\" + ignor):\n return True\n return False", "def ignore_patterns(self, relpath):\n names = relpath.split('/')\n for name in names:\n for pattern in self.ignore:\n if fnmatch.fnmatch(name, pattern):\n return True\n return False", "def is_dir_ignored_file(file_name, cfg):\n if file_name:\n for pattern in cfg.options.dir_ignored_files:\n if fnmatch.fnmatch(file_name, pattern):\n return True\n return False", "def check_if_file_is_ignored(file_path):\n path_parts = file_path.split('/')\n\n for part in path_parts:\n if part in INGORED_PATHS:\n return True", "def _should_skip_file(path):\n for pattern in IGNORE_PATTERN_LIST:\n if pattern in path:\n return True\n\n return False", "def is_ignored(file, ignored):\n return any(i in PurePath(path.abspath(file)).parts for i in ignored)", "def dirname_filter ( self, dirname, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( dirname, pat ) for pat in self.DIRNAMES_IGNORE\n )", "def dir_excluded(path):\n\tname = os.path.basename(path)\n\t# skip any dirs which start with . (dot) and in EXCLUDED_DIRS\n\tif name.startswith('.') and u'.*' in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs in EXCLUDED_DIRS\n\tif name in EXCLUDED_DIRS or path in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs that are found in reg exp checks including wildcard searches\n\tfound_dir = False\n\tfound_path = False\n\tfor d in EXCLUDED_DIRS:\n\t\tif d == '.*':\n\t\t\tcontinue\n\t\tif d.startswith('*') and d.endswith('*'):\n\t\t\td = d.replace('*', '')\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.startswith('*'):\n\t\t\td = d + '$'\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.endswith('*'):\n\t\t\td = '^' + d\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif d == name:\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif d == path:\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\n\tif found_dir or found_path:\n\t\treturn True\n\n\treturn False", "def included(path):\n if path.endswith(Env.IGNORED_TEST_DIRS):\n return False\n return path.endswith('.py') or os.path.isdir(path)", "def _ignore(path):\n return any(re.match(pattern, path) for pattern in ignore)", "def test_file_paths(self, site):\n \n if site.home_page.contains_any_pattern(\n ['/etc/designs/','/libs/cq/', '/libs/wcm/', '/content/dam/']\n ):\n return 1\n else:\n return 0", "def FilterDirectory(dirpath, filenames):\n if not dirpath or not filenames:\n return False\n for no_crawl_dir in NO_CRAWL_DIRS:\n if no_crawl_dir in dirpath:\n return False\n return True", "def should_ignore_path(path):\n for p in config.compiled_ignore_patterns:\n if p.match(path):\n return True\n return False", "def exclude_filter(path):\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return 
firstline.startswith('```{include}') # duplicate file", "def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def is_in_directory(f):\n f = os.path.dirname(f) + os.path.sep\n return any(f.startswith(d) for d in dirs_to_group)", "def _is_in_excluded_patterns(self, path):\n for excluded_pattern in self._excluded_patterns:\n expandeduser_pattern = os.path.expanduser(excluded_pattern)\n if (path + '/').find(expandeduser_pattern) != -1:\n return True\n return False", "def folder_filter(folder_name):\n excluded_folders = get_setting('excluded_folders', [])\n folder_name = folder_name.rstrip(os.sep) + os.sep\n return True not in [exc in folder_name for exc in excluded_folders]", "def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True", "def path_excluded(self,path):\n\t\tfor pattern in self.excludes['file_exclude']:\n\t\t\tif pattern in path:\n\t\t\t\t#print \" \u001b[41mExcluding:\u001b[m\",path\n\t\t\t\treturn True\n\t\treturn False", "def ignore_path(path, ignore_list=None, whitelist=None):\n if ignore_list is None:\n return True\n\n should_ignore = matches_glob_list(path, ignore_list)\n if whitelist is None:\n return should_ignore\n\n return should_ignore and not matches_glob_list(path, whitelist)", "def skip(*filenames):\r\n for filename in filenames:\r\n if not os.path.isfile(filename):\r\n return False\r\n return True", "def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def _include_directory(self, root_parts):\n # include root\n if len(root_parts) == 0:\n return True\n\n # don't include lwc tests\n if root_parts[0] == \"lwc\" and any(part.startswith(\"__\") for part in root_parts):\n return False\n\n # include everything else\n return True", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def blocks(self, dirs):\n return any([d in self.directories for d in dirs])", "def checkForFile(self, filename:str):\n\t\tfor item in os.listdir(self.getPath()):\n\t\t\tif filename in item:\n\t\t\t\treturn True\n\t\treturn False", "def dir_filter(item):\n return not item.startswith(\"_\")", 
"def predicate(path):\n p = os.path.abspath(path)\n return any(p == d or p.startswith(d + os.path.sep)\n for d in directories)", "def _split_exists( file_list, target_locus ):\n for filename in file_list:\n basename = filename.split('.')[0]\n parts = basename.split('_')\n if parts[-1] in ['5p', '3p'] and parts[-2] == target_locus:\n return True\n return False", "def has_leading_dir(paths):\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True", "def ignore_certain_metainf_files(filename):\n ignore = (\"META-INF/manifest.mf\",\n \"META-INF/*.sf\",\n \"META-INF/*.rsa\",\n \"META-INF/*.dsa\",\n \"META-INF/ids.json\")\n\n for glob in ignore:\n # Explicitly match against all upper case to prevent the kind of\n # runtime errors that lead to https://bugzil.la/1169574\n if fnmatch.fnmatchcase(filename.upper(), glob.upper()):\n return True\n return False", "def paths_exist(path_list):\n valid = True\n for path in path_list:\n if path and not os.path.exists(path):\n log(\"WARNING: The path %s does not exist!\" % path)\n valid = False\n return valid", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def contains_dir_path(file_name: str) -> bool:\n return os.path.sep in file_name", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def exclude_file(dirname, filename, excluded_files):\n rel_path = os.path.join(dirname, filename)\n for f in excluded_files:\n if not f:\n continue\n elif os.path.isfile(f) and rel_path == f:\n return True\n elif os.path.isdir(f) and f in rel_path:\n return True\n return False", "def is_excluded(root, excludes):\n sep = path.sep\n if not root.endswith(sep):\n root += sep\n for exclude in excludes:\n if root.startswith(exclude):\n return True\n return False", "def _supports_make_dirs(path):\n prefixes = [\"/bigstore\", \"gs://\"]\n return not any(path.startswith(prefix) for prefix in prefixes)", "def checkIfFileExistsInPossibleLocations(testConfig):\n assert \"name\" in testConfig\n assert \"file\" in testConfig\n assert \"file_locations\" in testConfig\n testPass = False\n for filePath in testConfig[\"file_locations\"]:\n if isfile(join(filePath,testConfig[\"file\"])):\n testPass=True\n \n assert testPass,\"Failure for package \"+testConfig[\"name\"]+\"\\n File: \"+\\\n testConfig[\"file\"]+\" does not exist\"+\"\\nSearched in \"+\\\n str(testConfig[\"file_locations\"])", "def test_ignores(self, tmpdir):\n from pytest_flake8 import Ignorer\n ignores = [\"E203\", \"b/?.py E204 W205\", \"z.py ALL\", \"*.py E300\"]\n ign = Ignorer(ignores)\n assert ign(tmpdir.join(\"a/b/x.py\")) == \"E203 E204 W205 E300\".split()\n assert ign(tmpdir.join(\"a/y.py\")) == \"E203 E300\".split()\n assert ign(tmpdir.join(\"a/z.py\")) is None", "def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])", "def check_configfiles():\n return (all(os.path.isdir(x) for x in CONFIG_DIRS) and\n os.path.isfile(CONFIG_FILE) and os.path.isfile(LOG_CONFIG_FILE))", "def ignore(self, directory, files):\n ignore_list = []\n ignores = ('build', 'var')\n build = os.path.join(directory, 'build')\n var = os.path.join(directory, 'var')\n for filename in files:\n full_path = os.path.join(directory, filename)\n if full_path.startswith(build):\n 
ignore_list.append(filename)\n if full_path.startswith(var):\n ignore_list.append(filename)\n\n return ignore_list", "def enforce_exclusion(folder_name, verbosity):\n exclusion_folder_start = [\".\", \"_\"] # skip folders that start with any of these characters\n if any([str(PurePath(folder_name)).startswith(each) for each in exclusion_folder_start]):\n if verbosity:\n show_verbose_output(verbosity, folder_name, \" starts with one of \", exclusion_folder_start, \" skipping\\n\")\n return True\n return False", "def isDir(self, fname):\n\t\tif fname in self.getAllDirs():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def output_files_exist(self):\n return all([split.exists() for split in self.split_files])", "def __contains__(self, proj_dir):\n for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\n return False", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def _any_file_exists(rootDir: str, filenames: List[str]) -> str:\n\n for file in filenames:\n fullFilepath = join(rootDir, file)\n if isfile(fullFilepath):\n return fullFilepath\n return None", "def exists(self):\n basedir = os.path.dirname(self.path)\n\n for filename in self.files:\n path = os.path.join(basedir, filename)\n if not os.path.exists(path):\n return False\n\n return True", "def is_dir(self, path):", "def contains(\n path: str,\n required: list\n ) -> bool:\n for file in path.iterdir():\n try:\n required.remove(file.name)\n except ValueError:\n pass\n\n if required:\n return False\n else:\n return True", "def _verify_prefix(prefix, files):\n for f in files:\n f = os.path.join(prefix, f)\n if not os.path.exists(f):\n return False\n else:\n return True", "def test_matches__directories_only(self):\n path_rule1 = gitignore_parser(\"z/?u*ns/\")[0]\n \"This is a directories only rule\"\n self.assertTrue(\n path_rule1.directories_only\n )\n \"And it matches as it should be\"\n self.assertTrue(\n path_rule1.matches(\n CPath(\"z/humans/\")\n )\n )\n\n path_rule2 = gitignore_parser(\"z/?uman\")[0]\n \"This is NOT a directories only rule\"\n self.assertFalse(\n path_rule2.directories_only\n )\n \"But it matches as it should be\"\n self.assertTrue(\n path_rule2.matches(CPath(\"z/human\"))\n )\n \"It matches both filesCpath (above) and directories (below)\"\n self.assertTrue(\n path_rule2.matches(CPath(\"z/human/\"))\n )", "def dir_exists(self, path):\n return 
self._dir_model.get_by_name(name=path) != []", "def is_ignored(string: str) -> bool:\n return any([fnmatch.fnmatch(string, pattern) for pattern in IGNORE_LIST])", "def _path_exclude(self,path):\n\t\t#exclusions = [\t'10A103_Milky_Way_DA_NY/flame_settings_BU',\n\t\t#\t\t\t'/2010_archive/conformFS/p6/0'\n\t\t#\t\t\t]\n\t\t#for exc in exclusions:\n\t\t#\tif exc in path:\n\t\t#\t\treturn True\n\t\treturn False", "def filename_filter ( self, filename, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( filename, pat ) for pat in self.FILENAMES_IGNORE\n )", "def _include_path(self, path, extensions=None):\r\n if extensions is None:\r\n extensions = tuple(self.readers.extensions)\r\n basename = os.path.basename(path)\r\n\r\n #check IGNORE_FILES\r\n ignores = self.settings['IGNORE_FILES']\r\n if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):\r\n return False\r\n\r\n if extensions is False or basename.endswith(extensions):\r\n return True\r\n return False", "def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False", "def test_check_exclude_none(self):\n\n self.assertTrue(DirExclude([]).check(self.file_gitignore))\n self.assertTrue(DirExclude([]).check(self.file_perceval))\n self.assertTrue(DirExclude([]).check(self.file_authors))\n self.assertTrue(DirExclude([]).check(self.file_tests))\n self.assertTrue(DirExclude([]).check(self.file_bin))", "def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]", "def Whitelisted(path):\n return os.path.basename(path) == 'OWNERS'", "def IsValidPath(path):\n path = path.lower()\n if any(path.endswith(extension) for extension in EXCLUDED_EXTENSIONS):\n return False\n\n segments = path.split('/')\n filename = segments[-1]\n if filename.startswith('.') or filename in EXCLUDED_FILENAMES:\n return False\n\n dirs = segments[:-1]\n # allow META-INF/services at the root to support ServiceLoader\n if dirs[:2] == ['meta-inf', 'services']:\n return True\n\n return not any(dir in EXCLUDED_DIRECTORIES for dir in dirs)", "def is_skip_reason_other(notebook):\n directories = Path(notebook).parents\n\n if str(notebook) in SKIP_LIST:\n return True\n elif any([str(directory) in SKIP_LIST for directory in directories]):\n return True\n return False", "def contains_dir(target_dir: str) -> bool:\n for dir_path, dir_names, files in os.walk(os.getcwd()):\n for name in dir_names:\n if name == target_dir:\n return True\n\n return False", "def empty_dir(value):\n return not os.listdir(value)", "def check_file_list(any_format,ignore_warnings=True, warnings=[]):\n import os\n from glob import glob\n folders = [any_format]\n if os.is_file(any_format):\n with open(any_format) as f: folders = [_f for _f in f]\n if any_format[-1]==\"*\": folders = list(glob(any_format))\n valid_folders = []\n for f in folders:\n c = context.folder_context(f)\n if not c.meta_key[\"valid\"]: warnings.append(f)\n else: valid_folders.append(c.meta_key)\n \n #list the goods and the bads by simply listing the info for the folder but then say warnings in tabs if req\n \n if ignore_warnings == False and len(warnings) > 0:\n #some folders cannot be processed. 
Do you want to continue(y) or quit(n)?\n pass\n \n return valid_folders", "def _match_all(abs_dir, matching, not_matching):\n num_not_matching = 0\n\n for expression in matching:\n if not fnmatch.fnmatch(abs_dir, expression):\n num_not_matching += 1\n\n if num_not_matching == len(matching):\n return False\n\n for expression in not_matching:\n if fnmatch.fnmatch(abs_dir, expression):\n return False\n\n return True", "def remove_if_ignored(file_name):\n for pattern in IGNORE_PATTERNS:\n if re.match(pattern, file_name):\n remove(file_name)\n return True\n if exists(file_name):\n with open(file_name) as f:\n if IGNORE_FLAG in f.read():\n remove(file_name)\n return True\n return False", "def check_files(self, data_path):\n files = os.listdir(data_path)\n\n if 'test_batch' not in files:\n return False\n\n if 'batches.meta' not in files:\n return False\n\n for i in range(1, 6):\n if 'data_batch_{}'.format(i) not in files:\n return False\n\n return True", "def insignificant(path):\n\n # This part is simply an implementation detail for the code base that the\n # script was developed against. Ideally this would be moved out to a config\n # file.\n return path.endswith('Dll.H') or path.endswith('Forward.H') or \\\n path.endswith('templates.H')", "def tracked(path):\n return not any(fnmatch(part, pattern) for pattern in untracked for part in path.split(os.sep))", "def gitignored(self, path):\n if path.startswith(self.options.target_repo.location):\n repo_prefix_len = len(self.options.target_repo.location) + 1\n path = path[repo_prefix_len:]\n return self.gitignore.match_file(path)", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def parse_dirs(self):\n valid_subdirs = []\n if isinstance(self.dirs, list):\n for dir in self.dirs:\n for root, dirs, paths in os.walk(dir):\n valid_subdirs += [os.path.join(root, dir) for dir in dirs\n if os.path.isfile(os.path.join(root, dir, 'image.npy'))\n and os.path.isfile(os.path.join(root, dir, 'mask.npy'))]\n\n else:\n for root, dirs, paths in os.walk(self.dirs):\n valid_subdirs += [os.path.join(root, dir) for dir in dirs\n if os.path.isfile(os.path.join(root, dir, 'image.npy'))\n and os.path.isfile(os.path.join(root, dir, 'mask.npy'))]\n return valid_subdirs", "def isFilePresent(fileName):\n global dataFolder,blackListFiles\n allDataFiles = [f for f in listdir(dataFolder) if (isfile(join(dataFolder, f)) and f.endswith('.zip'))]\n return fileName in allDataFiles and not (fileName in blackListFiles)", "def create_href_checker(pattern, working_dir):\n file_list = os.listdir(working_dir)\n def check_href(href):\n \"\"\"Return whether a url is vlaid or not\"\"\"\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False\n return check_href", "def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. 
Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}", "def is_file_excluded(self, file_path: Union[str, os.PathLike]) -> bool:\n # TODO: current design of ignore file can't distinguish between files and directories of the same name\n if self._path_spec is None:\n self._path_spec = self._create_pathspec()\n if not self._path_spec:\n return False\n file_path = self._get_rel_path(file_path)\n if file_path is None:\n return True\n\n norm_file = normalize_file(file_path)\n matched = False\n for pattern in self._path_spec:\n if pattern.include is not None:\n if pattern.match_file(norm_file) is not None:\n matched = pattern.include\n\n return matched", "def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( 
self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return", "def _dir_empty(path):\n try:\n next(os.scandir(str(path)))\n except StopIteration:\n return True\n return False", "def is_directory(path_name):\n if not is_file(path_name):\n return True\n else:\n return False", "def ignore_pattern_filter(self, path):\n\n # Return False for the checksum file.\n if self.checksum_file_path.exists():\n if self.checksum_file_path.samefile(path):\n return False\n\n # Return True of no patterns where given.\n if self.ignore_patterns is None:\n return True\n\n # Loop through all patterns.\n for pattern in self.ignore_patterns:\n # Return False if the path matches an ignore pattern.\n if fnmatch.fnmatch(path.name, pattern):\n return False\n\n # If it reached this point, this means that it doesn't match any of the patterns,\n # so return True.\n return True", "def _check_missing_files_in_folder(self, expected_list_of_files):\n missing_files = [\n file_name for file_name in expected_list_of_files if self.folder_path / file_name not in self._ome_tif_files\n ]\n assert (\n not missing_files\n ), f\"Some of the TIF image files at '{self.folder_path}' are missing. The list of files that are missing: {missing_files}\"", "def should_skip(name, skip_list):\n if _arg_no_skip:\n return False\n\n \"\"\" Hardcoded skip, shell pattern match \"\"\"\n matched = [x for x in skip_list if fnmatch.fnmatch(name, x)]\n if len(matched) > 0:\n return True\n\n matched = [x for x in _extra_skip if fnmatch.fnmatch(name, x)]\n return len(matched) > 0", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def get_ignored_dirs(ci_ignore_path):\n with open(ci_ignore_path, 'r') as ignore_file:\n return set([\n normpath(line.strip())\n for line in ignore_file.readlines()\n if not line.startswith('#') and not is_blank(line)\n ])", "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def allow_patterns(*patterns):\n\n def _ignore_patterns(path, names):\n\n files_only = [\n name for name in names if not os.path.isdir(os.path.join(path, name))\n ]\n\n allowed_files = []\n for pattern in patterns:\n allowed_files.extend(fnmatch.filter(files_only, pattern))\n\n ignore_others = set(files_only) - set(allowed_files)\n return ignore_others\n\n return _ignore_patterns", "def 
is_in_folder(base_path, directory):\n return op.normpath(directory).startswith(base_path)", "def _IsIgnoredFileType(filename):\n for extension in _IGNORE_FILETYPES_FOR_MINIDUMP_PULLS:\n if filename.endswith(extension):\n return True\n return False", "def _IsIgnoredFileType(filename):\n for extension in _IGNORE_FILETYPES_FOR_MINIDUMP_PULLS:\n if filename.endswith(extension):\n return True\n return False" ]
[ "0.7432789", "0.742647", "0.7269846", "0.7173609", "0.7124053", "0.6995675", "0.69607145", "0.6938394", "0.69038445", "0.68344337", "0.6772172", "0.6730275", "0.6686163", "0.65734845", "0.6516576", "0.6510022", "0.6423632", "0.6392135", "0.637968", "0.6378461", "0.63330704", "0.6315168", "0.6250827", "0.6184499", "0.6183902", "0.61769605", "0.61649156", "0.61556274", "0.6104933", "0.6100495", "0.60961825", "0.6012341", "0.60107285", "0.59716624", "0.5965673", "0.5963919", "0.59134233", "0.5906232", "0.5891688", "0.5881657", "0.58801174", "0.5847072", "0.5832236", "0.58256525", "0.5813977", "0.5804551", "0.5801508", "0.57973367", "0.5796899", "0.5796899", "0.5796899", "0.5795074", "0.5793044", "0.5785419", "0.57724863", "0.57692647", "0.5768082", "0.57618505", "0.5743858", "0.5734644", "0.5724532", "0.57177055", "0.571031", "0.5701448", "0.5696682", "0.56908315", "0.56854886", "0.56839377", "0.5681443", "0.5680511", "0.56724364", "0.56691706", "0.5637779", "0.56176525", "0.5616012", "0.5582848", "0.5566003", "0.55647963", "0.5563712", "0.5558927", "0.5555048", "0.5553884", "0.55506146", "0.5549003", "0.5541537", "0.5535935", "0.5529722", "0.5529007", "0.5526614", "0.5524693", "0.55220497", "0.5504531", "0.54958105", "0.5485053", "0.5483529", "0.54772687", "0.5471654", "0.5470962", "0.54609406", "0.54609406" ]
0.81426775
0
Walks through all the subfolders in static_root and uploads every valid file found to S3. If Gzip is enabled, also tries to compress each static asset and upload the gzipped version.
def upload_all_to_s3(static_root):
    conn = _get_connection()
    files = _get_file_list(static_root)
    _build_local_metadata_file(files, home=static_root)
    local_metadata = _fetch_current_local_metadata()
    remote_metadata = _fetch_current_remote_metadata(conn)
    files_to_upload = _filter_file_list(files, local_metadata, remote_metadata)

    start_time = time.time()
    print 'Upload start: Landing in BUCKET_NAME: %s' % BUCKET_NAME
    for f in files_to_upload:
        #Upload to Bucket
        upload_file(conn, os.path.join(static_root, f), f)

        #Upload Gzip css/js version if gzip is enabled
        can_be_gzipped = _file_can_be_compressed(os.path.join(static_root, f))
        if GZIP_ENABLED and can_be_gzipped:
            upload_file(conn, os.path.join(static_root, f), f, gzip=True)

    #Extra files
    if EXTRA_FILES:
        print 'Now, uploading extra files outside public/static'
        for filename_local, filename_s3 in EXTRA_FILES.items():
            upload_file(conn, filename_local, filename_s3)

    end_time = time.time()
    print 'Upload finished: \
           Time elapsed: %s s' % round(end_time - start_time, 3)

    # refresh metadata file on the server
    print 'Uploading local metadata file'
    upload_file(conn, LOCAL_METADATA_FILE, REMOTE_METADATA_FILE)

    print 'Uploading process DONE'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def s3_sync(s3_bucket, s3_prefix, sync_path=\".\"):\n # Get bucket\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(s3_bucket)\n\n # Walk paths and subdirectories, uploading files\n for path, subdirs, files in os.walk(sync_path):\n # Get relative path prefix\n relpath = os.path.relpath(path, sync_path)\n if not relpath.startswith('.'):\n prefix = os.path.join(s3_prefix, relpath)\n else:\n prefix = s3_prefix\n\n for file in files:\n file_key = os.path.join(prefix, file)\n bucket.upload_file(os.path.join(path, file), file_key)", "def upload_child_objects(self, local_dir_path, s3_dir_path, recursive=False, fn_pattern=None):\n child_objects = [os.path.join(local_dir_path, f) for f in os.listdir(local_dir_path)]\n child_files = [f for f in child_objects if os.path.isfile(f)]\n child_dirs = [f for f in child_objects if os.path.isdir(f)]\n\n for child_file in child_files:\n if not fn_pattern or fnmatch.fnmatch(child_file, fn_pattern):\n s3_object_path = os.path.join(s3_dir_path, os.path.basename(child_file))\n logging.debug(\"Uploading \\\"{}\\\" to \\\"{}\\\"\".format(child_file, s3_object_path))\n self.upload_object(child_file, s3_object_path)\n\n if recursive:\n for child_dir_local in child_dirs:\n child_dir_s3 = os.path.join(s3_dir_path, os.path.basename(child_dir_local))\n self.upload_child_objects(child_dir_local, child_dir_s3, recursive, fn_pattern)", "def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)", "def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. 
Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0", "def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()", "def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)", "def upload_images_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith(('.png', '.jpg', '.jpeg')):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)\n print(f,\"put\")", "def deploy_to_s3():\n env.gzip_path = '%(path)s/repository/gzip/assets/' % env\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/') % env)", "def upload_json_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith('.json'):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)", "def _upload_dir_to_bucket(self, path, ext_path):\n for file in os.listdir(path):\n self._upload_to_bucket(path+'/'+file, ext_path+'/'+file)", "def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for 
filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)", "def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_dir):\n run(\"mkdir %s/static\" % project_name)", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def test_upload_directory_of_directories_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_directory(directory_path=\"test/test_resources/test_subdirectory\",\n bucket_name=\"foobucket\", aws_directory=\"test_directory\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', []):\n contents.append(content.get('Key'))\n\n self.assertEqual(\n contents, [\"test_directory/sub/fake\", \"test_directory/sub2/fake\"])", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def update_static_files(self):\n\n params = self.chose_param_value(\"--static\")\n self._check_path_availability([\"get_static_dir\", \"get_static_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_static_dir(),\n self.analizer.get_static_dir_to(),\n params\n )\n return self.write_debug_message(\"Static files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about static files\")", "def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n 
print('************************************')\n print('Upload complete')\n print('************************************')", "def copy_files(self):\n if settings.USE_S3_STORAGE:\n self.copy_to_s3()\n else:\n self.copy_to_local()", "def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))", "def _upload(self, errors):\n if self.backup_bucket is None:\n return\n\n try:\n with open(\"%s/%s.tar.gz\"%(self.backup_path, self.name), 'r+') as f:\n s3upload.upload_to_s3(f,\n self.backup_bucket,\n \"%s/%s.tar.gz\"%(self.backup_id, self.name))\n\n # Cleaning up resources, since the upload was successful\n run(\"rm -f %s/%s.tar.gz\"%(self.backup_path, self.name))\n except Exception as e:\n logging.exception(e)\n errors.put(Exception(\"Error uploading %s server backup to S3\" % self.name))\n traceback.print_exc()", "def upload(env):\n if not env:\n click.echo(\"Environment must be specified\")\n click.Abort()\n\n with open(\"zappa_settings.json\", \"r\") as f:\n settings = json.load(f)\n\n if not settings:\n click.echo(\"Settings not loaded\")\n click.Abort()\n return\n\n try:\n s3_bucket = settings[env][\"s3_bucket\"]\n aws_region = settings[env][\"aws_region\"]\n except AttributeError:\n click.echo(\"Failed to get details from settings\")\n click.Abort()\n return\n\n session = boto3.Session()\n credentials = session.get_credentials()\n current_credentials = credentials.get_frozen_credentials()\n\n app.config[\"FLASKS3_FORCE_MIMETYPE\"] = True\n\n try:\n css_assets.build()\n\n flask_s3.create_all(\n app,\n user=current_credentials.access_key,\n password=current_credentials.secret_key,\n bucket_name=s3_bucket,\n location=aws_region,\n put_bucket_acl=False,\n )\n click.echo(\n f\"Uploaded assets to Bucket https://{s3_bucket}.s3.{aws_region}.amazonaws.com\"\n )\n except Exception as e:\n click.echo(f\"Failed to upload assets: {e}\")", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. 
\\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)", "def upload_artifacts(ctx: Context, salt_version: str, artifacts_path: pathlib.Path):\n ctx.info(\"Preparing upload ...\")\n s3 = boto3.client(\"s3\")\n to_delete_paths: list[dict[str, str]] = []\n remote_path = f\"release-artifacts/{salt_version}\"\n try:\n ret = s3.list_objects(\n Bucket=tools.utils.STAGING_BUCKET_NAME,\n Prefix=remote_path,\n )\n if \"Contents\" in ret:\n objects = []\n for entry in ret[\"Contents\"]:\n if entry[\"Key\"].endswith(\".release-backup-done\"):\n continue\n objects.append({\"Key\": entry[\"Key\"]})\n to_delete_paths.extend(objects)\n except ClientError as exc:\n if \"Error\" not in exc.response:\n raise\n if exc.response[\"Error\"][\"Code\"] != \"404\":\n raise\n\n if to_delete_paths:\n with tools.utils.create_progress_bar() as progress:\n bucket_uri = f\"s3://{tools.utils.STAGING_BUCKET_NAME}/{remote_path}\"\n task = progress.add_task(f\"Deleting '{bucket_uri}'\", total=1)\n try:\n ret = s3.delete_objects(\n Bucket=tools.utils.STAGING_BUCKET_NAME,\n Delete={\"Objects\": objects},\n )\n except ClientError:\n log.exception(f\"Failed to delete '{bucket_uri}'\")\n finally:\n progress.update(task, advance=1)\n\n ctx.info(\"Uploading release artifacts ...\")\n to_upload_paths: list[pathlib.Path] = []\n copy_exclusions = [\n \".json\",\n ]\n for fpath in artifacts_path.iterdir():\n if fpath.suffix in copy_exclusions:\n continue\n to_upload_paths.append(fpath)\n\n try:\n for fpath in to_upload_paths:\n upload_path = f\"{remote_path}/{fpath.name}\"\n size = fpath.stat().st_size\n ctx.info(f\" {upload_path}\")\n with tools.utils.create_progress_bar(file_progress=True) as progress:\n task = progress.add_task(description=\"Uploading...\", total=size)\n s3.upload_file(\n str(fpath),\n tools.utils.STAGING_BUCKET_NAME,\n upload_path,\n Callback=tools.utils.UpdateProgress(progress, task),\n )\n except KeyboardInterrupt:\n pass", "def _process_task_log(self):\n directory = self._executor.log_dir\n if os.path.exists(directory):\n for root, _dirs, files in os.walk(directory):\n for name in files:\n filepath = os.path.join(root, name)\n object_name = str(self._task.project_id) + \"/\" + self._task.node_id + \"/log/\" + name\n if not self._s3.client.upload_file(self._s3.bucket, object_name, filepath):\n log.error(\"Error uploading file to S3\")", "def create_buckets(self):\n\n # 1. Create bucket\n for name in [BUCKET_1_SRC, BUCKET_1_DST, BUCKET_2_SRC, BUCKET_2_DST, BUCKET_3_SRC, BUCKET_3_DST]:\n self.create_gcs_bucket(name)\n\n # 2. 
Prepare parents\n first_parent = f\"gs://{BUCKET_1_SRC}/parent-1.bin\"\n second_parent = f\"gs://{BUCKET_1_SRC}/parent-2.bin\"\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {first_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {second_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/subdir/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/subdir/file.bin\")\n\n self.delete_gcs_bucket(first_parent)\n self.delete_gcs_bucket(second_parent)", "def dispatch(self, request, *args, **kwargs):\n try:\n self.copy_static_resources()\n except MissingStaticRoot:\n self.template_name = 'general_error.html'\n kwargs['error'] = _('There is no STATIC_ROOT defined in the settings file')\n return super().dispatch(request, *args, **kwargs)\n except Exception as e:\n self.template_name = 'general_error.html'\n kwargs['error'] = str(e)\n return super().dispatch(request, *args, **kwargs)\n cms_pages = Page.objects.filter(publication_date__isnull=False)\n for page in cms_pages:\n languages = page.get_languages()\n for language in languages:\n url = page.get_public_url(language)\n if url not in self.done:\n self.done.append(url)\n static_page_path = '{}{}index.html'.format(STORAGE_PATH, url)\n fetch_url = \"{}{}\".format(self.SOURCE_DOMAIN, url)\n response = requests.get(fetch_url)\n make_dir(url)\n with open(static_page_path, 'w') as file:\n file.write(response.text)\n return super().dispatch(request, *args, **kwargs)", "def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)", "def copy_images(repositories, static_dir):\n for repository in repositories:\n if repository.has_key('branch'):\n branch = repository['branch']\n else:\n branch = retrieve_current_branch(repository_directory=os.curdir, fix_environment=True)\n dir = fetch_repository(repository['url'], workdir=os.curdir, branch=branch)\n package_static_dir = os.path.join(dir, repository['package_name'], 'static')\n if os.path.exists(package_static_dir):\n copytree(package_static_dir, os.path.join(static_dir, repository['package_name']))", "def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)", "def upload_files(self, folder):\n\n # Load all blobs in the session to make sure only upload needed files\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n blobs = [blob.name for blob in blobs]\n\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n\n for file in os.listdir(root_folder):\n file_name = \"{folder}/{file}\".format(folder=folder, 
file=file)\n if file_name not in blobs:\n source_file_name = os.path.join(project_home, file_name)\n GoogleStorage().upload_blob(\n self.bucket_name, source_file_name, file_name)\n print('Uploaded file {}'.format(source_file_name))", "def collectstatic():\n puts(yellow(\"Collect statics\"))\n django_manage('collectstatic', '-l', '--noinput')", "def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')", "def test_upload_directory_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_directory(directory_path=\"test/test_resources/test_directory\",\n bucket_name=\"foobucket\", aws_directory=\"test_directory\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', []):\n contents.append(content.get('Key'))\n\n self.assertEqual(\n contents, [\"test_directory/test_file\", \"test_directory/test_file2\"])", "def _sync_s3_media(self):\n # Copy media from one S3 bucket to another if needed\n logger.info(\"Syncing S3 Media\")\n if env.get('bucket_copy', False):\n with cd(env.project_root):\n cmd = [\n '%s scripts/s3_copy_bucket.py' % PYTHON_BIN,\n env.bucket_copy['from_aws_access_key'],\n env.bucket_copy['from_aws_secret_key'],\n env.bucket_copy['from_aws_storage_bucket'],\n env.bucket_copy['to_aws_access_key'],\n env.bucket_copy['to_aws_secret_key'],\n env.bucket_copy['to_aws_storage_bucket'],\n env.bucket_copy['to_aws_user_email'],\n '&',\n ]\n logger.info(u\"Copying S3 media\")\n with hide(*fab_output_hides):\n run('%s < /dev/null' % \" \".join(cmd))", "def gen_static(self, output_folder):\n files = []\n for l in self.file_listers:\n files += l()\n for f in files:\n _logger.info(\"generating %s\" % f)\n content = self.get(f)\n loc = os.path.join(output_folder, f)\n d = os.path.dirname(loc)\n if not os.path.exists(d):\n os.makedirs(d)\n with open(loc, \"wb\") as file_:\n file_.write(content)", "def update():\n\n metadata = _init()\n\n if S3_SYNC_ON_UPDATE:\n # sync the buckets to the local cache\n log.info(\"Syncing local cache from S3...\")\n for saltenv, env_meta in metadata.items():\n for bucket_files in _find_files(env_meta):\n for bucket, files in bucket_files.items():\n for file_path in files:\n cached_file_path = _get_cached_file_name(\n bucket, saltenv, file_path\n )\n log.info(\"%s - %s : %s\", bucket, saltenv, file_path)\n\n # load the file from S3 if it's not in the cache or it's old\n _get_file_from_s3(\n metadata, saltenv, bucket, file_path, cached_file_path\n )\n\n log.info(\"Sync local cache from S3 completed.\")", "def transfer(self):\n\n # Upload unverified matches to s3 bucket if unverified argument used (production only)\n if self.in_args.unverified:\n files = glob.glob(os.path.join(self.directories['unverified_matches_dir'].format(self.region_dir, self.proc_type), '*'))\n\n # Loop through files found in unverified_matches folder\n for filepath in files:\n filename = os.path.basename(filepath)\n # Upload each file to S3 bucket folder\n self.upload_file(filepath, self.bucket, 'UK_suppliers/Unverified_Matches/' + filename)\n self.unverified_file = filename\n\n # Zip file creation - note will only work 
for latest unverified file. Above loop is added just incase\n # any residual files get added manually to S3 bucket.\n\n # Get filepaths of stats file, filtered and excluded matches files\n stats_fp = self.directories['stats_file'].format(self.region_dir, self.proc_type)\n filtered_matches_fp = self.directories['filtered_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n excluded_matches_fp = self.directories['excluded_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n blacklisted_strings_fp = self.directories['blacklisted_string_matches'].format(self.region_dir)\n\n stats_file_fp = self.directories['script_performance_stats_file'].format(self.region_dir, self.proc_type)\n\n # Assign zip file which will contain above files\n files_zip = self.unverified_file[:10] + \"_files.zip\"\n\n with ZipFile(files_zip, 'w') as myzip:\n myzip.write(stats_fp, os.path.basename(stats_fp))\n myzip.write(filtered_matches_fp,os.path.basename(filtered_matches_fp))\n myzip.write(excluded_matches_fp, os.path.basename(excluded_matches_fp))\n myzip.write(blacklisted_strings_fp, os.path.basename(blacklisted_strings_fp))\n myzip.write(stats_file_fp, os.path.basename(stats_file_fp))\n\n self.upload_file(files_zip, self.bucket, 'UK_suppliers/Archive/' + files_zip)\n\n # Download verified matches from s3 bucket if verified argument (production only)\n if self.in_args.verified:\n self.process_verified_files()\n\n # Add confirmed matches/non-matches to training file\n if self.in_args.convert_training:\n self.runfile_mods.convert_training.ConvertToTraining.convert(self)", "def handle_noargs(self, **options):\r\n for staticfiles_dir in getattr(settings, \"STATICFILES_DIRS\", []):\r\n # Cribbed from the django-staticfiles app at:\r\n # https://github.com/jezdez/django-staticfiles/blob/develop/staticfiles/finders.py#L52\r\n if isinstance(staticfiles_dir, (list, tuple)):\r\n prefix, staticfiles_dir = staticfiles_dir\r\n\r\n # Walk over the current static files directory tree,\r\n # preprocessing files that have a template extension.\r\n for root, dirs, files in os.walk(staticfiles_dir):\r\n for filename in files:\r\n outfile, extension = os.path.splitext(filename)\r\n # We currently only handle Mako templates\r\n if extension == \".mako\":\r\n self.__preprocess(os.path.join(root, filename),\r\n os.path.join(root, outfile))", "def project_uploader():\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n return 'Invalid format (allowed: %s)' % ','.join(ACCEPTED_TYPES)\n # generate a simpler filename\n keepcharacters = ('.', '_')\n safe_filename = img.filename.replace(' ', '_')\n safe_filename = \"\".join(\n c for c in safe_filename\n if c.isalnum() or c in keepcharacters).rstrip()\n if not safe_filename:\n safe_filename = \"\".join(random_password(8), '.', ext)\n # use random subfolder inside user id folder\n filename = '/'.join([\n str(current_user.id),\n random_password(24),\n safe_filename\n ])\n # with tempfile.TemporaryDirectory() as tmpdir:\n # img.save(path.join(tmpdir, filename))\n if 'S3_FOLDER' in current_app.config:\n s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n else:\n s3_filepath = filename\n # print('Uploading to %s' % s3_filepath)\n if 'S3_ENDPOINT' in current_app.config:\n 
s3_obj = boto3.client(\n service_name='s3',\n endpoint_url=current_app.config['S3_ENDPOINT'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to endpoint %s' % current_app.config['S3_ENDPOINT'])\n else:\n s3_obj = boto3.client(\n service_name='s3',\n region_name=current_app.config['S3_REGION'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to region %s' % current_app.config['S3_REGION'])\n # Commence upload\n s3_obj.upload_fileobj(img,\n current_app.config['S3_BUCKET'],\n s3_filepath,\n ExtraArgs={'ContentType': img.content_type,\n 'ACL': 'public-read'}\n )\n return escape('/'.join([current_app.config['S3_HTTPS'], s3_filepath]))", "def test_s3uri_find_all_files(s3_test_path):\n prefix = os.path.join(s3_test_path, \"test_s3uri_find_all_files\")\n all_files = make_files_in_dir(prefix, make_local_empty_dir_d_a=False)\n\n all_files_found = S3URI(prefix).find_all_files()\n assert sorted(all_files_found) == sorted(all_files)\n for file in all_files:\n assert S3URI(file).exists", "def upload_dataset(bucket_name, directory, num_threads=20):\n s3 = boto3.resource('s3')\n\n def upload_file(queue):\n while True:\n obj = queue.get()\n if obj is None:\n break\n abspath, s3_path = obj\n s3.meta.client.upload_file(abspath, bucket_name, s3_path)\n queue.task_done()\n\n # create a queue for objects that need to be uploaded\n # and spawn threads to upload them concurrently\n upload_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=upload_file, args=(upload_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n for root, _, files in os.walk(directory):\n for file in files:\n abspath = os.path.join(root, file)\n relpath = os.path.relpath(abspath, directory)\n s3_path = os.path.basename(directory) + \"/\" + relpath\n upload_queue.put((abspath, s3_path))\n\n # wait for the queue to be empty, then join all threads\n upload_queue.join()\n for _ in range(num_threads):\n upload_queue.put(None)\n for worker in workers:\n worker.join()", "def compress(roles='webapp_servers'):\n if _current_host_has_role(roles):\n print(\"=== COMPRESSING STATIC MEDIA ===\")\n with cd(env.REMOTE_CODEBASE_PATH):\n run(\"workon %s && ./manage.py compress --force\" % env.REMOTE_VIRTUALENV_NAME)", "def serve_static_files(request, path, insecure=False, **kwargs):\n\n if not settings.DEBUG and not insecure:\n raise Http404\n normalized_path = posixpath.normpath(unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path:\n if path.endswith('/') or path == '':\n raise Http404(\"Directory indexes are not allowed here.\")\n raise Http404(\"'%s' could not be found\" % path)\n document_root, path = os.path.split(absolute_path)\n return static.serve(request, path, document_root=document_root, **kwargs)", "def copy_static(self, outdir):\n pass", "def publish_artifacts(self): # pylint: disable=too-many-locals\n try:\n b3resource = boto3.resource(\n 's3', endpoint_url=os.environ[\"S3_ENDPOINT_URL\"])\n dst_s3_url = os.environ[\"S3_DST_URL\"]\n multipart_threshold = 5 * 1024 ** 5 if \"google\" in os.environ[\n \"S3_ENDPOINT_URL\"] else 8 * 1024 * 1024\n config = TransferConfig(multipart_threshold=multipart_threshold)\n bucket_name = urlparse(dst_s3_url).netloc\n try:\n b3resource.meta.client.head_bucket(Bucket=bucket_name)\n except botocore.exceptions.ClientError as exc:\n 
error_code = exc.response['Error']['Code']\n if error_code == '404':\n # pylint: disable=no-member\n b3resource.create_bucket(Bucket=bucket_name)\n else:\n raise exc\n except Exception as exc: # pylint: disable=broad-except\n raise exc\n path = urlparse(dst_s3_url).path.strip(\"/\")\n dst_http_url = os.environ[\"HTTP_DST_URL\"]\n output_str = \"\\n\"\n # protects if test cases return details as None\n self.details = self.details or {}\n self.details[\"links\"] = []\n for log_file in [self.output_log_name, self.output_debug_log_name]:\n if os.path.exists(os.path.join(self.dir_results, log_file)):\n abs_file = os.path.join(self.dir_results, log_file)\n mime_type = mimetypes.guess_type(abs_file)\n self.__logger.debug(\n \"Publishing %s %s\", abs_file, mime_type)\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n abs_file, os.path.join(path, log_file), Config=config,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n link = os.path.join(dst_http_url, log_file)\n output_str += f\"\\n{link}\"\n self.details[\"links\"].append(link)\n for root, _, files in os.walk(self.res_dir):\n for pub_file in files:\n abs_file = os.path.join(root, pub_file)\n mime_type = mimetypes.guess_type(abs_file)\n self.__logger.debug(\n \"Publishing %s %s\", abs_file, mime_type)\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n abs_file,\n os.path.join(path, os.path.relpath(\n os.path.join(root, pub_file),\n start=self.dir_results)),\n Config=config,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n link = os.path.join(dst_http_url, os.path.relpath(\n os.path.join(root, pub_file),\n start=self.dir_results))\n output_str += f\"\\n{link}\"\n self.details[\"links\"].append(link)\n self.__logger.info(\n \"All artifacts were successfully published: %s\\n\", output_str)\n return TestCase.EX_OK\n except KeyError as ex:\n self.__logger.error(\"Please check env var: %s\", str(ex))\n return TestCase.EX_PUBLISH_ARTIFACTS_ERROR\n except botocore.exceptions.NoCredentialsError:\n self.__logger.error(\n \"Please fill ~/.aws/credentials, ~/.boto or set \"\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env\")\n return TestCase.EX_PUBLISH_ARTIFACTS_ERROR\n except Exception: # pylint: disable=broad-except\n self.__logger.exception(\"Cannot publish the artifacts\")\n return TestCase.EX_PUBLISH_ARTIFACTS_ERROR", "def folder_to_s3(src, dst, region, max_parallelism=1, force_copy=False, **kwargs):\n bucket, root = utils.path.reverse_split(dst)\n\n s3 = boto3.resource('s3')\n\n # check if the bucket exists\n if not __bucket_exists(bucket):\n\n if force_copy:\n print('creating bucket: ' + bucket)\n\n try:\n s3.create_bucket(Bucket=bucket,\n CreateBucketConfiguration={'LocationConstraint': region})\n except botocore.exceptions.ClientError as e:\n raise e\n else:\n exit(-1)\n\n # instanciate transfer configuration\n conf = boto3.s3.transfer.TransferConfig(use_threads=True, **kwargs)\n\n # start uploading\n with ProcessPoolExecutor(max_workers=max_parallelism) as executor:\n try:\n for file in utils.path.dir_tree(src):\n # removes the root so that it can be\n # later added to the input string\n suffix = file.replace(src, '')\n executor.submit(file_to_s3,\n bucket,\n file,\n os.path.join(root, suffix),\n conf,\n utils.path.progress\n )\n\n except (BrokenProcessPool):\n try:\n # deleting the bucket if created\n # to do so, the bucket must be empty\n print(\"removing %s from %s\" % (root, bucket))\n delete_folder(bucket, root, region)\n if 
force_copy:\n print(\"attempting to delete %s\" % bucket)\n s3.Bucket(bucket).delete()\n\n except botocore.exceptions.ClientError as e:\n print(\"operation failed: %s\" % e)\n exit(-1)\n\n else:\n print(\"operation aborted. exiting...\")\n exit(0)", "def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):\n import boto3\n from boto3.s3.transfer import TransferConfig\n from boto3.exceptions import S3UploadFailedError\n\n # actually do the upload\n client = boto3.client(\n \"s3\",\n aws_access_key_id=fields[\"upload_aws_access_key_id\"],\n aws_secret_access_key=fields[\"upload_aws_secret_access_key\"],\n )\n\n multipart_chunksize = _choose_boto3_chunksize(file_obj)\n\n # if boto uses threads, ctrl+c won't work\n config = TransferConfig(use_threads=False, multipart_chunksize=multipart_chunksize)\n\n # let boto3 update our progressbar rather than our FASTX wrappers, if applicable\n boto_kwargs = {}\n\n if hasattr(file_obj, \"progressbar\"):\n boto_kwargs[\"Callback\"] = file_obj.progressbar.update\n file_obj._progressbar = file_obj.progressbar\n file_obj.progressbar = None\n\n for attempt in range(1, 4):\n try:\n client.upload_fileobj(\n file_obj,\n fields[\"s3_bucket\"],\n fields[\"file_id\"],\n ExtraArgs={\"ServerSideEncryption\": \"AES256\"},\n Config=config,\n **boto_kwargs\n )\n break\n except S3UploadFailedError as e:\n logging.debug(\"Caught S3UploadFailedError on attempt {}/3: {}\".format(attempt, str(e)))\n logging.error(\n \"{}: Connectivity issue, retrying upload via intermediary ({}/3)...\".format(\n file_name, attempt\n )\n )\n\n # rewind the progressbar if possible, then remove so boto3 can update the bar directly\n if hasattr(file_obj, \"_progressbar\"):\n file_obj.progressbar = file_obj._progressbar\n file_obj.seek(0)\n file_obj.progressbar = None\n else:\n file_obj.seek(0)\n else:\n logging.debug(\"{}: exhausted all retries via intermediary\")\n raise_connectivity_error(file_name)\n\n # issue a callback\n try:\n resp = session.post(\n callback_url,\n json={\n \"s3_path\": \"s3://{}/{}\".format(fields[\"s3_bucket\"], fields[\"file_id\"]),\n \"filename\": file_name,\n \"import_as_document\": fields.get(\"import_as_document\", False),\n },\n )\n except requests.exceptions.ConnectionError:\n raise_connectivity_error(file_name)\n\n if resp.status_code != 200:\n raise_connectivity_error(file_name)\n\n try:\n return resp.json()\n except ValueError:\n return {}", "def transfer_files_to_s3(self, input_path, bucket_name, file_ext):\n client = boto3.client('s3', aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key)\n transfer = S3Transfer(client)\n for subdir, dirs, files in os.walk(input_path):\n for file in files:\n file_name, file_extension = os.path.splitext(file)\n full_path = os.path.join(subdir, file)\n if file_extension == '.' 
+ file_ext:\n logging.info(\"transferring file {}\".format(file_name))\n transfer.upload_file(full_path, bucket_name, file_ext\n + '/' + file)", "def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir('versions')\n if not isdir:\n mkdir = api.local('mkdir versions')\n if mkdir.failed:\n return False\n suffix = datetime.now().strftime('%Y%m%d%M%S')\n path = 'versions/web_static_{}.tgz'.format(suffix)\n tar = api.local('tar -cvzf {} web_static'.format(path))\n if tar.failed:\n return False\n size = os.stat(path).st_size\n print('web_static packed: {} -> {}Bytes'.format(path, size))\n return path", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def compress(self):\n logger = logging.getLogger('logger')\n\n absolute_project_root = os.path.abspath(self.project_root)\n clone_dirs = self.find_clone_directories(absolute_project_root)\n logger.info('Found %d clone directories in %s',\n len(clone_dirs), self.project_root)\n\n if not clone_dirs:\n logger.info('There is no data to compress [exits]')\n return\n\n logger.info('Begin to compress %s', self.project_root)\n processed_dir_count = 0\n\n for clone_dir, parent_dir in clone_dirs:\n processed_dir_count += 1\n if self.max_dirs is not None and processed_dir_count > self.max_dirs:\n break\n try:\n os.chdir(parent_dir)\n self.compress_dir(clone_dir, parent_dir)\n self.remove_dir(clone_dir, parent_dir)\n except Exception as ex:\n logger.error('Unexpected error: %s', str(ex))\n finally:\n os.chdir(absolute_project_root)\n\n logger.info('Begin to compress %s...Done!', self.project_root)", "def lesson_static_generator_dir(lesson_slug, static_dir, search_dir):\n if not search_dir.exists():\n return\n\n for static_file in search_dir.iterdir():\n\n if static_file.is_dir():\n yield from lesson_static_generator_dir(lesson_slug, static_dir, static_file)\n continue\n\n relative = static_file.relative_to(static_dir)\n\n yield (\"lesson_static\", {\"lesson\": lesson_slug, \"path\": str(relative)})", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def collect_s3(self):\n print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))\n self.s3 = boto3.resource('s3')\n self.s3_bucket = self.s3.Bucket(s3_bucket)\n self.s3_client = boto3.client('s3')\n for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):\n self.collect_single(item.get('Key'))\n\n for a in self.artifacts:\n a.download()", "def upload_files_to_S3(sourceDir, bucket_name, destDir, aws_access_key_id=None, aws_secret_access_key=None):\n\n # set up the connection to the AWS Bucket.\n if aws_access_key_id == None or aws_secret_access_key == None:\n client = boto3.client(service_name='s3', aws_access_key_id=None, aws_secret_access_key=None)\n else:\n client = boto3.client(service_name='s3', 
aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key)\n transfer = boto3.s3.transfer.S3Transfer(client)\n\n # Get a list of all the files that have already been uploaded to S3\n MyS3Objects = [s.key for s in boto3.resource('s3').Bucket(bucket_name).objects.filter(Prefix=destDir)]\n\n\n\n\n uploadFileNames = files_to_upload(sourceDir)\n\n #print(sourceDir)\n #print(uploadFileNames)\n\n\n UploadCounter = 0\n\n for filename in uploadFileNames:\n sourcepath = filename[0]\n destpath = destDir + '/' + filename[1]\n\n # If the file is already on S3, don't upload it again\n if destpath in MyS3Objects:\n print(destpath, \" is already on S3\")\n continue\n\n UploadCounter += 1\n if UploadCounter % 100 == 0: print(\"Files Uploaded:\", UploadCounter)\n\n # print ('Uploading %s to Amazon S3 bucket %s' % (sourcepath, bucket_name))\n\n transfer.upload_file(sourcepath, bucket_name, destpath)\n\n print(\"All the files have been uploaded!\")", "def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)", "def upload_files(self):\n # refresh status of some widgets\n try:\n if not self.textedit.toPlainText().strip() and not self.textedit_folder.toPlainText():\n QMessageBox.warning(self, \"Warning\", \"No Files OR Folders\",\n QMessageBox.Yes)\n return\n self.textedit_upload.show()\n self.textedit_upload.setText('')\n self.label_upload.show()\n self.upload_pushButton.setEnabled(False)\n self.pushButton.setEnabled(False)\n self.directory_pushButton.setEnabled(False)\n\n # to keep every record in one line\n self.textedit_upload.setLineWrapColumnOrWidth(1000)\n self.textedit_upload.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth)\n\n self.textedit_upload.setText('start to upload... 
')\n # get the values and push to s3\n self.__start_upload_time = time.time()\n if self.textedit.isVisible():\n files_list = self.textedit.toPlainText().strip().split('\\n')[1:]\n self.s3.batch_upload_files(files_list)\n\n if self.textedit_folder.isVisible():\n folder_list = self.textedit_folder.toPlainText().strip().split('\\n')[1:]\n self.s3.batch_upload_directory(folder_list)\n self.s3.list_objects()\n\n #start the qtimer\n self.qTimer.start()\n except:\n pass", "def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir(\"versions\")\n if not isdir:\n mkdir = api.local(\"mkdir versions\")\n if mkdir.failed:\n return None\n sfx = datetime.now().strftime(\"%Y%m%d%M%S\")\n path = \"versions/web_static_{:s}.tgz\".format(sfx)\n tar = api.local(\"tar -cvzf {:s} web_static\".format(path))\n if tar.failed:\n return None\n size = os.stat(path).st_size\n print(\"wb_static packed: {} -> {}Bytes\".format(path, size))\n return path", "def upload_files(self, logger):\n logger.info(\"Uploading all files to GCS . . .\")\n\n source_file_name = self.path + '/data/'\n files = os.listdir(source_file_name)\n\n # Setting credentials using JSON file\n try:\n storage_client = storage.Client()\n # Getting bucket object\n bucket = storage_client.bucket(\"my-bigdata-projects\")\n if 'bt_challenge_boa.csv' in files:\n # Name of the object to be stored in the bucket\n object_name_in_gcs_bucket = bucket.blob(\n \"data/csv/bt_challenge_boa.csv\"\n )\n object_name_in_gcs_bucket.upload_from_filename(\n source_file_name + 'bt_challenge_boa.csv'\n )\n except Exception as error:\n logger.info(\"Something went wrong!\")\n logger.error(\"Error: {}\".format(error))\n\n logger.info(\"Files have been uploaded . . .\")", "def upload_scripts(client, script_dir, overwrite=True):\n local_dir = os.path.join(genometools._root, 'data', 'gcloud', 'scripts')\n match = _BUCKET_PAT.match(script_dir)\n script_bucket = match.group(1)\n script_prefix = match.group(2)\n\n depth = len(local_dir.split(os.sep))\n\n for root, dirs, files in os.walk(local_dir):\n rel_path = '/'.join(root.split(os.sep)[depth:])\n for f in files:\n local_path = os.path.join(root, f)\n\n if rel_path:\n remote_path = '/'.join([script_prefix, rel_path, f])\n else:\n remote_path = '/'.join([script_prefix, f])\n _LOGGER.info('Uploading \"%s\"...', remote_path)\n storage.upload_file(client, script_bucket, local_path, remote_path,\n overwrite=overwrite)", "def lambda_handler(*_):\n Log.info(\"Checking bucket %s\", S3_BUCKET)\n Log.info(\"Output Key: %s\", S3_OUTPUT_KEY)\n bucket_location, objects = s3index.get_objects(S3_BUCKET)\n filtered = s3index.filter_objects(objects)\n by_date = s3index.order_objects(filtered)\n template = s3index.template_from_string(TEMPLATE)\n index = s3index.build_index(\n template, SITE_NAME, by_date, S3_BUCKET, bucket_location)\n s3_client = boto3.client(\"s3\")\n s3_client.put_object(\n ACL=\"public-read\",\n Body=index.encode(),\n Bucket=S3_BUCKET,\n Key=S3_OUTPUT_KEY,\n ContentType=\"text/html\"\n )", "def upload_to_s3(bucket_name, sourceDir):\n try:\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n try:\n # clean the bucket\n bucket = resource.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n\n # upload the new files\n uploadFileNames = getFiles(sourceDir)\n print(\"Found \" + len(uploadFileNames).__str__() + ' files')\n\n for filename in uploadFileNames:\n destName = 
os.path.join(*(filename.split('/')[1:]))\n print(\"Uploading file \" + filename + ' to ' + destName)\n resource.Object(bucket_name, destName).put(Body=open(filename, 'rb'),\n ContentType=get_contenttype_from_filename(filename))\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True", "def stage_static_files(sample_type, working_dir, debug=False):\n stage_static_latex(sample_type, working_dir)\n stage_static_pdfs(sample_type, working_dir)", "def upload_file(conn, filename_local, filename_s3, gzip=False):\n\n filename_s3 = filename_s3.lstrip('./')\n\n file_descriptor = open(filename_local, 'rb')\n content = file_descriptor.read()\n\n content_type = _get_content_type(file_descriptor)\n headers = _get_headers(content_type)\n\n #should compress if the file is compressable and gzip is enabled\n can_be_gzipped = _file_can_be_compressed(filename_local)\n if gzip and can_be_gzipped:\n content = _compress_string(content)\n headers['Content-Length'] = str(len(content))\n headers['Content-Encoding'] = 'gzip'\n extension = mimetypes.guess_extension(content_type)\n #we should not overwrite the original file in the server.\n #We change extensions: style.css --> style.gz.css, for instance\n filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension\n\n #if gzip is enabled and it is not compressable, don't upload nothing at all\n elif gzip and not can_be_gzipped:\n return\n\n #upload\n print 'Uploading %s to %s' % (filename_local, filename_s3)\n _put(conn, filename_s3, content, headers=headers)\n file_descriptor.close()", "def upload_folder_to_s3(folder_path, s3_uri, connection=None):\n\n if connection:\n run_out = connection.run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n else:\n run_out = run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n\n return run_out.return_code", "def glob_upload_gz(pattern, egnyte_path, log=True, dryrun=False):\n\timport glob\n\tfor filename in glob.glob(pattern):\n\t\tif log:\n\t\t\telog(f\"found file for upload:{filename}\")\n\t\tif not dryrun:\n\t\t\tupload_file_gz(filename, egnyte_path, progress_callbacks=ProgressCallbacks() if log else None)", "def deploy_user_media(env=None, haus_vars={} ):\n print green('Deploying user media')\n with cd(\"/var/www\"):\n run('./manage.py sync_media_s3 --prefix=uploads')", "def _download_s3_folder(s3, bucket_name, s3_store_path, local_dir):\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=s3_store_path):\n target = os.path.join(local_dir, os.path.relpath(obj.key, s3_store_path))\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target))\n if obj.key[-1] == '/':\n continue\n bucket.download_file(obj.key, target)\n logger.info(\"{} Downloaded.\".format(obj.key)) # log progress", "def get_s3_files(self, path, bucket, profile, files=None, mydir=None):\n\n # Set the path to the directory where files reside\n s3_path = bucket + path\n\n # Create folder on VM for downloaded files\n if not isinstance(mydir, str):\n mydir = path\n if not os.path.exists(mydir):\n os.makedirs(mydir)\n\n # If files is an array of filenames, download them\n if isinstance(files, list):\n print(\"Getting files...\")\n for filename in files:\n s3_filepath = s3_path + str(filename)\n if os.path.exists(mydir + str(filename)):\n print(\"File \" + filename + \" already downloaded in that location.\")\n else:\n 
print(s3_filepath)\n cmd = [\"aws\", \"s3\", \"--profile\", profile, \"cp\", s3_filepath, mydir]\n try:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, shell=True\n ).decode(\"UTF-8\")\n except Exception as e:\n output = e.output.decode(\"UTF-8\")\n print(\"ERROR:\" + output)\n # If files == None, which syncs the s3_path 'directory'\n else:\n print(\"Syncing directory \" + s3_path)\n cmd = [\"aws\", \"s3\", \"--profile\", profile, \"sync\", s3_path, mydir]\n try:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, shell=True\n ).decode(\"UTF-8\")\n except Exception as e:\n output = e.output.decode(\"UTF-8\")\n print(\"ERROR:\" + output)\n print(\"Finished\")", "def files_to_upload(source_directory: str) -> list:\n upload_file_names = []\n\n print(source_directory)\n for dirName, subdirList, fileList in os.walk(source_directory):\n for filename in fileList:\n file_path = os.path.join(dirName, filename)\n s3key = os.path.join(os.path.basename(dirName) + '/' + filename)\n upload_file_names.append((file_path, s3key))\n return upload_file_names", "def init_downloads(self, pdir):\n self.log.info(\"init_downloads(\" + pdir + \")\")\n contents = os.listdir(pdir)\n for name in contents:\n path = os.path.join(pdir, name)\n if os.path.isdir(path):\n # recursively call with subdir\n self.init_downloads(path)\n else:\n # add file\n download = {}\n s3_uri = self.s3_prefix + path[(len(self.s3_dir)+1):]\n download['s3_uri'] = s3_uri\n download['state'] = 'COMPLETE'\n download[\"local_filepath\"] = path\n self.downloads[s3_uri] = download\n self.update_download(s3_uri) # update file properties", "def uploadFiles(self, filenames):\n bucket = self._S3_USER_UPLOAD_BUCKET\n prefix = self._S3_USER_UPLOAD_DIR\n uuid_dir = uuid.uuid4()\n # TODO(aimee): This should upload to a user-namespaced directory\n for filename in filenames:\n basename = os.path.basename(filename)\n response = self._upload_s3(filename, bucket, f\"{prefix}/{uuid_dir}/{basename}\")\n return f\"Upload file subdirectory: {uuid_dir} (keep a record of this if you want to share these files with other users)\"", "def test_put_file_variant(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src, id, 'variant.tar.gz')\n path = '/'.join(backend.id_to_path(id)) + '/variant.tar.gz'\n self.assertTrue(backend.exists(path))", "def collect_links(self, env=None):\n for asset in self.assets.values():\n if asset.has_bundles():\n asset.collect_files()\n if env is None:\n env = self.config.env\n if env == static_bundle.ENV_PRODUCTION:\n self._minify(emulate=True)\n self._add_url_prefix()", "def s3deploy():\n # using aws cli since boto is busted with buckets that have periods (.) in the name\n local('cd {} && aws s3 cp --recursive --acl public-read build/ s3://{}/{}'.format(\n settings.BASE_DIR, AWS_BUCKET_NAME, VERBOSE_APP_NAME))\n log('Deployed! 
visit http://{}/{}/\\n'.format(AWS_BUCKET_NAME, VERBOSE_APP_NAME), 'green')", "def compress():\n run_manage_cmd('compress_assets')", "def put_static_assets_into_storage(\n assets: dict, prefix: str, storage, ignore_missing_assets: bool = True\n) -> List[dict]:\n _assets = []\n\n for asset_name, asset_path in assets.items():\n if not asset_path and ignore_missing_assets:\n continue\n\n _assets.append(\n {\"asset_id\": asset_name, \"asset_url\": storage.register(asset_path, prefix)}\n )\n\n return _assets", "def upload(self, bucket, obj, s3_client=None):\n\n s3_client = s3_client or self.s3_client\n transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=1024, use_threads=True, max_concurrency=10)\n s3_transfer = boto3.s3.transfer.S3Transfer(client=s3_client, config=transfer_config)\n\n try:\n logging.debug(\"Uploading {} to {}\".format(obj, bucket))\n s3_transfer.upload_file(obj, bucket, helpers.strip_path(obj)[1])\n\n return True\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n return False\n except Exception as e:\n logging.error(\"Error uploading: {}\".format(e))\n return False", "def _recursive_put_files(self, is_subdirectory=False, sub_directory_name=None):\n current_path = os.path.basename(os.getcwd())\n LOG.info(f\"Copying files from the directory '{current_path}'\")\n for path_ in os.listdir():\n # Skip dotfiles and __pycache__\n if path_.startswith('.') or path_.startswith('__'):\n continue\n if os.path.isdir(path_):\n if sub_directory_name is not None:\n dir_name = os.path.join(sub_directory_name, path_)\n else:\n dir_name = path_\n try:\n self._file_explorer.md(dir_name)\n except Exception as e:\n print(e)\n os.chdir(dir_name.split(os.path.sep)[-1])\n self._recursive_put_files(\n is_subdirectory=True,\n sub_directory_name=dir_name,\n )\n else:\n try:\n if sub_directory_name is not None:\n self._file_explorer.put(path_, os.path.join(sub_directory_name, path_))\n else:\n self._file_explorer.put(path_)\n except RemoteIOError as e:\n print(path_, e)\n if is_subdirectory:\n os.chdir(UP_ONE_DIRECTORY)", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def _upload_all(self, uploads):\n for upload in uploads:\n if isinstance(upload, dict):\n self._upload(upload)\n elif upload in uploads and isinstance(uploads[upload], dict):\n self._upload(uploads[upload])\n else:\n raise Exception('invalid upload object')", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def PopulateFilePaths(self):\n if os.path.isdir(self.backupFolder) == True:\n s3Log.info(\"BackUp Folder = {}\".format(self.backupFolder))\n backUpFilestoTransfer = (os.listdir(self.backupFolder))\n for eachfilename in backUpFilestoTransfer:\n path = os.path.join(self.backupFolder, eachfilename)\n filedictionary={\n \"filename\": eachfilename,\n \"filepath\": path,\n \"uploadedSuccess\": 0\n }\n self.fileTobeUploaded.append(filedictionary)\n\n s3Log.info(\"{} files 
are to be uploaded. \".format(len(self.fileTobeUploaded) ))\n pprint.pprint(self.fileTobeUploaded)", "def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')", "def collect_static():\n\n check_promt = (\n not env.prompt or\n console.confirm(\n \"Collect static files and copy them to collect_static?\",\n default=True,\n )\n )\n\n if check_promt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py collectstatic\"\n \" --noinput\"\n )", "def _watch(inotify, watchers, watch_flags, s3_uploader):\n # initialize a thread pool with 1 worker\n # to be used for uploading files to s3 in a separate thread\n executor = futures.ThreadPoolExecutor(max_workers=1)\n\n last_pass_done = False\n stop_file_exists = False\n\n # after we see stop file do one additional pass to make sure we didn't miss anything\n while not last_pass_done: # pylint: disable=too-many-nested-blocks\n # wait for any events in the directory for 1 sec and then re-check exit conditions\n for event in inotify.read(timeout=1000):\n for flag in inotify_simple.flags.from_mask(event.mask):\n # if new directory was created traverse the directory tree to recursively add all\n # created folders to the watchers list.\n # Upload files to s3 if there any files.\n # There is a potential race condition if upload the file and the see a notification\n # for it which should cause any problems because when we copy files to temp dir\n # we add a unique timestamp up to microseconds.\n if flag is inotify_simple.flags.ISDIR and inotify_simple.flags.CREATE & event.mask:\n path = os.path.join(intermediate_path, watchers[event.wd], event.name)\n for folder, _, files in os.walk(path):\n wd = inotify.add_watch(folder, watch_flags)\n relative_path = os.path.relpath(folder, intermediate_path)\n watchers[wd] = relative_path\n tmp_sub_folder = os.path.join(tmp_dir_path, relative_path)\n if not os.path.exists(tmp_sub_folder):\n os.makedirs(tmp_sub_folder)\n for file in files:\n _copy_file(executor, s3_uploader, relative_path, file)\n elif flag is inotify_simple.flags.CLOSE_WRITE:\n _copy_file(executor, s3_uploader, watchers[event.wd], event.name)\n\n last_pass_done = stop_file_exists\n stop_file_exists = os.path.exists(success_file_path) or os.path.exists(failure_file_path)\n\n # wait for all the s3 upload tasks to finish and shutdown the executor\n executor.shutdown(wait=True)", "def do_pack():\n from os import mkdir, path\n\n filename = \"web_static_{}.tgz\".format(now.strftime(\"%Y%m%d%H%M%S\"))\n filepath = \"versions/{}\".format(filename)\n\n try:\n mkdir('./versions')\n except FileExistsError:\n pass\n\n print(\"Packing web_static to {}\".format(filepath))\n cmd = local('tar -cvzf {} web_static'.format(filepath))\n if (cmd.return_code == 0):\n filesize = path.getsize(filepath)\n print(\"web_static packed: {} -> {}Bytes\".format(filepath, filesize))\n return filepath\n return None", "def ensure_fstar_js(static_path):\n for pth in static_path:\n candidate = os.path.join(pth, 'fstar.js')\n if os.path.isdir(candidate):\n return candidate\n raise ExtensionError(MISSING_FSTARJS_MESSAGE)", "def do_s3_static_url(parser, token):\n return do_s3_media_url(parser, token, static=True)", "def upload_bam(bam_s3_path, local_folder_path):\n\n upload_folder(bam_s3_path, local_folder_path)", "def zip_campaign_files(): # pylint: disable=too-many-locals\n try:\n build_tag = 
env.get('BUILD_TAG')\n assert Campaign.dump_db() == Campaign.EX_OK\n assert Campaign.dump_artifacts() == Campaign.EX_OK\n with zipfile.ZipFile(f'{build_tag}.zip',\n 'w', zipfile.ZIP_DEFLATED) as zfile:\n zfile.write(f\"{build_tag}.json\")\n for root, _, files in os.walk(build_tag):\n for filename in files:\n zfile.write(os.path.join(root, filename))\n b3resource = boto3.resource(\n 's3', endpoint_url=os.environ[\"S3_ENDPOINT_URL\"])\n dst_s3_url = os.environ[\"S3_DST_URL\"]\n multipart_threshold = 5 * 1024 ** 5 if \"google\" in os.environ[\n \"S3_ENDPOINT_URL\"] else 8 * 1024 * 1024\n tconfig = TransferConfig(multipart_threshold=multipart_threshold)\n bucket_name = urllib.parse.urlparse(dst_s3_url).netloc\n mime_type = mimetypes.guess_type(f'{build_tag}.zip')\n path = urllib.parse.urlparse(dst_s3_url).path.strip(\"/\")\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n f'{build_tag}.zip',\n os.path.join(path, f'{build_tag}.zip'),\n Config=tconfig,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n dst_http_url = os.environ[\"HTTP_DST_URL\"]\n link = os.path.join(dst_http_url, f'{build_tag}.zip')\n Campaign.__logger.info(\n \"All data were successfully published:\\n\\n%s\", link)\n return Campaign.EX_OK\n except KeyError as ex:\n Campaign.__logger.error(\"Please check env var: %s\", str(ex))\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR\n except botocore.exceptions.NoCredentialsError:\n Campaign.__logger.error(\n \"Please fill ~/.aws/credentials, ~/.boto or set \"\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env\")\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR\n except Exception: # pylint: disable=broad-except\n Campaign.__logger.exception(\"Cannot publish the artifacts\")\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR", "def clean_gzip():\n this_dir = os.getcwd()\n os.chdir(\"/data/COHERENT2/data/CrystalChar/raw\")\n all_files = glob.glob(\"./**\", recursive=True)\n for f in all_files:\n if \".gz\" in f and \"tar\" not in f:\n print(f)\n sh(\"gunzip \" + f)\n os.chdir(this_dir)", "def _put_antenny_files_on_device(self):\n self._ensure_directory()\n self._recursive_put_files()", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def static_text_files():\n return send_from_directory(\"static/\", request.path[1:])", "def _check_required_directories(self) -> None:\n\n if self._all_stages:\n for stage in self._all_stages:\n stage_cfg = self._app_cfg['stages'][stage]\n processor_cfg = stage_cfg['configuration']\n\n # Populate all the directories requested in the configuration.\n for dir_key, dir_id in processor_cfg['dirs'].items():\n dir_path_value = os.path.join(self._data_dir_path, self._app_cfg['dir-paths'][dir_id])\n # Rebuild the key by replacing 'id' with 'path'\n dir_path_key = dir_key.replace('id', 'path')\n processor_cfg[dir_path_key] = dir_path_value\n\n # Create the directory if it doesn't exist.\n self._validate_path(dir_path_value)\n\n # Add the temporary directory.\n processor_cfg['tmp-dir-path'] = self._tmp_dir_path\n\n del processor_cfg['dirs']", "def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif 
src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def modify_all_gulp_dirs(\n gulp_dir_root: Path,\n transform_func: Callable[[GulpExampleId, GulpExampleMetaDict], GulpExampleMetaDict],\n gulp_dir_pattern: Pattern = re.compile(\".*gulp.*\"),\n) -> None:\n gulp_dirs = [\n child_dir\n for child_dir in gulp_dir_root.iterdir()\n if gulp_dir_pattern.search(child_dir.name)\n ]\n for gulp_dir in gulp_dirs:\n modify_metadata(gulp_dir, transform_func)" ]
[ "0.6210542", "0.61488205", "0.5865301", "0.585198", "0.58491004", "0.58321655", "0.5650139", "0.5592172", "0.5504334", "0.54761356", "0.5457472", "0.5440076", "0.54384625", "0.5431056", "0.5406941", "0.5380988", "0.5366491", "0.53493273", "0.5324381", "0.53202146", "0.5297198", "0.52804065", "0.5241121", "0.5181118", "0.5133253", "0.51217544", "0.5097527", "0.50975126", "0.50973344", "0.50950176", "0.50852865", "0.50744647", "0.5068591", "0.5044925", "0.50352454", "0.50336015", "0.50244844", "0.49735463", "0.49633443", "0.49442577", "0.49429858", "0.4921805", "0.49217442", "0.4901294", "0.4894594", "0.4882944", "0.4865154", "0.48518392", "0.48462504", "0.4837769", "0.4825883", "0.48233536", "0.48186696", "0.4813145", "0.48102766", "0.48069382", "0.48021924", "0.4799554", "0.47916818", "0.47835356", "0.4782551", "0.47807252", "0.47773507", "0.47758952", "0.47702587", "0.4759697", "0.47590545", "0.47474825", "0.47396818", "0.47394356", "0.47350997", "0.47328958", "0.4731798", "0.47265357", "0.47132453", "0.4711438", "0.46766603", "0.46756345", "0.4663513", "0.46451622", "0.463714", "0.46292624", "0.461135", "0.46112838", "0.46097928", "0.45980152", "0.45899588", "0.45811263", "0.45810428", "0.4579358", "0.45733455", "0.4571335", "0.45694244", "0.456799", "0.4545087", "0.45362687", "0.45362252", "0.45287594", "0.45263445", "0.45231903" ]
0.81745136
0
Returns a |random value| <= SHIFT_MAX_VAL
def get_shift() -> int: return random.randint(low = -1 *SHIFT_MAX_VAL, high = SHIFT_MAX_VAL)
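The shift helper above passes `low`/`high` keyword arguments, which belong to `numpy.random.randint` (where `high` is exclusive); the standard-library `random.randint(a, b)` takes positional bounds and is inclusive on both ends. A minimal stdlib sketch of the same idea, with `SHIFT_MAX_VAL` assumed as an illustrative constant:

```python
import random

# Assumed illustrative constant; the row above does not define SHIFT_MAX_VAL.
SHIFT_MAX_VAL = 5

def get_shift() -> int:
    """Return a random value v with |v| <= SHIFT_MAX_VAL."""
    # random.randint(a, b) is inclusive on both ends, matching the "<=" in the query.
    return random.randint(-SHIFT_MAX_VAL, SHIFT_MAX_VAL)
```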
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_random(value, max_value, min_value, max_delta):\n # Determine if sensor delta should be added or substracted.\n if value == max_value:\n add = False\n elif value == min_value:\n add = True\n else:\n add = random.random() > 0.5\n\n # Calculate a new delta.\n delta = random.randint(0, max_delta)\n\n # Apply the delta.\n if add:\n value += delta\n else:\n value -= delta\n if value > max_value:\n value = max_value\n elif value < min_value:\n value = min_value\n\n return value", "def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min", "def randbelow_from_randbits(self, n):\n k = int(n-1).bit_length()\n r = self.getrandbits(k) # 0 <= r < 2**k\n while int(r) >= n:\n r = self.getrandbits(k)\n return int(r)", "def randint(maxvalue):\n\n bit_size = common.bit_size(maxvalue)\n\n tries = 0\n while True:\n value = read_random_int(bit_size)\n if value <= maxvalue:\n break\n\n if tries % 10 == 0 and tries:\n # After a lot of tries to get the right number of bits but still\n # smaller than maxvalue, decrease the number of bits by 1. That'll\n # dramatically increase the chances to get a large enough number.\n bit_size -= 1\n tries += 1\n\n return value", "def rand_val(max):\n order = math.ceil(math.log10(max)) #Determine the num of digits in size\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n # Yea, this is quite inefficient\n while (index >= max):\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n return index", "def test_in_range_0_1():\n g = RG.larger_random()\n assert 0 <= next(g) <= 1", "def get_number(maxValue):\r\n return random.randint(1, maxValue)", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def randomShiftVector(values, smin, smax):\n\tshift = np.random.uniform(smin, smax)\n\treturn list(map(lambda va: va + shift, values))", "def binary_blow_wind():\n s = random.random()\n return s < 0.05", "def randInt(max):\n return int(max * random.random())", "def get_random_integer():\n return random.randint(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)", "def _random_max_wrap(*args):\n _, opt_pt = random_maximise(*args)\n return opt_pt", "def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max", "def maybe(self):\n return random.getrandbits(1)", "def get_offset(limit=12):\n return random.randrange(0, limit)", "def fix_rand_value(lo_bound: float, up_bound: float) -> float:\n # In this patch test function for determinism, just return lower bound\n nonlocal _i, _vals_sequence\n v_return = _vals_sequence[_i]\n _i = (_i + 1) % len(_vals_sequence)\n return v_return", "def _bitsfor(maxval):\n maxvalbits = int(round(math.log(maxval) / math.log(2)))\n if maxval != (1 << maxvalbits):\n raise ValueError(\"maxval must be a power of 2, not %d\" % maxval)\n return maxvalbits", "def random_shift(x, fraction):\n min_x, max_x = np.min(x), np.max(x)\n m = np.random.uniform(-fraction, 
fraction, size=x.shape) + 1\n return np.clip(x * m, min_x, max_x)", "def roll(self):\n return randint(1,6)", "def test_always_larger():\n g = RG.larger_random()\n first = next(g)\n second = next(g)\n assert second > first", "def _get_random_value(self):\r\n return random.randint(1, 10)", "def create_random_index(self, max:int):\n return random.randint(0, max - 1)", "def random_int(max=1000):\r\n return randint(0, max)", "def sample(self):\n L = e ** (-self.lamb)\n k, p = 1, rand()\n while p > L:\n k += 1\n p *= rand()\n return k - 1", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def random_pitch_shift(\n spectrogram: tf.Tensor, shift_min: float = -1.0, shift_max: float = 1.0, **kwargs\n) -> tf.Tensor:\n semitone_shift = (\n tf.random_uniform(shape=(1,), seed=0) * (shift_max - shift_min) + shift_min\n )\n return pitch_shift(spectrogram, semitone_shift=semitone_shift, **kwargs)", "def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob", "def constrain(value):\n size = 2**m\n return (value%size)", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def get_random_int_op(minimum: int, maximum: int) -> int:\n import random\n result = random.randint(minimum, maximum)\n print(result)\n return result", "def get_random_value():\n return randint(0, 255) / 256.0", "def range(self):\r\n\t\treturn max(self.sample) - min(self.sample)", "def get_random_bits(self):\n return random.getrandbits(8)", "def random_number(max_number):\n return random.randint(1, max_number)", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def randomNumber(maxNumber):\n return random.randint(1, maxNumber)", "def get_random_value():\n return randint(0, 255) / 256.0", "def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)", "def le(value, limit):\n return value <= limit", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def _random(self, key):\n\n if hasattr(key, \"encode\"):\n key = key.encode('ascii')\n\n value = (zlib.crc32(key, self.seed) & MAX_VALUE)\n\n return value * INV_MAX_VALUE", "def roll(self):\n self.current_roll = random.randint(self.min, self.max)\n return self.current_roll", "def roll(self):\n roll = random.random()\n sum = 0\n for item in self.mask:\n sum += item.prob\n if sum >= roll: return item.elem\n return None", "def block(self):\n return randint(0, self.max_block)", "def mt_rand(min = 0, max = sys.maxint):\n return random.randint(min, max)", "def rand(self) -> ZqValue:\n\n return self(randbelow(int(self.q)))", "def _abs_cap(val, max_abs_val=1):\r\n return max(min(val, max_abs_val), -max_abs_val)", "def lrshift(val, n) -> np.int64:\n return (val % (1 << 64)) >> n", "def get_random_direction(self) -> int:\n return int(self.generate_random_no() * 10 < 5)", "def argmax_random_tie(seq, key=identity):\n return argmax(shuffled(seq), key=key)", "def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)", "def attack(self):\n \n half_max_damage = int(self.max_damage) // 2\n random_value = randint(half_max_damage, 
int(self.max_damage))\n\n return random_value", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n", "def getRotPwr(self):\n out = self.getOutputValue(\"RotPwr\")\n pwr = max(out)\n return pwr", "def get_random_real_number():\n return random.uniform(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)", "def decrease():\n decrease_amount = random.uniform(MIN_DECREASE, MAX_DECREASE)\n return decrease_amount", "def _shift_mask(mask, max_shift_factor=0.05):\n nzy, nzx, _ = mask.nonzero()\n h = nzy.max() - nzy.min()\n w = nzx.max() - nzx.min()\n size = np.sqrt(h * w)\n offset = np.random.uniform(-size * max_shift_factor, size * max_shift_factor,\n 2)\n shifted_mask = interpolation.shift(np.squeeze(mask, axis=2),\n offset, order=0).astype('bool')[...,\n np.newaxis]\n return shifted_mask", "def gen_rand(l):\n w = int(l / 2)\n\n min = (1 << (w - 1)) | 1\n max = (1 << w) - 1\n\n n = random.randrange(min, max) | 1\n\n return n", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def uniform(self, key, min_value=0., max_value=1.):\n return min_value + self._random(key) * (max_value - min_value)", "def mutate_with_max_frame(self, max_frame):\n self.frame = random.randint(1,max_frame)", "def get_random_sleep() -> int:\n return random.randint(1, 9)", "def i_rand_a():\n return i_random() % 95 + 32", "def generate_limit_and_ticks(max_value):\n print \"max value:\",max_value\n order = math.log10(max_value)\n multiple, limit_power = math.modf(order)\n multiple = math.pow(10, abs(multiple))\n\n print \"base power and extension:\", limit_power, multiple\n\n # within an order of magnitude, there are three ranges that are useful to look at:\n if multiple > 5:\n # over 5.0x10^x, and we care about that decade as a whole\n limit_power += 1\n num = 1\n extension = 0\n elif multiple > 2:\n # over 2.0*10^X, and we care about the first half of the decade (0-5.0)*10^x\n num = 5\n extension = 0\n else:\n # below 2.0*10^X, we really care about 20*10^(x-1)\n\n extension, num = math.modf(multiple)\n print \"multiple num and extension:\", num, extension\n\n # And the same 5/2/10 breakout repeats at this level, but adds to 10 to give more buckets over (10+n)*10^(x-1)\n num = 1\n if extension > 0.5:\n num += 1\n extension = 0\n elif extension > 0.2:\n extension = 0.5\n elif extension > 0.1:\n extension = 0.2\n elif extension > .001:\n extension = 0.1\n else:\n extension = 0\n\n print \"new power, number, extension:\", limit_power, num, extension\n\n max_value = math.pow(10, limit_power)* (num+extension)\n print \"new max: (number+extension)*10^power:\", max_value\n\n if 3 <= num < 10:\n steps = num\n else:\n steps = max_value / math.pow(10, limit_power-1)\n\n print \"steps:\",steps\n\n ticks = np.linspace(0, max_value, steps+1, endpoint=True)\n print \"steps:\", ticks\n\n return max_value, ticks", "def randomize_value(self) -> None:", "def d(qty, sides):\r\n value = 0\r\n while qty > 0:\r\n value = value + random.randint(1, sides)\r\n qty = qty - 1\r\n return value", "def mt_rand (low = 0, high = sys.maxint):\n return random.randint (low, high)", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n 
print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value", "def get_rescramble():\n mini = 2\n maxi = 7\n global r_base\n #set value of rescramble\n rescramble = r_base + r_key\n #resets rescramble if it gets to high or low.\n if rescramble > maxi or rescramble < mini:\n rescramble = mini\n r_base = rescramble\n return rescramble", "def random_round(x):\r\n prob = x - int(x)\r\n int_x = int(x)\r\n return int_x + (prob > np.random.rand())", "def custom_argmax(arr):\n return np.random.choice(np.flatnonzero(arr == arr.max()))", "def cross_corr_comparison_shift_allowed(data, synth, max_samples_shift_limit=5):\n # Upsample the data, via interpolation, in order to allow for finer shifts:\n upsamp_factor = 4\n data_high_res = np.interp(np.arange(0.,len(data),1./upsamp_factor), np.arange(len(data)), data)\n synth_high_res = np.interp(np.arange(0.,len(synth),1./upsamp_factor), np.arange(len(synth)), synth)\n # Loop over sample shifts, calculating ncc for each component:\n samps_to_shift = np.arange(-1*max_samples_shift_limit*upsamp_factor, max_samples_shift_limit*upsamp_factor, dtype=int)\n ncc_values = np.zeros(len(samps_to_shift),dtype=float)\n for a in range(len(samps_to_shift)):\n samp_to_shift = samps_to_shift[a]\n data_tmp = np.roll(data_high_res,samp_to_shift)\n synth_tmp = np.roll(synth_high_res,samp_to_shift)\n ncc_values[a] = cross_corr_comparison(data_tmp, synth_tmp)\n # And get max. cross-correlation coefficient:\n ncc_max = np.max(ncc_values)\n return ncc_max", "def random_negative(value, random_negative_prob):\n return -value if np.random.rand() < random_negative_prob else value", "def native_max_value(self) -> float:\n return 9", "def ge(value, limit):\n return value >= limit", "def select_arm(self):\n\n # Exploitation\n if random.uniform(0, 1) > self.epsilon:\n return np.argmax(self.values)\n\n # Exploration\n else:\n return random.randrange(len(self.values))", "def closure(x):\n\n nonlocal max_shift_amount\n\n if (x in log2_dict):\n return log2_dict[x]\n\n while (True):\n max_num = 1 << max_shift_amount\n log2_dict[max_num] = max_shift_amount\n max_shift_amount += 1\n if (x == max_num):\n return log2_dict[x]", "def getRandSpeed(self) -> int:\n num = int(random.uniform(-4,4))\n while(-1<=num and num<=1):\n num = int(random.uniform(-4,4))\n return num", "def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0", "def roll(self, msg: MessageWrapper, max_value: int = 6) -> str:\n value = self.sys_random.randint(1, max_value)\n\n return f\"{msg.raw_msg.author.name} rolled a {value} on a d{max_value}\"", "def getRandom(self) -> int:\n size = len(self.value_set)\n if size > 0:\n from random import randint\n x = randint(1, size)\n return self.values[x - 1]", "def get_bracket(chance):\r\n rnd = random.randint(0,100)\r\n if rnd >=0 and rnd <= chance*100:\r\n return True\r\n else:\r\n return False", "def max_power_in_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - (m.P_IN_MAX[g] * (1 - m.F[g, y])) <= 0", "def stop(self):\n return self.random.uniform(0, 1) < 1/self.k", "def random_range():\n rnd = int(random.randrange(1,8))\n print \"Random number generated: %s\" %(rnd)\n return rnd", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def argmax_break_ties(self, probs):\n return np.random.choice(np.where(probs == probs.max())[0])", "def 
sop(*args, **kwargs):\n return _randint(0, 2 ** 15 - 1)", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)", "def random(self):\r\n return random.randint(1, 4)", "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val" ]
[ "0.6203564", "0.60628116", "0.600135", "0.59833807", "0.59038484", "0.58453417", "0.58328253", "0.5776378", "0.5762465", "0.575023", "0.5747984", "0.57101095", "0.57073265", "0.57069814", "0.5686693", "0.5646487", "0.5634794", "0.5613866", "0.5598931", "0.559132", "0.55743605", "0.5573085", "0.55629975", "0.55526876", "0.55520004", "0.55499864", "0.55307555", "0.55276936", "0.54975766", "0.5492536", "0.5449152", "0.5447049", "0.54366714", "0.5433934", "0.54324704", "0.5432203", "0.5418811", "0.54150903", "0.54150355", "0.5387811", "0.5370016", "0.5370016", "0.5369958", "0.5351004", "0.53485775", "0.534334", "0.53428066", "0.5342229", "0.5330117", "0.532849", "0.532381", "0.53181624", "0.5304323", "0.53018606", "0.5301754", "0.52875245", "0.52875245", "0.5286675", "0.5286675", "0.52681285", "0.52584994", "0.52460325", "0.5232785", "0.5232024", "0.5231728", "0.5227942", "0.52155906", "0.5214749", "0.5209919", "0.52012086", "0.5197503", "0.51964587", "0.5187058", "0.517438", "0.5172071", "0.51718783", "0.5170935", "0.5168152", "0.51636946", "0.5162392", "0.5148345", "0.5145212", "0.5144911", "0.5141107", "0.51346517", "0.51251626", "0.5117077", "0.51159483", "0.5115681", "0.5105796", "0.5100419", "0.50995713", "0.5099524", "0.5097219", "0.5093558", "0.5093313", "0.50823224", "0.50794166", "0.5077503", "0.5076099" ]
0.7717284
0
load batch of cifar
def load_cifar10_batch(directory): with open(directory, 'rb') as fo: datadict = pickle.load(fo, encoding='bytes') X = np.array(datadict[b'data']) Y = np.array(datadict[b'labels']) return X, Y
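A short usage sketch for the batch loader above, assuming the standard extracted `cifar-10-batches-py` layout (`data_batch_1` through `data_batch_5` plus `test_batch`); the root path and the final HWC reshape are illustrative, not part of the row:

```python
import os
import pickle
import numpy as np

def load_cifar10_batch(directory):
    # Each CIFAR-10 python batch is a pickled dict with b'data' (N x 3072 uint8) and b'labels'.
    with open(directory, 'rb') as fo:
        datadict = pickle.load(fo, encoding='bytes')
    X = np.array(datadict[b'data'])
    Y = np.array(datadict[b'labels'])
    return X, Y

# Assumed local path to the extracted archive.
root = 'cifar-10-batches-py'
xs, ys = [], []
for b in range(1, 6):
    X, Y = load_cifar10_batch(os.path.join(root, 'data_batch_%d' % b))
    xs.append(X)
    ys.append(Y)
X_train = np.concatenate(xs).reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
y_train = np.concatenate(ys)
X_test, y_test = load_cifar10_batch(os.path.join(root, 'test_batch'))
```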
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_batch(n):\r\n print ('Loadng one batch...')\r\n batchfilename = flist[n - 1] + '.pkl'\r\n if not os.path.exists(batchfilename):\r\n set_batch_data()\r\n with open(batchfilename, 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def LoadBatch(filename):", "def _load_cifar_batch(fpath, label_key='labels'):\n if isinstance(fpath, (os.PathLike, str, bytes)):\n with open(fpath, 'rb') as f:\n return _load_cifar_batch(f, label_key)\n\n d = pickle.load(fpath, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32).transpose([0, 2, 3, 1])\n return data, labels", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_cifar10(batch_paths):\n batches = [load_cifar10_batch(path) for path in batch_paths]\n data = torch.cat([batch[0] for batch in batches])\n labels = torch.cat([batch[1] for batch in batches])\n return data, labels", "def load_CIFAR100(batch_dir):\r\n ims, coarse_labels, fine_labels = load_CIFAR_batch(batch_dir + '/train')\r\n ims_t, c_labels, f_labels = load_CIFAR_batch(batch_dir + '/test')\r\n ims = np.concatenate((ims, ims_t))\r\n coarse_labels = np.concatenate((coarse_labels, c_labels))\r\n fine_labels = np.concatenate((fine_labels, f_labels))\r\n return ims, coarse_labels, fine_labels", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return 
X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def load_dataset_cifar10():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n return (x_train, y_train), (x_test, y_test)", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n #一个样本由标签和图像数据组成\n #3072 = 32 x 32 x 3\n data_dict = p.load(f, encoding= 'bytes')\n images = data_dict[b'data']\n labels = data_dict[b'labels']\n #把原始数据结构调整为BCWH batches, channels, width, height\n images = images.reshape(10000, 3, 32, 32)\n #tensorflow 处理图像数据的结构:BWHC\n #把C移动到最后一个维度\n images = images.transpose(0, 2, 3, 1)\n\n labels = np.array(labels)\n return images, labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b,))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def load_CIFAR10(path):\r\n sampleList = []\r\n labelList = []\r\n # load all the data, as there only five training samples name as data_batch_id\r\n for i in range(1, 6):\r\n # get full filename\r\n filename = os.path.join(path, 'data_batch_%d' % (i, ))\r\n x, y = load_CIFAR_batch(filename)\r\n\r\n sampleList.append(x)\r\n labelList.append(y)\r\n\r\n # combine elements as one array\r\n Xtr = np.concatenate(sampleList)\r\n Ytr = np.concatenate(labelList)\r\n del x, y\r\n print(\"Training data loaded, total size : %d\", len(Xtr))\r\n # load test data\r\n Xte, Yte = load_CIFAR_batch(os.path.join(path, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte", "def load_batch(batch, feat_list, device='cpu'):\n batch_feat_list = []\n for 
hop_feat_list in feat_list:\n batch_feats = [feat[batch] for feat in hop_feat_list]\n batch_feat_list.append(batch_feats)\n\n batch_feat_list = [torch.stack(feat) for feat in batch_feat_list]\n batch_feats = torch.cat(batch_feat_list, dim=0)\n # if len(batch_feats.shape) == 2:\n # batch_feats = batch_feats.unsqueeze(1)\n\n return batch_feats.to(device)", "def load_cifar10(directory):\n train_data = []\n train_labels = []\n for b in range(1, 6):\n f = os.path.join(directory, 'data_batch_%d' % (b,))\n X, Y = load_cifar10_batch(f)\n train_data.append(X)\n train_labels.append(Y)\n train_data = np.concatenate(train_data)\n train_labels = np.concatenate(train_labels)\n del X, Y\n test_data, test_labels = load_cifar10_batch(os.path.join(directory, 'test_batch'))\n return train_data, train_labels, test_data, test_labels", "def load_cifar(hparams):\n all_labels = []\n\n total_batches_to_load = 5\n assert hparams.train_size + hparams.validation_size <= 50000\n if hparams.eval_test:\n total_batches_to_load += 1\n # Determine how many images we have loaded\n total_dataset_size = 50000\n train_dataset_size = total_dataset_size\n if hparams.eval_test:\n total_dataset_size += 10000\n\n if hparams.dataset == 'cifar10':\n all_images = []\n elif hparams.dataset == 'cifar100':\n all_images = np.empty((1, 50000, 3072), dtype=np.uint8)\n if hparams.eval_test:\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\n if hparams.dataset == 'cifar10':\n datafiles = [\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\n 'data_batch_5']\n\n if hparams.eval_test:\n datafiles.append('test_batch')\n num_classes = 10\n elif hparams.dataset == 'cifar100':\n datafiles = ['train']\n if hparams.eval_test:\n datafiles.append('test')\n num_classes = 100\n else:\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\n if hparams.dataset != 'test':\n for file_num, f in enumerate(datafiles):\n d = unpickle(os.path.join(hparams.data_path, f))\n if hparams.dataset == 'cifar10':\n labels = np.array(d['labels'])\n else:\n labels = np.array(d['fine_labels'])\n if f == 'test':\n test_data[0] = copy.deepcopy(d['data'])\n if hparams.dataset == 'cifar10':\n all_images.append(test_data)\n else:\n all_images = np.concatenate([all_images, test_data], axis=1)\n else:\n if hparams.dataset == 'cifar10':\n all_images.append(copy.deepcopy(d['data']))\n else:\n all_images[file_num] = copy.deepcopy(d['data'])\n nsamples = len(labels)\n for idx in range(nsamples):\n all_labels.append(labels[idx])\n if hparams.dataset == 'cifar10':\n all_images = np.concatenate(all_images, axis=0)\n all_images = all_images.reshape(-1, 3072)\n all_images = all_images.reshape(-1, 3, 32, 32) # pylint: disable=too-many-function-args\n all_images = all_images.transpose(0, 2, 3, 1).copy()\n all_images = all_images / 255.0\n mean = augmentation_transforms.MEANS\n std = augmentation_transforms.STDS\n tf.logging.info('mean:{} std: {}'.format(mean, std))\n all_images = (all_images - mean) / std\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\n\n assert len(all_images) == len(all_labels)\n tf.logging.info(\n 'In CIFAR10 loader, number of images: {}'.format(len(all_images)))\n\n extra_test_images = None\n extra_test_labels = None\n if hparams.extra_dataset == 'cifar10_1':\n extra_test_ds = tfds.as_numpy(\n tfds.load('cifar10_1', split='test', batch_size=-1))\n extra_test_images = ((extra_test_ds['image'] / 255.0) - mean) / std\n extra_test_labels = np.eye(num_classes)[np.array(\n extra_test_ds['label'], 
dtype=np.int32)]\n\n # Break off test data\n if hparams.eval_test:\n test_images = all_images[train_dataset_size:]\n test_labels = all_labels[train_dataset_size:]\n else:\n test_images = []\n test_labels = []\n all_images = all_images[:train_dataset_size]\n all_labels = all_labels[:train_dataset_size]\n return all_images, all_labels, test_images, test_labels, extra_test_images, extra_test_labels", "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,2):\n f = os.path.join(ROOT, 'data_batch_%d' % b)\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y) \n #利用np.concatenate将xs、ys弄成一行\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n #获取测试集\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='bytes')\n #datadict为尺寸为4的字典:b'labels', b'data', b'filenames', b'batch_label'\n X = datadict[b'data']\n Y = datadict[b'labels']\n #X的尺寸为10000*3072(10000张图片,每个图片尺寸为32*32,三通道),reshape为10000*32*32*3,再通过transpose令索引值(x',y',z',w')=(x,z,w,y),最后转为float类型\n #三个channel分别为rgb\n #索引为(图片编号,x索引,y索引,rgb三通道)\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n data_dict = cPickle.load(f)\r\n ims = data_dict['data']\r\n coarse_labels = np.array(data_dict['coarse_labels'])\r\n fine_labels = np.array(data_dict['fine_labels'])\r\n return ims, coarse_labels, fine_labels", "def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' ', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' 
in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n f.close()\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n return data, labels", "def load_cifar10(directory, normalize=True):\n training_data = []\n training_labels = []\n for i in range(1, 6):\n try:\n d = unpickle(directory + f\"/data_batch_{i}\")\n except FileNotFoundError:\n raise Exception(f\"File 'data_batch_{i}' is not found in the specified directory '{directory}'.\")\n training_data.append(d[b\"data\"])\n training_labels.append(d[b\"labels\"])\n training_data = np.vstack(training_data)\n training_data = np.reshape(training_data, newshape=(-1, 3, 32, 32))\n training_labels = np.concatenate(training_labels)\n training_labels = np.array(list(map(lambda hot: one_hot(10, hot), training_labels)))\n\n try:\n test = unpickle(directory + 
\"/test_batch\")\n except FileNotFoundError:\n raise Exception(f\"File 'test_batch' is not found in the specified directory '{directory}'.\")\n test_data = np.reshape(test[b\"data\"], newshape=(-1, 3, 32, 32))\n test_labels = np.array(list(map(lambda hot: one_hot(10, hot), test[b\"labels\"])))\n\n try:\n meta = unpickle(directory + \"/batches.meta\")\n except FileNotFoundError:\n raise Exception(f\"File 'batches.meta' is not found in the specified directory '{directory}'.\")\n label_names = meta[b\"label_names\"]\n label_names = list(map(lambda x: x.decode(\"utf-8\"), label_names))\n\n if normalize:\n training_data = training_data / 255\n test_data = test_data / 255\n\n return training_data, training_labels, test_data, test_labels, label_names", "def load_cifar10_data(self, data_path='data/cifar-10-batches-py',\n n_train_samples=50000, n_test_samples=10000):\n train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_path + '/data_batch_{}'.format(i))\n if i == 1:\n train_data = data_dic['data']\n else:\n train_data = np.vstack((train_data, data_dic['data']))\n\n train_labels += data_dic['labels']\n\n test_data_dic = unpickle(data_path + '/test_batch')\n test_data = test_data_dic['data']\n test_labels = test_data_dic['labels']\n\n train_data = train_data.reshape((len(train_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n train_data = np.rollaxis(train_data, 1, 4)\n train_labels = np.array(train_labels)\n\n test_data = test_data.reshape((len(test_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n test_data = np.rollaxis(test_data, 1, 4)\n test_labels = np.array(test_labels)\n\n self.train_dataset = {'data': train_data[0:n_train_samples],\n 'labels': train_labels[0:n_train_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_train_samples):\n self.train_dataset['cls'][i][self.train_dataset['labels'][i]] = 1.\n\n self.test_dataset = {'data': test_data[0:n_test_samples],\n 'labels': test_labels[0:n_test_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_test_samples):\n self.test_dataset['cls'][i][self.test_dataset['labels'][i]] = 1.\n\n self.train_dataset['data_array'] = np.array(\n [item.flatten() for item in self.train_dataset['data']])\n\n self.train_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.train_dataset['labels']])\n\n self.train_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.train_dataset['cls']])\n\n self.test_dataset['data_array'] = np.array(\n [item.flatten() for item in self.test_dataset['data']])\n\n self.test_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.test_dataset['labels']])\n\n self.test_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.test_dataset['cls']])\n\n return None", "def load_CIFAR10(ROOT):\r\n xs = []\r\n ys = []\r\n for b in range(1,6):\r\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\r\n X, Y = load_CIFAR_batch(f)\r\n xs.append(X)\r\n ys.append(Y)\r\n Xtr = np.concatenate(xs)\r\n Ytr = np.concatenate(ys)\r\n del X, Y\r\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte\r\n\r\n\tdef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\r\n # Load the raw CIFAR-10 data\r\n \r\n cifar10_dir = 'Downloads/cifar-10-batches-py'\r\n \r\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\r\n\r\n # Subsample the 
data\r\n mask = range(num_training, num_training + num_validation)\r\n X_val = X_train[mask]\r\n y_val = y_train[mask]\r\n mask = range(num_training)\r\n X_train = X_train[mask]\r\n y_train = y_train[mask]\r\n mask = range(num_test)\r\n X_test = X_test[mask]\r\n y_test = y_test[mask]\r\n\r\n x_train = X_train.astype('float32') \r\n x_test = X_test.astype('float32')\r\n \r\n x_train = x_train.reshape(-1, 32, 32, 3)\r\n x_test = x_test.reshape(-1, 32, 32, 3)\r\n x_train /= 255\r\n x_test /= 255\r\n\r\n return x_train, y_train, X_val, y_val, x_test, y_test", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def load_cifar10(data_path=\".\", test_size=0.2, random_state=1337):\n test_path = os.path.join(data_path, \"cifar-10-batches-py/test_batch\")\n train_paths = [os.path.join(data_path, \"cifar-10-batches-py/data_batch_%i\" % i) for i in range(1, 6)]\n\n if not os.path.exists(test_path) or not all(list(map(os.path.exists, train_paths))):\n print (\"Dataset not found. Downloading...\")\n download_cifar(data_path,\n url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tarname='cifar-10-python.tar.gz')\n\n train_batches = list(map(unpickle, train_paths))\n test_batch = unpickle(test_path)\n\n X = np.concatenate([batch[\"data\"] for batch in train_batches]).reshape([-1, 3, 32, 32]).astype('float32') / 255\n y = np.concatenate([batch[\"labels\"] for batch in train_batches]).astype('int32')\n X_train, X_val, y_train, y_val = train_test_split(X, y,\n test_size=test_size,\n random_state=random_state)\n\n X_test = test_batch[\"data\"].reshape([-1, 3, 32, 32]).astype('float32') / 255\n y_test = np.array(test_batch[\"labels\"]).astype('int32')\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def cifar100(path, label_mode='fine'):\n def _load_batch(filepath, label_key):\n with open(filepath, 'rb') as f:\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n d_decoded = {} # decode utf8\n for k, v in six.iteritems(d):\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n images = d['data']\n labels = d[label_key]\n images = images.reshape(images.shape[0], 3, 32, 32)\n labels = np.reshape(labels, len(labels,))\n return images, labels\n path = os.path.expanduser(path)\n directory = 'cifar-100-python'\n if not os.path.exists(os.path.join(path, directory)):\n url = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n maybe_download_and_extract(path, url)\n\n filepath = os.path.join(path, directory, 'train')\n x_train, y_train = _load_batch(filepath, label_mode + '_labels')\n\n filepath = os.path.join(path, directory, 'test')\n x_test, y_test = _load_batch(filepath, label_mode + '_labels')\n return (x_train, y_train), (x_test, y_test)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), 
(0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error 
checking integrity of:\", filepath)\n return False\n\n return True", "def __init__(self, data_path, batch_size, **kwargs):\n super().__init__(batch_size, **kwargs)\n\n _, num_classes, X_train, y_train, X_val, y_val = load_cifar10_shard(shard_num=data_path, **kwargs)\n\n self.training_data_size = len(X_train)\n self.validation_data_size = len(X_val)\n self.num_classes = num_classes\n self.train_loader = self.create_loader(X=X_train, y=y_train, shuffle=True)\n self.val_loader = self.create_loader(X=X_val, y=y_val, shuffle=False)", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def load_next_batch(self, roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = np.random.randint( 0, high=len(self.config.TRAIN.SCALES), size=num_images)\n assert (self.config.TRAIN.BATCH_SIZE % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'. 
\\\n format(num_images, self.config.TRAIN.BATCH_SIZE)\n \n # Get the input image blob, formatted for caffe\n im_blob, im_scales = self._get_image_blobs(roidb, random_scale_inds)\n \n blobs = {'data': im_blob}\n \n assert len(im_scales) == 1, \"Single batch only\"\n assert len(roidb) == 1, \"Single batch only\"\n # gt boxes: (x1, y1, x2, y2, cls)\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n blobs['gt_boxes'] = gt_boxes\n blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] if 'gt_ishard' in roidb[0] \\\n else np.zeros(gt_inds.size, dtype=int)\n # blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]\n blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] if 'dontcare_areas' in roidb[0] \\\n else np.zeros([0, 4], dtype=float)\n blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n blobs['im_name'] = os.path.basename(roidb[0]['image'])\n \n return blobs", "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def cycle_loader(loader, device):\n while True:\n for batch in loader:\n # NOTE this is an adhoc solution\n batch.src = (batch.src[0].to(device), batch.src[1].to(device))\n batch.tgt = batch.tgt.to(device)\n logit, indices = batch.bert_topk\n batch.bert_topk = (logit.to(device), indices.to(device))\n yield batch", "def init_batch(self):\n pass", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def get_loader(root, path, vocab_con,transform, batch_size, shuffle, num_workers):\n # COCO caption dataset\n clef = ClefDataset(root=root,\n csv= path,\n vocab_concept= vocab_con,\n transform=transform)\n\n # Data loader for COCO dataset\n # This will return (images, captions, lengths) for every iteration.\n # images: tensor of shape (batch_size, 3, 224, 224).\n # captions: tensor of shape (batch_size, padded_length).\n # lengths: list indicating valid length for each caption. 
length is (batch_size).\n data_loader = torch.utils.data.DataLoader(dataset=clef,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def load_rbc( fname, skiprows, nx, ny ):\n C = numpy.loadtxt( fname, skiprows=skiprows ) \n cell_frames = [ C[i].reshape(( nx,ny )) for i in range( 5000-skiprows ) ]\n return cell_frames", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def read_batch(batch_size ,file_dir):\n batch_images = []\n batch_labels = []\n temp,size= get_files(file_dir)\n\n image_list = list(temp[:, 0])\n label_list = list(temp[:, 1])\n Size = size-1\n for i in range(batch_size):\n # random class choice\n # (randomly choose a folder of image of the same class from a list of previously sorted wnids)\n # class of the im\n class_index = random.randint(0, Size)\n batch_images.append(read_image(image_list[class_index]))\n batch_labels.append(onehot(int(label_list[class_index])))\n np.vstack(batch_images)\n np.vstack(batch_labels)\n return batch_images, batch_labels", "def loadbatch():\n s=\"select * from tblbatch where status='1'\"\n c.execute(s)\n data=c.fetchall()\n return data", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = 
[os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def load_chunk(self, start): # TODO parallelize this whole process\n self.X = queue.Queue()\n n = 0 # number of loaded batches\n print('stop loading : %s' % self.stop_loading())\n print('start + n : %s' % str(start + n))\n while (not self.stop_loading()) and (start + n) < self.size:\n print('load')\n self.X.put(np.load(self.data_filenames[start+n]))\n n += 1\n print('return chunk')\n return n", "def create_dataset_cifar10(data_path, batch_size=32, num_parallel_workers=8, do_train=True):\r\n # define dataset\r\n data_path = os.path.join(data_path, \"cifar-10-batches-bin\" if do_train else \"cifar-10-verify-bin\")\r\n\r\n cifar_ds = ds.Cifar10Dataset(data_path, num_parallel_workers=num_parallel_workers, shuffle=do_train)\r\n\r\n # define map operations\r\n resize_height, resize_width = 32, 32\r\n rescale = 1.0 / 255.0\r\n shift = 0.0\r\n random_crop_op = CV.RandomCrop([32, 32], [4, 4, 4, 4])\r\n random_horizontal_op = CV.RandomHorizontalFlip(prob=0.5)\r\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\r\n rescale_op = CV.Rescale(rescale, shift)\r\n normalize_op = CV.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])\r\n hwc2chw_op = CV.HWC2CHW()\r\n type_cast_op = C.TypeCast(mstype.int32)\r\n\r\n if do_train:\r\n compose_op = [random_crop_op, random_horizontal_op, resize_op, rescale_op, normalize_op, hwc2chw_op]\r\n else:\r\n 
compose_op = [resize_op, rescale_op, normalize_op, hwc2chw_op]\r\n cifar_ds = cifar_ds.map(input_columns=\"image\", operations=compose_op, num_parallel_workers=num_parallel_workers)\r\n cifar_ds = cifar_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\r\n cifar_ds = cifar_ds.batch(batch_size, drop_remainder=True)\r\n\r\n return cifar_ds", "def _load_all(self, anno_file, shuffle):\n image_set_index = []\n labels = []\n coco = COCO(anno_file)\n img_ids = coco.getImgIds()\n #print(img_ids)\n cars=[3,6,8]\n pedestrians=[1]\n cyclists=[2,4]\n lights=[10]\n signs=[13]\n\n apex_categories=cars+pedestrians+cyclists+lights+signs\n cnt=0\n humanonly=0\n human_count=0\n\n for img_id in img_ids:\n relevant=False\n # filename\n image_info = coco.loadImgs(img_id)[0]\n filename = image_info[\"file_name\"]\n #print(filename)\n #subdir = filename.split('_')[1]\n height = image_info[\"height\"]\n width = image_info[\"width\"]\n # label\n anno_ids = coco.getAnnIds(imgIds=img_id)\n annos = coco.loadAnns(anno_ids)\n label = []\n\n #print(\"listing categories for filename: \"+filename)\n\n hashumans=False\n for anno in annos:\n cat_id = int(anno[\"category_id\"])\n if(cat_id in apex_categories):\n cat_reduced= 0 if (cat_id in cars) else 1 if(cat_id in pedestrians) else 2 if(cat_id in cyclists) else 3 if(cat_id in lights) else 4\n bbox = anno[\"bbox\"]\n assert len(bbox) == 4\n xmin = float(bbox[0]) / width\n ymin = float(bbox[1]) / height\n xmax = xmin + float(bbox[2]) / width\n ymax = ymin + float(bbox[3]) / height\n label.append([cat_reduced, xmin, ymin, xmax, ymax, 0])\n #print(\"category: %d\"%cat_reduced)\n if (cat_id in pedestrians):\n hashumans=True\n if(cat_id not in pedestrians): #at least one non-person object is necessary\n relevant=True\n\n if(label and not relevant):\n humanonly+=1\n if label and relevant:\n if(hashumans):\n human_count+=1\n #print(\"adding \"+filename)\n labels.append(np.array(label))\n image_set_index.append(os.path.join(self.set, filename))\n cnt+=1\n print(\"added %d images\"%cnt)\n print(\"%d images has only humans\"%humanonly)\n print(\"%d registered images has humans\"%human_count)\n\n if shuffle:\n import random\n indices = range(len(image_set_index))\n random.shuffle(indices)\n image_set_index = [image_set_index[i] for i in indices]\n labels = [labels[i] for i in indices]\n # store the results\n self.image_set_index = image_set_index\n self.labels = labels", "def load_data5():\n# dirname = 'cifar-10-batches-py'\n# origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n# path = get_file(dirname, origin=origin, untar=True)\n# path= './cifar-10-batches-py'\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n# Below shows a test class has 999 examples instead of the claimed 1000\n# tclasscount=np.zeros((10,), dtype=int)\n# for i in range(0, len(y_test)-1):\n# tclasscount[y_test[i][0]]= tclasscount[y_test[i][0]] + 1\n# print('Test class count',tclasscount)\n num_train_samples = 50000\n num_5_class = 25000\n num_5_test = 4999 # should be 5000 if all the categories had 1000 in them but they do not. 
One is missing.\n print('x_train shape orig:', x_train.shape)\n print('More:', x_train.shape[1:])\n print('y_test shape',y_test.shape)\n\n x5_train = np.empty((num_5_class, 32, 32, 3), dtype='uint8')\n y5_train = np.empty((num_5_class,), dtype='uint8')\n\n count=0\n\n for i in range(0, len(y_train)-1):\n if (y_train[i][0] == 2) or (y_train[i][0] == 3) or (y_train[i][0] == 4) or (y_train[i][0] == 5) or (y_train[i][0] == 7):\n x5_train[count]=x_train[i]\n y5_train[count]=y_train[i]\n count=count+1\n \n # find test data of interest\n count=0\n x5_test=np.empty((num_5_test, 32, 32, 3), dtype='uint8')\n y5_test= np.empty((num_5_test,), dtype='uint8')\n\n for i in range(0, len(y_test)-1):\n if (y_test[i][0] == 2) or (y_test[i][0] == 3) or (y_test[i][0] == 4) or (y_test[i][0] == 5) or (y_test[i][0] == 7):\n x5_test[count]=x_test[i]\n y5_test[count]=y_test[i]\n count=count+1\n# Below shows class 7 is only 999 and not 1000 examples!!! One horse got away it seems.\n# if(y_test[i][0] == 2):\n# c2=c2+1\n# if(y_test[i][0] == 3):\n# c3=c3+1\n# if(y_test[i][0] == 4):\n# c4=c4+1\n# if(y_test[i][0] == 5):\n# c5=c5+1\n# if(y_test[i][0] == 7):\n# c7=c7+1\n# print('c2count, c3count, c4count, c5count, c7count',c2,c3,c3,c5,c7)\n# print('y5tstshape',y5_test.shape, count)\n# print('y5tst',y5_test)\n# return (x_train, y_train), (x_test, y_test)\n return (x5_train, y5_train), (x5_test, y5_test)", "def load_train_batch(self):\n def _parse_train_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n tgt_image, src_image_stack = \\\n self.unpack_image_sequence(\n image_decoded, self.img_height, self.img_width, self.num_source)\n return tgt_image, src_image_stack\n\n def _batch_preprocessing(stack_images, intrinsics, optional_data):\n intrinsics = tf.cast(intrinsics, tf.float32)\n image_all = tf.concat([stack_images[0], stack_images[1]], axis=3)\n\n if self.match_num == 0: # otherwise matches coords are wrong\n image_all, intrinsics = self.data_augmentation(\n image_all, intrinsics, self.img_height, self.img_width)\n tgt_image = image_all[:, :, :, :3]\n src_image_stack = image_all[:, :, :, 3:]\n intrinsics = self.get_multi_scale_intrinsics(intrinsics, self.num_scales)\n return tgt_image, src_image_stack, intrinsics, optional_data\n\n file_list = self.format_file_list(self.dataset_dir, 'train')\n self.steps_per_epoch = int(len(file_list['image_file_list'])//self.batch_size)\n\n input_image_names_ph = tf.placeholder(tf.string, shape=[None], name='input_image_names_ph')\n image_dataset = tf.data.Dataset.from_tensor_slices(\n input_image_names_ph).map(_parse_train_img)\n\n cam_intrinsics_ph = tf.placeholder(tf.float32, [None, 3, 3], name='cam_intrinsics_ph')\n intrinsics_dataset = tf.data.Dataset.from_tensor_slices(cam_intrinsics_ph)\n\n datasets = (image_dataset, intrinsics_dataset, intrinsics_dataset)\n if self.read_pose:\n poses_ph = tf.placeholder(tf.float32, [None, self.num_source+1, 6], name='poses_ph')\n pose_dataset = tf.data.Dataset.from_tensor_slices(poses_ph)\n datasets = (image_dataset, intrinsics_dataset, pose_dataset)\n if self.match_num > 0:\n matches_ph = tf.placeholder(tf.float32, [None, self.num_source, self.match_num, 4], name='matches_ph')\n match_dataset = tf.data.Dataset.from_tensor_slices(matches_ph)\n datasets = (image_dataset, intrinsics_dataset, match_dataset)\n\n all_dataset = tf.data.Dataset.zip(datasets)\n all_dataset = all_dataset.batch(self.batch_size).repeat().prefetch(self.batch_size*4)\n all_dataset = 
all_dataset.map(_batch_preprocessing)\n iterator = all_dataset.make_initializable_iterator()\n return iterator", "def Cifar10_preload_and_split(path=None, splits=[0.4, 0.1, 0.25, 0.25], transform=None):\n\n if path is None:\n path = DATASETS_DIR\n index_file = os.path.join(path, 'cifar10.index.csv')\n\n indices = None\n if os.path.exists(index_file):\n index_csv = np.loadtxt(index_file)\n indices = torch.tensor(index_csv)\n print('Found predefined indexing file {}'.format(index_file))\n \n trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)\n testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)\n fullset = ConcatDataset([trainset, testset])\n print('Initializing CIFAR10Dataset splits')\n \n # Currently five equal splits\n dset_size = fullset.cumulative_sizes[-1]\n int_splits = []\n for i in range(len(splits)):\n int_splits.append(int(dset_size * splits[i]))\n if sum(int_splits) < dset_size:\n rem = dset_size - sum(int_splits)\n int_splits[-1] += rem\n\n indices, splitsets = dataset_split(fullset, int_splits, indices=indices)\n\n if not os.path.exists(index_file):\n print('No predefined indexing file found, so index permutations saving to {}'.format(index_file))\n np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')\n\n print('Finished splitting data.')\n\n return splitsets", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...", "def load_batch(self, fpath, match, in_num):\n if in_num == None:\n in_num = input('Please specify IN number: ')\n\n if match == None:\n match = input('Please specify filename string to match for batch loading (ex. 
\\'_s2_\\'): ')\n\n # get a list of all matching files\n glob_match = f'{fpath}/*{match}*'\n files = glob.glob(glob_match)\n\n # load & concatenate files into a single dataframe\n data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()\n\n # extract sampling frequency\n s_freq = 1/(data.index[1] - data.index[0]).total_seconds()\n\n # reset the index to continuous time\n ind_freq = str(int(1/s_freq*1000000))+'us'\n ind_start = '1900-01-01 00:00:00.000'\n ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)\n data.index = ind\n\n # set metadata & attributes\n self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,\n 'match_phrase': match},\n 'analysis_info':{'s_freq': s_freq} }\n self.data = data\n self.s_freq = s_freq", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def load_cifar100_dataset(dirname, labels='fine', transpose_permutation=(0,2,3,1)):\n \n #Verify paths exists for training and testing set\n if not os.path.exists(dirname):\n raise IOError, \"Cannot find path %s\" % dirname\n \n if labels not in ['fine', 'coarse']:\n raise AttributeError, \"Labels argument must be set to 'coarse' or 'fine'\"\n \n if len(set(transpose_permutation)) != 4:\n raise AttributeError, \"Expect transpose permutation to be \"\n\n full_path = os.path.abspath(dirname)\n \n train_path = os.path.join(full_path, 'train')\n test_path = os.path.join(full_path, 'test')\n \n #Load the training set\n with open(train_path, 'rb') as tr_f:\n tr_data_raw = pickle.load(tr_f)\n tr_data = {}\n \n for key, val in tr_data_raw.items():\n tr_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n tr_X = tr_data['data']\n \n if labels=='fine':\n tr_y = tr_data['fine_labels']\n elif labels=='coarse':\n tr_y = tr_data['coarse_labels']\n \n tr_X = tr_X.reshape(tr_X.shape[0], 3, 32, 32)\n tr_y = np.reshape(tr_y, (len(tr_y), 1))\n \n #Load the testing set\n with open(test_path, 'rb') as te_f:\n te_data_raw = pickle.load(te_f)\n te_data = {}\n \n for key, val in te_data_raw.items():\n te_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n te_X = te_data['data']\n \n if labels=='fine':\n te_y = te_data['fine_labels']\n elif labels=='coarse':\n te_y = te_data['coarse_labels']\n \n te_X = te_X.reshape(te_X.shape[0], 3, 32, 32)\n te_y = np.reshape(te_y, (len(te_y), 1))\n \n #scale to 255, transpose as needed\n tr_X = np.transpose(tr_X.astype('float32') / 255., transpose_permutation)\n te_X = np.transpose(te_X.astype('float32') / 255., transpose_permutation)\n \n return (tr_X, tr_y), (te_X, te_y), 100", "def split_and_load(batch, ctx_list):\n new_batch = []\n for i, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "def load_cifar_data(filepath):\n with 
open(filepath, 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n return data[b'data'], data[b'labels']", "def minibatch_loader_thread(self):\r\n \r\n blobs = self.get_next_minibatch()\r\n \r\n ordered_blobs = OrderedDict()\r\n \r\n for key in self.get_output_names(): \r\n ordered_blobs[key] = blobs[key]", "def load_categories():\n\n Category.query.delete()\n\n with open(category_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n categories_data = row.split(\",\")\n\n id = int(categories_data[0])\n category = categories_data[1]\n\n category_model = Category(id=id, category=category)\n db.session.add(category_model)\n db.session.commit()", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def load_cifar10_img_form(directory):\n train_data, train_labels, test_data, test_labels = load_cifar10(directory)\n R, testR = train_data[:, :1024].reshape(-1, 32, 32, 1), test_data[:, :1024].reshape(-1, 32, 32, 1)\n G, testG = train_data[:, 1024:2048].reshape(-1, 32, 32, 1), test_data[:, 1024:2048].reshape(-1, 32, 32, 1)\n B, testB = train_data[:, 2048:].reshape(-1, 32, 32, 1), test_data[:, 2048:].reshape(-1, 32, 32, 1)\n train_data, test_data = np.concatenate((R, G, B), axis=3), np.concatenate((testR, testG, testB), axis=3)\n return train_data, train_labels, test_data, test_labels", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def loadData(self):\n # Load the raw CIFAR-10 data\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n subtract_mean = True\n\n cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2)\n X_val = X_val.transpose(0, 3, 1, 2)\n X_test = X_test.transpose(0, 3, 1, 2)\n\n # Package data into a dictionary\n self.data = {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }", "def load(dirpath):\n\n batch = Pickler.load(join(dirpath, 'batch.pkl'))\n\n # load annotator\n if exists(join(dirpath, 'annotation.json')):\n annotator = Annotation.load(dirpath)\n batch.annotator = annotator\n\n return batch", "def _next_batch(self, loader) -> list:\n return self.mover.move(loader.__next__())", "def load_chunks(self):\n for key, array in self.chunks.items():\n loaded_array = np.asarray(array)\n self.chunks[key] = loaded_array", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# 
labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# np.reshape(features,(2500,150528))\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[0:batch_size],labels[0:batch_size]", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def __init__(self, bool_train=False, bool_tensor=False):\n self.TRAIN_BOOL = bool_train\n self.PATH = r'A:\\CIFAR\\cifar-10-batches-py'\n self.bool_tensor = bool_tensor\n\n # Initialize Archives\n self.dict_batch_meta = self.unpickle(self.PATH+r\"\\batches.meta\")\n if bool_train:\n self.dict_batch_1 = self.unpickle(self.PATH+r\"\\data_batch_1\")\n self.dict_batch_2 = self.unpickle(self.PATH+r\"\\data_batch_2\")\n self.dict_batch_3 = self.unpickle(self.PATH+r\"\\data_batch_3\")\n self.dict_batch_4 = self.unpickle(self.PATH+r\"\\data_batch_4\")\n self.dict_batch_5 = self.unpickle(self.PATH+r\"\\data_batch_5\")\n else:\n self.dict_batch_test = self.unpickle(self.PATH+r\"\\test_batch\")\n\n # Generate Meta\n self.label_dict = dict()\n labels = self.getMetaLabelsList()\n for idx, val in enumerate(labels):\n self.label_dict[idx] = val", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def load_as_one_hot(self):\n\n labels = [] \n examples = [] \n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n # load examples and labels\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_feat_list = read_cuis(file_path)\n examples.append(' '.join(file_feat_list))\n \n string_label = doc2label[doc_id]\n int_label = 
LABEL2INT[string_label]\n labels.append(int_label)\n\n examples = self.token2int.texts_to_matrix(examples, mode='binary')\n\n return examples, labels", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def loadall(bot) :\n for feature in features :\n load(bot, feature)", "def __getitem__(self, index):#get_batch(self, index=None, patient_list=None):\n\n #if patient_list is None:\n # Generate batch based on the provided index\n indices = self.indices[index]# * self.batch_size:(index + 1) * self.batch_size]\n #else:\n # Generate batch based on the request patients\n # indices = self.patient_to_index(patient_list)\n\n # Make a list of files to be loaded\n file_paths_to_load = [self.file_paths_list[indices]]#k] for k in\n\n # Load the requested files as a tensors\n loaded_data = self.load_data(file_paths_to_load)\n\n if self.trans != []:\n for transform in self.trans:\n if transform == 'flip':\n if np.random.random() >= 0.5:\n for key in loaded_data.keys():\n if not isinstance(loaded_data[key], list):\n if loaded_data[key].ndim == 4:\n\n loaded_data[key] = loaded_data[key][:, :, :, ::-1]\n if key == 'structure_masks':\n loaded_data[key][[3, 2]] = loaded_data[key][[2, 3]]\n\n elif isinstance(transform, int):\n for key in loaded_data.keys():\n if not isinstance(loaded_data[key], list):\n if loaded_data[key].ndim == 4:\n loaded_data[key] = torch.from_numpy(loaded_data[key].copy()).float()\n #loaded_data[key] = 
interpolate(torch.from_numpy(loaded_data[key].copy()).unsqueeze(0), scale_factor=transform/128).squeeze(0).float()\n\n\n elif transform == 'crop':\n\n seed_x = np.random.randint(0,10)\n seed_y = np.random.randint(0,10)\n seed_z = np.random.randint(0,10)\n for key in loaded_data.keys():\n if not isinstance(loaded_data[key], list):\n if loaded_data[key].ndim == 4:\n loaded_data[key] = pad(loaded_data[key], [5, 5, 5, 5, 5, 5], 'constant', 0)\n loaded_data[key] = loaded_data[key][:, seed_x:128+seed_x, seed_y:128+seed_y, seed_z:128+seed_z]\n\n loaded_data['ct'] = ((loaded_data['ct'].clamp(0, self.ct_scaling_factor) /self.ct_scaling_factor)) # -0.5)/0.5\n if self.mode_name == 'training_model':\n loaded_data['dose'] = ((loaded_data['dose'].clamp(0, self.dose_scaling_factor) / self.dose_scaling_factor)) # -0.5)/0.5\n\n return loaded_data", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_cifar10(data_path=None, data_home=None, subsets=None):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-10-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n for i in range(1, 6):\n mname = [x for x in ar.member_names\n if x.endswith('data_batch_'+str(i))]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname,'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n elif subset == 'test':\n mname = [x for x in ar.member_names if x.endswith('test_batch')]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname, 'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n return np.concatenate(X), np.concatenate(Y)", "def load_cifar() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]:\n \n # Define the transform for the data.\n transform = transforms.Compose(\n [transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n \n # Initialize Datasets. 
CIFAR-10 will automatically download if not present\n trainset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=True, download=True, transform=transform\n )\n testset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=False, download=True, transform=transform\n )\n \n # Return the datasets\n return trainset, testset", "def download_and_load(self, data_path=None):\n if data_path is None:\n data_path = 'data'\n\n if not self.check_files(data_path + '/cifar-10-batches-py'):\n self.download_and_extract(data_path=data_path)\n\n self.load_cifar10_data(data_path=data_path + '/cifar-10-batches-py')", "def load_partition(idx: int):\r\n assert idx in range(10)\r\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\r\n return (\r\n x_train[idx * 5000 : (idx + 1) * 5000],\r\n y_train[idx * 5000 : (idx + 1) * 5000],\r\n ), (\r\n x_test[idx * 1000 : (idx + 1) * 1000],\r\n y_test[idx * 1000 : (idx + 1) * 1000],\r\n )", "def open_nc_files(batch_id, speed_list):\n batch_trajectories = []\n batch_id_str = str(batch_id)\n \n for speed in speed_list:\n sp = speeds_dict[speed]\n filename = 'site'+batch_id_str+'_grid_dd30_sp'+sp+'.nc'\n nc_read = netcdf_dataset(data_file_path+filename)\n batch_trajectories.append(nc_read)\n \n return batch_trajectories", "def load_cifar10(split, with_info=False, data_augmentation=True,\n subsample_n=0):\n dataset, ds_info = tfds.load('cifar10',\n split=split,\n with_info=True,\n batch_size=-1)\n image_shape = ds_info.features['image'].shape\n\n numpy_ds = tfds.as_numpy(dataset)\n numpy_images = numpy_ds['image']\n numpy_labels = numpy_ds['label']\n\n # Perform subsampling if requested\n original_train_size = numpy_images.shape[0]\n if subsample_n > 0:\n subsample_n = min(numpy_images.shape[0], subsample_n)\n numpy_images = numpy_images[0:subsample_n, :, :, :]\n numpy_labels = numpy_labels[0:subsample_n]\n\n dataset = tf.data.Dataset.from_tensor_slices((numpy_images, numpy_labels))\n\n def preprocess(image, label):\n \"\"\"Image preprocessing function.\"\"\"\n if data_augmentation and split == tfds.Split.TRAIN:\n image = tf.image.random_flip_left_right(image)\n image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])\n image = tf.image.random_crop(image, image_shape)\n\n image = tf.image.convert_image_dtype(image, tf.float32)\n return image, label\n\n dataset = dataset.map(preprocess)\n\n if with_info:\n info = {\n 'train_num_examples': ds_info.splits['train'].num_examples,\n 'train_num_examples_orig': original_train_size,\n 'test_num_examples': ds_info.splits['test'].num_examples,\n 'input_shape': ds_info.features['image'].shape,\n 'num_classes': ds_info.features['label'].num_classes,\n }\n return dataset, info\n return dataset" ]
[ "0.75140995", "0.73830444", "0.7317707", "0.72487396", "0.7237417", "0.7199762", "0.7171292", "0.7150728", "0.7141205", "0.7108447", "0.7081937", "0.7059933", "0.70378613", "0.7027403", "0.70267695", "0.6988743", "0.69326067", "0.6907136", "0.68534076", "0.68343514", "0.6816637", "0.6794088", "0.6781831", "0.67573863", "0.67565453", "0.6748009", "0.6635589", "0.6608162", "0.6498081", "0.6456383", "0.6428408", "0.6316274", "0.6233392", "0.6203296", "0.6198111", "0.6052462", "0.60484564", "0.602595", "0.59694546", "0.5948785", "0.59325385", "0.59256804", "0.59256804", "0.59250635", "0.590385", "0.5895526", "0.58763534", "0.58625984", "0.58609116", "0.58575183", "0.58448094", "0.583013", "0.5820423", "0.58182657", "0.57971394", "0.57949144", "0.5784587", "0.57756674", "0.57746065", "0.5746757", "0.57341397", "0.5724", "0.5708662", "0.5703918", "0.56860363", "0.5681485", "0.56810975", "0.5665602", "0.56655437", "0.56626415", "0.56595117", "0.5651228", "0.5631206", "0.5631007", "0.56123877", "0.5611952", "0.56118757", "0.56098735", "0.56044555", "0.56032723", "0.56011564", "0.55996406", "0.55815667", "0.5576117", "0.55719644", "0.55622166", "0.55530953", "0.55527294", "0.5552314", "0.55496764", "0.55320835", "0.5531742", "0.5529549", "0.55231345", "0.55229926", "0.5521533", "0.5514814", "0.5496164", "0.5485057", "0.54815984" ]
0.6843012
19
load all of cifar
def load_cifar10(directory):
    train_data = []
    train_labels = []
    for b in range(1, 6):
        f = os.path.join(directory, 'data_batch_%d' % (b,))
        X, Y = load_cifar10_batch(f)
        train_data.append(X)
        train_labels.append(Y)
    train_data = np.concatenate(train_data)
    train_labels = np.concatenate(train_labels)
    del X, Y
    test_data, test_labels = load_cifar10_batch(os.path.join(directory, 'test_batch'))
    return train_data, train_labels, test_data, test_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)", "def load_CIFAR100(batch_dir):\r\n ims, coarse_labels, fine_labels = load_CIFAR_batch(batch_dir + '/train')\r\n ims_t, c_labels, f_labels = load_CIFAR_batch(batch_dir + '/test')\r\n ims = np.concatenate((ims, ims_t))\r\n coarse_labels = np.concatenate((coarse_labels, c_labels))\r\n fine_labels = np.concatenate((fine_labels, f_labels))\r\n return ims, coarse_labels, fine_labels", "def load_cifar(hparams):\n all_labels = []\n\n total_batches_to_load = 5\n assert hparams.train_size + hparams.validation_size <= 50000\n if hparams.eval_test:\n total_batches_to_load += 1\n # Determine how many images we have loaded\n total_dataset_size = 50000\n train_dataset_size = total_dataset_size\n if hparams.eval_test:\n total_dataset_size += 10000\n\n if hparams.dataset == 'cifar10':\n all_images = []\n elif hparams.dataset == 'cifar100':\n all_images = np.empty((1, 50000, 3072), dtype=np.uint8)\n if hparams.eval_test:\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\n if hparams.dataset == 'cifar10':\n datafiles = [\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\n 'data_batch_5']\n\n if hparams.eval_test:\n datafiles.append('test_batch')\n num_classes = 10\n elif hparams.dataset == 'cifar100':\n datafiles = ['train']\n if hparams.eval_test:\n datafiles.append('test')\n num_classes = 100\n else:\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\n if hparams.dataset != 'test':\n for file_num, f in enumerate(datafiles):\n d = unpickle(os.path.join(hparams.data_path, f))\n if hparams.dataset == 'cifar10':\n labels = 
np.array(d['labels'])\n else:\n labels = np.array(d['fine_labels'])\n if f == 'test':\n test_data[0] = copy.deepcopy(d['data'])\n if hparams.dataset == 'cifar10':\n all_images.append(test_data)\n else:\n all_images = np.concatenate([all_images, test_data], axis=1)\n else:\n if hparams.dataset == 'cifar10':\n all_images.append(copy.deepcopy(d['data']))\n else:\n all_images[file_num] = copy.deepcopy(d['data'])\n nsamples = len(labels)\n for idx in range(nsamples):\n all_labels.append(labels[idx])\n if hparams.dataset == 'cifar10':\n all_images = np.concatenate(all_images, axis=0)\n all_images = all_images.reshape(-1, 3072)\n all_images = all_images.reshape(-1, 3, 32, 32) # pylint: disable=too-many-function-args\n all_images = all_images.transpose(0, 2, 3, 1).copy()\n all_images = all_images / 255.0\n mean = augmentation_transforms.MEANS\n std = augmentation_transforms.STDS\n tf.logging.info('mean:{} std: {}'.format(mean, std))\n all_images = (all_images - mean) / std\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\n\n assert len(all_images) == len(all_labels)\n tf.logging.info(\n 'In CIFAR10 loader, number of images: {}'.format(len(all_images)))\n\n extra_test_images = None\n extra_test_labels = None\n if hparams.extra_dataset == 'cifar10_1':\n extra_test_ds = tfds.as_numpy(\n tfds.load('cifar10_1', split='test', batch_size=-1))\n extra_test_images = ((extra_test_ds['image'] / 255.0) - mean) / std\n extra_test_labels = np.eye(num_classes)[np.array(\n extra_test_ds['label'], dtype=np.int32)]\n\n # Break off test data\n if hparams.eval_test:\n test_images = all_images[train_dataset_size:]\n test_labels = all_labels[train_dataset_size:]\n else:\n test_images = []\n test_labels = []\n all_images = all_images[:train_dataset_size]\n all_labels = all_labels[:train_dataset_size]\n return all_images, all_labels, test_images, test_labels, extra_test_images, extra_test_labels", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' ', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n 
filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n 
print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def load_cifar10(batch_paths):\n batches = [load_cifar10_batch(path) for path in batch_paths]\n data = torch.cat([batch[0] for batch in batches])\n labels = torch.cat([batch[1] for batch in batches])\n return data, labels", "def _load_all(self):\n if self._loaded_all is True:\n return\n for iface in self._scan:\n for bname in self._scan[iface]:\n if self._scan[iface][bname].get(\"loaded\"):\n continue\n self._load_item(iface, bname)\n self._loaded_all = True", "def loadall(bot) :\n for feature in features :\n load(bot, feature)", "def load_cogs(self):\n\n path = \"cogs/\" # Should always have a trailing slash\n import_path = path.replace(\"/\", \".\")\n extensions: list[str] = [\n import_path + file.replace(\".py\", \"\")\n for file in os.listdir(path)\n if os.path.isfile(f\"{path}{file}\")\n ]\n\n for extension in extensions:\n try:\n self.load_extension(extension)\n except errors.ExtensionAlreadyLoaded:\n pass\n\n log.info(f\"Loaded {len(self.commands)} commands from {len(self.cogs)} cogs\")", "def _load_cifar_batch(fpath, label_key='labels'):\n if isinstance(fpath, (os.PathLike, str, bytes)):\n with open(fpath, 'rb') as f:\n return _load_cifar_batch(f, label_key)\n\n d = pickle.load(fpath, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32).transpose([0, 2, 3, 1])\n return data, labels", "def load_cifar() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]:\n \n # Define the transform for the data.\n transform = transforms.Compose(\n [transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n \n # Initialize Datasets. 
CIFAR-10 will automatically download if not present\n trainset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=True, download=True, transform=transform\n )\n testset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=False, download=True, transform=transform\n )\n \n # Return the datasets\n return trainset, testset", "def load_CIFAR10(path):\r\n sampleList = []\r\n labelList = []\r\n # load all the data, as there only five training samples name as data_batch_id\r\n for i in range(1, 6):\r\n # get full filename\r\n filename = os.path.join(path, 'data_batch_%d' % (i, ))\r\n x, y = load_CIFAR_batch(filename)\r\n\r\n sampleList.append(x)\r\n labelList.append(y)\r\n\r\n # combine elements as one array\r\n Xtr = np.concatenate(sampleList)\r\n Ytr = np.concatenate(labelList)\r\n del x, y\r\n print(\"Training data loaded, total size : %d\", len(Xtr))\r\n # load test data\r\n Xte, Yte = load_CIFAR_batch(os.path.join(path, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_cifar_images(filename):\n\n from load_cifar import load_file\n from load_cifar import label_dict\n\n data,labels = load_file(filename)\n\n # two classes to keep\n class0 = label_dict['airplane']\n class1 = label_dict['bird']\n # remove all but two classes\n keep = np.logical_or(labels==class0,labels==class1)\n data = data[keep,...]\n labels = labels[keep]\n # set labels to 0 or 1\n labels[labels==class0]=0\n labels[labels==class1]=1\n\n # rgb -> grayscale\n gray_data = rgb2gray(data)\n return data,gray_data,labels", "def read_cliffs(self):\n cliff_list = Cliff.list()\n rtn = {}\n\n for clf in cliff_list:\n rtn[clf] = self.read_cliff(clf)\n\n return rtn", "def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)", "def _load_all(self, anno_file, shuffle):\n image_set_index = []\n labels = []\n coco = COCO(anno_file)\n img_ids = coco.getImgIds()\n #print(img_ids)\n cars=[3,6,8]\n pedestrians=[1]\n cyclists=[2,4]\n lights=[10]\n signs=[13]\n\n apex_categories=cars+pedestrians+cyclists+lights+signs\n cnt=0\n humanonly=0\n human_count=0\n\n for img_id in img_ids:\n relevant=False\n # filename\n image_info = coco.loadImgs(img_id)[0]\n filename = 
image_info[\"file_name\"]\n #print(filename)\n #subdir = filename.split('_')[1]\n height = image_info[\"height\"]\n width = image_info[\"width\"]\n # label\n anno_ids = coco.getAnnIds(imgIds=img_id)\n annos = coco.loadAnns(anno_ids)\n label = []\n\n #print(\"listing categories for filename: \"+filename)\n\n hashumans=False\n for anno in annos:\n cat_id = int(anno[\"category_id\"])\n if(cat_id in apex_categories):\n cat_reduced= 0 if (cat_id in cars) else 1 if(cat_id in pedestrians) else 2 if(cat_id in cyclists) else 3 if(cat_id in lights) else 4\n bbox = anno[\"bbox\"]\n assert len(bbox) == 4\n xmin = float(bbox[0]) / width\n ymin = float(bbox[1]) / height\n xmax = xmin + float(bbox[2]) / width\n ymax = ymin + float(bbox[3]) / height\n label.append([cat_reduced, xmin, ymin, xmax, ymax, 0])\n #print(\"category: %d\"%cat_reduced)\n if (cat_id in pedestrians):\n hashumans=True\n if(cat_id not in pedestrians): #at least one non-person object is necessary\n relevant=True\n\n if(label and not relevant):\n humanonly+=1\n if label and relevant:\n if(hashumans):\n human_count+=1\n #print(\"adding \"+filename)\n labels.append(np.array(label))\n image_set_index.append(os.path.join(self.set, filename))\n cnt+=1\n print(\"added %d images\"%cnt)\n print(\"%d images has only humans\"%humanonly)\n print(\"%d registered images has humans\"%human_count)\n\n if shuffle:\n import random\n indices = range(len(image_set_index))\n random.shuffle(indices)\n image_set_index = [image_set_index[i] for i in indices]\n labels = [labels[i] for i in indices]\n # store the results\n self.image_set_index = image_set_index\n self.labels = labels", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n data_dict = cPickle.load(f)\r\n ims = data_dict['data']\r\n coarse_labels = np.array(data_dict['coarse_labels'])\r\n fine_labels = np.array(data_dict['fine_labels'])\r\n return ims, coarse_labels, fine_labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_all_corr():\n cache_path = SAVE_ROOT / 'corr' / 'all' / 'cache.pkl'\n if cache_path.is_file():\n return pickle.load(cache_path.open('rb'))\n\n root = SAVE_ROOT / 'corr'\n files = root.rglob('*.json')\n\n def path_to_items(path: Path):\n key = path.parts[-4:-1]\n return key, load_corr_matrix(*key)\n\n # Create the data afresh.\n all_corr = dict(map(path_to_items, files))\n make_parent_dirs(cache_path)\n pickle.dump(all_corr, cache_path.open('wb'))\n return all_corr", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = 
ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def load_dataset_cifar10():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n return (x_train, y_train), (x_test, y_test)", "def load(self):", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n #一个样本由标签和图像数据组成\n #3072 = 32 x 32 x 3\n data_dict = p.load(f, encoding= 'bytes')\n images = data_dict[b'data']\n labels = data_dict[b'labels']\n #把原始数据结构调整为BCWH batches, channels, width, height\n images = images.reshape(10000, 3, 32, 32)\n #tensorflow 处理图像数据的结构:BWHC\n #把C移动到最后一个维度\n images = images.transpose(0, 2, 3, 1)\n\n labels = np.array(labels)\n return images, labels", "def load_circuits(self):\n os.makedirs(settings.CIRCUITS_PATH, exist_ok=True)\n return [self.load_circuit(filename) for\n filename in os.listdir(settings.CIRCUITS_PATH) if\n os.path.isfile(os.path.join(settings.CIRCUITS_PATH, filename))]", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def load_cifar_data(filepath):\n with open(filepath, 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n return data[b'data'], data[b'labels']", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b,))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def load_models(self):\n self.models = {}\n for code in self.soi_codes:\n print(f\"> Loading CNN for species code {code:02d}.\")\n self.models[code] = load_model(self.cnn_locations[code])\n print(\"> Complete.\")", "def do_load(self):\n global g_state\n global g_list_of_classifier\n\n id_client = json.loads(request.data.decode('UTF-8'))['id']\n self._reset()\n if os.path.exists(CLASSIFIER_PATH + str(id_client) + '.cls'):\n g_list_of_classifier.load_from_file(CLASSIFIER_PATH + \n str(id_client) + '.cls')\n else:\n 
g_list_of_classifier = init_classifier()\n return ''", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def cifar100(path, label_mode='fine'):\n def _load_batch(filepath, label_key):\n with open(filepath, 'rb') as f:\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n d_decoded = {} # decode utf8\n for k, v in six.iteritems(d):\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n images = d['data']\n labels = d[label_key]\n images = images.reshape(images.shape[0], 3, 32, 32)\n labels = np.reshape(labels, len(labels,))\n return images, labels\n path = os.path.expanduser(path)\n directory = 'cifar-100-python'\n if not os.path.exists(os.path.join(path, directory)):\n url = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n maybe_download_and_extract(path, url)\n\n filepath = os.path.join(path, directory, 'train')\n x_train, y_train = _load_batch(filepath, label_mode + '_labels')\n\n filepath = os.path.join(path, directory, 'test')\n x_test, y_test = _load_batch(filepath, label_mode + '_labels')\n return (x_train, y_train), (x_test, y_test)", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def _load_chairs(self):\n self.chairs = self.p_constants[\"NPHILOSOPHERS\"] * [0]\n for i in xrange(self.p_constants[\"NPHILOSOPHERS\"]):\n x, y, angle = self._get_chair_coord(i, 5.0)\n self.chairs[i] = self._load_model(\n \"chair1\", scale=[7, 7, 7], pos=[x, y - 1, 0], H=rad2deg(angle) + 15)\n self.chairs[i].setTexture(self.chair_tex)\n self.chairs[i].setTexScale(\n TextureStage.getDefault(), 0.005, 0.005)", "def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = 
{}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None,\n class_map=None, return_coco=False, auto_download=False):\n\n if auto_download is True:\n self.auto_download(dataset_dir, subset, year)\n\n coco = COCO(\"{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n if subset == \"minival\" or subset == \"valminusminival\":\n subset = \"val\"\n image_dir = \"{}/{}{}\".format(dataset_dir, subset, year)\n\n # Select class_ids from class_names:\n if class_names:\n class_ids = sorted(coco.getCatIds(catNms=class_names))\n\n # Load all classes or a subset?\n if not class_ids:\n # All classes\n class_ids = sorted(coco.getCatIds())\n\n # All images or a subset?\n if class_ids:\n image_ids = []\n for id in class_ids:\n imgs = [] # list of images to add to image_ids\n # Select at most COCO_IMAGES_PER_OBJECT and select only the images\n # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them:\n for imgid in list(coco.getImgIds(catIds=[id])):\n if len(imgs) >= COCO_IMAGES_PER_OBJECT:\n break\n if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE:\n imgs.append(imgid)\n image_ids.extend(imgs)\n #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT])\n # Remove duplicates\n image_ids = list(set(image_ids))\n else:\n # All images\n image_ids = list(coco.imgs.keys())\n\n # Add classes\n for i in class_ids:\n self.add_class(\"coco\", i, coco.loadCats(i)[0][\"name\"])\n\n # Add images\n for i in image_ids:\n #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))))\n self.add_image(\n \"coco\", image_id=i,\n path=os.path.join(image_dir, coco.imgs[i]['file_name']),\n width=coco.imgs[i][\"width\"],\n height=coco.imgs[i][\"height\"],\n annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))\n if return_coco:\n return coco", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,2):\n f = os.path.join(ROOT, 'data_batch_%d' % b)\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y) \n #利用np.concatenate将xs、ys弄成一行\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n #获取测试集\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == 
(set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def load_cifar10_batch(directory):\n with open(directory, 'rb') as fo:\n datadict = pickle.load(fo, encoding='bytes')\n X = np.array(datadict[b'data'])\n Y = np.array(datadict[b'labels'])\n return X, Y", "def load_data(self):", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def load_cnns(self):\n self.cnn1 = cnn_utils.CNN()\n self.cnn1.load_state_dict(torch.load(f'{self.model_dir}/model1.pt'))\n self.cnn1.eval()\n self.cnn2 = cnn_utils.CNN()\n self.cnn2.load_state_dict(torch.load(f'{self.model_dir}/model2.pt'))\n self.cnn2.eval()", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n 
images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def loadCaiman(self, forcePopulate=False):\n\t\t#caimanPath = '/home/cudmore/data/20201014/inferior/3_nif_inferior_cropped_results.hdf5'\n\t\tcaimanPath, fileExt = os.path.splitext(self.path)\n\t\tcaimanPath += '_results.hdf5'\n\t\tif not os.path.isfile(caimanPath):\n\t\t\treturn\n\t\tprint('bAnnotationList.loadCaiman() forcePopulate:', forcePopulate, 'is loading caimanPath:', caimanPath)\n\n\t\tself.caimanDict = bimpy.analysis.caiman.readCaiman(caimanPath)\n\n\t\tif self.numItems() == 0 or forcePopulate:\n\n\t\t\tself.myList = []\n\n\t\t\tnumROI = self.caimanDict['numROI']\n\t\t\tprint(' caimen numROI:', numROI)\n\t\t\tz = 1 # caiman roi do not have a z\n\t\t\tfor roiIdx in range(numROI):\n\t\t\t\t(x, y) = bimpy.analysis.caiman.getCentroid(self.caimanDict, roiIdx)\n\n\t\t\t\t# flip (x,y)\n\t\t\t\t'''\n\t\t\t\ttmp = x\n\t\t\t\tx = y\n\t\t\t\ty = tmp\n\t\t\t\t'''\n\n\t\t\t\tprint(f' loadCaiman() adding centroid {roiIdx} at', x, y)\n\t\t\t\tself.addAnnotation('caiman', x, y, z)", "def load_all_lcs(self, method: str = 'cut'):\n if self.useCpus == 1:\n if method == \"cut\":\n self.lcs = [self.load_cut_lc(self.ref.iloc[i])\n for i in range(len(self.ref))]\n elif method == \"log_w\":\n self.lcs = [self.load_weighted_lc(self.ref.iloc[i])\n for i in range(len(self.ref))]\n else:\n # todo: update for weighted\n with Pool(self.useCpus) as p:\n minirefs = [self.ref.iloc[i] for i in range(len(self.ref))]\n self.lcs = p.map(self.load_cut_lc, minirefs, chunksize=50)\n # with Pool(useCpus) as p:\n # lcs = p.map(load_weighted_lc, 
files, chunksize=100)\n self.loaded = True\n return", "def __loadKeys(self):\n key_image_file_names = os.listdir(self.key_image_full_path)\n\n self.maple_logger.info(\"Loading {0} keys.\", len(key_image_file_names))\n\n for key_image_file_name in key_image_file_names:\n self.__loadKey(key_image_file_name)", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def load_cifar10_data(self, data_path='data/cifar-10-batches-py',\n n_train_samples=50000, n_test_samples=10000):\n train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_path + '/data_batch_{}'.format(i))\n if i == 1:\n train_data = data_dic['data']\n else:\n train_data = np.vstack((train_data, data_dic['data']))\n\n train_labels += data_dic['labels']\n\n test_data_dic = unpickle(data_path + '/test_batch')\n test_data = test_data_dic['data']\n test_labels = test_data_dic['labels']\n\n train_data = train_data.reshape((len(train_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n train_data = np.rollaxis(train_data, 1, 4)\n train_labels = np.array(train_labels)\n\n test_data = test_data.reshape((len(test_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n test_data = np.rollaxis(test_data, 1, 4)\n test_labels = np.array(test_labels)\n\n self.train_dataset = {'data': train_data[0:n_train_samples],\n 'labels': train_labels[0:n_train_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_train_samples):\n self.train_dataset['cls'][i][self.train_dataset['labels'][i]] = 1.\n\n self.test_dataset = {'data': test_data[0:n_test_samples],\n 'labels': test_labels[0:n_test_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_test_samples):\n self.test_dataset['cls'][i][self.test_dataset['labels'][i]] = 1.\n\n self.train_dataset['data_array'] = np.array(\n [item.flatten() for item in self.train_dataset['data']])\n\n self.train_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.train_dataset['labels']])\n\n self.train_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.train_dataset['cls']])\n\n self.test_dataset['data_array'] = np.array(\n [item.flatten() for item in self.test_dataset['data']])\n\n self.test_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.test_dataset['labels']])\n\n self.test_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.test_dataset['cls']])\n\n return None", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def load_cifar10(data_path=\".\", test_size=0.2, random_state=1337):\n test_path = os.path.join(data_path, \"cifar-10-batches-py/test_batch\")\n train_paths = [os.path.join(data_path, \"cifar-10-batches-py/data_batch_%i\" % i) for i in range(1, 6)]\n\n if not os.path.exists(test_path) or not all(list(map(os.path.exists, train_paths))):\n print (\"Dataset not found. 
Downloading...\")\n download_cifar(data_path,\n url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tarname='cifar-10-python.tar.gz')\n\n train_batches = list(map(unpickle, train_paths))\n test_batch = unpickle(test_path)\n\n X = np.concatenate([batch[\"data\"] for batch in train_batches]).reshape([-1, 3, 32, 32]).astype('float32') / 255\n y = np.concatenate([batch[\"labels\"] for batch in train_batches]).astype('int32')\n X_train, X_val, y_train, y_val = train_test_split(X, y,\n test_size=test_size,\n random_state=random_state)\n\n X_test = test_batch[\"data\"].reshape([-1, 3, 32, 32]).astype('float32') / 255\n y_test = np.array(test_batch[\"labels\"]).astype('int32')\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load_fvcom_files(filepath=None,casename=None,ncname=None,neifile=None):\n\n currdir=os.getcwd()\n os.chdir(filepath)\n\n data=_load_grdfile(casename)\n\n data.update(_load_depfile(casename))\n \n data.update(_load_spgfile(casename))\n\n data.update(_load_obcfile(casename))\n\n data.update(_load_llfiles(casename))\n\n if ncname!=None:\n data.update(_load_nc(ncname))\n\n if neifile!=None:\n data.update(loadnei(neifile))\n\n os.chdir(currdir)\n\n return data", "def load_files(self):\n # Needs to be implemented by child class\n raise NotImplementedError", "def load_cityscapes(path, fdr):\n dataset = Dataset(path, split='val', mode=\"fine\", target_type=[\"semantic\", \"instance\"])\n\n from PATH import SCRI_PATH as spath\n\n for image, (sseg, inst), name in dataset:\n image = np.array(image)\n sseg = gt_covert(sseg)\n inst = np.array(inst)\n if os.path.exists(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"):\n scribbles = np.array(Image.open(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"))\n else:\n scribbles = None\n # scribbles = scribble_convert(scribbles)\n yield name, image, sseg, inst, scribbles", "def load(self):\n metalist = []\n metalist_files = glob.glob(os.path.join(self.__pickle_path, '*.pickle'))\n for metalist_dump in metalist_files:\n with open(metalist_dump, 'rb') as file_desc:\n metalist += pickle.load(file_desc)\n return metalist", "def load_all(**options):\n\n return get_component(CachingPackage.COMPONENT_NAME).load_all(**options)", "def _load_all_cubes(self, files_to_load):\n if self.process_workers > 1:\n arguments = [[self, load_file] for load_file in files_to_load]\n pool = multiprocessing.Pool(processes=self.process_workers)\n try:\n all_cubelists = pool.map(run_load_file, arguments)\n 
pool.close()\n pool.join()\n except KeyboardInterrupt:\n pool.terminate()\n else:\n all_cubelists = []\n for load_file in files_to_load:\n cubelist = self._load_file(load_file)\n if cubelist:\n all_cubelists.append(cubelist)\n \n all_cubes = []\n for cubelist in all_cubelists:\n for cube in cubelist:\n all_cubes.append(cube)\n\n if len(all_cubes) == 0:\n raise UserWarning('No data loaded.')\n \n # Gather universal information from the first cube.\n if self.xy_coords is None:\n self.xy_coords = [coord.name() \n for coord in get_xy_coords(\n all_cubes[0])]\n if self._area_inst.bounds_range is None:\n self._area_inst.bounds_range = self._area_inst.\\\n get_cube_area_bounds(all_cubes[0],\n self.xy_coords)\n if self.area_bounds is None:\n self.area_bounds = self._area_inst.get_cube_area_bounds(\n all_cubes[0],\n self.xy_coords)\n self.time_unit = all_cubes[0].coord(self.time_coord).units\n \n return iris.cube.CubeList(all_cubes)", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def loadCactus(iniFile):\n\tglobal cactusConfig, cactusImports\n\t#load everything in cactusConfigFile\n\tcactusConfig = dict(line.split(\": \",1) for line in open(iniFile).read().splitlines() if len(line) > 1)\n\n\t#load importable html modules\n\timportPath = cactusConfig['importPath']\n\tfor filename in glob.glob(os.path.join(importPath, '*.html')):\n\t\t\tcactusImports[os.path.basename(filename)] = open(filename).read().splitlines();", "def load_all(cls, data):\n return [cls.load(obj) for obj in data]", "def load_data(catalog):\n controller.load_data(catalog)", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def loadData(self):\n # Load the raw CIFAR-10 data\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n subtract_mean = True\n\n cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2)\n X_val = X_val.transpose(0, 3, 1, 2)\n X_test = X_test.transpose(0, 3, 1, 2)\n\n # Package data into a dictionary\n self.data = {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }", "def read_all():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n return data", "def 
load_cifar10_img_form(directory):\n train_data, train_labels, test_data, test_labels = load_cifar10(directory)\n R, testR = train_data[:, :1024].reshape(-1, 32, 32, 1), test_data[:, :1024].reshape(-1, 32, 32, 1)\n G, testG = train_data[:, 1024:2048].reshape(-1, 32, 32, 1), test_data[:, 1024:2048].reshape(-1, 32, 32, 1)\n B, testB = train_data[:, 2048:].reshape(-1, 32, 32, 1), test_data[:, 2048:].reshape(-1, 32, 32, 1)\n train_data, test_data = np.concatenate((R, G, B), axis=3), np.concatenate((testR, testG, testB), axis=3)\n return train_data, train_labels, test_data, test_labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='bytes')\n #datadict为尺寸为4的字典:b'labels', b'data', b'filenames', b'batch_label'\n X = datadict[b'data']\n Y = datadict[b'labels']\n #X的尺寸为10000*3072(10000张图片,每个图片尺寸为32*32,三通道),reshape为10000*32*32*3,再通过transpose令索引值(x',y',z',w')=(x,z,w,y),最后转为float类型\n #三个channel分别为rgb\n #索引为(图片编号,x索引,y索引,rgb三通道)\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load(self):\n self.classifier = joblib.load(\"data/models/repeatsfinder/repeatsfinder.joblib\")", "def get_loader(root, path, vocab_con,transform, batch_size, shuffle, num_workers):\n # COCO caption dataset\n clef = ClefDataset(root=root,\n csv= path,\n vocab_concept= vocab_con,\n transform=transform)\n\n # Data loader for COCO dataset\n # This will return (images, captions, lengths) for every iteration.\n # images: tensor of shape (batch_size, 3, 224, 224).\n # captions: tensor of shape (batch_size, padded_length).\n # lengths: list indicating valid length for each caption. length is (batch_size).\n data_loader = torch.utils.data.DataLoader(dataset=clef,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def load_data(self) -> None:", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def load_batch(n):\r\n print ('Loadng one batch...')\r\n batchfilename = flist[n - 1] + '.pkl'\r\n if not os.path.exists(batchfilename):\r\n set_batch_data()\r\n with open(batchfilename, 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def load(self, filename):\n self.classifiers = []\n for i in range(0, self.category_level):\n clf = joblib.load(filename + '.level_%d' % (i + 1))\n self.classifiers.append(clf)", "def loadData(catalog):\r\n controller.loadData(catalog)", "def load_assets(self, paths):\n try:\n self.background = load(paths['background'])\n self.bullet = load(paths['bullet'])\n self.bullet_red = load(paths['bullet_red'])\n self.icon = load(paths['icon'])\n\n self.Ship = load(paths['ship'])\n self.Ship_CR = load(paths['ship_cr'])\n self.Ship_CL = load(paths['ship_cl'])\n self.Ship_CC = load(paths['ship_cc'])\n\n self.InvaderA1 = load(paths['invadera1'])\n self.InvaderA2 = load(paths['invadera2'])\n self.InvaderB1 = load(paths['invaderb1'])\n self.InvaderB2 = load(paths['invaderb2'])\n self.InvaderC1 = load(paths['invaderc1'])\n self.InvaderC2 = load(paths['invaderc2'])\n\n except Exception as e:\n print(\" \"+str(e))\n return 0\n else:\n return 1", "def load_CIFAR10(ROOT):\r\n xs = []\r\n ys = []\r\n for b in range(1,6):\r\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\r\n X, Y = load_CIFAR_batch(f)\r\n xs.append(X)\r\n ys.append(Y)\r\n Xtr = np.concatenate(xs)\r\n Ytr = np.concatenate(ys)\r\n del X, 
Y\r\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte\r\n\r\n\tdef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\r\n # Load the raw CIFAR-10 data\r\n \r\n cifar10_dir = 'Downloads/cifar-10-batches-py'\r\n \r\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\r\n\r\n # Subsample the data\r\n mask = range(num_training, num_training + num_validation)\r\n X_val = X_train[mask]\r\n y_val = y_train[mask]\r\n mask = range(num_training)\r\n X_train = X_train[mask]\r\n y_train = y_train[mask]\r\n mask = range(num_test)\r\n X_test = X_test[mask]\r\n y_test = y_test[mask]\r\n\r\n x_train = X_train.astype('float32') \r\n x_test = X_test.astype('float32')\r\n \r\n x_train = x_train.reshape(-1, 32, 32, 3)\r\n x_test = x_test.reshape(-1, 32, 32, 3)\r\n x_train /= 255\r\n x_test /= 255\r\n\r\n return x_train, y_train, X_val, y_val, x_test, y_test", "def load_data(folders):\n features, labels = np.zeros(0), np.zeros(0, dtype=int)\n for folder_id in folders:\n folder = \"fold%d\"%(folder_id)\n for fn in glob.glob(os.path.join(RAW_DATA_DIR, folder, \"*.wav\")):\n just_fn_name = fn.split('/')[-1]\n class_id = just_fn_name.split('-')[1]\n #print(\"fn\", fn, just_fn_name, class_id)\n mfcc2 = _extract_features_from_one_file(fn)\n if mfcc2 is None:\n continue\n features = np.append(features, mfcc2)\n labels= np.append(labels, int(class_id))\n features = features.reshape(-1, N_MFCC)\n #labels = labels.reshape(-1, 1)\n #print(\"features.shape\", features.shape, \"labels.shape\", labels.shape)\n labels = one_hot_encode(labels)\n return features, labels", "def load_ccs9():\n ccs9 = pd.read_csv(pkg_resources.resource_filename(__name__,'$dxref 2015.csv'))\n ccs9 = ccs9.reset_index()\n for col in ccs9.columns:\n ccs9.loc[:,col]=ccs9[col].str.strip('\\'')\n ccs9.columns=ccs9.iloc[0,:]\n ccs9 = ccs9.iloc[1:,:]\n ccs9 = ccs9.replace(r'^\\s*$', np.nan, regex=True)\n ccs9 = ccs9.loc[ccs9['ICD-9-CM CODE'].notnull(),:]\n ccs9.loc[:,'ICD-9-CM CODE'] = ccs9['ICD-9-CM CODE'].str.replace(' ','')\n ccs9.loc[:,'CCS CATEGORY'] = ccs9['CCS CATEGORY'].str.replace(' ','')\n ccs9 = ccs9.iloc[:,0:4] \n ccs9_labels = pd.read_csv(pkg_resources.resource_filename(__name__,'dxlabel 2015.csv'))\n ccs9 = ccs9.merge(ccs9_labels,how='left',left_on='CCS CATEGORY',right_on='CCS DIAGNOSIS CATEGORIES')\n ccs9.drop('CCS CATEGORY DESCRIPTION',axis=1,inplace=True)\n ccs9.drop('CCS DIAGNOSIS CATEGORIES',axis=1,inplace=True)\n ccs9.columns = [i.replace('CCS DIAGNOSIS CATEGORIES LABELS','CCS CATEGORY DESCRIPTION') for i in ccs9.columns]\n return ccs9", "def _load(self):\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = 
self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)", "def load_cifar10(directory, normalize=True):\n training_data = []\n training_labels = []\n for i in range(1, 6):\n try:\n d = unpickle(directory + f\"/data_batch_{i}\")\n except FileNotFoundError:\n raise Exception(f\"File 'data_batch_{i}' is not found in the specified directory '{directory}'.\")\n training_data.append(d[b\"data\"])\n training_labels.append(d[b\"labels\"])\n training_data = np.vstack(training_data)\n training_data = np.reshape(training_data, newshape=(-1, 3, 32, 32))\n training_labels = np.concatenate(training_labels)\n training_labels = np.array(list(map(lambda hot: one_hot(10, hot), training_labels)))\n\n try:\n test = unpickle(directory + \"/test_batch\")\n except FileNotFoundError:\n raise Exception(f\"File 'test_batch' is not found in the specified directory '{directory}'.\")\n test_data = np.reshape(test[b\"data\"], newshape=(-1, 3, 32, 32))\n test_labels = np.array(list(map(lambda hot: one_hot(10, hot), test[b\"labels\"])))\n\n try:\n meta = unpickle(directory + \"/batches.meta\")\n except FileNotFoundError:\n raise Exception(f\"File 'batches.meta' is not found in the specified directory '{directory}'.\")\n label_names = meta[b\"label_names\"]\n label_names = list(map(lambda x: x.decode(\"utf-8\"), label_names))\n\n if normalize:\n training_data = training_data / 255\n test_data = test_data / 255\n\n return training_data, training_labels, test_data, test_labels, label_names", "def init():\n global DICO\n script_dir = os.path.dirname(__file__)\n dic = open(script_dir+\"/dico/dico-fr.txt\", \"r\")\n for line in dic:\n DICO.append(line.strip())", "def load_binaries(self, directory):\n for f in os.listdir(directory):\n self.binaries[f] = Instance()\n self.binaries[f].load_json(directory+\"/\"+f)\n self.binaries[f].label_sample()\n self.binaries[f].extract_features()\n self.binaries[f].extract_basic_features()", "def loadData(catalog):\n controller.loadData(catalog)" ]
[ "0.725037", "0.6986756", "0.69127166", "0.6820187", "0.6604054", "0.6501504", "0.6451561", "0.6348957", "0.62728024", "0.62693584", "0.6159214", "0.6103224", "0.60536623", "0.60487705", "0.60397464", "0.6020603", "0.60159737", "0.5978324", "0.5949136", "0.59246045", "0.5911884", "0.589998", "0.58857864", "0.586517", "0.585559", "0.58445776", "0.58228296", "0.58184516", "0.58120126", "0.58081996", "0.5798125", "0.5773177", "0.5770562", "0.5765482", "0.57621026", "0.5754893", "0.57517326", "0.5747007", "0.5739858", "0.5734065", "0.5717824", "0.5713038", "0.57051474", "0.57026", "0.57000315", "0.5697245", "0.56808656", "0.56751454", "0.56527203", "0.56500626", "0.56255984", "0.5618477", "0.55916965", "0.5589335", "0.55760187", "0.55616057", "0.553657", "0.5535947", "0.5530788", "0.5520894", "0.552077", "0.55173075", "0.5506883", "0.55067015", "0.5477681", "0.54585457", "0.54585457", "0.54585457", "0.54585457", "0.54522586", "0.5450557", "0.54284495", "0.5426376", "0.5424983", "0.54189825", "0.5410592", "0.5406229", "0.5403949", "0.5400344", "0.5394844", "0.5394736", "0.53840804", "0.5378816", "0.53758305", "0.5371308", "0.5362134", "0.53500336", "0.53457665", "0.5345563", "0.5343224", "0.5321683", "0.53079003", "0.5305004", "0.53048533", "0.5302177", "0.52938294", "0.5293657", "0.5289889", "0.52838844", "0.52758217" ]
0.5888813
22
load all of cifar as image form
def load_cifar10_img_form(directory):
    train_data, train_labels, test_data, test_labels = load_cifar10(directory)
    R, testR = train_data[:, :1024].reshape(-1, 32, 32, 1), test_data[:, :1024].reshape(-1, 32, 32, 1)
    G, testG = train_data[:, 1024:2048].reshape(-1, 32, 32, 1), test_data[:, 1024:2048].reshape(-1, 32, 32, 1)
    B, testB = train_data[:, 2048:].reshape(-1, 32, 32, 1), test_data[:, 2048:].reshape(-1, 32, 32, 1)
    train_data, test_data = np.concatenate((R, G, B), axis=3), np.concatenate((testR, testG, testB), axis=3)
    return train_data, train_labels, test_data, test_labels
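A minimal, self-contained sketch of the same channel-plane reshape the document performs, using a small synthetic array as a hypothetical stand-in for the flattened CIFAR-10 batches (the load_cifar10 helper itself is not reproduced here):

import numpy as np

# Two fake rows laid out like a flattened CIFAR-10 batch:
# 1024 R values, then 1024 G values, then 1024 B values per row.
flat = np.random.randint(0, 256, size=(2, 3072), dtype=np.uint8)

R = flat[:, :1024].reshape(-1, 32, 32, 1)
G = flat[:, 1024:2048].reshape(-1, 32, 32, 1)
B = flat[:, 2048:].reshape(-1, 32, 32, 1)
images = np.concatenate((R, G, B), axis=3)

print(images.shape)  # (2, 32, 32, 3): one 32x32 RGB image per input row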
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getimgs():", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def load_cifar_images(filename):\n\n from load_cifar import load_file\n from load_cifar import label_dict\n\n data,labels = load_file(filename)\n\n # two classes to keep\n class0 = label_dict['airplane']\n class1 = label_dict['bird']\n # remove all but two classes\n keep = np.logical_or(labels==class0,labels==class1)\n data = data[keep,...]\n labels = labels[keep]\n # set labels to 0 or 1\n labels[labels==class0]=0\n labels[labels==class1]=1\n\n # rgb -> grayscale\n gray_data = rgb2gray(data)\n return data,gray_data,labels", "def getimage(self):", "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def refreshImages(self):\n fileName1 = \"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def load_image(self, **kwargs):\n ...", "def get_imgs_from_json(self):\n # instantiate COCO specifying the annotations json path\n # Specify a list of category names of interest\n catIds = self.coco.getCatIds(catNms=[self.categ])\n print(\"catIds: \", catIds)\n # Get the corresponding image ids and images using loadImgs\n imgIds = self.coco.getImgIds(catIds=catIds)\n images = self.coco.loadImgs(imgIds)\n print(f\"{len(images)} images in '{self.json_path}' with '{self.categ}' instances\")\n self.catIds = catIds # list\n return images", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n 
torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def load_cifar(hparams):\n all_labels = []\n\n total_batches_to_load = 5\n assert hparams.train_size + hparams.validation_size <= 50000\n if hparams.eval_test:\n total_batches_to_load += 1\n # Determine how many images we have loaded\n total_dataset_size = 50000\n train_dataset_size = total_dataset_size\n if hparams.eval_test:\n total_dataset_size += 10000\n\n if hparams.dataset == 'cifar10':\n all_images = []\n elif hparams.dataset == 'cifar100':\n all_images = np.empty((1, 50000, 3072), dtype=np.uint8)\n if hparams.eval_test:\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\n if hparams.dataset == 'cifar10':\n datafiles = [\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\n 'data_batch_5']\n\n if hparams.eval_test:\n datafiles.append('test_batch')\n num_classes = 10\n elif hparams.dataset == 'cifar100':\n datafiles = ['train']\n if hparams.eval_test:\n datafiles.append('test')\n num_classes = 100\n else:\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\n if hparams.dataset != 'test':\n for file_num, f in enumerate(datafiles):\n d = unpickle(os.path.join(hparams.data_path, f))\n if hparams.dataset == 'cifar10':\n labels = np.array(d['labels'])\n else:\n labels = np.array(d['fine_labels'])\n if f == 'test':\n test_data[0] = copy.deepcopy(d['data'])\n if hparams.dataset == 'cifar10':\n all_images.append(test_data)\n else:\n all_images = np.concatenate([all_images, test_data], axis=1)\n else:\n if hparams.dataset == 'cifar10':\n all_images.append(copy.deepcopy(d['data']))\n else:\n all_images[file_num] = copy.deepcopy(d['data'])\n nsamples = len(labels)\n for idx in range(nsamples):\n all_labels.append(labels[idx])\n if hparams.dataset == 'cifar10':\n all_images = np.concatenate(all_images, axis=0)\n all_images = all_images.reshape(-1, 3072)\n all_images = all_images.reshape(-1, 3, 32, 32) # pylint: disable=too-many-function-args\n all_images = all_images.transpose(0, 2, 3, 1).copy()\n all_images = all_images / 255.0\n mean = augmentation_transforms.MEANS\n std = augmentation_transforms.STDS\n tf.logging.info('mean:{} std: {}'.format(mean, std))\n all_images = (all_images - mean) / std\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\n\n assert len(all_images) == len(all_labels)\n tf.logging.info(\n 'In CIFAR10 loader, number of images: {}'.format(len(all_images)))\n\n extra_test_images = None\n extra_test_labels = None\n if hparams.extra_dataset == 'cifar10_1':\n extra_test_ds = tfds.as_numpy(\n tfds.load('cifar10_1', split='test', batch_size=-1))\n extra_test_images = ((extra_test_ds['image'] / 255.0) - mean) / std\n extra_test_labels = np.eye(num_classes)[np.array(\n extra_test_ds['label'], dtype=np.int32)]\n\n # Break off test data\n if hparams.eval_test:\n test_images = all_images[train_dataset_size:]\n test_labels = all_labels[train_dataset_size:]\n else:\n test_images = []\n test_labels = []\n all_images = 
all_images[:train_dataset_size]\n all_labels = all_labels[:train_dataset_size]\n return all_images, all_labels, test_images, test_labels, extra_test_images, extra_test_labels", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def build_filler_images(self):", "def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n #一个样本由标签和图像数据组成\n #3072 = 32 x 32 x 3\n data_dict = p.load(f, encoding= 'bytes')\n images = data_dict[b'data']\n labels = data_dict[b'labels']\n #把原始数据结构调整为BCWH batches, channels, width, height\n images = images.reshape(10000, 3, 32, 32)\n #tensorflow 处理图像数据的结构:BWHC\n #把C移动到最后一个维度\n images = images.transpose(0, 2, 3, 1)\n\n labels = np.array(labels)\n return images, labels", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def loadImagesTag(self): \n dictionary = {}\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(GENDER_FRONT)\n dictionary[\"gender\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIN_BACK)\n 
dictionary[\"skin\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HEAD_BACK)\n dictionary[\"head\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BODY_BACK)\n dictionary[\"body\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(MASK_BACK)\n dictionary[\"mask\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HAIR_BACK)\n dictionary[\"hair\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_BACK)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_BACK)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_DISABLED)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_DISABLED)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHOES_BACK)\n dictionary[\"shoes\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n return dictionary", "def load_CIFAR100(batch_dir):\r\n ims, coarse_labels, fine_labels = load_CIFAR_batch(batch_dir + '/train')\r\n ims_t, c_labels, f_labels = load_CIFAR_batch(batch_dir + '/test')\r\n ims = np.concatenate((ims, ims_t))\r\n coarse_labels = np.concatenate((coarse_labels, c_labels))\r\n fine_labels = np.concatenate((fine_labels, f_labels))\r\n return ims, coarse_labels, fine_labels", "def load_image(nom):\n print(\"load_image : [\", nom, \"]\")\n fic = gdal.Open(nom)\n print(fic)\n return fic.ReadAsArray(), fic.GetGeoTransform()", "def load_camus(self, patients_path, height, width):\n # Add classes\n self.add_class(\"camus\", 1, \"chamber\")\n# self.add_class(\"camus\", 1, \"ventricule\")\n# self.add_class(\"camus\", 2, \"muscle\")\n# self.add_class(\"camus\", 3, \"atrium\")\n \n i = 0\n patients_dir = glob(patients_path)\n for patient_path in tqdm(patients_dir, ncols=80):\n filenames = glob(patient_path + \"*_resized.png\")\n for image_filename in filenames:\n if '_gt' in image_filename:\n continue\n\n self.add_image(\"camus\", image_id=i, path=image_filename,\n width=width, height=height)\n i += 1", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def image(self, where):\n cook = cookie()\n I = Image(cook, self)\n self.call('image', cook, where)\n print(\"IMAGE\", where)\n return I", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = 
json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def process_image(self):\n pass", "def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary", "def main():\n for city in CITIES:\n fetchNewseumImage(city)", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def images(self, **kwargs):\n\n raise NotImplementedError", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def load_image(self, image_id):\n \n # load 
image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image", "def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))", "def images(request, get_glance_steps, uncleanable, credentials):\n params = {'count': 1}\n params.update(getattr(request, 'param', {}))\n names = utils.generate_ids('cirros', count=params['count'])\n with create_images_context(get_glance_steps,\n uncleanable,\n credentials,\n names,\n config.CIRROS_QCOW2_URL) as images:\n yield [utils.AttrDict(name=image['name']) for image in images]", "def make_images(self):\n self._images = [tree.to_image() for tree in self.reaction_trees]\n self._update_route_dict(self._images, \"image\")", "def _load_all(self, anno_file, shuffle):\n image_set_index = []\n labels = []\n coco = COCO(anno_file)\n img_ids = coco.getImgIds()\n #print(img_ids)\n cars=[3,6,8]\n pedestrians=[1]\n cyclists=[2,4]\n lights=[10]\n signs=[13]\n\n apex_categories=cars+pedestrians+cyclists+lights+signs\n cnt=0\n humanonly=0\n human_count=0\n\n for img_id in img_ids:\n relevant=False\n # filename\n image_info = coco.loadImgs(img_id)[0]\n filename = image_info[\"file_name\"]\n #print(filename)\n #subdir = filename.split('_')[1]\n height = image_info[\"height\"]\n width = image_info[\"width\"]\n # label\n anno_ids = coco.getAnnIds(imgIds=img_id)\n annos = coco.loadAnns(anno_ids)\n label = []\n\n #print(\"listing categories for filename: \"+filename)\n\n hashumans=False\n for anno in annos:\n cat_id = int(anno[\"category_id\"])\n if(cat_id in apex_categories):\n cat_reduced= 0 if (cat_id in cars) else 1 if(cat_id in pedestrians) else 2 if(cat_id in cyclists) else 3 if(cat_id in lights) else 4\n bbox = anno[\"bbox\"]\n assert len(bbox) == 4\n xmin = float(bbox[0]) / width\n ymin = float(bbox[1]) / height\n xmax = xmin + float(bbox[2]) / width\n ymax = ymin + float(bbox[3]) / height\n label.append([cat_reduced, xmin, ymin, xmax, ymax, 0])\n #print(\"category: %d\"%cat_reduced)\n if (cat_id in pedestrians):\n hashumans=True\n if(cat_id not in pedestrians): #at least one non-person object is necessary\n relevant=True\n\n if(label and not relevant):\n humanonly+=1\n if label and relevant:\n if(hashumans):\n human_count+=1\n #print(\"adding \"+filename)\n labels.append(np.array(label))\n image_set_index.append(os.path.join(self.set, filename))\n cnt+=1\n print(\"added 
%d images\"%cnt)\n print(\"%d images has only humans\"%humanonly)\n print(\"%d registered images has humans\"%human_count)\n\n if shuffle:\n import random\n indices = range(len(image_set_index))\n random.shuffle(indices)\n image_set_index = [image_set_index[i] for i in indices]\n labels = [labels[i] for i in indices]\n # store the results\n self.image_set_index = image_set_index\n self.labels = labels", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def load_camus(self, patients_path, height, width):\n # Add classes\n self.add_class(\"camus\", 1, \"chamber\")\n \n i = 0\n if isinstance(patients_path, str):\n patients_dir = glob(patients_path)\n for patient_path in tqdm(patients_dir, ncols=80):\n filenames = glob(patient_path + \"*.jpg\")\n for image_filename in filenames:\n self.add_image(\"camus\", image_id=i, path=image_filename,\n width=width, height=height)\n i += 1\n elif isinstance(patients_path, list):\n filenames = [p for p in patients_path if p.endswith(\".jpg\")]\n for image_filename in filenames:\n self.add_image(\"camus\", image_id=i, path=image_filename,\n width=width, height=height)\n i += 1", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def process(self, image):", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def _load_colabeled_img(self) -> np.ndarray:\n return tifffile.imread(str(self.colabel_img))", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def load_image(self, image_id):\n info = self.image_info[image_id]\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n width, height = info['width'], info['height']\n\n if info['real']:\n # load image from disk\n impath = os.path.join(self.real_image_dirpath, info['real_image_path'])\n image = cv2.imread(impath,1)\n image = cv2.resize(image, 
(width, height), cv2.INTER_CUBIC)\n else:\n # synthesize image\n background_path = info['background_image_path']\n card_template_path = info['card_template_path']\n cornerpoints = info['cornerpoints']\n image = self.synthesize_image(card_template_path, background_path, cornerpoints, (width, height))\n return image", "def _get_im(self, idx):\n # load images\n path = self.uids[idx]\n img = self._load_im(path)\n\n # get information of each instance (e.g., tree) in a given image.\n # Each instance has its own row in the csv file,\n # so they need to be regrouped according to their path.\n groups = self.df.groupby('rgb_path')\n instances = groups.get_group(path) # contains all instances in given image\n\n num_objs = len(instances)\n boxes = [0.0] * num_objs\n labels = torch.zeros((num_objs,), dtype=torch.int64)\n #extras: cannot take string\n# uid = [''] * num_objs\n# sci_name = [''] * num_objs\n# nlcd_class = [''] * num_objs\n for i in range(num_objs):\n# import pdb; pdb.set_trace()\n boxes[i] = [instances.xmin.iloc[i], instances.ymin.iloc[i],\n instances.xmax.iloc[i], instances.ymax.iloc[i]]\n# uid[i] = self.df.uid.iloc[idx]\n# sci_name[i] = instances.scientific_name.iloc[i]\n# nlcd_class[i] = instances.nlcd_class.iloc[i]\n if self.object_rec == False:\n labels[i] = float(instances.class_id.iloc[i])\n\n if self.object_rec == True: # overwrite labels for object recognition task\n labels = torch.ones((num_objs,), dtype=torch.int64)\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n image_id = torch.tensor([idx])\n # for pycocotools MAP evaluation metric\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n #extras: cannot take string\n# target[\"site_id\"] = instances.site_id.iloc[0]\n# target[\"uid\"] = uid\n# target[\"sci_name\"] = sci_name\n# target[\"nlcd_class\"] = nlcd_class\n \n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target", "def load_birds(self, count, img_floder, mask_floder, imglist, dataset_root_path):\n # Add classes\n self.add_class(\"birds\", 1, \"bird\")\n #self.add_class(\"birds\", 2, \"leg\")\n #self.add_class(\"birds\", 3, \"well\")\n \n for i in range(count):\n # 获取图片宽和高\n \n filestr = imglist[i].split(\".\")[0]\n #print(imglist[i],\"-->\",cv_img.shape[1],\"--->\",cv_img.shape[0])\n #print(\"id-->\", i, \" imglist[\", i, \"]-->\", imglist[i],\"filestr-->\",filestr)\n # filestr = filestr.split(\"_\")[1]\n mask_path = mask_floder + \"/\" + filestr + \".png\"\n #print(mask_path)\n yaml_path = dataset_root_path + \"dataset_json/\" + filestr + \"_json/info.yaml\"\n #print(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n cv_img = cv2.imread(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n #print(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n self.add_image(\"birds\", image_id=i, path=img_floder + \"/\" + imglist[i],\n width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, yaml_path=yaml_path)", "def get_image_data():\n #mac\n #user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n #pc\n #user_images = [i.replace('static\\\\img\\\\', \"\") for i in glob.glob('static\\\\img\\\\*.png')]\n user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n 
sports = [inflection.titleize(i.replace('.png', \"\").capitalize().replace(\"_\", \" \")) + \"!\" for i in user_images]\n data = list(zip(sports, user_images))\n return data", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def OnInit(self):\r\n self.imageID = self.loadImage()", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def imgCIF(cif_file):\n\n cbf_handle = pycbf.cbf_handle_struct()\n cbf_handle.read_file(cif_file, pycbf.MSG_DIGEST)\n\n return ScanFactory.imgCIF_H(cif_file, cbf_handle)", "def build_list_gif(self, pathgif, nocv2 = True):\n dsize = (self.size, self.size)\n gif = mimread(pathgif)\n # convert form RGB to BGR\n listcv2 = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in gif]\n listgif = []\n for img in listcv2:\n listgif.append(cv2.resize(img, dsize))\n if nocv2:\n return self.convert_list_images(listgif)\n else:\n return listgif", "def images(self) -> dict:\n raise NotImplementedError", "def _load_cifar_batch(fpath, label_key='labels'):\n if isinstance(fpath, (os.PathLike, str, bytes)):\n with open(fpath, 'rb') as f:\n return _load_cifar_batch(f, label_key)\n\n d = pickle.load(fpath, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32).transpose([0, 2, 3, 1])\n return data, labels", "def read_vanhateren_images 
(n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def load_jpgs():\n X_tr = []\n Y_tr = []\n imges = train_df['id'].values\n for img_id in imges:\n X_tr.append(cv2.imread(fold + img_id)) \n Y_tr.append(train_df[train_df['id'] == img_id]['has_cactus'].values[0]) \n\n X_tr = np.asarray(X_tr)\n X_tr = X_tr.astype('float32')\n X_tr /= 255\n Y_tr = np.asarray(Y_tr)\n\n return X_tr, Y_tr", "def load_all_gfx(directory,colorkey=(0,0,0),accept=(\".png\",\".jpg\",\".bmp\")):\n graphics = {}\n for pic in os.listdir(directory):\n name,ext = os.path.splitext(pic)\n if ext.lower() in accept:\n img = pg.image.load(os.path.join(directory, pic))\n if img.get_alpha():\n img = img.convert_alpha()\n else:\n img = img.convert()\n img.set_colorkey(colorkey)\n graphics[name]=img\n return graphics", "def generate_image(self):\n pass", "def get_image():\n\n url = 'http://skyview.gsfc.nasa.gov/cgi-bin/images'\n params = dict(Position='%s,%s' % (source['ra'], source['dec']),\n Survey=source['survey'].val,\n Return='GIF')\n response = requests.get(url, params=params, stream=True)\n with open(files['image.gif'].rel, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def explore(df):\n for i in range(df.shape[0]):\n f = df['Path'].iloc[i]\n img = imread(f)\n # shows the shape (h, w, channels)\n print(img.dtype, img.shape)", "def load_images(self, filename):\n\n self.images = self.load(filename)\n self.length = len(self.images)\n self.create_teacher()", "def read_image(self, ifd):\n ifd.img_data = np.array([], dtype='uint8')\n strips = ifd.get_strips() # [(strip_offset, strip_byte_count)]\n for strip in strips:\n ifd.img_data = np.append(ifd.img_data, self.tif_file.read(size=strip[1], location=strip[0]))", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = 
dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "async def catpic(self, ctx):\n data = await self.bot.session.get_cat_pic()\n file = discord.File(data[\"img_data\"], filename=data[\"filename\"])\n await ctx.send(file=file)", "def extract_images(f):\r\n if f == \"train\":\r\n D_train = numpy_array_stick(\"/home/luzihao/xiaoluo/xiyuan/CNN/standard2/\",10)\r\n #D_train = numpy_array_stick(\"D:\\\\FDU\\\\Template\\\\FDUROP\\\\face_detection_and_recognition\\\\standard2\\\\\",10)\r\n #\"X\" means data,\"Y\" means label\r\n Y_train = D_train[1]\r\n X_train = D_train[0].reshape(people*10,80,80,1)\r\n return X_train,Y_train\r\n elif f == \"test\":\r\n D_test = numpy_array_stick(\"/home/luzihao/xiaoluo/xiyuan/CNN/standard2/\",1)\r\n #D_test = numpy_array_stick(\"D:\\\\FDU\\\\Template\\\\FDUROP\\\\face_detection_and_recognition\\\\standard2\\\\\",1)\r\n #\"X\" means data,\"Y\" means label\r\n Y_test = D_test[1]\r\n X_test = D_test[0].reshape(people*1,80,80,1)\r\n return X_test,Y_test", "def load_images(card_images):\n\n suits = [\"heart\", \"club\", \"diamond\", \"spade\"]\n face_cards = [\"jack\", \"king\", \"queen\"]\n\n # get the correct image format depending on the tkinter version\n if tkinter.TkVersion >= 8.6:\n extension = \"png\"\n else:\n extension = \"ppm\"\n\n # for each suit, retrieve the image for the cards\n # and store them in card_images list of tuples\n for suit in suits:\n\n # numbers 1 to 10\n for card in range(1, 11):\n name = f\"cards/{str(card)}_{suit}.{extension}\"\n image = tkinter.PhotoImage(file=name)\n card_images.append((card, image,))\n\n # face cards\n for card in face_cards:\n name = f\"cards/{str(card)}_{suit}.{extension}\"\n image = tkinter.PhotoImage(file=name)\n card_images.append((10, image,))", "def load_image(self, image_id):\n info = 
self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n # height = labelmeJson['imageHeight']\n # width = labelmeJson['imageWidth']\n # shape_list = labelmeJson['shapes']\n image = self.img_b64_to_arr(labelmeJson['imageData'])\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([labelmeJson['height'], labelmeJson['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n #\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n return image", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def readImages(self):\r\n\r\n #Read the file camera.csv for the image file name\r\n lines = [line.strip() for line in open(self.cameraFile)]\r\n i = 0;\r\n\tself.centers = []\r\n\tself.lefts = []\r\n\tself.rights = []\r\n\r\n for line in lines:\r\n info = line.split(',')\r\n \r\n\r\n if info[0] == 'seq':\r\n i += 1\r\n continue\r\n \r\n if info[4] == 'left_camera':\r\n self.lefts.append(info)\r\n if info[4] == 'center_camera':\r\n self.centers.append(info)\r\n if info[4] == 'right_camera':\r\n self.rights.append(info)\r\n i += 1\r\n\r\n print \"Total Frames: %d \" % (len(self.centers))", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def get_image(request):\n data = [{'model': i.bike_model, 'image': i.image, 'id': i.id} for i in BikeDetails.objects.all()]\n data = {'data': data}\n return render(request, 'show_image.html', data)", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n data_dict = cPickle.load(f)\r\n ims = data_dict['data']\r\n coarse_labels = np.array(data_dict['coarse_labels'])\r\n fine_labels = np.array(data_dict['fine_labels'])\r\n return ims, coarse_labels, fine_labels", "def load_cifar10_data(img_rows=48,\n\timg_cols=48):\n\n\t(X_train, Y_train), (X_valid, Y_valid) = cifar10.load_data()\n\n\tX_train = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_train[:,:,:,:]])\n\tX_valid = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_valid[:,:,:,:]])\n\n\tY_train = to_categorical(Y_train[:], num_classes)\n\tY_valid = to_categorical(Y_valid[:], num_classes)\n\n\treturn X_train, Y_train, X_valid, Y_valid", "def OnInit( self ):\n self.imageID = self.loadImage ()", "def load_image(self, image_id):\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for ship, dims in info['ships']:\n image = self.draw_ship(image, ship, dims)\n return image", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def 
main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def load_cityscapes(path, fdr):\n dataset = Dataset(path, split='val', mode=\"fine\", target_type=[\"semantic\", \"instance\"])\n\n from PATH import SCRI_PATH as spath\n\n for image, (sseg, inst), name in dataset:\n image = np.array(image)\n sseg = gt_covert(sseg)\n inst = np.array(inst)\n if os.path.exists(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"):\n scribbles = np.array(Image.open(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"))\n else:\n scribbles = None\n # scribbles = scribble_convert(scribbles)\n yield name, image, sseg, inst, scribbles", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def iniciar_sprites(self):\n\n res_gifs = os.path.join(_RESFOLDERS, '**', '*.gif')\n gifs_list = glob.glob(res_gifs, recursive=True)\n for gif in gifs_list:\n self.guardar_sprite(gif)", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n anns = resFile\n annsImgIds = [ann['image_id'] for ann in anns]\n\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n\n if 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n ann['area'] = bb[2] * bb[3]\n ann['id'] = id + 1\n ann['iscrowd'] = 0\n else:\n return res\n\n res.dataset['annotations'] = anns\n createIndex(res)\n return res", "def RegisterImages(self):\n images = [(autocomp.TYPE_FUNCTION, ed_glob.ID_FUNCT_TYPE),\n (autocomp.TYPE_METHOD, ed_glob.ID_METHOD_TYPE),\n (autocomp.TYPE_PROPERTY, ed_glob.ID_PROPERTY_TYPE),\n 
(autocomp.TYPE_ATTRIBUTE, ed_glob.ID_ATTR_TYPE),\n (autocomp.TYPE_CLASS, ed_glob.ID_CLASS_TYPE),\n (autocomp.TYPE_VARIABLE, ed_glob.ID_VARIABLE_TYPE),\n (autocomp.TYPE_ELEMENT, ed_glob.ID_ELEM_TYPE)]\n for idx, img in images:\n bmp = wx.ArtProvider.GetBitmap(str(img), wx.ART_MENU)\n if bmp.IsOk():\n self.RegisterImage(idx, bmp)", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def load_base_images(base_img):\n if base_img is not None:\n if not os.path.exists(base_img):\n base_img = os.path.join(LIGHTHOUSES_DIR, base_img)\n return (\n Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),\n Image.open(os.path.join(base_img, 'off.gif'))\n )\n return None, None" ]
[ "0.68525964", "0.6523401", "0.6510388", "0.6463343", "0.6404395", "0.6381195", "0.6346877", "0.6258412", "0.6138871", "0.6138871", "0.6138871", "0.6136035", "0.60968804", "0.60725594", "0.6004178", "0.5993124", "0.59865046", "0.59833163", "0.5981841", "0.5942361", "0.5926865", "0.5919499", "0.5860616", "0.58507526", "0.5842023", "0.5825252", "0.5801594", "0.5792871", "0.577839", "0.5769065", "0.57620853", "0.57445055", "0.57393545", "0.57245636", "0.5719968", "0.56738764", "0.56697726", "0.56686145", "0.56615335", "0.5658143", "0.5645889", "0.564355", "0.56368256", "0.56344897", "0.5624512", "0.5621664", "0.56201893", "0.560929", "0.56012017", "0.5597784", "0.55936825", "0.557522", "0.557347", "0.5562916", "0.55616033", "0.55576354", "0.5556221", "0.5549883", "0.55387735", "0.55377346", "0.5525368", "0.552512", "0.55219364", "0.5516107", "0.5512547", "0.5512385", "0.5509836", "0.5507677", "0.55069035", "0.5506065", "0.55057466", "0.5504805", "0.550064", "0.54991734", "0.5498452", "0.5498147", "0.5495251", "0.549103", "0.5490896", "0.54892725", "0.54860234", "0.54843897", "0.547864", "0.54717994", "0.5465948", "0.5463039", "0.5457845", "0.54564935", "0.54539156", "0.5448303", "0.5446577", "0.5440166", "0.54357827", "0.54338545", "0.5429853", "0.5427436", "0.5426561", "0.5421804", "0.54161435", "0.5408868" ]
0.6069607
14
load all of imagenet data as flat vector
def load_imagenet(directory):
    path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'
    train_labels = os.listdir(path_train)
    train_data = []
    for label in train_labels:
        imgs_path = os.path.join(path_train, label)
        imgs = os.listdir(imgs_path)
        for img_name in imgs:
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)
            img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)
            train_data.append(img)
            train_labels.append(label)
    train_data = np.concatenate(train_data)
    train_labels = np.array(train_labels, dtype='str')
    test_labels = os.listdir(path_val)
    test_data = []
    for label in test_labels:
        imgs_path = os.path.join(path_val, label)
        for img_name in imgs:
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)
            img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)
            test_data.append(img)
            test_labels.append(label)
    test_data = np.concatenate(test_data)
    test_labels = np.array(test_labels, dtype='str')
    _, train_labels = np.unique(train_labels, return_inverse=True)
    _, test_labels = np.unique(test_labels, return_inverse=True)
    del r, g, b, imgs_path, img_name, img, imgs
    return train_data, train_labels, test_data, test_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_imagenet_data(net):\r\n\r\n # get a list of all the images (note that we use networks trained on ImageNet data)\r\n img_list = os.listdir(path_data)\r\n\r\n # throw away files that are not in the allowed format (png or jpg)\r\n for img_file in img_list[:]:\r\n if not (img_file.endswith(\".png\") or img_file.endswith(\".jpg\")):\r\n img_list.remove(img_file)\r\n \r\n # fill up data matrix\r\n img_dim = net.crop_dims\r\n X = np.empty((0, img_dim[0], img_dim[1], 3))\r\n X_filenames = []\r\n for i in range(len(img_list)):\r\n np_img = np.float32(PIL.Image.open('{}/{}'.format(path_data, img_list[i])))\r\n if np_img.shape[0] >= img_dim[0] and np_img.shape[1] >= img_dim[1]:\r\n o = 0.5*np.array([np_img.shape[0]-img_dim[0], np_img.shape[1]-img_dim[1]])\r\n X = np.vstack((X, np_img[o[0]:o[0]+img_dim[0], o[1]:o[1]+img_dim[1], :][np.newaxis]))\r\n X_filenames.append(img_list[i].replace(\".\",\"\"))\r\n else:\r\n print(\"Skipped \",img_list[i],\", image dimensions were too small.\")\r\n\r\n # the number of images we found in the folder\r\n num_imgs = X.shape[0]\r\n\r\n # cast to image values that can be displayed directly with plt.imshow()\r\n X_im = np.uint8(X)\r\n \r\n # preprocess\r\n X_pre = np.zeros((X.shape[0], 3, img_dim[0], img_dim[1]))\r\n for i in range(num_imgs):\r\n X_pre[i] = net.transformer.preprocess('data', X[i])\r\n X = X_pre\r\n \r\n return X, X_im, X_filenames", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] 
= self.transformer.preprocess('data', im)", "def main(image_directory, cuda=False):\n\n BATCH_SIZE = 256\n\n if cuda:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1]).cuda()\n else:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1])\n model.eval()\n\n all_vectors = []\n\n dataset = Dataset(image_directory)\n loader = torch.utils.data.DataLoader(dataset,\n batch_size=BATCH_SIZE,\n collate_fn=collate,\n num_workers=4)\n\n for inputs, meta in tqdm(loader):\n if cuda:\n inputs = Variable(inputs.cuda())\n else:\n inputs = Variable(inputs)\n\n vectors = model(inputs).cpu().data.numpy()\n meta = map(lambda x: (x[0],\n x[1],\n int(re.search('image_(\\d+).jpg',\n x[2]).group(1))),\n meta)\n print(meta)\n print(vectors)\n all_vectors.append(\n np.concatenate(\n [np.array(meta), vectors.squeeze()],\n axis=1\n )\n )\n\n all_vectors = np.concatenate(all_vectors)\n np.save('vectors.npy', all_vectors)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def load_one_img(ds):\n for img in ds.take(1):\n img = img[1, ...]\n yuv_image_tensor = tf.expand_dims(img, axis=0)\n\n return yuv_image_tensor", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_image_data():\n 
print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def flatten(file_name):\n dataset = pickle.load(open(file_name, 'rb'))\n train_data = dataset['train']\n test_data = dataset['test']\n\n train_data = [y for x in train_data for y in x]\n test_data = [y for x in test_data for y in x]\n\n train_data=generate_binary_vectors(train_data,False)\n test_data=generate_binary_vectors(test_data,False)\n\n return train_data, test_data", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def loadData(path):\r\n X = []\r\n y = []\r\n dir1 = os.listdir(path)\r\n for d1 in dir1:\r\n dir2 = os.listdir(path+'/'+d1)\r\n for d2 in dir2:\r\n if int(d1) == 0:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(1)\r\n elif int(d1) == 1:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(-1)\r\n X = np.array(X, dtype=np.float32)\r\n y = np.array(y, dtype=np.int64)\r\n perm = np.random.permutation(X.shape[0])\r\n X = X[perm]\r\n y = y[perm]\r\n return X, y", "def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n 
all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def load(data, feature):\n #Settings\n train_path = os.path.join(\"data\", data, feature) #put your image path here if you want to override current directory\n\n X = []\n y = []\n for f in os.listdir(train_path):\n (X_i, y_i) = cPickle.load(open(os.path.join(train_path,f), \"rb\"))\n if type(X_i) is np.ndarray:\n X_i = X_i.tolist()\n X = X + X_i #Append the two lists together\n y = y + y_i\n assert np.size(X,0) == 50000 or np.size(X,0) == 10000\n assert np.size(y) == 50000 or np.size(y) == 10000\n # Raws are stored as SimpleCV Images so they can easily be converted to\n # features using SimpleCV\n # Since machine learning aglorithms take feature vectors as inputs, we\n # flatten the underlying 3D matrices of the images here.\n if feature == \"raw\":\n X = map (lambda img: img.getNumpy().flatten(), X)\n return X,y", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def get_data(path):\n all_images_as_array=[]\n label=[]\n 
for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. 
anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, ::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def _load_image(self, index: int) -> Tensor:\n path = self.files[index][\"image\"]\n with rasterio.open(path) as f:\n array = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n 
buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def image_to_feature_vector(raw_tensor):\n result = []\n for tensor in raw_tensor:\n result.append(tensor.flatten())\n return result", "def load_vecs(fin):\n h5f = tables.open_file(fin)\n h5vecs= h5f.root.vecs\n\n vecs=np.zeros(shape=h5vecs.shape,dtype=h5vecs.dtype)\n vecs[:]=h5vecs[:]\n h5f.close()\n return vecs", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def read_x_data(data_dir):\n files = glob.glob(os.path.join(data_dir, '*.jpg'))\n return [(os.path.basename(file), io.imread(file)) for file in files]", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels", "def load_data(model, set='train', img_rows=128, img_cols=128):\n print('#' * 30)\n print('Loading {} data from file.'.format(set))\n\n # read in the .npy file containing the images\n images_train = np.load('output/processed_data/images_{}.npy'.format(set))\n\n # read in the .npy file containing the target features\n targets_train = np.load('output/processed_data/targets_{}.npy'.format(set))\n\n # scale image pixel values to [0, 1]\n images_train = images_train.astype(np.float32)\n images_train /= 255.\n\n # scale target center coordinates to [-1, 1] (from 0 to 95 initially)\n targets_train = targets_train.astype(np.float32)\n targets_train[:, 0] = (targets_train[:, 0] - (img_rows / 2)) / (img_rows / 2)\n targets_train[:, 1] = (targets_train[:, 1] - (img_rows / 2)) / (img_cols / 2)\n\n # reshape images according to the neural network model intended to be used\n if model == 'cnn':\n print('Indicated model is a CNN, reshaping images with channels first.')\n images_train = images_train.reshape(-1, 1, img_rows, img_cols)\n 
elif model == 'dnn':\n print('Indicated model is a DNN, flattening out images.')\n images_train = images_train.reshape(images_train.shape[0], img_rows * img_rows)\n\n print('Loading done. Pixel values have been scaled to [0, 1] and target center coordinates to [-1, 1].')\n print('#' * 30)\n\n return images_train, targets_train", "def load_fmnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def test_model_sample(net, data_loader):\n net.eval()\n array = []\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n output 
= net(X)\n output = ToPILImage()(output)\n array.append(output)\n return array", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_data(self, from_idx):\n length = len(self.filenames)\n # we assume all images have the same dimensions\n shape = cv2.imread(filenames[0], int(self.color)).shape\n if not self.color:\n shape += (1,) # add additionnal channel for black and white\n X = []\n for f in tqdm(self.filenames[:5000]):\n if psutil.virtual_memory()[2] >= 60.0:\n break # preserve memory\n img = cv2.imread(f, int(self.color))\n if img is not None:\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n # change range of image to [-1, 1]\n # TODO : different procedure for colored images\n if not self.color:\n img = img.astype('float32')\n mx = np.max(img)\n mn = np.min(img)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n else:\n mx = np.amax(np.amax(img, axis=0), axis=0)\n mn = np.amin(np.amin(img, axis=0), axis=0)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n if np.all(r):\n img = (img - m)/r # works in both cases\n # add to dataset\n X.append(img)\n self.X = np.array(X)", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with 
open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels", "def test_get_image(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n image = spine_data_loader.get_image(str(idx))\n assert image.shape == (256, 256, 1)\n assert image.min() == 0.0\n assert image.max() == 1.0\n assert image.dtype == 'float64'", "def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)", "def load_data(model_path):\n x_arrays = []\n y_arrays = []\n for partition in iter_embeddings(model_path):\n h5f = h5py.File(partition, 'r')\n X = h5f[\"embeddings\"][:]\n x_arrays.append(X)\n try:\n Y = h5f[\"labels\"][:]\n y_arrays.append(Y)\n except KeyError:\n print(\"Labels not defined\")\n if len(y_arrays) > 0:\n X = np.vstack(x_arrays)\n Y = np.hstack(y_arrays)\n return X, Y\n else:\n X = np.vstack(x_arrays)\n Y = np.zeros(len(X))\n return X, Y", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= 
inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels", "def Read_Raw_Images(path_data,path_labels):\n \n data = skimage.io.imread(path_data).astype(np.float32)\n for i in range(data.shape[0]):\n data[i,...] = skimage.exposure.rescale_intensity(data[i,...], out_range=(0,1))\n data_labels = skimage.io.imread(path_labels) > 0\n \n training_data=data[0:25,:,:]\n training_labels=data_labels[0:25,:,:]\n \n testing_data=data[25:data.shape[0],:,:]\n testing_labels=data_labels[25:data.shape[0],:,:]\n \n np.save(\"data.npy\",training_data)\n np.save(\"labels.npy\",training_labels)\n np.save(\"data_validation.npy\",testing_data)\n np.save(\"labels_validation.npy\",testing_labels)\n \n return()", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 
'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def load_data_1d(path, dset):\n labels, imgs, _, _ = load_data(path, dset)\n print(\"images.shape=%s, labels.shape=%s\" % (imgs.shape, labels.shape))\n return labels, imgs", "def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels", "def get_data(folder):\n X = []\n y = []\n for folderName in os.listdir(folder):\n if not folderName.startswith('.'):\n if folderName in ['NORMAL']:\n label = 0\n elif folderName in ['CNV']:\n label = 1\n elif folderName in ['DME']:\n label = 2\n elif folderName in ['DRUSEN']:\n label = 3\n else:\n label = 4\n for image_filename in tqdm(os.listdir(folder + folderName)):\n img_file = cv2.imread(folder + folderName + '/' + image_filename)\n if img_file is not None:\n img_file = skimage.transform.resize(img_file, (imageSize, imageSize, 3))\n img_arr = np.asarray(img_file)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n 
print(\"label num:\",n)\n print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # 
pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def load_data(data_file):\n data = pickle.load(open(data_file, \"rb\"))\n images = data[\"images\"]\n labels = data[\"labels\"]\n\n return images, labels", "def load_nifti(file_path, dtype=np.float32, incl_header=False, z_factor=None, mask=None):\n \n img = nib.load(file_path)\n struct_arr = img.get_data().astype(dtype)\n \n # replace infinite values with 0\n if np.inf in struct_arr:\n struct_arr[struct_arr == np.inf] = 0.\n \n # replace NaN values with 0 \n if np.isnan(struct_arr).any() == True:\n struct_arr[np.isnan(struct_arr)] = 0.\n \n if mask is not None:\n struct_arr *= mask\n \n if z_factor is not None:\n struct_arr = zoom(struct_arr, z_factor)\n \n if incl_header:\n return struct_arr, img\n else:\n return struct_arr", "def img_to_vector(img_fn, label=0):\r\n img = \"\"\r\n for line in open(img_fn).readlines()[:32]:\r\n img += line[:32]\r\n\r\n # labels are always attached at the last position\r\n itera = [_ for _ in img + str(label)]\r\n return numpy.fromiter(itera, \"f4\")", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. 
So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = 
gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, 
im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... \")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test" ]
[ "0.6885546", "0.6619148", "0.6589122", "0.6569712", "0.65382355", "0.6508243", "0.6401413", "0.6387084", "0.63791114", "0.6374463", "0.6297042", "0.6268717", "0.6249637", "0.6221783", "0.61718935", "0.61413294", "0.6102105", "0.6062337", "0.6045171", "0.60450935", "0.6041338", "0.6030839", "0.6005366", "0.5979027", "0.5977899", "0.5972258", "0.59704506", "0.59702134", "0.59635496", "0.59613544", "0.59432817", "0.5940984", "0.59308237", "0.5926693", "0.5918751", "0.5901474", "0.58985984", "0.5896182", "0.589281", "0.58915", "0.58842236", "0.5882845", "0.587973", "0.58795476", "0.5866553", "0.5859637", "0.5853852", "0.5852208", "0.5845792", "0.5844838", "0.584027", "0.5837841", "0.5837638", "0.5835131", "0.58345985", "0.5825338", "0.58219707", "0.5821794", "0.5821733", "0.58207667", "0.5818924", "0.5812639", "0.5810135", "0.580977", "0.58049065", "0.58034915", "0.5801399", "0.5797148", "0.57941586", "0.5791284", "0.5788274", "0.578197", "0.5780986", "0.5778521", "0.57750857", "0.5774765", "0.57711", "0.57678676", "0.5765075", "0.57636577", "0.57636577", "0.5763545", "0.5759766", "0.57450575", "0.57388633", "0.57365584", "0.5735748", "0.5723468", "0.5715463", "0.5710096", "0.57041734", "0.5700868", "0.5700156", "0.5699846", "0.56957114", "0.56944865", "0.56925213", "0.569171", "0.5689834", "0.5687309" ]
0.6851224
1
load all of imagenet data as flat vector
def load_tiny_imagenet(directory):
    path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'
    labels = os.listdir(path_train)
    train_data = []
    train_labels = []
    for label in labels:
        imgs_path = os.path.join(path_train, label, 'images')
        imgs = os.listdir(imgs_path)
        for img_name in imgs:
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)
            img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)
            train_data.append(img)
            train_labels.append(label)
    train_data = np.concatenate(train_data)
    train_labels = np.array(train_labels, dtype='str')

    test_data = []
    test_labels = []
    with open(path_val+'/val_annotations.txt', 'r') as f:
        val_annotations = [line.strip().split('\t') for line in f]
    val_annotations = np.array(val_annotations)
    imgs_path = os.path.join(path_val, 'images')
    imgs = os.listdir(imgs_path)
    for img_name in imgs:
        img_path = os.path.join(imgs_path, img_name)
        img = cv2.imread(img_path)
        b, g, r = cv2.split(img)
        img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)
        test_data.append(img)
        label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')
        test_labels.append(label)
    test_data = np.concatenate(test_data)
    test_labels = np.concatenate(test_labels)
    test_labels = np.array(test_labels, dtype='str')

    _, train_labels = np.unique(train_labels, return_inverse=True)
    _, test_labels = np.unique(test_labels, return_inverse=True)

    del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations

    return train_data, train_labels, test_data, test_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_imagenet_data(net):\r\n\r\n # get a list of all the images (note that we use networks trained on ImageNet data)\r\n img_list = os.listdir(path_data)\r\n\r\n # throw away files that are not in the allowed format (png or jpg)\r\n for img_file in img_list[:]:\r\n if not (img_file.endswith(\".png\") or img_file.endswith(\".jpg\")):\r\n img_list.remove(img_file)\r\n \r\n # fill up data matrix\r\n img_dim = net.crop_dims\r\n X = np.empty((0, img_dim[0], img_dim[1], 3))\r\n X_filenames = []\r\n for i in range(len(img_list)):\r\n np_img = np.float32(PIL.Image.open('{}/{}'.format(path_data, img_list[i])))\r\n if np_img.shape[0] >= img_dim[0] and np_img.shape[1] >= img_dim[1]:\r\n o = 0.5*np.array([np_img.shape[0]-img_dim[0], np_img.shape[1]-img_dim[1]])\r\n X = np.vstack((X, np_img[o[0]:o[0]+img_dim[0], o[1]:o[1]+img_dim[1], :][np.newaxis]))\r\n X_filenames.append(img_list[i].replace(\".\",\"\"))\r\n else:\r\n print(\"Skipped \",img_list[i],\", image dimensions were too small.\")\r\n\r\n # the number of images we found in the folder\r\n num_imgs = X.shape[0]\r\n\r\n # cast to image values that can be displayed directly with plt.imshow()\r\n X_im = np.uint8(X)\r\n \r\n # preprocess\r\n X_pre = np.zeros((X.shape[0], 3, img_dim[0], img_dim[1]))\r\n for i in range(num_imgs):\r\n X_pre[i] = net.transformer.preprocess('data', X[i])\r\n X = X_pre\r\n \r\n return X, X_im, X_filenames", "def load_imagenet(directory):\n path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'\n train_labels = os.listdir(path_train)\n train_data = []\n for label in train_labels:\n imgs_path = os.path.join(path_train, label)\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_labels = os.listdir(path_val)\n test_data = []\n for label in test_labels:\n imgs_path = os.path.join(path_val, label)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, imgs_path, img_name, img, imgs\n \n return train_data, train_labels, test_data, test_labels", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] 
= self.transformer.preprocess('data', im)", "def main(image_directory, cuda=False):\n\n BATCH_SIZE = 256\n\n if cuda:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1]).cuda()\n else:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1])\n model.eval()\n\n all_vectors = []\n\n dataset = Dataset(image_directory)\n loader = torch.utils.data.DataLoader(dataset,\n batch_size=BATCH_SIZE,\n collate_fn=collate,\n num_workers=4)\n\n for inputs, meta in tqdm(loader):\n if cuda:\n inputs = Variable(inputs.cuda())\n else:\n inputs = Variable(inputs)\n\n vectors = model(inputs).cpu().data.numpy()\n meta = map(lambda x: (x[0],\n x[1],\n int(re.search('image_(\\d+).jpg',\n x[2]).group(1))),\n meta)\n print(meta)\n print(vectors)\n all_vectors.append(\n np.concatenate(\n [np.array(meta), vectors.squeeze()],\n axis=1\n )\n )\n\n all_vectors = np.concatenate(all_vectors)\n np.save('vectors.npy', all_vectors)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def load_one_img(ds):\n for img in ds.take(1):\n img = img[1, ...]\n yuv_image_tensor = tf.expand_dims(img, axis=0)\n\n return yuv_image_tensor", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def flatten(file_name):\n dataset = pickle.load(open(file_name, 'rb'))\n train_data = dataset['train']\n test_data = dataset['test']\n\n train_data = [y for x in train_data for y in x]\n test_data = [y for x in test_data for y in x]\n\n train_data=generate_binary_vectors(train_data,False)\n test_data=generate_binary_vectors(test_data,False)\n\n return train_data, test_data", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = 
line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def loadData(path):\r\n X = []\r\n y = []\r\n dir1 = os.listdir(path)\r\n for d1 in dir1:\r\n dir2 = os.listdir(path+'/'+d1)\r\n for d2 in dir2:\r\n if int(d1) == 0:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(1)\r\n elif int(d1) == 1:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(-1)\r\n X = np.array(X, dtype=np.float32)\r\n y = np.array(y, dtype=np.int64)\r\n perm = np.random.permutation(X.shape[0])\r\n X = X[perm]\r\n y = y[perm]\r\n return X, y", "def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y", "def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = 
np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def load(data, feature):\n #Settings\n train_path = os.path.join(\"data\", data, feature) #put your image path here if you want to override current directory\n\n X = []\n y = []\n for f in os.listdir(train_path):\n (X_i, y_i) = cPickle.load(open(os.path.join(train_path,f), \"rb\"))\n if type(X_i) is np.ndarray:\n X_i = X_i.tolist()\n X = X + X_i #Append the two lists together\n y = y + y_i\n assert np.size(X,0) == 50000 or np.size(X,0) == 10000\n assert np.size(y) == 50000 or np.size(y) == 10000\n # Raws are stored as SimpleCV Images so they can easily be converted to\n # features using SimpleCV\n # Since machine learning aglorithms take feature vectors as inputs, we\n # flatten the underlying 3D matrices of the images here.\n if feature == \"raw\":\n X = map (lambda img: img.getNumpy().flatten(), X)\n return X,y", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n 
print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. 
anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, ::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def _load_image(self, index: int) -> Tensor:\n path = self.files[index][\"image\"]\n with rasterio.open(path) as f:\n array = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n 
buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def image_to_feature_vector(raw_tensor):\n result = []\n for tensor in raw_tensor:\n result.append(tensor.flatten())\n return result", "def load_vecs(fin):\n h5f = tables.open_file(fin)\n h5vecs= h5f.root.vecs\n\n vecs=np.zeros(shape=h5vecs.shape,dtype=h5vecs.dtype)\n vecs[:]=h5vecs[:]\n h5f.close()\n return vecs", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def read_x_data(data_dir):\n files = glob.glob(os.path.join(data_dir, '*.jpg'))\n return [(os.path.basename(file), io.imread(file)) for file in files]", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels", "def load_data(model, set='train', img_rows=128, img_cols=128):\n print('#' * 30)\n print('Loading {} data from file.'.format(set))\n\n # read in the .npy file containing the images\n images_train = np.load('output/processed_data/images_{}.npy'.format(set))\n\n # read in the .npy file containing the target features\n targets_train = np.load('output/processed_data/targets_{}.npy'.format(set))\n\n # scale image pixel values to [0, 1]\n images_train = images_train.astype(np.float32)\n images_train /= 255.\n\n # scale target center coordinates to [-1, 1] (from 0 to 95 initially)\n targets_train = targets_train.astype(np.float32)\n targets_train[:, 0] = (targets_train[:, 0] - (img_rows / 2)) / (img_rows / 2)\n targets_train[:, 1] = (targets_train[:, 1] - (img_rows / 2)) / (img_cols / 2)\n\n # reshape images according to the neural network model intended to be used\n if model == 'cnn':\n print('Indicated model is a CNN, reshaping images with channels first.')\n images_train = images_train.reshape(-1, 1, img_rows, img_cols)\n 
elif model == 'dnn':\n print('Indicated model is a DNN, flattening out images.')\n images_train = images_train.reshape(images_train.shape[0], img_rows * img_rows)\n\n print('Loading done. Pixel values have been scaled to [0, 1] and target center coordinates to [-1, 1].')\n print('#' * 30)\n\n return images_train, targets_train", "def load_fmnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def test_model_sample(net, data_loader):\n net.eval()\n array = []\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n output 
= net(X)\n output = ToPILImage()(output)\n array.append(output)\n return array", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_data(self, from_idx):\n length = len(self.filenames)\n # we assume all images have the same dimensions\n shape = cv2.imread(filenames[0], int(self.color)).shape\n if not self.color:\n shape += (1,) # add additionnal channel for black and white\n X = []\n for f in tqdm(self.filenames[:5000]):\n if psutil.virtual_memory()[2] >= 60.0:\n break # preserve memory\n img = cv2.imread(f, int(self.color))\n if img is not None:\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n # change range of image to [-1, 1]\n # TODO : different procedure for colored images\n if not self.color:\n img = img.astype('float32')\n mx = np.max(img)\n mn = np.min(img)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n else:\n mx = np.amax(np.amax(img, axis=0), axis=0)\n mn = np.amin(np.amin(img, axis=0), axis=0)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n if np.all(r):\n img = (img - m)/r # works in both cases\n # add to dataset\n X.append(img)\n self.X = np.array(X)", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)", 
"def test_get_image(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n image = spine_data_loader.get_image(str(idx))\n assert image.shape == (256, 256, 1)\n assert image.min() == 0.0\n assert image.max() == 1.0\n assert image.dtype == 'float64'", "def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels", "def load_data(model_path):\n x_arrays = []\n y_arrays = []\n for partition in iter_embeddings(model_path):\n h5f = h5py.File(partition, 'r')\n X = h5f[\"embeddings\"][:]\n x_arrays.append(X)\n try:\n Y = h5f[\"labels\"][:]\n y_arrays.append(Y)\n except KeyError:\n print(\"Labels not defined\")\n if len(y_arrays) > 0:\n X = np.vstack(x_arrays)\n Y = np.hstack(y_arrays)\n return X, Y\n else:\n X = np.vstack(x_arrays)\n Y = np.zeros(len(X))\n return X, Y", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n 
img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels", "def Read_Raw_Images(path_data,path_labels):\n \n data = skimage.io.imread(path_data).astype(np.float32)\n for i in range(data.shape[0]):\n data[i,...] = skimage.exposure.rescale_intensity(data[i,...], out_range=(0,1))\n data_labels = skimage.io.imread(path_labels) > 0\n \n training_data=data[0:25,:,:]\n training_labels=data_labels[0:25,:,:]\n \n testing_data=data[25:data.shape[0],:,:]\n testing_labels=data_labels[25:data.shape[0],:,:]\n \n np.save(\"data.npy\",training_data)\n np.save(\"labels.npy\",training_labels)\n np.save(\"data_validation.npy\",testing_data)\n np.save(\"labels_validation.npy\",testing_labels)\n \n return()", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = 
h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def load_data_1d(path, dset):\n labels, imgs, _, _ = load_data(path, dset)\n print(\"images.shape=%s, labels.shape=%s\" % (imgs.shape, labels.shape))\n return labels, imgs", "def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels", "def get_data(folder):\n X = []\n y = []\n for folderName in os.listdir(folder):\n if not folderName.startswith('.'):\n if folderName in ['NORMAL']:\n label = 0\n elif folderName in ['CNV']:\n label = 1\n elif folderName in ['DME']:\n label = 2\n elif folderName in ['DRUSEN']:\n label = 3\n else:\n label = 4\n for image_filename in tqdm(os.listdir(folder + folderName)):\n img_file = cv2.imread(folder + folderName + '/' + image_filename)\n if img_file is not None:\n img_file = skimage.transform.resize(img_file, (imageSize, imageSize, 3))\n img_arr = np.asarray(img_file)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n print(\"label num:\",n)\n 
print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: 
disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def load_data(data_file):\n data = pickle.load(open(data_file, \"rb\"))\n images = data[\"images\"]\n labels = data[\"labels\"]\n\n return images, labels", "def load_nifti(file_path, dtype=np.float32, incl_header=False, z_factor=None, mask=None):\n \n img = nib.load(file_path)\n struct_arr = img.get_data().astype(dtype)\n \n # replace infinite values with 0\n if np.inf in struct_arr:\n struct_arr[struct_arr == np.inf] = 0.\n \n # replace NaN values with 0 \n if np.isnan(struct_arr).any() == True:\n struct_arr[np.isnan(struct_arr)] = 0.\n \n if mask is not None:\n struct_arr *= mask\n \n if z_factor is not None:\n struct_arr = zoom(struct_arr, z_factor)\n \n if incl_header:\n return struct_arr, img\n else:\n return struct_arr", "def img_to_vector(img_fn, label=0):\r\n img = \"\"\r\n for line in open(img_fn).readlines()[:32]:\r\n img += line[:32]\r\n\r\n # labels are always attached at the last position\r\n itera = [_ for _ in img + str(label)]\r\n return numpy.fromiter(itera, \"f4\")", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. 
So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = 
os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in 
enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... \")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test" ]
[ "0.6884788", "0.6850081", "0.6620506", "0.65908414", "0.65704596", "0.65394974", "0.6508061", "0.6400126", "0.63884264", "0.6374563", "0.62975335", "0.6268397", "0.62499464", "0.62215036", "0.61717916", "0.61417174", "0.6101497", "0.6063292", "0.60463053", "0.60456073", "0.6041744", "0.6031684", "0.6005388", "0.5979402", "0.597933", "0.5972569", "0.59708035", "0.59692883", "0.5963826", "0.59620446", "0.59460896", "0.59414864", "0.5932652", "0.5926716", "0.59184235", "0.5903188", "0.5898054", "0.58970535", "0.58936113", "0.58917385", "0.5885329", "0.58833736", "0.58807987", "0.58802235", "0.58672124", "0.5859648", "0.58536327", "0.58529246", "0.58453894", "0.5844126", "0.58399993", "0.5838823", "0.58367425", "0.5836112", "0.58351225", "0.58251387", "0.58232814", "0.5822989", "0.5822142", "0.582097", "0.58182204", "0.581134", "0.58109826", "0.5810785", "0.5805183", "0.5803146", "0.5801181", "0.5795799", "0.57944995", "0.57910204", "0.5787358", "0.57821345", "0.5780287", "0.57786494", "0.57759905", "0.5774627", "0.5771135", "0.57685083", "0.5765379", "0.57648754", "0.57648754", "0.5763753", "0.57601607", "0.5744884", "0.57388926", "0.5735801", "0.57357985", "0.5726009", "0.5714862", "0.5709864", "0.5703266", "0.5700576", "0.56995773", "0.5699294", "0.5695917", "0.5693792", "0.5692676", "0.56923306", "0.5690117", "0.5686732" ]
0.63781524
9
Take as input a Keras ImageGen (Iterator) and generate random crops from the image batches generated by the original iterator.
def random_crop_generator(batches, crop_length):
    while True:
        batch_x, batch_y = next(batches)
        batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))
        for i in range(batch_x.shape[0]):
            batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))
        yield (batch_crops, batch_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)", "def flow(self, batch_size=32, output='both', crops=0):\n while True:\n for dataset in self.input_sets:\n X = self.training_set['input/'+dataset]\n y = self.training_set['target/'+dataset]\n y_seg = self.training_set['seg_map/'+dataset]\n\n for i in range(int(math.ceil(X.shape[0]/2000))):\n index = list(range(0,X.shape[0]))\n sample = random.sample(index, batch_size)\n sample.sort()\n X_batch = X[sample, ...]\n y_batch = y[sample, ...]\n y_seg_batch = y_seg[sample, ...]\n X_batch = self.augment(X_batch)\n\n if crops > 0:\n (X_batch, y_batch,\n y_seg_batch) = _augmentors.random_crops(\n X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20)\n\n if output=='both':\n yield (X_batch, [y_batch, y_seg_batch])\n elif output=='seg':\n yield (X_batch, y_seg)\n elif output=='density':\n yield (X_batch, y_batch)\n else:\n raise Exception('output must be \"density\", \"seg\" or \"both\"')", "def patchGenerator(gen, patch_size=128, patch_batch_size=1):\n \n for imgs, masks in gen: # For each batch\n img_list = []\n mask_list = []\n for i in range(0, imgs.shape[0]): # For each image in a batch\n patch_x = patchify(imgs[i], (patch_size, patch_size, imgs[i].shape[-1]), step=patch_size) # split image into 4*4 small 128*128 patches.\n img_p = patch_x.reshape(-1, *patch_x.shape[-3:])\n img_list.append(img_p)\n\n mask_y = patchify(masks[i], (patch_size, patch_size, 1), step=patch_size) # split mask into 4*4 small 128*128 patches.\n mask_p = mask_y.reshape(-1, *mask_y.shape[-3:])\n mask_list.append(mask_p)\n \n if (patch_batch_size == 1):\n for j in range(0, img_p.shape[0]): # For each patch in a image\n yield img_p[j][np.newaxis, :], mask_p[j][np.newaxis, :]\n \n if (patch_batch_size > 1):\n image_patches = np.concatenate(img_list)\n mask_patches = np.concatenate(mask_list)\n patch_batch_counter = 0\n for idx in range(0, patch_batch_size):\n image_patch_batch = image_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n mask_patch_batch = mask_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n shuffled_images, shuffled_masks = randomize(image_patch_batch, mask_patch_batch)\n yield shuffled_images, shuffled_masks", "def train_batches_with_generated_images(gen, x_train_input, X_train, disc_batch_size):\n # output of gen is input of disc\n image_input_batch = x_train_input[np.random.randint(0, X_train.shape[0], size=disc_batch_size), :, :, :]\n noise_X = np.random.uniform(0, 1, size=[disc_batch_size, 100])\n generated_images = gen.predict([image_input_batch, noise_X])\n train_batches(0)", "def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n # append center image\n name = 'Sample_data/IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n correction = 0.30 # shift angle commands\n # append left camera image\n left_angle = center_angle + correction\n lname = 
'Sample_data/IMG/'+batch_sample[1].split('/')[-1]\n left_image = cv2.imread(lname)\n images.append(left_image)\n angles.append(left_angle)\n \n # append right camera image\n right_angle = center_angle + correction\n rname = 'Sample_data/IMG/'+batch_sample[1].split('/')[-1]\n right_image = cv2.imread(rname)\n images.append(right_image)\n angles.append(right_angle)\n\n # flip image to augment data\n Nsample = len(angles)\n for i in range(len(angles)):\n images.append(np.fliplr(images[i]))\n angles.append(-angles[i])\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)", "def trainingBatchGenerator(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "def batch_generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n while True: # Loop forever so the generator never terminates\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples.iterrows():\n batch_sample = batch_sample[1]\n name = DATA_PATH + '/IMG/'+batch_sample['center'].split('/')[-1]\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n center_angle = float(batch_sample['steering'])\n images.append(center_image)\n angles.append(np.clip(center_angle,-1,1))\n if is_training:\n # Center Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip(center_angle*-1.0,-1,1))\n # Left\n name = DATA_PATH + '/IMG/'+batch_sample['left'].split('/')[-1]\n correction = 0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Left Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n # Right\n name = DATA_PATH + '/IMG/'+batch_sample['right'].split('/')[-1]\n correction = -0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Right Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)", "def my_generator(batch_size, img_dir):\n 
cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size", "def generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n\n #vertical, horizontal range for random translation\n x_translate_range = 100\n y_translate_range = 10\n\n while 1: # Loop forever so the generator never terminates\n #shuffle the samples once the whole data is processed into batches\n shuffle(samples)\n #split data into batches\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n # corrections for centered view image, left camera view image and right camera view image\n corrections = [0,0.2,-0.2]\n # iterate over center, right and left camera view images\n for i in range(3):\n current_path = get_image_path(batch_sample[i])\n\n # read image\n image = cv2.imread(current_path)\n # append image for training/validation\n images.append(preprocess(image))\n\n # calculate angle measurement with applied angle corrections\n measurement = float(batch_sample[3]) + corrections[i]\n angles.append(measurement)\n\n # insert flipped image for opposite direction generalization\n images.append(preprocess(cv2.flip(image, 1)))\n angles.append(measurement*-1.0)\n\n # create random augmented image only for training\n if is_training:\n image, measurement = flip_image(image, measurement, flip_probability=0.5)\n image = add_salt_pepper_noise(image)\n image, measurement = random_translate(image, measurement, x_translate_range, y_translate_range)\n image = random_shadow(image)\n image = random_brightness(image)\n images.append(preprocess(image))\n angles.append(measurement)\n\n # create X, y dataset\n X_train = np.array(images)\n y_train = np.array(angles)\n\n yield sklearn.utils.shuffle(X_train, y_train)", "def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]", "def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: 
# Loop forever so the generator never terminates\n samples = sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n #Because the file path in two folders are different, a if-else is needed.\n if len(batch_sample[0].split('/')) == 2:\n name = './data/IMG/'+batch_sample[0].split('/')[-1]\n else:\n name =batch_sample[0]\n originalImage = cv2.imread(name)\n image = cv2.cvtColor(originalImage, cv2.COLOR_BGR2RGB)\n images.append(image)\n measurement = float(line[3])\n angles.append(measurement)\n \n # Flipping\n images.append(cv2.flip(image,1))\n angles.append(measurement*(-1.0))\n\n # trim image to only see section with road\n inputs = np.array(images)\n outputs = np.array(angles)\n yield sklearn.utils.shuffle(inputs, outputs)", "def generator(array, batch_size):\n start = 0 # pointer to where we are in iteration\n while True:\n stop = start + batch_size\n diff = stop - array.shape[0]\n if diff <= 0:\n batch = array[start:stop]\n start += batch_size\n else:\n batch = np.concatenate((array[start:], array[:diff]))\n start = diff\n batch = batch.astype(np.float32) / 255.0 # normalize pixel intensities\n batch = np.random.binomial(1, batch) # binarize images\n yield batch", "def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n path, angle, flip = batch_sample\n image = cv2.imread(path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB\n images.append(flip_image(image) if flip else image)\n angles.append(angle)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)", "def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n", "def 
batch_generator(batch_size):\n # Randomly shuffle the order of the files in directory\n files = glob.glob(os.path.join(data_dir, pattern))\n np.random.shuffle(files)\n n_files = len(files)\n\n for batch_num in range(0, n_files, batch_size):\n batch = []\n\n for img_file in files[batch_num:batch_num+batch_size]:\n # Load image from file\n img = scipy.misc.imread(img_file)\n\n # -----------\n # BOOKMARK: File preprocessing steps here\n # -----------\n img = scipy.misc.imresize(img, img_shape)\n # -----------\n\n # Append to the batch\n batch.append(img)\n\n # Yield the current batch\n yield np.array(images)", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def test_random_crop(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomCrop(size=(64, 64))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, image.shape[2])\n assert _label.shape == (64, 64, label.shape[2])\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomCrop(size=(64, 64, 8))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, 8, image.shape[3])\n assert _label.shape == (64, 64, 8, label.shape[3])", "def image_generator(df,batch_size,plab,augment=True):\n rng = np.random.RandomState(290615)\n if_train = 1 if plab < 1. 
else 0\n bi,b_list = 0,df.groupby('business_id').apply(get_biz_id,if_train,batch_size)\n b_list = b_list[b_list!=0]\n b_order = rng.permutation(b_list.index)\n pi,p_list = 0, df[df.iloc[:,-1]==0]['photo_id']\n p_order = rng.permutation(p_list.index)\n while True:\n if rng.rand(1)[0] < plab:\n # aggregate biz_id with outdoor-seating\n biz_id_i = b_list.ix[b_order[bi]]\n photo_train = df[df['business_id']==biz_id_i]['photo_id']\n y_batch = np.asarray(df[df['business_id']==biz_id_i].iloc[:,-1])\n # increase/loop indices for next iteration\n if bi < len(b_list)-1:\n bi += 1\n else:\n bi,b_order = 0,rng.permutation(b_list.index)\n else:\n # pic 32 random non-outdoor-seating pictures\n photo_train = p_list[p_order[pi:(pi+batch_size)]]\n y_batch = np.repeat(0, repeats=len(photo_train), axis=0)\n # increase/loop indices for next iteration\n if pi < len(p_list)-1-batch_size:\n pi += batch_size\n else:\n pi,p_order = 0,rng.permutation(p_list.index)\n batch_size_i = len(photo_train)\n # read and augment photos\n X_batch = np.empty((batch_size_i,h,w,ch))\n for i_ in range(batch_size_i):\n f_ = 'data/train_photos/' + str(photo_train.iloc[i_]) + '.jpg'\n im = Image.open(os.path.realpath(f_))\n im_sml = im.resize((w,h))\n # scale inputs [-1,+1]\n xi = np.asarray(im_sml)/128.-1\n if augment:\n # flip coords horizontally (but not vertically)\n if rng.rand(1)[0] > 0.5:\n xi = np.fliplr(xi)\n # rescale slightly within a random range\n jit = w*0.2\n if rng.rand(1)[0] > 0.1:\n xl,xr = rng.uniform(0,jit,1),rng.uniform(w-jit,w,1)\n yu,yd = rng.uniform(0,jit,1),rng.uniform(h-jit,h,1)\n pts1 = np.float32([[xl,yu],[xr,yu],[xl,yd],[xr,yd]])\n pts2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n xi = cv2.warpPerspective(xi,M,(w,h))\n # save individual image to X_batch\n X_batch[i_,:,:,:] = xi\n# plt.imsave('data/aug_%i' % i_,(xi+1)/2);plt.close()\n yield([X_batch],y_batch)", "def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen", "def generator(lines, batch_size=32, augment=False):\n\n num_samples = len(lines)\n while 1: # Loop generator indefinitely\n shuffle(lines) # Shuffle data between epochs\n for offset in range(0, num_samples, batch_size):\n batch_samples = lines[offset: offset + batch_size]\n\n images = []\n steer_angles = []\n\n for line in batch_samples:\n image = ndimage.imread(line[0])\n steer_angle = line[1]\n\n # Apply data augmentation as necessary\n if augment:\n image, steer_angle = random_horizontal_flip(\n image, steer_angle)\n # image, steer_angle = random_all(image, steer_angle)\n # image = random_shadows(image)\n # image = 
random_gaussian(image)\n\n images.append(image)\n steer_angles.append(steer_angle)\n\n # Convert lists to numpy arrays for use with Keras\n X_data = np.array(images)\n y_data = np.array(steer_angles)\n\n yield shuffle(X_data, y_data)", "def my_generator(batch_size, img_dir):\n\timage_filenames = glob.glob(img_dir + \"/*\")\n\tcounter = 0\n\twhile True:\n\t\tbw_images = np.zeros((batch_size, config.width, config.height))\n\t\tcolor_images = np.zeros((batch_size, config.width, config.height, 3))\n\t\trandom.shuffle(image_filenames) \n\t\tif ((counter+1)*batch_size>=len(image_filenames)):\n\t\t\t counter = 0\n\t\tfor i in range(batch_size):\n\t\t\t img = Image.open(image_filenames[counter + i]).resize((config.width, config.height))\n\t\t\t color_images[i] = np.array(img)\n\t\t\t bw_images[i] = np.array(img.convert('L'))\n\t\tyield (bw_images, color_images)\n\t\tcounter += batch_size", "def batch_generator(data, batch_size):\r\n data = np.array(data)\r\n n_batches = int(np.ceil(len(data) / float(batch_size)))\r\n \r\n idx = np.random.permutation(len(data))\r\n data_shuffled = data[idx]\r\n \r\n for i in range(n_batches):\r\n start = i * batch_size\r\n end = start + batch_size\r\n\r\n batch = data_shuffled[start:end]\r\n if len(batch) < batch_size:\r\n # Pad with zeros \r\n pad = np.zeros((batch_size - batch.shape[0], batch.shape[1]),\r\n dtype=batch.dtype)\r\n batch = np.vstack((batch, pad))\r\n\r\n yield batch", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras", "def _generate_crop_images(\n crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None\n):\n cropped_images = []\n 
total_points_per_crop = []\n for i, crop_box in enumerate(crop_boxes):\n left, top, right, bottom = crop_box\n\n channel_dim = infer_channel_dimension_format(image, input_data_format)\n if channel_dim == ChannelDimension.LAST:\n cropped_im = image[top:bottom, left:right, :]\n else:\n cropped_im = image[:, top:bottom, left:right]\n\n cropped_images.append(cropped_im)\n\n cropped_im_size = get_image_size(cropped_im, channel_dim)\n points_scale = np.array(cropped_im_size)[None, ::-1]\n\n points = points_grid[layer_idxs[i]] * points_scale\n normalized_points = _normalize_coordinates(target_size, points, original_size)\n total_points_per_crop.append(normalized_points)\n\n return cropped_images, total_points_per_crop", "def _get_next_minibatch(self):\n images = np.zeros((self._batch_size, 3, self._crop_h, self._crop_w), dtype=np.float32)\n masks = np.zeros((self._batch_size, 1, self._crop_h, self._crop_w), dtype=np.float32)\n\n shuffled_batch = np.arange(self._batch_size)\n np.random.shuffle(shuffled_batch)\n for batch_index in shuffled_batch:\n blob_queue = self._blob_queue.get()\n images[batch_index, :, :, :] = blob_queue[0]\n masks[batch_index, :, :, :] = blob_queue[1]\n\n return [images, masks]", "def __call__(self, batch_size=20, shuffle=True, augment=True):\r\n\r\n if batch_size < 1:\r\n raise ValueError(\"batch_size must be more than 1.\")\r\n if shuffle:\r\n self.shuffle()\r\n\r\n for start in range(0, self.length, batch_size):\r\n batch = self.perm(start, start+batch_size)\r\n if augment:\r\n assert self._augmenter is not None, \"you have to set an augmenter.\"\r\n yield self._augmenter.augment_dataset(batch, method=[ia.ImageAugmenter.NONE, ia.ImageAugmenter.FLIP])\r\n else:\r\n yield batch", "def gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn", "def generate_batch(X_train, y_train, batch_size=64):\r\n images = np.zeros((batch_size, 66, 200, 3), dtype=np.float32)\r\n angles = np.zeros((batch_size,), dtype=np.float32)\r\n while True:\r\n straight_count = 0\r\n for i in range(batch_size):\r\n # Select a random index to use for data sample\r\n sample_index = random.randrange(len(X_train))\r\n image_index = random.randrange(len(X_train[0]))\r\n angle = 
y_train[sample_index][image_index]\r\n # Limit angles of less than absolute value of .1 to no more than 1/2 of data\r\n # to reduce bias of car driving straight\r\n if abs(angle) < .1:\r\n straight_count += 1\r\n if straight_count > (batch_size * .5):\r\n while abs(y_train[sample_index][image_index]) < .1:\r\n sample_index = random.randrange(len(X_train))\r\n # Read image in from directory, process, and convert to numpy array\r\n image = cv2.imread('data/' + str(X_train[sample_index][image_index]))\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n image = process_image(image)\r\n image = np.array(image, dtype=np.float32)\r\n # Flip image and apply opposite angle 50% of the time\r\n if random.randrange(2) == 1:\r\n image = cv2.flip(image, 1)\r\n angle = -angle\r\n images[i] = image\r\n angles[i] = angle\r\n yield images, angles", "def get_batches(dirname,\n gen=keras.preprocessing.image.ImageDataGenerator(),\n shuffle=True,\n batch_size=1,\n target_size=(224, 224),\n class_mode=\"categorical\"):\n return gen.flow_from_directory(dirname,\n shuffle=shuffle,\n batch_size=batch_size,\n target_size=target_size,\n class_mode=class_mode)", "def data_generator(dataset, config, shuffle=True, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n diverse=0, no_augmentation_sources=None):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources: augmentation = None\n image, image_meta, gt_class_ids, gt_class_ids2, gt_boxes, gt_rboxes, gt_global_mask, \\\n gt_masks, gt_mask_score, gt_text_embeds, gt_embed_lengths = load_image_gt(dataset, config, image_id,\n augmentation=augmentation)\n\n \n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # Use only positive class_ids\n categories = np.unique(gt_class_ids)\n _idx = categories > 0\n categories = categories[_idx]\n \n if config.MODEL == \"smrcnn\":\n # Use only active classes\n active_categories = []\n for c in categories:\n if any(c == dataset.ACTIVE_CLASSES):\n active_categories.append(c)\n \n # Skiop image if it contains no instance of any active class \n if not np.any(np.array(active_categories) > 0):\n continue\n # Randomly select category\n category = np.random.choice(active_categories)\n \n # NOTE for siamese\n # Generate siamese target crop\n targets = []\n for i in range(config.NUM_TARGETS):\n targets.append(get_one_target(category, dataset, config, augmentation=augmentation))\n # target = np.stack(target, axis=0)\n \n # print(target_class_id)\n target_class_id = category\n target_class_ids = np.array([target_class_id])\n \n idx = gt_class_ids == target_class_id\n siamese_class_ids = idx.astype('int8')\n # print(idx)\n # print(gt_boxes.shape, gt_masks.shape)\n siamese_class_ids = siamese_class_ids[idx]\n gt_class_ids = gt_class_ids[idx]\n gt_boxes = gt_boxes[idx,:]\n gt_masks = gt_masks[:,:,idx]\n image_meta = image_meta[:15] # TODO\n # --------------------------------------------------------------\n\n # RPN Targets\n # if rpn have muiltple label, rewrite here\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_class_ids2, mrcnn_bbox, mrcnn_rbbox, mrcnn_mask,\\\n mrcnn_text_embeds, mrcnn_embed_lengths = build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_rboxes, gt_masks, gt_mask_score, gt_class_ids2, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros((batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros([batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros([batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros((batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_rboxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 5), dtype=np.float32)\n if config.MODEL == \"smrcnn\":\n batch_targets = np.zeros((batch_size, config.NUM_TARGETS) + targets[0].shape, dtype=np.float32)\n batch_gt_masks = np.zeros((batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n batch_gt_class_ids2 = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_text_embeds = np.zeros((batch_size, config.MAX_GT_INSTANCES, config.MAX_LABEL_LENGTH), dtype=np.int32)\n batch_gt_embed_lengths = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n if random_rois:\n batch_rpn_rois = np.zeros((batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros((batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros((batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n \n # ************************* NOTE for 2 label dataset\n if 
config.NUM_CLASSES2 > 2:\n batch_mrcnn_class_ids2 = np.zeros(\n (batch_size,) + mrcnn_class_ids2.shape, dtype=mrcnn_class_ids.dtype)\n # ************************* NOTE for ocr\n if config.READ:\n batch_mrcnn_text_embeds = np.zeros(\n (batch_size,) + mrcnn_text_embeds.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_embed_lengths = np.zeros(\n (batch_size,) + mrcnn_embed_lengths.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_bbox = np.zeros((batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_rbbox = np.zeros((batch_size,) + mrcnn_rbbox.shape, dtype=mrcnn_rbbox.dtype)\n batch_mrcnn_mask = np.zeros((batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n siamese_class_ids = siamese_class_ids[ids] # NOTE\n gt_boxes = gt_boxes[ids]\n gt_rboxes = gt_rboxes[ids]\n gt_masks = gt_masks[:, :, ids]\n gt_class_ids2 = gt_class_ids2[ids]\n gt_text_embeds = gt_text_embeds[ids]\n gt_embed_lengths = gt_embed_lengths[ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n batch_targets[b] = np.stack([mold_image(target.astype(np.float32), config) for target in targets], axis=0)\n batch_gt_class_ids[b, :siamese_class_ids.shape[0]] = siamese_class_ids\n else:\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_rboxes[b, :gt_rboxes.shape[0]] = gt_rboxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n batch_gt_class_ids2[b, :gt_class_ids2.shape[0]] = gt_class_ids2\n batch_gt_text_embeds[b, :gt_text_embeds.shape[0], :gt_text_embeds.shape[1]] = gt_text_embeds\n batch_gt_embed_lengths[b, :gt_embed_lengths.shape[0]] = gt_embed_lengths\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_rbbox[b] = mrcnn_rbbox\n batch_mrcnn_mask[b] = mrcnn_mask\n batch_mrcnn_class_ids2[b] = mrcnn_class_ids2\n batch_mrcnn_text_embeds[b] = mrcnn_text_embeds\n batch_mrcnn_embed_lengths[b] = mrcnn_embed_lengths\n b += 1\n # Batch full?\n if b >= batch_size:\n \n\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n inputs = [batch_images, batch_image_meta, batch_targets, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n else:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n outputs = []\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1) \n \n # ************************* NOTE for 2 label dataset\n # ************************* NOTE for ocr\n if config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = 
np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and not config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif not config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and not config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask])\n elif not config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(f\"Error processing image {dataset.image_info[image_id]}\")\n error_count += 1\n if error_count > 5:\n raise", "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, batch_size=1):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. 
Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_class_ids = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, gt_class_ids] = 1\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_class_ids]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "def rand_crop_whole_ct(image, label, res_s, out_s,\n apply_data_aug, augment_times=2):\n if image.shape != (res_s, res_s, res_s) or \\\n label.shape != (res_s, res_s, res_s):\n logging.info(\"Unexpected shapes. \"\n \"image.shape: %s, label.shape: %s\",\n image.shape, label.shape)\n return\n\n if not apply_data_aug:\n # Do not augment data.\n idx = (res_s - out_s) // 2\n image = image[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]\n label = label[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]\n yield image, label\n else:\n cut = res_s - out_s\n for _ in range(augment_times):\n for i in [0, cut // 2, cut]:\n for j in [0, cut // 2, cut]:\n for k in [0, cut // 2, cut]:\n image_aug = image[i:i + out_s, j:j + out_s, k:k + out_s].copy()\n label_aug = label[i:i + out_s, j:j + out_s, k:k + out_s].copy()\n image_aug = intensity_change(image_aug)\n yield image_aug, label_aug", "def gen_imgs(samples, batch_size, shuffle=False):\n \n num_samples = len(samples)\n print(num_samples)\n \n \n images = []\n \n for _, batch_sample in batch_samples.iterrows():\n \n with openslide.open_slide(batch_sample.slide_path) as slide:\n tiles = DeepZoomGenerator(slide, tile_size=224, overlap=0, limit_bounds=False)\n print(batch_sample.tile_loc[::], batch_sample.tile_loc[::-1])\n img = tiles.get_tile(tiles.level_count-1, batch_sample.tile_loc[::-1])\n \n \n images.append(np.array(img))\n\n X_train = np.array(images)\n \n yield X_train", "def sample_images(batches_done):\n imgs = next(iter(val_dataloader))\n G_AB.eval()\n G_BA.eval()\n real_A = Variable(imgs[\"A\"].type(Tensor))\n fake_B = G_AB(real_A)\n real_B = Variable(imgs[\"B\"].type(Tensor))\n fake_A = G_BA(real_B)\n # Arrange images along x-axis\n real_A = make_grid(real_A[:3,:,:,:], nrow=3, normalize=True)\n real_B = make_grid(real_B[:3,:,:,:], nrow=3, normalize=True)\n fake_A = make_grid(fake_A[:3,:,:,:], nrow=3, normalize=True)\n fake_B = make_grid(fake_B[:3,:,:,:], nrow=3, normalize=True)\n # Arrange images along y-axis\n image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)\n save_image(image_grid, \"/content/drive/MyDrive/Night2Day/images/generated_4/%s.png\" % (batches_done), normalize=False)", "def __init__(self, dataset, width=512, height=512, pictures=10, generate_classes=True, generate_objects=True):\n super().__init__(dataset)\n\n cropper = Cropper(width=width, height=height)\n\n dir_name = \"tmp-data-{}x{}-from-{}-pictures\".format(width, height, pictures)\n origins = 
os.path.join(dir_name, \"origins\")\n classes = os.path.join(dir_name, \"classes\")\n origins_classes_v_join = os.path.join(dir_name, \"origin-classes-v-join\")\n objects = os.path.join(dir_name, \"objects\")\n origins_objects_v_join = os.path.join(dir_name, \"origin-objects-v-join\")\n\n if not os.path.exists(origins):\n os.makedirs(origins)\n\n trains = self.get_iterable_trains()\n vals = self.get_iterable_evals()\n\n selection_set = []\n for _, val in enumerate(trains):\n origin, class_v, object_v = self.get_train_triple(val)\n selection_set.append((origin, class_v, object_v))\n for _, val in enumerate(vals):\n origin, class_v, object_v = self.get_val_triple(val)\n selection_set.append((origin, class_v, object_v))\n\n final_set = random.sample(selection_set, pictures)\n\n if generate_classes:\n if not os.path.exists(classes):\n os.makedirs(classes)\n if not os.path.exists(origins_classes_v_join):\n os.makedirs(origins_classes_v_join)\n\n if generate_objects:\n if not os.path.exists(objects):\n os.makedirs(objects)\n if not os.path.exists(origins_objects_v_join):\n os.makedirs(origins_objects_v_join)\n\n for _, (origin, class_v, object_v) in enumerate(final_set):\n print(\"Processing {}, {}, {}\".format(origin, class_v, object_v))\n cropper.set_imgs(origin, class_v, object_v, add_randomly=5)\n counter = 1\n while not cropper.is_finished:\n origin_i, class_i, object_i = cropper.next_crop()\n # Check that classes are involved\n finded = False\n for l in class_i:\n for pix in l:\n for c in pix:\n if c != 0:\n finded = True\n break\n if finded:\n break\n if finded:\n break\n if not finded:\n continue\n path = \"{}-{}.png\".format(get_origin_name(origin), counter)\n # print(\"Writing: {}\".format(os.path.join(origins, path)))\n cv2.imwrite(os.path.join(origins, path), origin_i)\n if generate_classes:\n cv2.imwrite(os.path.join(classes, path), class_i)\n cv2.imwrite(os.path.join(origins_classes_v_join, path), cv2.hconcat([origin_i, class_i]))\n if generate_objects:\n cv2.imwrite(os.path.join(objects, path), object_i)\n cv2.imwrite(os.path.join(origins_objects_v_join, path), cv2.hconcat([origin_i, object_i]))\n counter += 1\n\n print(\"Generating of {}-pictures-subset done. 
You find it in: {}\".format(pictures, dir_name))", "def random_crop(img, mask):\n if str(img.dtype) != 'uint8':\n img = (img * 255).astype(np.uint8)\n if str(mask.dtype) != 'uint8':\n mask = (mask * 255).astype(np.uint8)\n img = Image.fromarray(img)\n mask = Image.fromarray(mask)\n x, y = img.size\n matrix = 256\n img_list = []\n label_list = []\n for i in range(CROP_NUM):\n x1 = randrange(0, x - matrix)\n y1 = randrange(0, y - matrix)\n img_list.append(img.crop((x1, y1, x1 + matrix, y1 + matrix)))\n label_list.append(mask.crop((x1, y1, x1 + matrix, y1 + matrix)))\n\n return img_list, label_list", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def _yield_testing(self, batch_index):\n samples_start = batch_index % self.num_samples\n samples_end = (batch_index+1) % self.num_samples\n if samples_start < samples_end:\n batch_samples = self.test_data[samples_start:samples_end]\n else:\n batch_samples = self.test_data[samples_start:]\n batch_samples.extend(self.test_data[:samples_end])\n images = []\n rois = []\n for sample in batch_samples:\n # 'sample' has this structure:\n # {path: {\n # 'roi_origin_x': 
test_sample[1]['roi_origin_x'],\n # 'roi_origin_y': test_sample[1]['roi_origin_y'],\n # 'roi_width': test_sample[1]['roi_width'],\n # 'roi_height': test_sample[1]['roi_height'] \n # } \n # }\n img_path = os.path.join(self.dataset_root_path, list(sample.keys())[0])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n continue\n roi_data = list(sample.values())[0]\n roi = {\n 'upper_left_x': roi_data['roi_origin_x'],\n 'upper_left_y': roi_data['roi_origin_y'],\n 'width': roi_data['roi_width'],\n 'height': roi_data['roi_height']\n }\n img = img.astype('float32')\n images.append(img)\n rois.append(roi)\n return images, rois", "def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + self.INPUT_DIM, y_start + self.INPUT_DIM))", "def get_batch_gen(self, split, config):\n config.augment_scale_anisotropic = True\n config.augment_scale_min = 0.9\n config.augment_scale_max = 1.1\n config.augment_noise = 0.001\n config.augment_color = 1.0\n config.augment_rotation = 'vertical'\n\n if split == 'training':\n config.augment_symmetries = [True, False, False]\n else:\n config.augment_symmetries = [False, False, False]\n\n if split == 'training':\n epoch_n = config.epoch_steps * config.batch_size\n elif split == 'validation':\n epoch_n = config.validation_size * config.batch_size\n elif split == 'test':\n epoch_n = config.validation_size * config.batch_size\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Initiate potentials for regular generation\n if not hasattr(self, 'potentials'):\n self.potentials = {}\n self.min_potentials = {}\n\n data_split = split\n\n # Reset potentials\n def reset_potentials():\n self.potentials[split] = []\n self.min_potentials[split] = []\n\n for i, tree in enumerate(self.input_trees[data_split]):\n self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]\n\n reset_potentials()\n\n def spatially_regular_gen():\n for i in range(epoch_n):\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)\n center_point = points[point_ind, :].reshape(1, -1)\n noise = np.random.normal(scale=0.35, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n if config.in_radius > 0:\n input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0]\n else:\n buffer = self.buffer+np.random.randint(0,self.buffer//4)\n if len(points) < self.npoint+buffer:\n input_inds = self.input_trees[split][cloud_ind].query(pick_point, k=len(points))[1][0]\n else:\n input_inds = self.input_trees[split][cloud_ind].query(pick_point, k=self.npoint+buffer)[1][0]\n\n input_inds = self.shuffle_idx(input_inds)\n input_inds = input_inds[:self.npoint]\n\n # Number collected\n n = input_inds.shape[0]\n if n == 0:\n # Reset potentials\n reset_potentials()\n return\n # Safe check for very dense areas\n\n # Update potentials\n dists = np.sum(np.square((points[input_inds] - 
pick_point).astype(np.float32)), axis=1)\n delta = np.square(1 - dists / np.max(dists))\n self.potentials[split][cloud_ind][input_inds] += delta\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[data_split][cloud_ind][input_inds]\n\n if split == 'test':\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[data_split][cloud_ind][input_inds]\n input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n if split in ['test', 'validation']:\n label_weights = np.zeros(input_points.shape[0])\n else:\n label_weights = self.label_weights[input_labels]\n\n if len(input_inds) < self.npoint:\n input_points, input_colors, input_inds, label_weights, input_labels = \\\n self.data_rep(input_points, input_colors, input_labels, input_inds, label_weights, self.npoint)\n\n # Add yield data\n if n > 0:\n yield input_points, np.hstack((input_colors, input_points + pick_point)), input_labels, \\\n [input_points.shape[0]], input_inds, cloud_ind, label_weights\n\n # Define the generator that should be used for this split\n gen_func = spatially_regular_gen\n\n # Define generated types and shapes\n gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32)\n gen_shapes = ([self.npoint, 3], [self.npoint, 6], [self.npoint], [1], [self.npoint], [], [self.npoint])\n\n return gen_func, gen_types, gen_shapes", "def build_generators(folder_path, train_batch_size, val_batch_size, height, width):\n train_image_generator, train_mask_generator = create_train_generator(folder_path,\n train_batch_size,\n (height, width),\n preprocessing_masks)\n val_image_generator, val_mask_generator = create_validation_generator(folder_path,\n val_batch_size,\n (height, width),\n preprocessing_masks)\n my_train_generator = my_image_mask_generator(train_image_generator, train_mask_generator)\n my_val_generator = my_image_mask_generator(val_image_generator, val_mask_generator)\n\n return my_train_generator, my_val_generator", "def image_batch_generator(img_paths, model, batch_size, features=True):\n while True:\n ig = image_generator(img_paths)\n batch_img, batch_features, batch_labels = [], [], []\n\n for img, lab in ig:\n # Add the image and mask to the batch\n if features:\n img = np.expand_dims(img, 0)\n img_embedding = model.predict(img)\n batch_features.append(img_embedding)\n batch_img.append(img)\n batch_labels.append(lab)\n # If we've reached our batchsize, yield the batch and reset\n if len(batch_img) == batch_size:\n yield batch_img, batch_features, batch_labels\n batch_img, batch_features, batch_labels = [], [], []\n\n # If we have an nonempty batch left, yield it out and reset\n if len(batch_img) != 0:\n yield np.stack(batch_img, axis=1), np.array(batch_features), batch_labels\n batch_img, batch_features, batch_labels = [], [], []", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. 
We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def get_batches_fn(batch_size):\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def generator(samples, bias=0.5, batch_size=CONFIG['batch_size']):\n correction = CONFIG['correction']\n num_samples = len(samples)\n while 1:\n samples = shuffle(samples)\n X_train = np.zeros(shape=(batch_size, 160, 320, 3), dtype=np.float32)\n y_train = np.zeros(shape=(batch_size,), dtype=np.float32)\n img_count = 0\n for batch_sample in samples:\n center_angle = float(batch_sample[3])\n if abs(center_angle) + bias < np.random.rand():\n continue # for training balance\n index = random.choice([0,1,2]) # randomly select front, left or right\n if index == 0:\n angle = center_angle\n elif index == 1:\n angle = center_angle + correction\n else:\n angle = center_angle - correction\n img_name = './IMG/' + batch_sample[index].split('/')[-1]\n img = cv2.imread(img_name)\n if random.choice([True, False]):\n img = cv2.flip(img,1)\n angle = -angle\n angle += np.random.normal(loc=0, scale=CONFIG['steer_sigma'])\n\n X_train[img_count] = img\n y_train[img_count] = angle\n img_count += 1\n if img_count == batch_size:\n yield X_train, y_train\n break", "def get_crops(x_train, y_train, offset=4):\n\ttopleft = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, offset, offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\ttopright = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, 4 - offset, offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotleft = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, offset, 4 - offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotright = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, 4 - offset, 4 - offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tcenter = iaa.Sequential([\n\t\tiaa.Crop(px=(2, 2, 2, 2)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\taugs = [topleft, topright, botleft, botright, center]\n\n\taug_imgs = []\n\tfor aug in tqdm(augs):\n\t\taug_imgs.append(aug.augment_images(x_train * 255))\n\n\taug_x_train = [item for 
sublist in aug_imgs for item in sublist]\n\taug_y_train = y_train * 5\n\n\treturn aug_x_train, aug_y_train", "def make_generators():\n \n # All images will be rescaled by 1./255\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n \n train_generator = train_datagen.flow_from_directory(\n TRAIN_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'sparse')\n\n validation_generator = test_datagen.flow_from_directory(\n VAL_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'sparse')\n\n return train_generator, validation_generator", "def batch_generator(Dataset, batch_size, shuffle=True, repeat = 1, ignore_class = 255):\n\n \"\"\"\n Args : \n Dataset (class) : dataset class defined in cityscapes.py. \n batch_size (int) : batch size \n shuffle (bool) : shuffle dataset order \n ignore_class (int) : class number to be ignored \n\n Return : \n images (np.array) : images \n labels (np.array) : labels array in 2d \n \n \"\"\"\n \n idx_dataset = list(range(len(Dataset)))\n idx_dataset = idx_dataset*repeat\n \n\n if shuffle :\n from random import shuffle\n shuffle(idx_dataset)\n\n for idx in range(len(idx_dataset)//batch_size):\n \n imgs_to_stack = []\n labels_to_stack = []\n\n for _data_idx in range(idx*batch_size, (idx+1)*batch_size):\n data_idx = idx_dataset[_data_idx]\n image, label = load_image_train(Dataset[data_idx])\n imgs_to_stack.append(image)\n labels_to_stack.append(label)\n \n images = tf.stack(imgs_to_stack)\n labels = tf.stack(labels_to_stack)\n\n if ignore_class : \n idx_to_ignore = labels!=ignore_class\n labels = tf.where(idx_to_ignore, labels, 0)\n\n yield (images, labels)", "def get_batches(path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):\n return gen.flow_from_directory(path,\n target_size=(ROWS, COLS),\n class_mode=class_mode,\n shuffle=shuffle,\n batch_size=batch_size)", "def generate_observations(self, eval_mode, augment_frames=None):\n episode_idx = 0\n augment_frames = (\n augment_frames if augment_frames is not None\n else self._augment_frames and not eval_mode)\n for t, obs in enumerate(self._flat_observations):\n if augment_frames:\n obs = image_utils.random_crop_image(obs)\n if self._split_by_episodes:\n yield obs, episode_idx\n else:\n yield obs\n if self._is_terminal[t]:\n episode_idx += 1", "def pklbatcher(inputs, targets, batch_size, shuffle=False, augment=False,\n img_shape=(321, 481, 3)):\n assert len(inputs) == len(targets)\n indices = inputs.keys()\n if shuffle:\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batch_size]\n else:\n excerpt = indices[start_idx:start_idx + batch_size]\n # Data augmentation\n im = []\n targ = []\n for i in range(len(excerpt)):\n img = inputs[excerpt[i]]['x']\n tg = targets[excerpt[i]]['y'] > 2\n if augment:\n # We use shuffle as a proxy for training\n if shuffle:\n img, tg = bsd_preprocess(img, tg)\n im.append(img)\n targ.append(tg)\n im = np.stack(im, axis=0)\n targ = np.stack(targ, axis=0)\n yield im, targ, excerpt", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != 
DataType.test:\n x_batch = SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def batch_hard_generator(self, data_frames, mode, batch_size, samples_per_class=8):\n data_dir = os.path.join(self.root_dir, VeRiDataset.TRAIN_DIR)\n data_frames['imageName'] = data_frames['imageName'].apply(\n lambda i: os.path.join(data_dir, i))\n groups = data_frames.groupby('vehicleID')\n groups_name = groups.groups.keys()\n num_classes = batch_size // samples_per_class\n while True:\n ids = random.sample(groups_name, num_classes)\n df = pd.concat([groups.get_group(idx).sample(\n samples_per_class, replace=True)\n for idx in ids])\n yield list(df['imageName']), list(df['vehicleID'])", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def s2s_data_generator(s2s_df=duplets, all_catalog=catalog_images, batch_size=None):\n orig_index_list = duplets.index.tolist()\n all_shop_index_list = catalog_images.index.tolist()\n dummy = np.zeros((1, 3 * 
N))\n\n while True:\n \n q_list = list()\n p_list = list()\n n_list = list()\n dummy_list = list()\n \n index_list = copy.copy(orig_index_list)\n \n while len(index_list) > 0:\n\n index = random.choice(index_list)\n product_id = duplets.loc[index, 'product_id']\n \n q_temp = duplets.loc[index, 'street_images']\n q_img = os.path.join(Path, q_temp + '.jpeg')\n \n p_temp = duplets.loc[index, 'shop_images']\n p_img = os.path.join(Path, p_temp + '.jpeg')\n\n while True:\n idx = random.choice(all_shop_index_list)\n prod_idx = catalog_images.loc[idx, 'product_id']\n\n if prod_idx != product_id:\n temp = random.choice(catalog_images.loc[idx, 'shop_images'])\n n_img = os.path.join(Path, temp + '.jpeg')\n\n q_img = os.path.join(Path, q_index + '.jpeg')\n p_img = os.path.join(Path, p_index + '.jpeg')\n n_img = os.path.join(Path, n_index + '.jpeg')\n\n res = bbox_mappings[q_index]\n\n left = res['left']\n top = res['top']\n right = left + res['width']\n bottom = top + res['height']\n\n\n query_img = Image.open(q_img)\n query_crop = query_img.crop((left, top, right, bottom))\n positive_img = Image.open(p_img)\n negative_img = Image.open(n_img)\n\n \n query = np.array(query_crop.resize((300,300), Image.NEAREST))\n positive = np.array(positive_img.resize((300,300), Image.NEAREST))\n negative = np.array(negative_img.resize((300,300), Image.NEAREST))\n \n \n q_list.append(query_array)\n p_list.append(positive_array)\n n_list.append(negative_array)\n dummy_list.append(dummy)\n\n \n index_list.remove(index)\n\n if len(q_list) == batch_size or (len(index_list) == 0 and len(q_list) > 0):\n yield convert_data(q_list, p_list, n_list, dummy_list)\n q_list = list()\n p_list = list()\n n_list = list()\n dummy_list = list()", "def random_batch_generator(size, x, y=None, fixed_size=True, seed=None):\n ndata = x.shape[0]\n indices = range(0, ndata)\n if seed is not None:\n np.random.seed(seed)\n indices = np.random.permutation(indices)\n xr = x[indices]\n yr = y[indices] if y is not None else None\n for items in batch_generator(size, xr, yr, fixed_size):\n yield items", "def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size", "def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, 
validation_generator, count1, count2", "def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size > 0:\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError(\"the length of the indice should be equal to size\")\n indice = indice[list(torch.randperm(int(size),\n generator=g))].tolist()\n extra = int(\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n indice += indice[:extra]\n indices += indice\n\n if not len(indices) == self.total_size:\n raise ValueError(\"the length of the indices should be equal to total_size\")\n\n indices = [\n indices[j] for i in list(\n torch.randperm(\n len(indices) // self.samples_per_gpu, generator=g))\n for j in range(i * self.samples_per_gpu, (i + 1) * self.samples_per_gpu)\n ]\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samplers in subsample\")\n\n return iter(indices)", "def generate_images(self, image_idx, is_training, batch_size=16):\n \n # arrays to store our batched data\n images, ages, races, genders = [], [], [], []\n while True:\n for idx in image_idx:\n person = self.df.iloc[idx]\n \n age = person['age']\n race = person['race_id']\n gender = person['gender_id']\n file = person['file']\n \n im = self.preprocess_image(file)\n \n ages.append(age / self.max_age)\n races.append(to_categorical(race, len(dataset_dict['race_id'])))\n genders.append(to_categorical(gender, len(dataset_dict['gender_id'])))\n images.append(im)\n \n # yielding condition\n if len(images) >= batch_size:\n yield np.array(images), [np.array(ages), np.array(races), np.array(genders)]\n images, ages, races, genders = [], [], [], []\n \n if not is_training:\n break", "def n_random_crop(img, height, width, n):\n crops = []\n img_width, img_height = img.shape\n for i in range(n):\n x = np.random.randint(0, img_width - width)\n y = np.random.randint(0, img_height - height)\n crops.append(img[x:x + height, y:y + width])\n return np.array(crops)", "def image_generator(batch_size, img_dir):\n input_filenames = glob.glob(img_dir + \"/*-in.jpg\")\n counter = 0\n while True:\n small_images = np.zeros(\n (batch_size, config.input_width, config.input_height, 3))\n large_images = np.zeros(\n (batch_size, config.output_width, config.output_height, 3))\n random.shuffle(input_filenames)\n if counter+batch_size >= len(input_filenames):\n counter = 0\n for i in range(batch_size):\n img = input_filenames[counter + i]\n img_in = np.array(Image.open(img))\n img_out = np.array(Image.open(img.replace(\"-in.jpg\", \"-out.jpg\")))\n\n img_in, img_out = _get_augmented_image(img_in, img_out)\n small_images[i] = img_in / 255.\n large_images[i] = img_out / 255.\n\n yield (small_images, large_images)\n counter += batch_size", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def batches(self, batch_size): \n if self.shuffle:\n idx = 
np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)", "def batch_generator(data, batch_size, config, target=False):\n listsize = min(len(data.pilist), 10)\n while 1:\n nrd = np.arange(len(data.pilist))\n np.random.shuffle(nrd)\n for ri in range(len(data.pilist) // listsize):\n cpilist = [data.pilist[nrd[rid]] for rid in range(listsize * ri, listsize * (ri + 1))]\n data.loadpatch(config, cpilist)\n if target:\n data_l = [data.xarray, np.zeros(shape=(len(data.xarray), 1))]\n else:\n data_l = [data.xarray, data.yarray - 1]\n\n for di in range(len(data_l[0])):\n data_l[0][di][:, :, :, 0] = data_l[0][di][:, :, :, 0] / np.max(data_l[0][di][:, :, :, 0])\n\n for repi in range(3):\n data_l = shuffle_aligned_list(data_l)\n\n batch_count = 0\n while True:\n if batch_count * batch_size + batch_size >= len(data_l[0]):\n if len(data.pilist) // listsize != 1:\n print('list end', ri * listsize)\n break\n else:\n batch_count = 0\n\n start = batch_count * batch_size\n end = start + batch_size\n batch_count += 1\n yield [d[start:end] for d in data_l]", "def generate_batch(self, batch_size=8, shuffle=True):\n if self._contour_dicom_folder:\n contour_files = glob(os.path.join(self._contour_dicom_folder, \"*.h5\"))\n if shuffle:\n contour_files = np.random.permutation(contour_files)\n contours_generator = self._contour_folder_gen(contour_files)\n else:\n contours_generator = self._contour_dicom_generator\n\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 0\n for idx, (dataset, sources) in enumerate(contours_generator):\n if batch_idx > 0 and batch_idx % batch_size == 0:\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 0\n try:\n x_data = self._parse_channels(dataset, self.x_channels)\n y_data = self._parse_channels(dataset, self.y_channels)\n x_batch.append(x_data)\n y_batch.append(y_data)\n sources_batch.append(sources)\n batch_idx += 1\n except ValueError:\n # Log Error\n err_msg = \"Missing all channels in {}\".format(sources[\"filename\"])\n self._log_error(err_msg)\n\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)", "def custom_data_generator(img_paths, final_height, final_width):\n for img_path in img_paths:\n image = Image.open(img_path)\n resized_image = image.resize((final_width, final_height), Image.ANTIALIAS) # Image.LANCZOS\n img = np.array(resized_image)\n img = tf.image.convert_image_dtype(img, tf.float32)\n yield img, tf.constant([[]], dtype=tf.float32), tf.constant([], dtype=tf.int32)", "def generate_images(generator_model, output_dir, epoch):\n test_image_stack = generator_model.predict(np.random.normal(size=(10, 100)))\n test_image_stack = (test_image_stack * 255)\n test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))\n tiled_output = tile_images(test_image_stack)\n tiled_output = Image.fromarray(tiled_output)\n outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))\n tiled_output.save(outfile)", "def sample_images(batches_done):\n val_imgs, val_labels = next(iter(val_dataloader))\n val_imgs = Variable(val_imgs.type(Tensor))\n val_labels = Variable(val_labels.type(Tensor))\n img_samples = None\n for i in 
range(10):\n img, label = val_imgs[i], val_labels[i]\n # Repeat for number of label changes\n imgs = img.repeat(c_dim, 1, 1, 1)\n labels = label.repeat(c_dim, 1)\n # Make changes to labels\n for sample_i, changes in enumerate(label_changes):\n for col, val in changes:\n labels[sample_i, col] = 1 - labels[sample_i, col] if val == -1 else val\n\n # Generate translations\n gen_imgs = generator(imgs, labels)\n # Concatenate images by width\n gen_imgs = torch.cat([x for x in gen_imgs.data], -1)\n img_sample = torch.cat((img.data, gen_imgs), -1)\n # Add as row to generated samples\n img_samples = img_sample if img_samples is None else torch.cat((img_samples, img_sample), -2)\n\n save_image(img_samples.view(1, *img_samples.shape), \"images/%s.png\" % batches_done, normalize=True)", "def random_crop(image, gt, crop_height, crop_width, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n height, width = image.shape[:2]\n\n y = random_state.randint(0, height - crop_height)\n x = random_state.randint(0, width - crop_width)\n\n cropped_image = image[y:y + crop_height, x:x + crop_width, :]\n cropped_gt = gt[y:y + crop_height, x:x + crop_height]\n\n cropped_image = cv2.resize(cropped_image, (width, height), interpolation=cv2.INTER_NEAREST)\n cropped_gt = cv2.resize(cropped_gt, (width, height), interpolation=cv2.INTER_NEAREST)\n\n return cropped_image, cropped_gt", "def image_generator(file_paths, init_size=None, normalization=True, antialias=False):\r\n for file_path in file_paths:\r\n if file_path.endswith(\".png\") or file_path.endswith(\".jpg\"):\r\n # open a image\r\n image = Image.open(file_path)\r\n # to square\r\n image = Loader.crop_to_square(image)\r\n # resize by init_size\r\n if init_size is not None and init_size != image.size:\r\n if antialias:\r\n image = image.resize(init_size, Image.ANTIALIAS)\r\n else:\r\n image = image.resize(init_size)\r\n # delete alpha channel\r\n if image.mode == \"RGBA\":\r\n image = image.convert(\"RGB\")\r\n image = np.asarray(image)\r\n if normalization:\r\n image = image / 255.0\r\n yield image", "def get_test_generator(patch_size, batch_size, preprocess_func, output_reshape_func, test_data_dir='data/test/'):\n\n test_paths = util.get_data_list(test_data_dir)\n\n # generate train batch loader\n test_data_loader = CTBatchLoader(test_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func, infinite=False)\n\n # wrapper to be compatible with keras\n return KerasGenerator(test_data_loader, output_reshapefunc=output_reshape_func,\n n=int(len(test_data_loader.indices) / batch_size))", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n #fake_image = [1.0 for _ in xrange(784)]\n fake_image = [1.0 for _ in range(784)]\n fake_label = 0\n #return [fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in xrange(batch_size)]\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def data_generator(self, 
data):\n X, y = [], []\n while 1:\n np.random.shuffle(data)\n for line in data:\n img = Image.open(line[0])\n img = img.resize((32, 16))\n img = np.asarray(img, dtype=np.float32)\n img = img / 128. - 1.\n img = np.transpose(img, (2, 0, 1)) \n X.append(img)\n y.append(line[1])\n if len(X) == self.config.batch_size:\n batch = (np.asarray(X), np.asarray(y))\n X = []\n y = []\n yield batch", "def generate(self):\n z_sample = np.random.normal(0, 1,[self.batch_size, self.n_z])\n gen_images = self.sess.run(self.gen_img, feed_dict={self.z: z_sample})\n return gen_images", "def next_batch(self):\n next_train_index = self.curr_train_index + self.hparams.batch_size\n if next_train_index > self.num_train:\n # Increase epoch number\n epoch = self.epochs + 1\n self.reset()\n self.epochs = epoch\n batched_data = (\n self.train_images[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size],\n self.train_labels[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size])\n final_imgs = []\n images, labels = batched_data\n if self.hparams.augment_type == 'mixup':\n images, labels = augmentation_transforms.mixup_batch(\n images, labels, self.hparams.mixup_alpha)\n elif self.hparams.augment_type == 'image_freq':\n images, labels = augmentation_transforms.freq_augment(\n images,\n labels,\n amplitude=self.hparams.freq_augment_amplitude,\n magnitude=self.hparams.augmentation_magnitude,\n proportion_f=self.hparams.freq_augment_ffrac,\n probability=self.hparams.augmentation_probability)\n for data in images:\n if self.hparams.augment_type == 'autoaugment':\n epoch_policy = self.good_policies[np.random.choice(\n len(self.good_policies))]\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n elif self.hparams.augment_type == 'random':\n epoch_policy = found_policies.random_policy(\n self.hparams.num_augmentation_layers,\n self.hparams.augmentation_magnitude,\n self.hparams.augmentation_probability)\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n else:\n final_img = np.copy(data)\n if self.hparams.apply_flip_crop:\n final_img = augmentation_transforms.random_flip(\n augmentation_transforms.zero_pad_and_crop(data, 4))\n # Apply cutout\n if self.hparams.apply_cutout:\n final_img = augmentation_transforms.cutout_numpy(final_img)\n\n final_imgs.append(final_img)\n final_imgs = np.array(final_imgs, np.float32)\n if self.hparams.noise_type == 'radial':\n labels = augmentation_transforms.add_radial_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.hparams.noise_class, self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'random' or self.hparams.noise_type == 'fourier' or self.hparams.noise_type == 'f' or self.hparams.noise_type == '1/f':\n labels = augmentation_transforms.add_sinusoidal_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.direction, self.hparams.noise_class,\n self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'uniform':\n labels = augmentation_transforms.add_uniform_noise(\n labels, self.hparams.amplitude, self.hparams.noise_class)\n\n batched_data = (final_imgs, labels)\n self.curr_train_index += self.hparams.batch_size\n return batched_data", "def generator(samples, batch_size=32):\n \n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n samples = shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n images = []\n angles = []\n 
for batch_sample in batch_samples:\n \n name = \"./training_udacity/IMG/\"+batch_sample[0].strip().split('/')[-1]\n\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n\n center_angle = float(batch_sample[3])\n throttle = float(batch_sample[4])\n brake = float(batch_sample[5])\n speed = float(batch_sample[6])\n \n images.append(center_image)\n angles.append(center_angle)\n \n augment = True\n if augment:\n # 1. Add Flipped Picture\n image_flipped = np.fliplr(center_image)\n measurement_flipped = -center_angle\n \n images.append(image_flipped)\n angles.append(measurement_flipped)\n \n # 2. Handle left and right Images\n # create adjusted steering measurements for the side camera images\n correction = 0.4\n steering_left = center_angle + correction\n steering_right = center_angle - correction\n \n left_name = \"./training_udacity/IMG/\"+batch_sample[1].strip().split('/')[-1]\n right_name = \"./training_udacity/IMG/\"+batch_sample[2].strip().split('/')[-1]\n\n img_left = cv2.imread(left_name)\n img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)\n\n img_right = cv2.imread(right_name)\n img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)\n\n images.append(img_left)\n images.append(img_right)\n \n angles.append(steering_left)\n angles.append(steering_right)\n\n# Sanity check the code above by plotting each picture\n# fig = plt.figure()\n# plt.imshow(center_image)\n# plt.axis('off')\n# fig.savefig(\"center.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(image_flipped)\n# plt.axis('off')\n# fig.savefig(\"flipped.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(img_left)\n# plt.axis('off')\n# fig.savefig(\"left.jpg\")\n#\n# fig = plt.figure()\n# plt.imshow(img_right)\n# plt.axis('off')\n# fig.savefig(\"right.jpg\")\n\n X_train = np.array(images)\n y_train = np.array(angles)\n \n yield shuffle(X_train, y_train)", "def GeneratorWithNoise(dataset,\n batch_size,\n noise_size,\n which_batches=range(0,1),\n mode='shuffled_sequential',\n num_batches=-1):\n\n for b in dataset.iterator(\n batch_size,\n dataset.get_num_examples()/batch_size,\n mode=mode,\n data_specs=dataset.get_data_specs(),\n return_tuple=True\n ):\n b = [b[i] for i in which_batches]\n t, bsize, dim = b[0].shape # time length, batch size, dim\n eps = np.random.normal(0,1,size=(t, bsize, noise_size)).astype(floatX)\n b.append(eps)\n yield b", "def generator(samples, batch_size=32):\n\n num_samples = len(samples)\n #batch_size = num_samples\n print('num_samples',num_samples)\n while 1: # Loop forever so the generator never terminates\n\n for offset in range(0, num_samples, batch_size): #this loop will be run for each iteration\n \t#print('iteration------------>',offset, batch_size)\n \tbatch_samples = samples[offset:offset+batch_size]\n\n \timages = []\n \tangles = []\n \tfor batch_sample in batch_samples:\n \t\tfor i in range(3): #include the center, right and left angles \n \t\t\tfile_name = root_path+batch_sample[i].split('/')[-3]+'/IMG/'+batch_sample[i].split('/')[-1]\n \t\t\timage = cv2.imread(file_name)\n \t\t\timages.append(image) # \n\n \t\tangle = float(batch_sample[3]) #steering angle is the fourth element in the input file\n \t\tangles.append(angle)\n \t\tangles.append(angle+angle_correction) #for right angle correction\n \t\tangles.append(angle-angle_correction) #for left angle correction\n\n \t############## Section 3: Augmenting the data to add balance and regularization to the learning\n \taugmented_images = []\n \taugmented_angles = []\n\n \tfor image,angle in zip(images, angles) 
: \n \t\taugmented_images.append(image) \n \t\taugmented_angles.append(angle)\n\n \t\taugmented_images.append(augment_brightness_camera_images(image) ) #brightness augmentation\n \t\taugmented_angles.append(angle)\n\n \t\taugmented_images.append(add_random_shadow(image)) #add random shadow\n \t\taugmented_angles.append(angle)\n\n\n \t\tflipped_image = cv2.flip(image,1) # Generated new data here\n \t\tflipped_angle = float(angle) * -1.0 #numpy array converts automatically to string\n \t\taugmented_images.append(flipped_image) #### Included the new data\n \t\taugmented_angles.append(flipped_angle) #### Included the new data to the training data set\n\n \t\taugmented_images.append(augment_brightness_camera_images(flipped_image) ) #brightness augmentation\n \t\taugmented_angles.append(flipped_angle)\n\n\n \tX_train = np.array(augmented_images)\n \ty_train = np.array(augmented_angles)\n\n\n \t#print(\"image shape\",np.array(images).shape)\n \t#print(\"augmented image shape\",np.array(augmented_images).shape)\n \t#print(\"X_train shape\",X_train[-1].shape)\n \tyield sklearn.utils.shuffle(X_train, y_train) #pass the iterator for containing the shuffled input data", "def toy_preprocess_scans(self, scan_ids, width, height, depth, clipping=True, loop=False,\n seed=42, shuffle=False):\n # Initialize image transformer\n kwds_generator = {'rotation_range': 5,\n 'width_shift_range': 0.03,\n 'height_shift_range': 0.03,\n 'zoom_range': 0.03,\n 'data_format': \"channels_first\", # z axis is first\n }\n image_gen = image_prep.ImageDataGenerator(**kwds_generator)\n\n scan_gen = self.preprocess_scans(scan_ids, width, height, depth, clipping, loop, shuffle)\n for ct_scan, origin, spacing in scan_gen:\n yield ct_scan, origin, spacing\n transformed_scan = image_gen.random_transform(ct_scan, seed=seed)\n if seed is not None:\n seed += 1\n yield transformed_scan, origin, spacing", "def generate_random_patches(filenames, size, seed=0, per_image=1):\n from copy import copy\n import itertools as itr\n\n filenames = copy(filenames)\n randgen = np.random.RandomState(seed)\n randgen.shuffle(filenames)\n failures = 0\n for fn in itr.cycle(filenames):\n img = asgray(load_image(fn))\n\n for l in range(per_image):\n # Random position\n x_to = img.shape[0]-size[0]+1\n y_to = img.shape[1]-size[1]+1\n\n if x_to >= 1 and y_to >= 1:\n x = randgen.randint(x_to) \n y = randgen.randint(y_to)\n yield img[x:x+size[0], y:y+size[1]]\n \n failures = 0\n else:\n failures += 1\n\n # The images are too small, let's stop iterating\n if failures >= 30:\n return", "def slide_crop(img, img_meta, model_cfg, save_dir):\r\n ori_shape = img_meta[0]['ori_shape']\r\n stem = Path(img_meta[0]['ori_filename']).stem\r\n save_dir = Path(save_dir)\r\n save_dir.mkdir(parents=True, exist_ok=True)\r\n\r\n h_stride, w_stride = model_cfg.test_cfg.stride\r\n h_crop, w_crop = model_cfg.test_cfg.crop_size\r\n batch_size, _, h_img, w_img = img.size()\r\n h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1\r\n w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1\r\n\r\n cnt = 0\r\n for h_idx in range(h_grids):\r\n for w_idx in range(w_grids):\r\n y1 = h_idx * h_stride\r\n x1 = w_idx * w_stride\r\n y2 = min(y1 + h_crop, h_img)\r\n x2 = min(x1 + w_crop, w_img)\r\n y1 = max(y2 - h_crop, 0)\r\n x1 = max(x2 - w_crop, 0)\r\n crop_img = img[:, :, y1:y2, x1:x2]\r\n \r\n cnt += 1\r\n msg1 = f'{x1},{y1},{x2},{y2}'\r\n msg2 = f'{ori_shape[0]},{ori_shape[1]},{ori_shape[2]}'\r\n msg3 = f'{batch_size},{h_img},{w_img}'\r\n bin_path = 
save_dir/f'{stem}-{\"-\".join([msg1, msg2, msg3])}.bin'\r\n crop_img.numpy().astype(np.float32).tofile(bin_path)\r\n\r\n return cnt", "def sample_image(generator, n_row, batches_done):\r\n # Sample noise\r\n z = Variable(float_tensor(np.random.normal(0, 1, (n_row ** 2, args.latent_dim))))\r\n labels = np.array([num for _ in range(n_row) for num in range(n_row)])\r\n labels = Variable(long_tensor(labels))\r\n gen_imgs = generator(z, labels)\r\n save_image(gen_imgs.data, \"images/%d.png\" % batches_done, nrow=n_row, normalize=True)", "def generator_fit(x, y, batch_size=128):\n while True:\n indices = np.random.randint(x.shape[0], size=batch_size)\n yield x[indices], y[indices]", "def build_generator(self):\n noise_shape = (self.dimensions_noise,)\n\n # This block of code can be a little daunting, but essentially it automatically calculates the required starting\n # array size that will be correctly upscaled to our desired image size.\n #\n # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting\n # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following:\n # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model]\n # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64.\n model = Sequential()\n model.add(\n Dense(\n self.starting_filters\n * (self.img_size[0] // (2 ** self.upsample_layers))\n * (self.img_size[1] // (2 ** self.upsample_layers)),\n activation=\"relu\",\n input_shape=noise_shape,\n )\n )\n model.add(\n Reshape(\n (\n (self.img_size[0] // (2 ** self.upsample_layers)),\n (self.img_size[1] // (2 ** self.upsample_layers)),\n self.starting_filters,\n )\n )\n )\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 6x8 -> 12x16\n model.add(Conv2D(1024, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 12x16 -> 24x32\n model.add(Conv2D(512, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 24x32 -> 48x64\n model.add(Conv2D(256, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 48x64 -> 96x128\n model.add(Conv2D(128, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 96x128 -> 192x256\n model.add(Conv2D(64, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(32, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=noise_shape)\n img = model(noise)\n\n return Model(noise, img)", "def cube_plus_crop(image, model_input_image_size, seed=None):\n vol_size = image.get_shape().as_list()\n # crop_locations = [1050, 2050]\n\n # Concat volume and label into a single volume for cropping\n comb_size = image.get_shape().as_list()\n crop_size = [comb_size[0]] + model_input_image_size + [comb_size[-1]]\n crop_size = [comb_size[0]] + model_input_image_size + 
[comb_size[-1]]\n with ops.name_scope(\n 'color_crop', 'random_crop', [image, crop_size]) as name:\n combined_volume = ops.convert_to_tensor(image, name='value')\n crop_size = ops.convert_to_tensor(\n crop_size, dtype=dtypes.int32, name='size')\n vol_shape = array_ops.shape(combined_volume)\n control_flow_ops.Assert(\n math_ops.reduce_all(vol_shape >= crop_size),\n ['Need vol_shape >= vol_size, got ', vol_shape, crop_size],\n summarize=1000)\n limit = vol_shape - crop_size + 1\n offset = tf.random_uniform(\n array_ops.shape(vol_shape),\n dtype=crop_size.dtype,\n maxval=crop_size.dtype.max,\n seed=seed) % limit\n # offset_2 = tf.random_uniform(\n # array_ops.shape(vol_shape),\n # dtype=crop_size.dtype,\n # maxval=crop_size.dtype.max,\n # seed=seed) % limit\n\n cropped_combined = array_ops.slice(\n combined_volume, offset, crop_size, name=name)\n cropped_volume = cropped_combined[:, :, :, :vol_size[-1]]\n cropped_label = cropped_combined[:, :, :, vol_size[-1]:]\n return cropped_volume, cropped_label", "def batch_generator(x, y, batch_size, augment_func):\n start = 0\n indices = np.arange(x.shape[0])\n while True:\n end = min(start + batch_size, x.shape[0])\n x_btch, y_btch = x[start:end], y[start:end]\n if x_btch.shape[0] < batch_size:\n np.random.shuffle(indices)\n x = x[indices]\n y = y[indices]\n x_btch = np.concatenate([x_btch, x[0:batch_size - x_btch.shape[0]]])\n y_btch = np.concatenate([y_btch, y[0:batch_size - y_btch.shape[0]]])\n x_augm, y_augm = [], []\n for x_b, y_b in zip(x_btch, y_btch):\n x_cur, y_cur = augment_func(x_b.copy(), y_b.copy())\n x_augm += [x_cur]\n y_augm += [y_cur]\n start += batch_size\n start %= x.shape[0]\n yield np.array(x_augm), np.array(y_augm)", "def get_batches_fn(batch_size):\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def generate_samples(data, root_path, batch_size=128):\n while True:\n # Generate random batch of indices\n indices = np.random.permutation(data.count()[0])\n\n for batch in range(0, len(indices), batch_size):\n batch_i = indices[batch:(batch + batch_size)]\n\n x = np.empty([0, img_h, img_w, img_c], dtype=np.float32)\n y = np.empty([0], dtype=np.float32)\n\n x, y = augment_data(x, y, data, root_path, batch_i)\n x, y = flip_images(x, y)\n\n yield (x, y)", "def image_generator(img_list):\n while True:\n img = random.choice(img_list)\n label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree\n img = preprocess_image(img)\n yield img, 
label", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def prepare_batch(self, iterator):\n elements = []\n\n for label, album_ids in iterator:\n for album_id in album_ids:\n image_path = os.path.join(self.image_folder, album_id)\n # If path doesn't exist, continue\n if not os.path.exists(image_path):\n continue\n images = [os.path.join(image_path, img_name)\n for img_name in sorted(os.listdir(image_path))]\n # If no photo available, continue\n if len(images) == 0:\n continue\n\n elements.append((label, images))\n\n random.shuffle(elements)\n\n return sorted(elements, key=lambda p: len(p[1]), reverse=True)", "def train_val_generators(batch_size, src_path, folder, save_prefix,\n input_aug_dict=None,\n color_mode=\"grayscale\",\n save_to_dir=None,\n target_size=(256, 256),\n validation_split=0.2,\n seed=1):\n\n aug_dict = {'rescale': 1. / 255} # always rescale the images to\n # the model\n if input_aug_dict is not None:\n aug_dict.update(input_aug_dict)\n\n datagen = ImageDataGenerator(**aug_dict,\n validation_split=validation_split)\n\n train_gen = datagen.flow_from_directory(\n src_path,\n classes=[folder],\n class_mode=None,\n color_mode=color_mode,\n target_size=target_size,\n batch_size=batch_size,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n seed=seed,\n subset='training')\n\n val_gen = datagen.flow_from_directory(\n src_path,\n classes=[folder],\n class_mode=None,\n color_mode=color_mode,\n target_size=target_size,\n batch_size=batch_size,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n seed=seed,\n subset='validation')\n\n return train_gen, val_gen", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def generate_batch(self, batch_size, rand=None, *args, **kwargs):\n return [\n self.generate_datasets(rand, *args, **kwargs) for _ in range(batch_size)\n ]", "def generator(file_name, batch_size):\n from sklearn.utils import shuffle\n while 1: # Loop forever so the generator never terminates\n chunk_iter = pd.read_csv(file_name, chunksize=batch_size)\n for chunk in chunk_iter:\n images = []\n angles = []\n for row in 
chunk.itertuples():\n img = cv2.imread(row.center)\n ang = float(row.angle)\n # if 'r' flag is true flip the image\n if row.r: \n img = np.fliplr(img)\n ang = -ang\n images.append(img)\n angles.append(ang)\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)", "def buildGenerator():\n inputs = tf.keras.layers.Input(shape=[256,256,3])\n\n down_stack = [\n downsample(64, 4, (None, 256, 256, 3), apply_batchnorm=False), # (bs, 128, 128, 64)\n downsample(128, 4, (None, 128, 128, 64)), # (bs, 64, 64, 128)\n downsample(256, 4, (None, 64, 64, 128)), # (bs, 32, 32, 256)\n downsample(512, 4, (None, 32, 32, 256)), # (bs, 16, 16, 512)\n downsample(512, 4, (None, 16, 16, 512)), # (bs, 8, 8, 512)\n downsample(512, 4, (None, 8, 8, 512)), # (bs, 4, 4, 512)\n downsample(512, 4, (None, 4, 4, 512)), # (bs, 2, 2, 512)\n downsample(512, 4, (None, 2, 2, 512)), # (bs, 1, 1, 512)\n ]\n\n up_stack = [\n upsample(512, 4, (None, 1, 1, 512), apply_dropout=True), # (bs, 2, 2, 1024)\n upsample(512, 4, (None, 2, 2, 1024), apply_dropout=True), # (bs, 4, 4, 1024)\n upsample(512, 4, (None, 4, 4, 1024), apply_dropout=True), # (bs, 8, 8, 1024)\n upsample(512, 4, (None, 8, 8, 1024)), # (bs, 16, 16, 1024)\n upsample(256, 4, (None, 16, 16, 1024)), # (bs, 32, 32, 512)\n upsample(128, 4, (None, 32, 32, 512)), # (bs, 64, 64, 256)\n upsample(64, 4, (None, 64, 64, 256)), # (bs, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n activation='tanh') # (bs, 256, 256, 3)\n\n x = inputs\n\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)", "def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator", "def _get_train_generator(self):\n while(True):\n random.shuffle(self.train)\n for data_element in self.train:\n if self.debug: \n print(\"training on: {}\".format(data_element))\n\n image, heatmap = self._generate_input_tuple(data_element)\n\n if self.debug: \n print(\"yields: {}\".format(data_element))\n\n yield (image, heatmap)" ]
[ "0.75824314", "0.6719259", "0.63412005", "0.6340442", "0.6336577", "0.6271357", "0.62369394", "0.61656475", "0.6124723", "0.61019236", "0.6051693", "0.6036598", "0.59852403", "0.5972355", "0.596388", "0.5952421", "0.5929974", "0.59115386", "0.5901182", "0.5863039", "0.5820809", "0.5819543", "0.5813546", "0.5812051", "0.5808443", "0.580115", "0.57827634", "0.5773646", "0.57719606", "0.5764916", "0.57606", "0.57267755", "0.57190984", "0.5718862", "0.5692184", "0.5685223", "0.56766033", "0.56742924", "0.56736547", "0.5664635", "0.5647028", "0.56393987", "0.5625895", "0.56184846", "0.56076527", "0.5602962", "0.5579732", "0.5574615", "0.5574577", "0.5571107", "0.55596113", "0.5558685", "0.5556196", "0.55547565", "0.55448174", "0.55443794", "0.5541989", "0.554036", "0.55400735", "0.55297935", "0.5522855", "0.55209255", "0.5520111", "0.55200607", "0.551606", "0.5515235", "0.55106395", "0.55033845", "0.54930025", "0.54914045", "0.5490563", "0.548908", "0.54814315", "0.5479738", "0.5479122", "0.5475063", "0.5474924", "0.54692316", "0.5466831", "0.5466146", "0.5460494", "0.54550177", "0.54528666", "0.54441476", "0.54438883", "0.5433421", "0.5427226", "0.54272133", "0.5415924", "0.5400566", "0.5399611", "0.5398303", "0.5396745", "0.53959835", "0.53950524", "0.53939134", "0.53924996", "0.538282", "0.53799903", "0.5377695" ]
0.74894965
1
To be used in conjunction with loss.binary_xentropy_with_sigmoid
def sigmoid_with_binary_xentropy(z): return sigmoid(z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigmoid(x):\r\n #pred_x = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\r\n pred_x = 1.0 / (1.0 + np.exp(-x))\r\n return pred_x\r\n pass", "def test_sigmoid_cross_entropy(self):\n loss_op = pointwise_losses.SigmoidCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.54905695, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0., atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 0.6905699, atol=1e-5)", "def _sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid_activation(x):\n return 1.0 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1/(1+np.exp(-1*x))", "def sigmoid(x):\n return 1 / (1 * np.exp(-x))", "def sigmoid(x):\n return 1. / (1. + np.exp(-x))", "def sigmoid(x):\n return 1.0/(1 + np.exp(-x))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n return 1.0/(1.0+exp(-x))", "def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))", "def act_sigmoid_scaled(x):\n return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2", "def sigmoid(x):\r\n\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(self, x):\n self.x = x\n output = 1 / (1 + np.exp(-x))\n return output", "def sigmoid(x):\n return 1/(1 + math.exp(-x))", "def sigmoid(x):\n return 1 / (1 + (e**(-x))) #changed the '-' to a '+' because it didnt work otherwise\n #return 1 / (1 + math.exp(-x))", "def sigmoid(X):\n g = 1/(1 + np.exp(-X))\n return g", "def activation_sigmoid_custom(self):\n self.value = 1 / (1 + np.e ** (-4.9 * self.value))", "def _sigmoid(self, x):\n return 1. / (1. 
+ np.exp(-x)) # the `.` ensures that `1` is a float", "def sigmoid(x):\n\n s = 1 / (1 + np.exp(-x))\n\n return s", "def sigmoid(x):\n s = 1 / (1 + np.exp(-x))\n return s", "def sigmoid(self, x):\n\n #logging.debug(\"sigmoid received %s as input\" % (x))\n return 1.0 / ( 1.0 + np.exp(-x) )", "def sigmoid(x):\n return 1 / (1 - (power(e,-x)))", "def binary_crossentropy(y_pred, y_true):\n with tf.name_scope(\"BinaryCrossentropy\"):\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y_pred,\n y_true))", "def binary_crossentropy(predictions, targets):\n return theano.tensor.nnet.binary_crossentropy(predictions, targets)", "def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))", "def hard_sigmoid(x):\n x = (0.2 * x) + 0.5\n x = F.threshold(-x, -1, -1)\n x = F.threshold(-x, 0, 0)\n return x", "def sigmoid(self, x):\n y = 1/(1 + np.exp((-x)))\n return y \n raise NotImplementedError(\"Sigmoid not implemented\")", "def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))", "def hard_sigmoid(x):\r\n slope = 0.2\r\n shift = 0.5\r\n x = (x * slope) + shift\r\n x = tensor.clip(x, 0, 1)\r\n return x", "def sigmoid(self, x):\r\n self.x = x\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(x: np.ndarray \n ) -> np.ndarray:\n return 1/(1+np.exp(-x))", "def sigmoid(x):\n\treturn 1.0/(1.0+math.exp(-(x-0.5)*12.0))", "def scaled_sigmoid(self, x):\r\n return (tf.keras.backend.sigmoid(x) * 30 - 5)", "def test_activation_sigmoid(self):\r\n senal = 1\r\n result = activation.soft_sigmoid(senal)\r\n self.assertEqual(result, 0.6963549298238342)", "def sigmoid(self, x):\n return 1 / (1 + np.exp(-4.9 * x))", "def _sigmoid(x):\n e=0\n try:\n e = _exp(-1*x)\n except OverflowError:\n return 0\n return 1./(1.+e)", "def sigmoid(x, exponent):\n \n return 1/(1+np.exp(-exponent*x))-0.5", "def _tanh_to_sigmoid(x):\n return x * 0.5 + 0.5", "def sigmoid_cross_entropy(y, label):\r\n losses = - np.log(y + g_epsilon) * label - np.log(1.0 - y + g_epsilon) * (1.0 - label)\r\n return losses", "def _sigmoid(x, b, w):\n return np.minimum(np.maximum(1.0 / (1.0 + np.exp(-b - np.sum(w * x, axis=1))), 1.0e-12), 1 - 1.0e-12)", "def binary_cross_entropy(y_true, y_pred, eps=1e-15):\n assert y_true.shape == y_pred.shape\n y_pred = np.clip(y_pred, eps, 1 - eps) # Avoid log(0)\n return - np.mean(\n y_true * np.log(y_pred) + \n (1 - y_true) * (np.log(1 - y_pred))\n )", "def sigmoid_update_hid(self,x):\n \n sigmoid_activation = T.reshape(self.bhid, [self.num_hidden,1]) +\\\n T.dot(T.transpose(self.W),x)\n \n return T.nnet.sigmoid(sigmoid_activation)", "def activation_sigmoid(self):\n self.value = 1 / (1 + np.e ** (-self.value))", "def sigmoid_(self, x):\n\t\tif x.size == 0 or x is None:\n\t\t\treturn None\n\t\treturn 1 / (1 + np.exp(-x))", "def swish(x):\n return x * tf.sigmoid(x)", "def sigmoid_xent(logits, targets):\n # reshape them into one huge batch\n logits = tf.concat(0, logits)\n targets = tf.concat(0, targets)\n\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits, targets))", "def sigmoid_cross_entropy(x, t, use_cudnn=True, normalize=True):\n return SigmoidCrossEntropy(use_cudnn, normalize)(x, t)", "def sigmoid(x):\n pos_mask = (x >= 0)\n neg_mask = (x < 0)\n z = np.zeros_like(x)\n z[pos_mask] = np.exp(-x[pos_mask])\n z[neg_mask] = np.exp(x[neg_mask])\n top = np.ones_like(x)\n top[neg_mask] = z[neg_mask]\n return top / (1 + z)", "def inverse_sigmoid(x):\n y = -1 * np.log((1-x)/x)\n return 
y", "def _sigmoid_loss(logits, targets):\n\n loss_comp = tf.nn.sigmoid_cross_entropy_with_logits(logits, targets)\n\n return tf.reduce_mean(loss_comp)", "def binary_cross_entropy(y_true, y_preds):\n return np.sum(y_true * np.log(y_preds) + (1 - y_true) * np.log(1 - y_preds))", "def sigmoid(inX):\n if inX < 0:\n return 1 - 1 / (1 + exp(inX))\n else:\n return 1 / (1 + exp(-inX))", "def sigmoid(self, X):\n\n return 1.0/(1.0+np.exp(-X))", "def sigmoid(X):\n if isinstance(X,(list,tuple)):\n X=np.array(X)\n return 1/(1+np.exp(-X))\n #return np.exp(X)/(1+np.exp(X))", "def basic_sigmoid(x):\r\n\r\n ### START CODE HERE ### (≈ 1 line of code)\r\n # math.exp(x) -> e ^ x ,e 的 x 次方\r\n s = 1.0 / (1 + 1/ math.exp(x))\r\n ### END CODE HERE ###\r\n\r\n return s", "def sigmoid(x):\n S = np.ones((np.size(x),))\n for i in range(len(x)):\n S[i] = 1/(1+np.exp(-x[i]))\n return S", "def sigmoid_cross_entropy(inputs, reduction='valid', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SigmoidCrossEntropy\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def sigmoid(X,W,b):\n preActivation = np.dot(X, W) + b\n return (1.0)/(1.0 + np.exp(-preActivation))", "def xentropy_loss(self, logits, labels):\n labels = tf.cast(labels, tf.int32)\n logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])\n labels = tf.reshape(labels, [tf.shape(labels)[0], -1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name=\"loss\")\n\n return loss", "def sigmoid(x):\n return amath.exp(amath.minimum(0, x)) / (1 + amath.exp(-abs(x)))", "def asigmoid(x):\n return -log(1 / x - 1)", "def get_sigmoid(x): \n output = np.zeros(x.shape)\n ind1 = (x >= 0)\n ind2 = (x < 0)\n output[ind1] = 1 / (1 + np.exp(-x[ind1]))\n output[ind2] = np.divide(np.exp(x[ind2]), (1 + np.exp(x[ind2])))\n\n return output", "def der_sigmoid(y):\n return y * (1 - y)", "def _sigmoid_m(self, X):\n result = np.zeros((X.shape[0], X.shape[1]), dtype='float32')\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n result[i, j] = 1.0 / (1.0 + np.exp(-X[i, j]))\n return result", "def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)", "def binary_cross_entropy(true, pred, epsilon=1e-7):\n pred = ivy.clip(pred, epsilon, 1-epsilon)\n # noinspection PyTypeChecker\n return -(ivy.log(pred) * true + ivy.log(1 - pred) * (1 - true))", "def gan_bce_loss(x, as_real: bool):\n if as_real:\n return F.binary_cross_entropy_with_logits(x, torch.ones_like(x))\n else:\n return F.binary_cross_entropy_with_logits(x, torch.zeros_like(x))", "def sigmoid(z):\n \n return 1 / (1 + np.exp(-z))#your code here", "def sigmoid(x,shift=0,mult=1):\n return 1 / (1 + math.exp(-(x+shift)*mult))", "def swish(x):\n return x * k.activations.sigmoid(x)", "def class_balanced_sigmoid_cross_entropy(this,logits, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. 
- y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)", "def class_balanced_sigmoid_cross_entropy(this,logits, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)", "def sigmoid(value):\n return 1.0 / (1.0 + math.exp(-value))", "def weighted_sigmoid_binary_crossentropy(output, target, weight=1.):\n l = (1. + (weight - 1. ) * target)\n loss = (1. - target ) * output + l * ( T.log(1. + T.exp( - T.abs_(output)))\n + T.maximum(-output, 0))\n return loss", "def nt_xent_loss(y_true, y_pred):\n [x,v] = tf.unstack(y_pred, num=2)\n x = tf.math.l2_normalize(x, -1)\n v = tf.math.l2_normalize(v, -1)\n\n batch_size = tf.shape(x)[0]\n masks = tf.one_hot(tf.range(batch_size), batch_size)\n labels = tf.one_hot(tf.range(batch_size), batch_size * 2)\n\n logits_x_x = tf.matmul(x, x, transpose_b=True) / 0.1\n logits_x_x = logits_x_x - masks * 1e9\n\n logits_v_v = tf.matmul(v, v, transpose_b=True) / 0.1\n logits_v_v = logits_v_v - masks * 1e9\n\n logits_x_v = tf.matmul(x, v, transpose_b=True) / 0.1\n logits_v_x = tf.matmul(v, x, transpose_b=True) / 0.1\n\n loss_x = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_x_v, logits_x_x], 1))\n loss_v = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_v_x, logits_v_v], 1))\n\n loss = tf.reduce_mean(loss_x + loss_v)\n\n return loss", "def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))", "def sigmoid_forward(x):\n\n out = 1/(1+np.exp(-x))\n\n cache = x\n return out, cache", "def ragan_bce_loss(x, y, x_real_than_y: bool=True):\n if x_real_than_y:\n return F.binary_cross_entropy_with_logits(x - y.mean(), torch.ones_like(x)) + F.binary_cross_entropy_with_logits(y - x.mean(), torch.zeros_like(y))\n else:\n return F.binary_cross_entropy_with_logits(y - x.mean(), torch.ones_like(x)) + F.binary_cross_entropy_with_logits(x - y.mean(), torch.zeros_like(y))", "def binary_cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n assert input.shape == target.shape, 'input and target have different shape!'\n assert len(input.shape) == 2, 'binary cross entropy only used in 2 dim matrix'\n assert input.shape[1] == 1, 'binary shape[1] should be 1'\n loss = target * log(input) + (1 - target) * log(1 - input)\n return -sum(loss, 0) / input.shape[0]", "def test_sigmoid_activation(self):\n self.assertEqual([0.5, 0.5], list(\n af.Sigmoid().output(np.array([0, 0]))))\n self.assertEqual([0.25, 0.25], list(\n af.Sigmoid().derivative(np.array([0, 0]))))", "def true_y_exp(x):\n y = torch.exp(x)#torch.sigmoid(x * 5) * 2 #exp(x)\n return y", "def binary_crossentropy(predictions, targets):\n predictions, targets = align_targets(predictions, targets)\n 
return theano.tensor.nnet.binary_crossentropy(predictions, targets)", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent" ]
[ "0.7591269", "0.75822216", "0.7455182", "0.74292374", "0.74232256", "0.74232256", "0.74083984", "0.74083984", "0.74083984", "0.74083984", "0.74083984", "0.74083984", "0.7393072", "0.73908216", "0.73857445", "0.73740834", "0.73655087", "0.73640925", "0.73640925", "0.73588455", "0.7335531", "0.73263943", "0.7313945", "0.7307331", "0.7304229", "0.7286178", "0.727832", "0.727832", "0.7261115", "0.72601986", "0.72254735", "0.7200133", "0.7199614", "0.7197506", "0.7162571", "0.7161017", "0.7142385", "0.71373093", "0.71281344", "0.7112181", "0.7108232", "0.7108232", "0.71017575", "0.71000236", "0.70985824", "0.7065315", "0.7064948", "0.70622176", "0.7035548", "0.70354646", "0.70274645", "0.70274276", "0.7009568", "0.7009022", "0.6991364", "0.69849396", "0.6982271", "0.6952069", "0.69472224", "0.6945532", "0.6937353", "0.6928716", "0.6928018", "0.692571", "0.69123226", "0.6911012", "0.690941", "0.6902809", "0.69013697", "0.68974537", "0.68940806", "0.6860475", "0.6857558", "0.6847488", "0.68251467", "0.6822909", "0.6814333", "0.68094724", "0.6800262", "0.67975825", "0.6784847", "0.6775884", "0.675487", "0.67412376", "0.67207944", "0.6719816", "0.67164385", "0.67059773", "0.67059773", "0.6696735", "0.6686515", "0.6674201", "0.66711605", "0.6632197", "0.6630386", "0.66280526", "0.6598259", "0.65980166", "0.6589962", "0.658665" ]
0.8279197
0
To be used in conjunction with loss.xentropy_with_softmax
def softmax_with_xentropy(z):
    return softmax(z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def test_softmax_cross_entropy(self):\n loss_op = listwise_losses.SoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 1.306335, atol=1e-5)", "def xentropy_loss(self, logits, labels):\n labels = tf.cast(labels, tf.int32)\n logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])\n labels = tf.reshape(labels, [tf.shape(labels)[0], -1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name=\"loss\")\n\n return loss", "def my_softmax_cross_entropy(preds, labels):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n # loss = tf.nn.weighted_cross_entropy_with_logits(logits=preds, targets=labels, pos_weight=0.1)\n return tf.reduce_mean(loss)", "def softmax_cross_entropy(logit, onehot, axis=-1):\n return SoftmaxCrossEntropy(axis).forward(logit, onehot)", "def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))", "def _softmax(x):\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n s = K.sum(e, axis=-1, keepdims=True)\n return e / s", "def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass", "def soft_cross_entropy(input, targets):\n student_likelihood = torch.nn.functional.log_softmax(input, dim=-1)\n targets_prob = torch.nn.functional.softmax(targets, dim=-1)\n return (- targets_prob * student_likelihood).sum(dim=-1).mean()", "def nt_xent_loss(y_true, y_pred):\n [x,v] = tf.unstack(y_pred, num=2)\n x = tf.math.l2_normalize(x, -1)\n v = tf.math.l2_normalize(v, -1)\n\n batch_size = tf.shape(x)[0]\n masks = tf.one_hot(tf.range(batch_size), batch_size)\n labels = tf.one_hot(tf.range(batch_size), batch_size * 2)\n\n logits_x_x = tf.matmul(x, x, transpose_b=True) / 0.1\n logits_x_x = logits_x_x - masks * 1e9\n\n logits_v_v = tf.matmul(v, v, transpose_b=True) / 0.1\n logits_v_v = logits_v_v - masks * 1e9\n\n logits_x_v = tf.matmul(x, v, transpose_b=True) / 0.1\n logits_v_x = tf.matmul(v, x, transpose_b=True) / 0.1\n\n loss_x = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_x_v, logits_x_x], 1))\n loss_v = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_v_x, logits_v_v], 1))\n\n loss = tf.reduce_mean(loss_x + loss_v)\n\n return loss", "def softmax_cross_entropy_loss(self, y, y_hat):\n batch_size = y.shape[0]\n return -(y - y_hat) / batch_size", "def softmax_cross_entropy_loss(logit, labels):\n p = softmax(logit)\n loss_i = - labels * 
np.log(p + 1e-8)\n return np.mean(loss_i)", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def test_aux_softmax_cross_entropy(self):\n loss_op = listwise_losses.AuxiliarySoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true_aux, y_pred)\n assert np.isclose(loss, 0.88127804, atol=1e-5)", "def _softmax(self,x):\n e_x = np.exp(x - np.max(x))\n return np.nan_to_num(e_x / np.nan_to_num(e_x.sum(axis=0)))", "def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def convert_softmax_with_cross_entropy(g, op, block):\n\n logits = g.get_node(op.input(\"Logits\")[0])\n labels = g.get_node(op.input(\"Label\")[0])\n ignore_index = op.attr(\"ignore_index\")\n axis = op.attr(\"axis\")\n if axis < 0:\n axis = len(infer_shape(logits)) + axis\n\n softmax = _op.nn.softmax(logits, axis=axis)\n\n g.add_node(op.output(\"Softmax\")[0], softmax)\n\n softmax = _op.log(softmax)\n soft_label = op.attr(\"soft_label\")\n if soft_label:\n loss = _op.sum(-labels * softmax, axis=axis)\n else:\n labels_one = _op.one_hot(\n labels,\n on_value=_expr.const(1.0, dtype=\"float32\"),\n off_value=_expr.const(0.0, dtype=\"float32\"),\n depth=infer_shape(logits)[axis],\n axis=axis + 1,\n dtype=\"float32\",\n )\n labels_one = _op.squeeze(labels_one, axis=axis)\n loss = _op.sum(-labels_one * softmax, axis=axis)\n loss = _op.expand_dims(loss, axis=axis)\n if ignore_index != -100: # noly when soft_label is False\n assert not soft_label, \"soft_label and ignore_index cannot be set at the same time.\"\n ignore_mask = _op.not_equal(labels, _expr.const(ignore_index, dtype=\"int64\"))\n ignore_mask = _op.cast(ignore_mask, \"float32\")\n loss = _op.multiply(loss, ignore_mask)\n\n g.add_node(op.output(\"Loss\")[0], loss)", "def softmax_loss1(x, y):\n # tmp = np.max(x, axis=1, keepdims=True)\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n # tmp2 = np.arange(N)\n tmp3 = log_probs[np.arange(N), y]\n # tmp4 = log_probs[[0,1,2],[2,5,0]]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def entropy(self, **kwargs) -> TensorType:", "def entropy(self, **kwargs) -> TensorType:", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def cross_entropy(y_observed, p):\n\n pass", "def _softmax(self, x):\n return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))", "def softmax_with_cross_entropy(predictions, target_index):\n # TODO: Copy from the previous assignment\n shape = predictions.shape\n probs = softmax(predictions)\n if probs.ndim == 1:\n probs = probs[np.newaxis, :]\n loss = 
cross_entropy_loss(probs, target_index)\n dprediction = probs.copy()\n dprediction[np.arange(probs.shape[0]), target_index] -= 1\n # Градиент делим на batch_size, так как при численном вычислении усредняем дельту по одной координате\n # Тогда как при аналитическом надо учесть это здесь\n return loss, np.resize(dprediction, shape)/probs.shape[0]", "def softmax_loss(x, y):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n N=x.shape[0]\n\n \n x-=np.max(x,axis=1,keepdims=True)\n temp=np.exp(x)\n dr_vec=np.sum(temp,axis=1,keepdims=True)\n\n nr=(x[np.arange(N),y]).reshape([N,1])\n loss=np.sum(-(nr)+np.log(dr_vec))\n \n loss=(loss/N)\n temp/=dr_vec\n temp[np.arange(N),y] -= 1\n \n dx = temp/N\n \n return loss, dx", "def softmax_categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"SoftmaxCrossentropy\"):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,\n y_true))", "def softmax_cross_entropy(y, y_hat):\n loss = cross_entropy(y, softmax(y_hat))\n\n filter_ = ~tf.math.is_finite(loss)\n replace_ = tf.zeros_like(loss)\n\n return tf.where(filter_, replace_, loss)", "def softmax(x):\r\n output = np.exp(x)\r\n return output / np.sum(output, axis=1, keepdims=True)", "def softmax_loss(x, y):\n ############################################################################\n # TODO: You can use the previous softmax loss function here. # \n # Hint: Be careful on overflow problem #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n # We want to get the real y\n log_C = -np.max(x,axis=1,keepdims = True)\n # Get numerator\n e_all = np.exp(x+log_C)\n # Get the final prob\n prob = e_all/e_all.sum(axis=1,keepdims=True)\n # Find final loss\n loss = np.sum(-np.log(prob)[np.arange(N),y])/N\n # Get dx\n dx = prob\n dx[np.arange(N),y] -= 1\n dx /= N\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return loss, dx", "def loss_sce(y_pred, y_true):\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n\n return tf.reduce_mean(loss)", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. 
Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def crossentropy_to_crossentropy_with_softmax(fgraph):\r\n\r\n def search_make_one_sub():\r\n for node in fgraph.toposort():\r\n if node.op == crossentropy_categorical_1hot:\r\n nll, = node.outputs\r\n sm, one_of_n = node.inputs\r\n if sm.owner and sm.owner.op == softmax:\r\n x, = sm.owner.inputs\r\n new_nll, new_sm, new_am = crossentropy_softmax_argmax_1hot_with_bias(x,\r\n tensor.zeros_like(x[0]), one_of_n)\r\n fgraph.replace_all_validate([(nll, new_nll), (sm, new_sm)],\r\n reason=\"crossentropy_to_crossentropy_with_softmax\")\r\n return True\r\n if sm.owner and sm.owner.op == softmax_with_bias:\r\n x, b = sm.owner.inputs\r\n new_nll, new_sm, new_am = crossentropy_softmax_argmax_1hot_with_bias(x, b,\r\n one_of_n)\r\n fgraph.replace_all_validate([(nll, new_nll), (sm, new_sm)],\r\n reason=\"crossentropy_to_crossentropy_with_softmax\")\r\n return True\r\n\r\n return False\r\n\r\n while search_make_one_sub():\r\n pass\r\n return", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def cross_entropy(X, y, using_onehot=True):\n\tM = y.shape[0]\n\tif using_onehot :\n\t\tlog_likelihood = -np.log(np.max(X * y, -1))\n\telse:\n\t\tlog_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit\n\tloss = np.sum(log_likelihood) / M\n\treturn loss", "def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss", "def softmax_cross_entropy(inputs, axis=1, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SoftmaxCrossEntropy\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n axis=axis,\n reduction=args['reduction'],\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def compute_entropy_loss(logits):\n policy = F.softmax(logits, dim=-1)\n log_policy = F.log_softmax(logits, dim=-1)\n return torch.sum(policy * log_policy)", "def softmax(x):\r\n sum_c = np.sum(np.exp(x), axis=1)\r\n sum_c = np.expand_dims(sum_c, axis=1)\r\n pred_x = np.divide(np.exp(x), sum_c)\r\n return pred_x", "def log_softmax(x: 
jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:\n return x - jnp.expand_dims(jnp.log(jnp.sum(jnp.exp(x), axis=axis)), axis)", "def soft_cross_entropy_tinybert(input, targets):\n student_likelihood = torch.nn.functional.log_softmax(input, dim=-1)\n targets_prob = torch.nn.functional.softmax(targets, dim=-1)\n return (- targets_prob * student_likelihood).mean()", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def __softmax2(self, x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax_loss(x, y):\n def softmax(x):\n exps = np.exp(x)\n return exps / np.sum(exps, axis=1)[:,None]\n\n N = y.shape[0]\n p = softmax(x)\n log_likelihood = -np.log(p[range(N),y])\n loss = np.sum(log_likelihood) / N\n\n dx = p.copy()\n dx[range(N),y] -= 1\n dx = dx/N\n\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classe = W.shape[1]\n loss = 0.0\n\n for i in range(num_train): #pour chaque image de l'ensemble d'entrainement\n scores = X[i].dot(W)\n scores -= max(scores)\n\n correct_class_score = scores[y[i]] #y[i]=c\n e_syi = np.exp(correct_class_score)\n e_sj = np.sum(np.exp(scores))\n\n loss -= np.log(e_syi/e_sj)\n\n for k in range(num_classe): #pour chaque classe\n dW[:, k] += ((np.exp(scores[k])/e_sj) - (k == y[i])) * X[i].T\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )", "def stablesoftmax(x):\r\n shiftx = x - np.max(x)\r\n exps = np.exp(shiftx)\r\n return exps / np.sum(exps)", "def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))", "def softmax_loss(x, y):\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx", "def softmax_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement the 
loss and gradient for softmax classification. This #\n # will be similar to the softmax loss vectorized implementation in #\n # cs231n/classifiers/softmax.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = x.shape[0]\n\n x = np.exp(x)\n temp_sum = np.sum(x, axis = 1, keepdims = True)\n x = x / temp_sum\n softmax_result = x\n trans_y = np.zeros((x.shape[0],x.shape[1]))\n trans_y[np.arange(x.shape[0]), y] += 1\n x = - np.log(x)\n x = x * trans_y\n x_sum = np.sum(x)\n loss = x_sum / num_train\n loss = loss + \n\n dx = softmax_result - trans_y\n dx = dx / num_train\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n norm_log = log_softmax(input, 1)\n\n np_one_hot = np.eye(input.shape[1])[target.data]\n tensor_one_hot = tensor(np_one_hot, 'one-hot', False, True)\n\n mask = -norm_log * tensor_one_hot\n mask_sum = sum(mask, 1)\n loss = sum(mask_sum, 0)\n\n return loss / input.shape[0]", "def softmax_loss(x, y):\n N, C = x.shape\n loss, dx = 0, np.zeros(x.shape) \n for i in range(N):\n loss += -np.log(np.exp(x[i,y[i]])/np.sum(np.exp(x[i,:])))\n dx[i,:] = np.exp(x[i,:])/np.sum(np.exp(x[i,:]))\n dx[i,y[i]] += (-1)\n \n loss /= N\n dx /= N\n return loss, dx", "def softmax_loss(x, y):\n # softmax\n num = np.exp(x)\n den = np.sum(num, axis=1)\n softmax = num/den[:, None]\n N = x.shape[0]\n\n # compute the los per class\n loss = softmax[np.arange(N), y]\n loss = -np.log(loss)\n\n # sum all the losses and divide by number of class\n # Also add the regularization loss term\n loss = np.sum(loss)/N \n \n dscores = softmax\n dscores[np.arange(N), y] -= 1\n dscores /= N\n\n return loss, dscores", "def conditional_entropy(self) -> float:\n pass", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def softmax(y):\n# y = y.squeeze()\n epsilon = 0.001\n y = y.detach().numpy()\n y[y > 400] = 400 # For stability to prevent overflow\n denominator = epsilon + sum(np.exp(y)) # Further stability to prevent overflow\n numerator = np.exp(y)\n softmax = numerator / denominator\n return torch.Tensor(softmax)", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )", "def cross_entropy(input1, target, size_average=True):\n logsoftmax = nn.LogSoftmax(dim=0)\n return torch.sum(-target * logsoftmax(input1))\n # if size_average:\n # return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))\n # else:\n # return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))", "def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):\n raise NotImplementedError()", "def softmax_cross_entropy(logits, target, weight=None, reduce=True):\n target = target.long()\n # Convert 5d input to 4d, because it is faster in functional.cross_entropy\n if logits.dim() == 5:\n logits = compress_3d_to_2d(logits)\n target = compress_3d_to_2d(target)\n\n return nn.functional.cross_entropy(logits, target, weight=weight, reduce=reduce)", "def 
grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)", "def entropy_loss(policy_logits):\n\n softmax = nn.Softmax(dim=-1)\n logsoftmax = nn.LogSoftmax(dim=-1)\n\n policy = softmax(policy_logits.action_type)\n log_policy = logsoftmax(policy_logits.action_type)\n\n return torch.mean(torch.sum(-policy * log_policy, axis=-1))", "def my_softmax(x):\n x = x - np.max(x)\n exp_x = np.exp(x)\n softmax_x = exp_x / np.sum(exp_x)\n return softmax_x", "def loss_softmax_cross_entropy(self, unet, predict, ground_truth):\n\n loss = -F.mean(F.log(predict+1e-16) * ground_truth)\n\n chainer.report({\"loss\":loss}, unet)#mistery\n return loss", "def softmax(x):\r\n e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))\r\n return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference\r", "def loss(logits, labels):\r\n labels = tf.to_int64(labels)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=labels, logits=logits, name='xentropy')\r\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def loss(logits, labels):\r\n labels = tf.to_int64(labels)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=labels, logits=logits, name='xentropy')\r\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def demo_np_vs_tf_softmax():\n X = sample_images(9, 10)\n W = np.random.randn(9, 4) * 0.0001\n y = np.array([0, 1, 1, 1, 3, 2, 0, 2, 0, 3])\n\n print(\"np softmax:\\n{}\".format(\"\\n-----\\n\".join(['{}'.format(el) for el in np_softmax_loss(W, X, y, 0.00001)])))\n print(\"\\n\\ntf softmax:\\n{}\".format(\"\\n-----\\n\".join(['{}'.format(el) for el in tf_softmax_loss(W, X, y, 0.00001)])))", "def loss(logits, labels):\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits, labels, name='xentropy')\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def binary_crossentropy(predictions, targets):\n return theano.tensor.nnet.binary_crossentropy(predictions, targets)", "def softmax(x):\n #pass # TODO: Compute and return softmax(x)\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(inputs):\n return np.exp(inputs) / float(sum(np.exp(inputs)))", "def softmax(inputs):\n return np.exp(inputs) / float(sum(np.exp(inputs)))", "def build_nt_loss(self, n_logits, n_target):\n n_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_logits, labels=n_target)\n n_loss = tf.reduce_mean(n_loss)\n return n_loss", "def softmax(x): \n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def binary_cross_entropy(y_true, y_preds):\n return np.sum(y_true * np.log(y_preds) + (1 - y_true) * np.log(1 - y_preds))", "def softmax_crossentropy_with_logits(logits,reference_answers):\r\n logits_for_answers = logits[np.arange(len(logits)),reference_answers]\r\n \r\n xentropy = - logits_for_answers + np.log(np.sum(np.exp(logits),axis=-1))\r\n \r\n return xentropy", "def act_sigmoid_scaled(x):\n return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2", "def softmax(x):\n \"\"\"\"\"\"\n return exp(x) / sum(exp(x), axis=0)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. 
#\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n for i in range(num_train):\n scores = X[i].dot(W) # scores is 1 * C\n correct_class = y[i]\n \n # LOSS DUE TO TRAINING SAMPLE = -log(exp^correct_score / sum(exp^all_other_scores))\n log_c = np.max(scores)\n scores -= log_c\n correct_class_score = scores[correct_class]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores))\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.log(proportion)\n # print(proportion)\n \n # ALTERNATIVELY: (we split the log)\n# loss -= scores[y[i]]\n# loss += np.log(np.sum(np.exp(X[i].dot(W))))\n \n # UPDATE GRADIENT\n for j in range(num_classes):\n p = np.exp(scores[j]) / sum_exp_scores # \"probability\" of class j\n dW[:,j] += (p - (j == y[i])) * X[i,:]\n # dW is D by C\n\n loss /= num_train\n loss += reg * np.sum(W * W) \n dW /= num_train\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n num_train = X.shape[1] # d*n\r\n num_class = W.shape[0]\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! 
#\r\n #############################################################################\r\n loss = 0.0\r\n for i in range(num_train):\r\n X_i = X[:,i] # D*1\r\n score_i = W.dot(X_i)\r\n score_i -= np.max(score_i) #C*1 but keepdims = false so it becomes 1*C\r\n exp_score_i = np.exp(score_i)\r\n probs_i = exp_score_i/np.sum(exp_score_i) #1*C\r\n correct_logprobs_i = -np.log(probs_i[y[i]])\r\n loss += correct_logprobs_i\r\n \r\n dscore_i = probs_i.reshape(num_class,-1)#c*1\r\n dscore_i[y[i]] -= 1 #C*1\r\n X_i = X_i.reshape(1,-1)# 1*D\r\n dW += dscore_i.dot(X_i)\r\n \r\n loss /= num_train\r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dW /= num_train\r\n dW += reg*W\r\n \r\n return loss, dW", "def softmax(x):\n x_exp = np.exp(x)\n x_sum = np.sum(x_exp, axis=1, keepdims=True)\n s = x_exp / x_sum\n \n return s", "def generatorLoss(fakeOutput):\n return cross_entropy(tf.ones_like(fakeOutput), fakeOutput)", "def binary_cross_entropy(y_true, y_pred, eps=1e-15):\n assert y_true.shape == y_pred.shape\n y_pred = np.clip(y_pred, eps, 1 - eps) # Avoid log(0)\n return - np.mean(\n y_true * np.log(y_pred) + \n (1 - y_true) * (np.log(1 - y_pred))\n )", "def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def softmax(self, x):\n e_x = np.exp(x)\n return e_x / e_x.sum(axis=1, keepdims=True) # only difference", "def softmax(inputs):\n probs = np.exp(inputs)\n # print(probs.shape)\n # t = np.sum(probs, axis=0)\n # print(t.shape)\n\n probs /= np.sum(probs, axis=0)[np.newaxis,:]\n return probs", "def softmax(x):\r\n exps = np.exp(x)\r\n return exps / np.sum(exps)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def test_softmax_base():\n x = np.array([[[2.0, 3.0, 4.0, 5.0],\n [3.0, 4.0, 5.0, 6.0],\n [7.0, 8.0, 8.0, 9.0]],\n [[1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0],\n [6.0, 7.0, 8.0, 9.0]]])\n res = np.array([[[0.0320586, 0.08714432, 0.23688282, 0.64391426],\n [0.0320586, 0.08714432, 0.23688282, 0.64391426],\n [0.07232949, 0.19661193, 0.19661193, 0.53444665]],\n [[0.0320586, 0.08714432, 0.23688282, 0.64391426],\n [0.0320586, 0.08714432, 0.23688282, 0.64391426],\n [0.0320586, 0.08714432, 0.23688282, 0.64391426]]])\n obj.run(res=res, input=x)", "def loss_function(self, targets, outputs):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=outputs)\n return tf.reduce_mean(cross_entropy)", "def cross_entropy(y_pred,y):\n \n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n return sum(-y*np.log(y_pred+epsilon))", "def delta_cross_entropy_softmax(outputs, labels):\n \n m = labels.shape[0]\n grad = outputs\n \n grad[range(m),labels] -= torch.tensor(1.)\n\n grad = grad/m\n avg_grads = grad\n return avg_grads", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)" ]
[ "0.726201", "0.7222704", "0.7183105", "0.7172407", "0.7157092", "0.7130719", "0.7100268", "0.7088628", "0.70864207", "0.7078267", "0.70732236", "0.7006741", "0.7004395", "0.7000175", "0.69368124", "0.6921162", "0.69102913", "0.6891329", "0.6883931", "0.68837404", "0.68784714", "0.68784714", "0.68772256", "0.6862228", "0.6850462", "0.6829537", "0.68249416", "0.6822797", "0.6813928", "0.68125826", "0.6797588", "0.67964303", "0.6788515", "0.67829084", "0.6778467", "0.6768865", "0.6755015", "0.6750141", "0.6748647", "0.67397153", "0.67353505", "0.6727793", "0.67226416", "0.67187256", "0.67171866", "0.6706728", "0.6706647", "0.67020166", "0.6700532", "0.66961914", "0.6690315", "0.66867614", "0.66867113", "0.6679618", "0.66660595", "0.66611004", "0.665854", "0.664369", "0.6640769", "0.6636659", "0.6634923", "0.6632009", "0.6631533", "0.66243595", "0.6620343", "0.6610776", "0.6595661", "0.6591082", "0.65821695", "0.6580638", "0.6580638", "0.65687835", "0.65592915", "0.6557368", "0.655555", "0.6550128", "0.6550128", "0.6548678", "0.65462214", "0.6543173", "0.6533971", "0.6533826", "0.65207255", "0.65158105", "0.6513062", "0.65093243", "0.6498474", "0.6483018", "0.64823914", "0.6480208", "0.64752626", "0.64730364", "0.6472526", "0.6467811", "0.6462044", "0.6461839", "0.64601296", "0.6455038", "0.645216", "0.645216" ]
0.7691633
0
Reshapes 2D arrays to 1D
def resh(x):
    a = x.shape[0]
    b = x.shape[1]
    return x.reshape(a*b, 1), a, b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numpyReshape(array):\n return np.array(array, dtype = float).reshape(1, len(array))", "def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')", "def unstack(a, axis=0):\n shape = a.shape\n return [jnp.squeeze(b, axis=axis) for b in \\\n jnp.split(a, shape[axis], axis=axis)]", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))", "def make_2d(x):\n return x.reshape((1, len(x)))", "def flatten_layers(data):\n return data.reshape((data.shape[0], data.shape[1], -1))", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def flatten(x):\n return reshape(x, (x.shape[0], -1))", "def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y", "def flatten(self, arr):\n shape = arr.shape\n return arr.reshape(shape[0] * shape[1], *shape[2:])", "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)", "def reshape_to_batch(array):\n if len(array.shape) == 2:\n array = numpy.expand_dims(array, axis=2)\n array = numpy.expand_dims(array, axis=0)\n return array", "def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))", "def reshape(x, shape):\n return Reshape(shape)(x)", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def Reshape(xdata, ydata, zdata):\r\n N = zdata.shape[0]\r\n Nx = list(ydata).count(ydata[0])\r\n Ny = N/Nx\r\n zz = np.copy(zdata)\r\n zz.shape = (Ny,Nx)\r\n xx = xdata[:Nx]\r\n yy = np.zeros(Ny)\r\n for u in range(Ny):\r\n yy[u] = ydata[Nx*u]\r\n return xx,yy,zz", "def flatten_image(x):\n *batch_shape, h, w, c = x.shape\n return x.reshape((*batch_shape, h * w * c))", "def to_1d_array(self):\n return reshape_fns.to_1d(self._obj, raw=True)", "def reshape_data(X, y):\n\n X_reshaped = X.reshape(-1, X.shape[-1])\n y_reshaped = y.reshape(-1)\n\n return X_reshaped, y_reshaped", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def _reshape(self, 
data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def expand_as(array, array_as):\r\n\r\n shape = list(array.shape)\r\n for i in range(len(array.shape), len(array_as.shape)):\r\n shape.append(1)\r\n\r\n return array.reshape(shape)", "def flatten_stimulus(stimulus):\n n, h, w = stimulus.shape\n return stimulus.reshape((n, h * w))", "def conver1D(array):\n l = array.shape\n total = np.zeros((0, l[1] * l[2]), dtype=np.float32)\n i = 0\n for i in range(24):\n tempData = array[i]\n array1D = []\n for x in tempData:\n for s in x:\n array1D.append(s)\n total = np.insert(total, i, array1D, axis=0)\n return total", "def flatten(a, start=0, count=2):\n s = a.shape\n return np.reshape(a, s[:start] + (-1,) + s[start+count:])", "def reshape(self, bottom, top):\n top[0].reshape(1)\n #top[2].reshape(1)", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def _reshape_like(F, x, y):\n return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)", "def reshape(input):\n\n input = input / 255\n input = trans.resize(input, (args.size, args.size))\n input = np.reshape(input, input.shape + (1,))\n input = np.reshape(input, (1,) + input.shape)\n return input", "def blockshaped(arr, nrows, ncols):\r\n\t h, w = arr.shape\r\n\t return (arr.reshape(h//nrows, nrows, -1, ncols)\r\n\t .swapaxes(1,2)\r\n\t .reshape(-1, nrows, ncols))", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def reshape(self, *shape):\n return F.Reshape.apply(self, shape)", "def img_reshape(self, input_img):\n _img = np.transpose(input_img, (1, 2, 0)) \n _img = np.flipud(_img)\n _img = np.reshape(_img, (1, img_dim[0], img_dim[1], img_dim[2]))\n return _img", "def reshape_var(var):\n dims = np.shape(var)\n nx = dims[0]\n ny = dims[1]\n nz = dims[2]\n\n var_2d = var.reshape(nx * ny, nz)\n return var_2d", "def reshape(x, shape):\n if x.shape == shape:\n return 
chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def reshape(x, shape):\n return float(x) if shape is None else jnp.reshape(x, shape)", "def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat", "def promote_shapes(*args):\n if len(args) < 2:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n batch_shape = lax.broadcast_shapes(*shapes)\n num_dims = len(batch_shape)\n return [\n jnp.reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims\n else arg\n for arg, s in zip(args, shapes)\n ]", "def _flatten(params):\n params, _ = tree_flatten(params)\n return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))", "def flatten(x):\n all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])\n o = tf.reshape(x, [-1, all_dims_exc_first])\n return o", "def GPy_reformat_3D(array):\r\n n_timesteps = np.shape(array)[-1]\r\n if len(np.shape(array)) == 1:\r\n array = array.reshape(n_timesteps, 1)\r\n return [array, array, array]\r\n elif len(np.shape(array)) == 2:\r\n array = array.T\r\n array1 = array[:, 0, None]\r\n array2 = array[:, 1, None]\r\n array3 = array[:, 2, None]\r\n return [array1, array2, array3]\r\n else:\r\n return print(\"Error in GPy_reformat, input array is wrong shape.\")", "def mesh_flatten(x):\r\n N, V, dims = x.shape\r\n\r\n mesh_data = x\r\n mesh_data = np.transpose(mesh_data, axes=[2,1,0])\r\n mesh_data = mesh_data.reshape(-1, N)\r\n return mesh_data", "def reshape(self, *dims):\n if dims is None or (len(dims) == 1 and dims[0] is None):\n return self\n\n # unpack if necessary\n if len(dims) == 1 and (type(dims[0]) is list or type(dims[0]) is tuple):\n dims = dims[0]\n \n dims_computer = [d if callable(d) else lambda s: s.data(d) for d in dims]\n\n seqs = [s for s in self.unstructured()]\n dimensions = [tuple(d(s) for d in dims_computer) for s in seqs]\n data = {}\n \n for s,d in zip(seqs, dimensions):\n if d in data: data[d].append(s)\n else: data[d] = [s]\n\n return DataArray(data, dims=dims)", "def _flatten_batch(self, matrix_tups):\n out_vecs = []\n for t in matrix_tups:\n for v in t:\n new_shape = (v.shape[0],)\n if len(v.shape) > 1:\n new_shape = new_shape + (np.prod(v.shape[1:]),)\n out_vecs.append(v.reshape(new_shape))\n return jnp.concatenate(out_vecs, axis=1)", "def flatten_reshape(variable):\n dim = 1\n for d in variable.get_shape()[1:].as_list():\n dim *= d\n return tf.reshape(variable, shape=[-1, dim])", "def reshape_pixel_array(self, pixel_arr):\n reshaped_pixel_arr = []\n n = 28\n while n <= len(pixel_arr):\n reshaped_pixel_arr.append(pixel_arr[n-28:n])\n n+=28\n\n return reshaped_pixel_arr", "def expand_dims(array):\n return array[np.newaxis, np.newaxis, ...]", "def tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return x.reshape((n_device * n_batch,) + 
tuple(remaining_dims))", "def vpack(arrays, shape, fill, dtype= None):\n array = np.full(shape, fill, dtype)\n for row, arr in zip(array, arrays):\n row[:len(arr)] = arr\n return array", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def concatonate(data):\n tmp = np.array(data)\n tmp = np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], -1))\n return tmp", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def reshape_1d(self, dat):\n if dat.ndim == 1:\n dat = np.ones_like(self.data) * dat[:, np.newaxis]\n \n if (dat.shape != self.data.shape):\n raise ShapeError('%s != %s: reshaped variables must have same shape as data.' \n % (repr(dat.shape), repr(self.data.shape)))\n\n try: \n mask = self.data.mask\n dat = dat[mask == False]\n except AttributeError:\n pass\n \n dat = np.reshape(dat, dat.size)\n \n return dat", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def atleast_1d(*arrays):\n if len(arrays) == 1:\n a = arrays[0]\n if isscalar(a):\n a = add_axes(a, 1)\n return a\n else:\n assert len(arrays) > 1\n return [atleast_1d(a) for a in arrays]", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))", "def unblockshaped(arr, h, w):\n n, nrows, ncols = arr.shape\n return (arr.reshape(h//nrows, -1, nrows, ncols)\n .swapaxes(1,2)\n .reshape(h, w))", "def _np_transpose(image):\n return np.transpose(image, (2, 0, 1))", "def prepare_arrays(series: pd.Series) -> np.array:\n\n series = series.map(string_to_array)\n\n # transform the array of array into a 2d-array\n return np.stack(np.array(series.array))", "def expand(*arrays):\n arrays = list(map(boundify, arrays))\n assert_schema(arrays, same_dimension=True)\n\n dim_low = list(map(min, zip(*(a.datashape.dim_low for a in arrays))))\n dim_high = list(map(max, zip(*(a.datashape.dim_high for a in arrays))))\n\n result = []\n for a in arrays:\n ds = a.datashape.copy()\n ds.dim_low = dim_low\n ds.dim_high = dim_high\n if ds != a.datashape:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return result", "def generalized_broadcast(arrays):\n arrays1 = np.broadcast_arrays(*[A[..., 0] for A in arrays])\n shapes_b = [A1.shape + (A.shape[-1],) for A1, A in zip(arrays1, arrays)]\n strides_b = [A1.strides + (A.strides[-1],) for A1, A in zip(arrays1, arrays)]\n arrays_b = [as_strided(A, shape=shape_Ab, strides=strides_Ab)\n for A, shape_Ab, strides_Ab in zip(arrays, shapes_b, strides_b)]\n return arrays_b", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def 
batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def expand_images(X):\n\n X_ex = np.empty((X.shape[0] * X.shape[1], X.shape[2])) * np.nan\n\n for n in range(0, X.shape[2]):\n X_ex[:,n] = X[:,:,n].flatten()\n\n return X_ex", "def relay_reshape(c, v, shp):\n nv = c.ref(v)\n assert shp.is_constant(tuple)\n trim = False\n if shp.value == ():\n shp = (1,)\n trim = True\n else:\n shp = shp.value\n res = relay.op.reshape(nv, newshape=shp)\n if trim:\n res = relay.op.take(res, relay.const(0), mode='fast')\n return res", "def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images", "def expand_dims_twice(array):\n return np.expand_dims(np.expand_dims(array, axis=1), axis=1)", "def atleast_2d(*arys):\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_2d')\n if a.ndim == 0:\n a = a.reshape(1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res", "def transform(self, x: Array2D) -> Array2D:", "def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)", "def slice_data_to_2D(x, y):\n if(x.shape != y.shape):\n print(\"Error: Images and Labels do not have the same shape\")\n else:\n x = np.array([(x[i, :, :, z]) for i in range(x.shape[0]) for z in range(x.shape[3])])\n y = np.array([(y[i, :, :, z]) for i in range(y.shape[0]) for z in range(y.shape[3])])\n return x,y", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def flatten_npar(np_array):\n \n itr = len(np_array)\n start = np_array[0]\n \n for i in range(1,itr):\n start = np.hstack((start,np_array[i]))\n \n return(np.array(start))", "def transform(self, images):\n return np.array([self.transform_single(i) for i in images])", "def flatten_array(X_input):\r\n X_input_flat = np.array([x.flatten() for x in X_input])\r\n return X_input_flat", "def transpose(x):\n return x[:, np.newaxis]", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def reshape_obs(self, d_s):\n return np.column_stack(tuple(d_s.obs))", "def unbatch_stack(S, grid_shape):\n\tI, J = grid_shape\n\tC, M = S.shape[1], S.shape[2]\n\treturn S.reshape(-1, I, J, C, M, M)", "def reshape_d(sequence, batch_size, num_steps):\n batch_length = batch_size * num_steps\n num_batches = sequence // batch_size\n if num_batches * batch_length 
> (len(sequence) - 1):\n num_batches -= 1\n # Round up batch\n X = sequence[: num_batches * batch_length]\n y = sequence[1: num_batches * batch_length + 1]\n X_splits = np.split(X, batch_size)\n y_splits = np.split(y, batch_size)\n # Stack batches\n X = np.stack(X_splits)\n y = np.stack(y_splits)\n return X, y", "def _reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output" ]
[ "0.74482", "0.71689045", "0.69794905", "0.6903017", "0.6864476", "0.68438625", "0.6818894", "0.67829883", "0.67705846", "0.6595469", "0.6590507", "0.6587642", "0.65708774", "0.65608907", "0.6499275", "0.64894766", "0.6464098", "0.6438216", "0.64029455", "0.63895625", "0.6345568", "0.6343146", "0.63418293", "0.632972", "0.632972", "0.632972", "0.632972", "0.632025", "0.6269138", "0.62084717", "0.6162795", "0.6151457", "0.61175424", "0.6110581", "0.61098784", "0.6090091", "0.6081851", "0.6081008", "0.60753375", "0.6068949", "0.6059581", "0.60568696", "0.6050144", "0.6048078", "0.6042351", "0.6040704", "0.5990686", "0.5988807", "0.598826", "0.5958945", "0.5955646", "0.5944717", "0.58977365", "0.589276", "0.5875319", "0.58699137", "0.586639", "0.58642083", "0.58582515", "0.5852828", "0.5847551", "0.5844221", "0.5831107", "0.58217525", "0.5811524", "0.58089167", "0.58089167", "0.5797645", "0.5789964", "0.5787474", "0.5773181", "0.57699466", "0.57670826", "0.57537127", "0.5750579", "0.57314485", "0.57313997", "0.57293457", "0.57221", "0.57217455", "0.57153064", "0.5709931", "0.57095313", "0.5705683", "0.57032335", "0.56849456", "0.56849456", "0.5684141", "0.5684141", "0.5684141", "0.56838506", "0.56812114", "0.5679447", "0.5674706", "0.56729287", "0.5663109", "0.566213", "0.5646419", "0.5645068", "0.5641032" ]
0.6693302
9
Iterative function that enumerates the set of all sequences of outcomes of given length.
def gen_all_sequences(outcomes, length):
    answer_set = set([()])
    for dummy_idx in range(length):
        temp_set = set()
        for partial_sequence in answer_set:
            for item in outcomes:
                new_sequence = list(partial_sequence)
                new_sequence.append(item)
                temp_set.add(tuple(new_sequence))
        answer_set = temp_set
    return answer_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_all_sequences(outcomes, length):\r\n \r\n ans = set([()])\r\n for dummy_idx in range(length):\r\n temp = set()\r\n for seq in ans:\r\n for item in outcomes:\r\n new_seq = list(seq)\r\n new_seq.append(item)\r\n temp.add(tuple(new_seq))\r\n ans = temp\r\n return ans", "def gen_all_sequences(outcomes, length):\n \n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_all_sequences(outcomes, length):\n \n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_all_sequences(outcomes, length):\n\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_all_sequences(outcomes, length):\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_all_sequences(outcomes, length): \n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_all_sequences(outcomes, length):\n\n answer_set = [()]\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return sorted(answer_set)", "def get_outcomes(num_die_sides):\n outcomes = []\n\n for value in range(1, num_die_sides + 1):\n outcomes.append(value)\n\n return outcomes\n\n\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n DO NOT MODIFY.\n\n outcomes: possible values of a roll (ex. 
-- [1,2,3,4,5,6] for a 6-sided die)\n \"\"\"\n\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def gen_permutations(outcomes, length):\r\n \r\n ans = set([()])\r\n for dummy_idx in range(length):\r\n temp = set()\r\n for seq in ans:\r\n for item in outcomes:\r\n new_seq = list(seq)\r\n if new_seq.count(item) == 0:\r\n new_seq.append(item)\r\n temp.add(tuple(new_seq))\r\n ans = temp\r\n return ans", "def run_example1():\r\n #outcomes = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n #outcomes = set(['Heads','Tails'])\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n outcomes = set([\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"])\r\n \r\n length = 7\r\n seq_outcomes = gen_permutations(outcomes,length)\r\n print \"Computed\", len(seq_outcomes), \"sequences of\", str(length), \"outcomes\"\r\n #print \"Sequences were\", seq_outcomes\r", "def gen_sorted_sequences(outcomes, length): \r\n all_sequences = gen_permutations(outcomes, length)\r\n sorted_sequences = [tuple(sorted(sequence)) for sequence in all_sequences]\r\n return set(sorted_sequences)", "def possible_motifs_by_length(length, base_set=\"ACGU\"):\n args = [base_set for i in xrange(length)]\n for permutation in itertools.product(*args):\n yield \"\".join(permutation)", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n #if(len(item) <= historyLength):\n yield [seq[0]]+item\n yield item", "def run_example2():\r\n # example for digits\r\n outcomes = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n #outcomes = set([\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"])\r\n \r\n length = 2\r\n seq_outcomes = gen_all_sequences(outcomes, length)\r\n print \"Computed\", len(seq_outcomes), \"sorted sequences of\", str(length) ,\"outcomes\"\r\n print \"Sequences were\", seq_outcomes", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def run_example3():\r\n # example for digits\r\n #outcomes = [0, 1, 2, 3]\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n outcomes = [\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\r\n \r\n length = len(outcomes)\r\n seq_outcomes = gen_permutations_re(outcomes)\r\n print \"Computed\", len(seq_outcomes), \"sorted sequences of\", str(length) ,\"outcomes\"\r\n #print \"Sequences were\", seq_outcomes\r", "def power_set(seq):\n seq = list(seq)\n \n #Empty set or one element sets\n if len(seq) <= 1:\n yield seq\n yield []\n \n else:\n for item in power_set(seq[1:]):\n yield [seq[0]]+item\n yield item", "def fast_forward_to_length(sequences, length):\n return itertools.dropwhile(lambda seq: len(seq) != length, sequences)", "def _powerset(iterable: Iterable) -> Iterator:\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))", "def e_seq():\n yield 2;\n 
for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1", "def gen_length_sequence(self, set_choice, seq_length):\n return_seq = []\n chosen_set = self.split_data[set_choice]\n\n for user_index, group in chosen_set.groupby('user_index'):\n for i in range(group.shape[0] - seq_length + 1):\n sub_group = group.iloc[i:i+seq_length]\n return_seq.append([user_index,\n sub_group['poi_index'].to_list(),\n sub_group['timestamp'].to_list(),\n sub_group['week'].to_list()])\n return return_seq", "def gen_length_sequence(self, set_choice, seq_length):\r\n return_seq = []\r\n chosen_set = self.split_data[set_choice]\r\n\r\n for user_index, group in chosen_set.groupby('user_index'):\r\n for i in range(group.shape[0] - seq_length + 1):\r\n sub_group = group.iloc[i:i+seq_length]\r\n return_seq.append([user_index,\r\n sub_group['poi_index'].to_list(),\r\n sub_group['timestamp'].to_list(),\r\n sub_group['week'].to_list()])\r\n return return_seq", "def AllCombinations(data, comblength):\n return [c for c in itertools.combinations(data, comblength)]", "def all_subsets_of_size(L, size):\r\n pass # Left as an exercise for the reader\r", "def leniter(i):\n return sum(1 for e in i)", "def _get_index_iterator(indexes, length):\n return combinations(indexes, length)", "def count_sequences(self, size):\n raise NotImplementedError", "def enumerate_kmers(alphabet: Union[str, List[str]], length: int):\n for value in itertools.product(alphabet, repeat=length):\n yield \"\".join(value)", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def runs(sequence, predicate, minlength=2):\n inrun = False\n for i,v in enumerate(sequence):\n if not inrun and predicate(v):\n inrun = True\n start = i\n elif inrun and not predicate(v):\n inrun = False\n stop = i - 1\n if stop - start >= minlength:\n yield start, stop\n\n if predicate(v) and inrun:\n stop = i\n if stop - start >= minlength:\n yield start, stop", "def arrays_to_sequences(token_list_iterable, sequence_length=2049):\n accum = []\n for l in token_list_iterable:\n accum.extend(l)\n\n if len(accum) > sequence_length:\n chunks = split_list(accum, sequence_length)\n yield from chunks[:-1]\n accum = chunks[-1]\n\n if len(accum) > 0:\n yield accum", "def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))", "def get_permutatation_by_length(length, permutation_set):\n pass", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def part_1():\n return itertools.permutations(range(5))", "def powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable( itertools.combinations(s, r)\n for r in range(len(s)+1) )", "def powerset(iterable, include_empty = True):\n s = list(iterable)\n i = chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))\n if not include_empty:\n next(i)\n return i", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def calc_run_lengths(sequence: List[int]) -> List[Run]:\n return [Run(object=g[0], length=len(list(g[1])))\n for g in itertools.groupby(sequence)]", "def window(seq, size=2, stride=1):\n it = iter(seq)\n result = []\n for elem in it:\n result.append(elem)\n if len(result) == size:\n yield result\n result = result[stride:]", "def 
full_nloop_iterator(self, start=None, length=1):\n from itertools import ifilter, imap\n\n g = self.path(start)\n\n ifull = ifilter(\n lambda x: x.is_loop() and x.is_full(),\n self._all_npath_extension(g,length))\n\n return imap(copy, ifull)", "def combinations(sequence, length, NULL=object()):\r\n if length <= 0:\r\n combos = [NULL]\r\n else:\r\n combos = []\r\n for i, item in enumerate(sequence, 1):\r\n rem_items = sequence[i:]\r\n rem_combos = combinations(rem_items, length-1)\r\n combos.extend(item if combo is NULL else [item, combo]\r\n for combo in rem_combos)\r\n return combos", "def makeChrom(length):\n output = []\n for i in range(length):\n output.append(randrange(14))\n return output", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def powerset(iterable):\n\tset_list = list(iterable)\n\treturn list(chain.from_iterable(combinations(set_list, r)\n\t\t\t\t\t\t\t\tfor r in range(len(set_list)+1)))", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def iter_combos(include_unknown=False):\n if include_unknown:\n return _combos\n else:\n return _combos[:-7]", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def gen_flow_session(self, set_choice, seq_len):\r\n return_seq = []\r\n chosen_set = self.split_flow[set_choice]\r\n\r\n for poi_index, group in chosen_set.groupby('poi_index'):\r\n for i in range(group.shape[0] - seq_len + 1):\r\n flow_seq = group['flow'].iloc[i:i+seq_len]\r\n if flow_seq.sum() > 0:\r\n return_seq.append([poi_index, flow_seq.to_list()])\r\n return return_seq", "def print_permutations(values, length, accum):\n if length == 0:\n print(accum)\n else:\n for value in values:\n temp_accum = []\n temp_accum = temp_accum + accum\n temp_accum.append(value)\n print_permutations(values, length - 1, temp_accum)", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1))", "def permutations(iterable):\n pass", "def generate_alphabet_combinations(length: int = 2) -> List[str]:\n assert length > 0\n alphabets = string.ascii_lowercase\n\n return [\n ''.join(combination)\n for n in range(1, length+1)\n for combination in product(alphabets, repeat=n)\n ]", "def gen_permutations_re(outcomes):\r\n\r\n if len(outcomes) == 1:\r\n ans = set()\r\n temp = []\r\n temp.append(outcomes[0])\r\n ans.add(tuple(temp))\r\n return ans\r\n\r\n rest_permutations = gen_permutations_re(outcomes[1:])\r\n\r\n answer = []\r\n for perm in rest_permutations:\r\n perm = list(perm)\r\n for i in range(len(perm) + 1):\r\n temp = perm[:]\r\n temp.insert(i, outcomes[0])\r\n answer.append(tuple(temp))\r\n\r\n return set(answer)", "def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval", 
"def powerset(iterable):\n\n s = list(iterable)\n\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s) + 1))", "def powerset(s):\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def myCombinations(iterable, r):\n for perm in itertools.permutations(iterable, r):\n if sorted(perm) == list(perm):\n yield perm", "def generate_strings(char_list, length):\n if length <= 0:\n yield []\n elif length == 1:\n for char in char_list:\n yield [char]\n else:\n for char in char_list:\n for l in generate_strings(char_list, length-1):\n yield [char] + l", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def random_iterator(seq:Sequence[Any], maxlen=None) -> Any:\n if not hasattr(seq, \"__len__\") or not hasattr(seq, \"__getitem__\"):\n raise TypeError(\"Sequence must be indexable\")\n N = len(seq)\n order = list(range(N))\n random.shuffle(order)\n for i,j in enumerate(cycle(order)):\n if maxlen is not None and i > maxlen:\n return\n yield seq[j]", "def everygrams(seq):\n for n in range(1, len(seq) + 1):\n for ng in nltk.util.ngrams(seq, n):\n yield ng", "def gen(length):\n return itertools.product(LABELS,repeat=length)", "def simple_seq(seq):\n for i in seq:\n yield i", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]:\n return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen))", "def powerset(iterable):\n\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(1, len(s) + 1)\n )", "def __iter__(self):\n for x in self.seq: yield x", "def powerset(iterable):\n xs = list(iterable)\n # note we return an iterator rather than a list\n return chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1))", "def powerset(iterable):\n return map(set, chain.from_iterable(\n combinations(iterable, r) for r in range(len(iterable) + 1)))", "def part_2():\n return itertools.permutations(range(5, 10))", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def subsequences(sequence, length, circular=False):\n\n if circular:\n for i in xrange(len(sequence)):\n subsequence = sequence[i:i + length]\n yield subsequence + sequence[0:length - len(subsequence)]\n else:\n for i in xrange(len(sequence) - length + 1):\n yield sequence[i:i + length]", "def powerset(n):\n # chain r-combinations generator for r=0, 1,..., n\n return chain.from_iterable(combinations(range(n), r) for r in range(n+1))", "def sets(elements, set_size):\n return combinations(elements, set_size)", "def __iter__(self):\n return iproduct(*self.sets)", "def get_sequence(self, length, x, y):\r\n try:\r\n for i in super(DirectionGenerator, self).get_sequence(length, x, y):\r\n yield i + 1\r\n except Exception as ex:\r\n raise ex", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s)+1))", "def windows_of_permutations(n, step):\n def gen(p):\n for i in range(0, NB_AVIONS-n, step):\n for perm in all_permutations(range(i, i+n))(p):\n yield perm\n return gen", "def generate_subsequences(x, sequence_length):\n\n X, y = [], []\n \n for i in range(len(x)):\n x_start = i\n x_end = i+sequence_length \n \n y_start = 
x_start+1\n y_end = x_end+1\n \n if y_end > len(x):\n break\n \n x_batch = x[x_start:x_end] \n y_batch = x[y_start:y_end]\n \n X.append(x_batch)\n y.append(y_batch)\n \n return np.stack(X), np.stack(y)", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def lexicographic(alphabet):\n for n in count():\n for e in product(alphabet, repeat = n):\n yield e", "def random_sequences(length_from, length_to,\n vocab_lower, vocab_upper,\n batch_size):\n #if length_from > length_to:\n #raise ValueError('length_from > length_to')\n\n def random_length():\n if length_from == length_to:\n return length_from\n return np.random.randint(length_from, length_to)\n \n while True:\n yield [\n np.random.randint(low=vocab_lower,\n high=vocab_upper,\n size=random_length()).tolist()\n for _ in range(batch_size)\n ]", "def all_subsets(self, ss):\n return chain(*map(lambda x: combinations(ss, x), range(1, len(ss)+1)))", "def cyclen(n, iterable):\n return chain.from_iterable(repeat(tuple(iterable), n))", "def all_pairs_number_of_walks(G, walk_length):\n # TODO This algorithm can certainly be parallelized.\n return {v: single_source_number_of_walks(G, v, walk_length) for v in G}", "def count(seq):\n\treturn sum(1 for x in seq)", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def distinct(length, digits=DIGITS):\n return (int(''.join(p)) for p in permutations(digits, length))" ]
[ "0.72901285", "0.72600734", "0.72600734", "0.723398", "0.7221575", "0.7214704", "0.7166947", "0.66999054", "0.66996527", "0.6592231", "0.6587351", "0.6566868", "0.63469136", "0.62606", "0.6040663", "0.6040663", "0.6040663", "0.596155", "0.59232736", "0.58177376", "0.5734803", "0.57291275", "0.57020736", "0.5675124", "0.5663127", "0.56556934", "0.5637283", "0.56114596", "0.5588629", "0.5582791", "0.55774665", "0.5576412", "0.5560903", "0.5552407", "0.55434513", "0.5539018", "0.5533457", "0.5527952", "0.552452", "0.5502629", "0.5480968", "0.54737794", "0.54692805", "0.5456407", "0.5454223", "0.54498655", "0.5440449", "0.54376376", "0.5418179", "0.5386519", "0.53772295", "0.53709924", "0.53709924", "0.53709924", "0.53692925", "0.53634566", "0.53592646", "0.5358757", "0.5351683", "0.5346387", "0.5337061", "0.533697", "0.53358483", "0.5327358", "0.5316891", "0.53167236", "0.5316469", "0.53128046", "0.52850974", "0.528366", "0.5279938", "0.5261612", "0.52548426", "0.52527446", "0.5249947", "0.5242892", "0.5238167", "0.5232904", "0.5232239", "0.5228839", "0.52225775", "0.5221305", "0.52212286", "0.5220615", "0.52093875", "0.5201019", "0.5193932", "0.51903236", "0.5188696", "0.5178612", "0.5178612", "0.5177007", "0.5174968", "0.51729095", "0.51670194", "0.51664996", "0.5161755", "0.51508313", "0.51430845" ]
0.7217363
6
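For reference, the gen_all_sequences snippets collected above all enumerate every ordered length-n tuple over a set of outcomes; a minimal equivalent sketch using the standard library (itertools.product is assumed here to be an acceptable stand-in for the hand-rolled set-building loop):

import itertools

def gen_all_sequences(outcomes, length):
    # All ordered tuples of the given length drawn from outcomes,
    # matching the iterative set-building versions above.
    return set(itertools.product(outcomes, repeat=length))

# Example: two rolls of a 3-sided die give 3 ** 2 == 9 sequences.
assert len(gen_all_sequences({1, 2, 3}, 2)) == 9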
Compute the maximal score for a Yahtzee hand according to the upper section of the Yahtzee score card.
def score(hand):\n max_score = []\n for dice in hand:\n max_score.append(hand.count(dice) * dice)\n return max(max_score)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(hand):\n if (hand==()):\n return 0\n score_board=[0,0,0,0,0,0,0,0,0,0,0,0]\n for dice in hand:\n score_board[dice-1]+=dice\n max_score=max(score_board)\n return max_score", "def max_score(self):\n return self.raw_possible", "def highCard(self):\n return max(self)", "def score(hand):\n max_score = []\n for die in hand:\n max_score.append(hand.count(die) * die)\n return max(max_score)", "def scoreSevenHand(hand):\n handCombos = list(itertools.combinations(hand, 5))\n return max(scoreFiveHand(hc) for hc in handCombos)", "def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score", "def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore", "def getHighScore(self):\n return max(self.scores)", "def best_hand(cards):\n return max(generate_all_hands(cards))", "def max_score(self):\r\n return self.lcp.get_max_score()", "def personal_best(scores):\n return max(scores)", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def score(hand):\n occurrences = [] \n for die in hand:\n if die > len(occurrences):\n occurrences.extend([0 for dummy_idx in range(len(occurrences) ,die)]) \n occurrences[die - 1] += 1\n maxi = 0\n for idx in range(len(occurrences)):\n if (idx+1) * occurrences[idx] > maxi:\n maxi = (idx + 1) * occurrences[idx]\n return maxi", "def personal_best(scores: list) -> int:\n return max(scores)", "def max_score(self):\n return max(self._extract_set('score') or [0])", "def get_high_score(self) -> float:\n return max(self._scores)", "def score(cards):\n \n values = sorted(map(lambda x: x[0], cards))\n\n if same_suit(cards) and values[0] == 10 and values[4] == 14: # royal flush\n return (10, 14, 0) \n\n if same_suit(cards) and values[4] - values[0] == 4 and len(set(values)) == 5: # straigh flush\n return (9, values[4], 0)\n\n if len(set(values)) == 2 and values[1] == values[3]: # four of a kind\n if values[0] != values[1]:\n high_card = values[0]\n else: high_card = values[4]\n return (8, values[2], high_card)\n\n if len(set(values)) == 2 and values[1] != values[3]: # full house\n return (7, values[2], 0)\n\n if same_suit(cards): # flush\n return (6, values[4], 0)\n\n if values[4] - values[0] == 4 and len(set(values)) == 5: # straight\n return (5, values[4], 0)\n\n if len(set(values)) == 3: # three of a kind or two pair\n # three of a kind\n if values[0] == values[2]:\n return (4, values[0], max(values[3:5]))\n if values[1] == values[3]:\n return (4, values[1], max(values[0], values[4]))\n if values[2] == values[4]: \n return (4, values[2], max(values[0:2]))\n else: # two pair\n return (3, max(values[1], values[3]), dict((values.count(i), i) for i in values)[1])\n\n if len(set(values)) == 4: # one pair\n high_value_card = dict((values.count(i), i) for i in values)[2]\n s = set(values)\n s.remove(high_value_card)\n return (2, high_value_card, max(s))\n\n return (1, values[4], 0)", "def max_score(self):\n return self.points", "def get_big_joker_value(deck):\n \n return max(deck)", "def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)", "def worst_score(self):\r\n pass", "def 
score(hand):\n counted = []\n scores = []\n for element in hand:\n if element not in counted:\n scores.append(hand.count(element)*element)\n counted.append(element)\n return max(scores)", "def getMaxAlignmentScore(self):\n # get max of each row\n # max_scores = [max(i) for i in self.matrix]\n\n # return the max of the max vaules\n return numpy.max(self.matrix)", "def max(scores):\n return __builtin__.max(scores) if len(scores) else 0", "def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore", "def get_big_joker_value(deck_of_cards):\n big_joker_value = max(deck_of_cards)\n return big_joker_value\n # big_joker is the largest card, thus max() function", "def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number", "def best_hand(hands):\r\n best_val = 0\r\n sum = 0\r\n hand = None\r\n for h in hands:\r\n for t in h:\r\n sum = sum + t[1]\r\n if sum > best_val:\r\n best_val = sum\r\n hand = h\r\n\r\n return hand", "def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd", "def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n self.max_score = score\n \n return self.max_score", "def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)", "def max_score(self):\r\n max_score = None\r\n if self.check_if_done_and_scored():\r\n max_score = self._max_score\r\n return max_score", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def largest_BE_A(Z):\n \n \n \n return A", "def max_score_column(self, scores):\r\n x = max(scores)\r\n list = []\r\n for c in range(0, len(scores)):\r\n if scores[c] == x:\r\n list += str(c)\r\n if self.tiebreak == 'LEFT':\r\n return int(list[0])\r\n elif self.tiebreak == 'RIGHT':\r\n return int(list[-1])\r\n else:\r\n return int(random.choice(list))", "def score(self):\n hand = sorted(self.hand)\n score = -self.chips\n index = 0\n while index < len(hand):\n if index == 0 or hand[index-1] != hand[index]-1:\n score += hand[index]\n index += 1\n return score", "def best_p(zscore):\n for p, z in HANDY_Z_SCORE_CHEATSHEET:\n if zscore > z:\n break\n\n return (p, z)", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def maximizer(evaluate):\n def strategy(player, board):\n def score_move(move):\n return evaluate(player, Othello.make_move(move, player, list(board)))\n return max(Othello.legal_moves(player, board), key=score_move)\n return strategy", "def score(hand):\n current_hand 
= {}\n for dice in hand:\n if not current_hand.get(dice):\n current_hand[dice] = dice\n else:\n current_hand[dice] += dice\n\n #compute the current score for each dice\n\n return max(current_hand.values())", "def max_value(policy_lookup, state, player):\n\taction_values = list(get_policy_actions(policy_lookup, state, player).values())\n\tif action_values:\n\t\treturn np.max(action_values)\n\treturn 0", "def max_e_score(self, entity):\n return float(entity['es_bb'][1])", "def _find_best_hand(hands, key=hand_rank):\n\n best_hand = None\n for hand in hands:\n best_hand_of_combination = max(itertools.combinations(hand, 5), key=key)\n\n if not best_hand:\n best_hand = best_hand_of_combination\n elif best_hand_of_combination > best_hand:\n best_hand = best_hand_of_combination\n\n return best_hand", "def max_score_column(self, scores):\r\n max_s = max(scores)\r\n max_i = []\r\n for r in range(len(scores)):\r\n if scores[r] == max_s:\r\n max_i += [r]\r\n max_i\r\n if self.tiebreak == 'LEFT':\r\n return max_i[0]\r\n elif self.tiebreak == 'RIGHT':\r\n return max_i[-1]\r\n else:\r\n return random.choice(max_i)", "def maximum_score(self):\n max_score = self.values('question').annotate(\n top_answer=Max('score')\n )\n max_score = sum(d['top_answer'] for d in max_score)\n return max_score", "def free_bacon(opponent_score):\n # BEGIN PROBLEM 2\n a, b = opponent_score % 10, opponent_score // 10 # separation into digits\n return (max(a, b) + 1)\n # END PROBLEM 2", "def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return", "def maxcompChooseWord(hand, wordList, n):\n # 电脑给出最优解\n point = 0\n maxword = ''\n for word in wordList:\n newword1 = copy.deepcopy(word)\n newword2 = copy.deepcopy(word)\n if isValidWord(newword1, hand, wordList):\n p = getWordScore(newword2, n)\n if p > point:\n point = p\n maxword = word\n if point == 0:\n return None\n else:\n return maxword, point", "def get_highscore(self, score):\n scores = list(self.history_score.values())\n \n # Compare current score with the last placing in leaderboard.\n if score > max(scores):\n return 0\n else:\n if score < min(scores):\n return 2\n else:\n return 1", "def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v", "def free_bacon(opponent_score):\n # BEGIN PROBLEM 2\n digits = opponent_score\n max_digit = digits % 10 # take last digit as starting point\n while digits > 0:\n digit = digits % 10 # take last digit\n digits = digits // 10 # eliminate last digit from digits\n if digit > max_digit:\n max_digit = digit\n return max_digit + 1\n # END PROBLEM 2", "def max_value(board): # the X player wants to maximize the score\n if terminal(board):\n return utility(board), None\n else:\n v = -math.inf\n move = None\n for action in actions(board):\n val, _ = min_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val > v:\n # Assign v the maximum value for future evaluation\n v = max(v,val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == 1:\n return v, move\n return v, move", "def best_action(self):\n child_score = self.child_Q() + self.mcts.c_puct * self.child_U()\n masked_child_score = child_score\n return 
np.argmax(masked_child_score)", "def schwefel221fcn(x: np.ndarray) -> np.ndarray:\n scores = np.max(np.abs(x), axis=1)\n return scores", "def aces_high(card):\n if isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def mamajek08_logRpHK_Ro_max():\n return mamajek08_Ro_logRpHK(-5.0)", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def calc_max_Harmony(self):\n\n state = torch.linalg.pinv(self.W).matmul(-self.B - self.inpS)\n stateC = self.toConceptual(state)\n harmony = self.calc_harmony(state=state)\n return harmony, state, stateC", "def max_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n legal_moves = game.get_legal_moves() # obtain all legal moves for game, ACTIONs that can be taken\n best_score = -math.inf # abstraction assignment of neg. 
infinity(lowest possible value for MAX score)\n for m in legal_moves: # iterate through all available actions\n new_state = game.forecast_move(m) # for each available move, forecast the resulting state from that ACTION\n # RESULT of ACTION\n score = self.max_value(new_state, depth - 1) # recursively uses the new state\n best_score = max(best_score,score) # calculates the minimizing score between the states\n return best_score # propagates minimizing score for given state", "def worst_B(Ag):\n bottom = 0\n for i in range(len(Ag)):\n etop = np.max(cf.TD20[int(Ag[i]) - 1])\n bottom += etop\n return bottom", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def best_score(a_dictionary):\n for key in a_dictionary:\n if key is None:\n return None\n else:\n max_val = max(a_dictionary)\n return max_val", "def latest(scores: list) -> int:\n return scores[-1]", "def maximal_valance(self) -> int:\n max_valances = {'H': 1, 'B': 4, 'C': 4, 'N': 4, 'O': 3, 'F': 1,\n 'Si': 4, 'P': 6, 'S': 6, 'Cl': 4, 'Br': 4, 'I': 6}\n\n if self.label in max_valances:\n return max_valances[self.label]\n\n else:\n logger.warning(f'Could not find a valid valance for {self}. '\n f'Guessing at 6')\n return 6", "def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]", "def test_get_max_score(self):\r\n max_score = self.peer_grading.max_score()\r\n self.assertEquals(max_score, None)", "def comp_choose_word(hand, word_list):\n maxscore = 0\n maxword = \"\" \n for n in range(calculate_handlen(hand)):\n perms = get_perms(hand, n)\n for word in perms:\n wordscore = get_word_score(word, HAND_SIZE)\n if wordscore > maxscore:\n if word not in word_list:\n continue\n else:\n maxscore = wordscore\n maxword = word\n return maxword\n # TO DO...", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def standardComposition_Max(self):\n temp = np.fmax(self.rulesList[0], self.rulesList[1])\n for r in self.rulesList[2:]:\n temp = np.fmax(temp, r)\n\n self.fuzzy_output = temp", "def get_small_joker_value(deck):\n \n return max(deck) - 1", "def solve(self, cipher):\n A = cipher\n N = len(A)\n _sum = sum(A)\n _max = -1 << 65 # 64-bit\n\n s = 0\n for ind, val in enumerate(A):\n s += (ind + 1) * val\n\n _max = max(_max, s)\n for i in xrange(N):\n s = s + _sum - N * A[N - 1 - i]\n _max = max(_max, s)\n\n return _max", "def chi_max(self):\n chis = [tr.chi_max for tr in self._trc]\n return max(chis) if chis else None", "def highestBetNotFold(self):\n return max([0]+[p._bet for p in self.players.values() if p.serial in self.in_game and p.notFold()])", "def max_score(ar, scorer: Optional[topk_scorer]=None):\n assert len(ar) > 0, \"dc.max_score is not defined for empty arrays\"\n \n if scorer is None:\n scorer = alpha_length_normalized()\n\n def op_max_score(seqs, score):\n res = np.array([score] + [scorer(s.logprobs, s) for s in seqs])\n return res.max()\n\n return ar.reduce(op_max_score, np.finfo(np.float32).min)", "def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best", "def get_highest_bid(self):\n return 
reduce(max, [p.pot_money for p in self.in_game_players], 0)", "def get_max_score(location_list, grid, shape):", "def mamajek08_logRpHK_max():\n return -3.8918287373004357", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def max_score_column(self, scores):\r\n m = max(scores)\r\n lc = []\r\n for i in range(len(scores)):\r\n if scores[i] == m:\r\n lc += [i]\r\n if len(lc) > 1:\r\n if self.tiebreak == \"LEFT\":\r\n column = lc[0]\r\n elif self.tiebreak == \"RIGHT\":\r\n column = lc[-1]\r\n else:\r\n column = random.choice(lc)\r\n return column\r\n else:\r\n return lc[0]", "def _get_max_dist_from_tail(self, snake, board, food_evaluation):\n\n if snake.health_points == self.MAX_HEALTH:\n return 1000\n elif food_evaluation == CANT_FIND:\n return 2\n else:\n return 2 + (self.MAX_HEALTH - snake.health_points) / (\n self.MAX_HEALTH) * (board.width + board.height)", "def final_value(player, board):\n diff = Othello.score(player, board)\n if diff < 0:\n return MIN_VALUE\n elif diff > 0:\n return MAX_VALUE\n return diff", "def max_curve(trial_scores: np.ndarray) -> np.ndarray:\n ret = np.empty(len(trial_scores))\n keep = -1e9\n for i, score in enumerate(trial_scores):\n keep = max(keep, score)\n ret[i] = keep\n return ret", "def __negamax(self, alpha, beta, tt=None):\n alpha_orig = alpha\n lookup = None if (tt is None) else tt.lookup(self)\n if lookup is not None:\n flag, best = lookup['flag'], lookup['best']\n if flag == 0:\n return best\n elif flag == -1:\n alpha = max(alpha, best[0])\n elif flag == +1:\n beta = min(beta, best[0])\n\n if alpha >= beta:\n return best\n\n if self.won():\n return (-2, None)\n if self.tied():\n return (0, None)\n if lookup is None:\n best = (-1, None)\n for x, y in self.fields:\n if self.fields[x, y] == self.empty:\n value = -self.move(x, y).__negamax(-beta, -alpha, tt)[0]\n if value > best[0]:\n best = (value, (x, y))\n if value > alpha:\n alpha = value\n if alpha >= beta:\n break\n if tt is not None:\n tt.store(game=self,\n best=best,\n flag=1 if (best[0] <= alpha_orig)\n else (-1 if (best[0] >= beta) else 0))\n\n return best", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def _get_maximum_from_heatmap(self, heatmap):\n assert heatmap.size(0) == 1 and heatmap.size(1) == 1\n max_map = torch.eq(heatmap, self.pool(heatmap)).float()\n heatmap = heatmap * max_map\n score = heatmap.view(-1)\n score, pos_idx = score.topk(self.max_num_people)\n mask = score > self.keypoint_threshold\n score = score[mask]\n pos_idx = pos_idx[mask]\n return pos_idx, score", "def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)", "def 
calculate_score(hand,hand_value):\n first,second,third,fourth,fifth,*_=[rank for rank,suit in hand]\n if fifth==12:\n fifth=-1\n return calculate_score_pairs(hand_value,first,second,third,fourth,fifth)", "def optimal_max(board):\n # Board full?\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is negative infinity\n global_optimum = [None, -math.inf]\n\n # For each move, what would opponent do next? Update best move.\n for action in available_actions:\n # Anticipates optimal adversarial moves\n local_optimum = optimal_min(result(board, action))\n\n # Compares local vs global optima\n if global_optimum[1] <= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def part_1(tape: Tape) -> int:\n\n result = max_thruster_signal(tape)\n\n print(f\"part 1: highest thruster signal is {result}\")\n return result", "def best_score(list_bb):\n\n # Compute the number of predicted boxes\n n = len(list_bb)\n\n # if there are more than 0 predicted boxes, search for the 2 boxes\n if n != 0:\n tab_score = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n score_val = score(list_bb[i], list_bb[j])\n tab_score[i, j] = score_val\n\n # Find the maximum\n amax = np.unravel_index(tab_score.argmax(), tab_score.shape)\n\n return union(list_bb[amax[0]], list_bb[amax[1]])\n else:\n return []", "def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)", "def calcul_max_loss(self, percent_allowable_loss):\n if self.capital * percent_allowable_loss / 100 > self.minimal_buy:\n return self.capital * percent_allowable_loss / 100\n else:\n return self.minimal_buy", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)" ]
[ "0.7198057", "0.69047964", "0.6868163", "0.68407416", "0.68071705", "0.67827207", "0.67775255", "0.6729456", "0.6722615", "0.67012155", "0.66802895", "0.66660815", "0.6656922", "0.6642546", "0.6636847", "0.66343987", "0.66135156", "0.6605659", "0.65514874", "0.6536933", "0.65284574", "0.6485154", "0.64590675", "0.6442747", "0.6436084", "0.642072", "0.641884", "0.63745767", "0.63730747", "0.63632774", "0.6332755", "0.62957597", "0.62865233", "0.6257223", "0.62525207", "0.6250548", "0.6228645", "0.6221567", "0.6216562", "0.6205863", "0.62027895", "0.6196087", "0.6168207", "0.6160829", "0.61603576", "0.61380184", "0.61063963", "0.6096022", "0.60947454", "0.60935354", "0.60925895", "0.6087964", "0.60835093", "0.6082882", "0.6074564", "0.60556614", "0.60362136", "0.602142", "0.60135096", "0.5986195", "0.5972626", "0.5963515", "0.5963515", "0.5963515", "0.5963515", "0.5963515", "0.5963515", "0.59616274", "0.59487766", "0.594081", "0.59358", "0.59350693", "0.5920547", "0.59196085", "0.5893645", "0.58917105", "0.5889379", "0.5888776", "0.5884221", "0.5883124", "0.5882283", "0.58673275", "0.5866971", "0.5864779", "0.5863784", "0.5855556", "0.5854727", "0.585429", "0.5845717", "0.5843712", "0.5841906", "0.5840546", "0.5837877", "0.58358324", "0.5826647", "0.5811778", "0.580931", "0.5804834", "0.58040243", "0.5787795" ]
0.6956893
1
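A short usage sketch for the upper-section scoring function in the row above; the example hands are illustrative and not taken from the dataset:

def score(hand):
    # Best upper-section score: the die value whose occurrences sum highest.
    return max(hand.count(die) * die for die in hand)

# (2, 3, 3, 5, 5) -> max(2, 6, 6, 10, 10) == 10
assert score((2, 3, 3, 5, 5)) == 10
# Three 1s (worth 3) lose to two 6s (worth 12).
assert score((1, 1, 1, 6, 6)) == 12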
Compute the expected value based on held_dice given that there are num_free_dice to be rolled, each with num_die_sides.
def expected_value(held_dice, num_die_sides, num_free_dice):\n scores = []\n die_sides = [(die + 1) for die in range(num_die_sides)]\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\n for outcome in pos_outcomes:\n scores.append(score(held_dice + outcome))\n expected_result = float(sum(scores))/len(scores)\n return expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n\n outcome = ()\n for die in range(1, num_die_sides + 1):\n outcome +=(die, )\n possible_outcomes = gen_all_sequences(outcome, num_free_dice)\n output = 0\n for single_output in possible_outcomes:\n current_score = score(single_output + held_dice)\n output += current_score\n\n return output/(len(possible_outcomes)*1.0)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n all_sequences = gen_all_sequences(range(1,num_die_sides+1), num_free_dice)\n iter_seque=[]\n score_seque=[]\n for seq in all_sequences:\n iter_seque.append(list(seq)+list(held_dice))\n score_seque.append(score(iter_seque[-1]))\n return float(sum(score_seque))/float(len(score_seque))", "def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))", "def strategy(hand, num_die_sides):\r\n \r\n best_hold = (0.0, ())\r\n current_score = 0\r\n \r\n for held_dice in gen_all_holds(hand):\r\n score = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))\r\n if score > current_score:\r\n current_score = score\r\n best_hold = (current_score, held_dice)\r\n \r\n return best_hold", "def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected_values = {}\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n current_expexted_value = expected_value(hold, num_die_sides, num_free_dice)\n expected_values[current_expexted_value] = hold\n\n max_value = max(expected_values.keys())\n 
return tuple((max_value, expected_values[max_value]))", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def chance(dice):\n return sum(dice)", "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)", "def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d", "def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, hold)\n return best_move", "def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def rollDie(self):\n return random.randint(1, self.sides)", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def test_roll_dice(self):\n # create partial current and keeper list to pass into roll_dice\n self.roll.current_dice_list = [1, 2, 3]\n self.roll.keeper_dice_list = [1, 2, 3]\n\n self.roll.roll_dice()\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n\n for i, dice in enumerate(self.roll.current_dice_list):\n self.assertTrue(1 <= dice <= 6)", "def roll(self):\n rolls = []\n if self.dice_array is not None:\n for dice in self.dice_array:\n rolls.append(np.random.randint(1, dice+1))\n else:\n for _ in range(0,self.number):\n rolls.append(np.random.randint(1, self.sides+1))\n #Fast way from stack overflow to determine if all\n #entries in \"rolls\" are equal, i.e. when doubles are rolled\n #but for arbitrary number of dice\n doubles = not rolls or [rolls[0]]*len(rolls) == rolls\n return np.sum(rolls), rolls, doubles", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. 
If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def yatzy(dice):\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0", "def roll_dice(roll, modifiers):\n try:\n if modifiers[\"Advantage\"] and not modifiers[\"Disadvantage\"]:\n modifiers[\"Advantage\"] = False\n return max(roll_dice(roll, modifiers), roll_dice(roll,modifiers))\n if modifiers[\"Disadvantage\"] and not modifiers[\"Advantage\"]:\n modifiers[\"Disadvantage\"] = False\n return min(roll_dice(roll, modifiers), roll_dice(roll, modifiers))\n num_dice = int(roll.split(\"D\")[0])\n if modifiers[\"Critical\"]:\n num_dice*=2\n num_dice+=modifiers[\"Brutal\"]\n die_type = roll.split(\"D\")[1]\n if die_type[0] == \"4\" or die_type[0] == \"6\" or die_type[0] == \"8\":\n die_type = int(die_type[0])\n elif die_type[:3] == \"100\" or die_type[0] == \"%\":\n die_type = 100\n elif die_type[:2] == \"10\" or die_type[:2] == \"12\" or die_type[:2] == \"20\":\n die_type = int(die_type[:2])\n else:\n die_type = 6\n roll_total = 0\n critical_success = False\n critical_failure = False\n for die in range(num_dice):\n die_result = random.randint(1,die_type)\n if die_result == 1 and modifiers[\"Lucky\"] or die_result <= 2 and modifiers[\"Great Weapon\"]:\n die_result = random.randint(1,die_type)\n if die_result < modifiers[\"Minimum Roll\"]:\n die_result = modifiers[\"Minimum Roll\"]\n if die_result == 20 and die_type == 20:\n critical_success = True\n if die_result == 1 and die_type == 20:\n critical_failure = True\n roll_total += die_result\n return roll_total\n except ValueError:\n return \"Error\"", "def roll_die(self):\n number = randint(1, self.sides) \n print(number)", "def d(qty, sides):\r\n value = 0\r\n while qty > 0:\r\n value = value + random.randint(1, sides)\r\n qty = qty - 1\r\n return value", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)", "def score_yamp(dices):\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if count_equal(dices, dice_num) == DICE_COUNT:\n return 50\n\n return 0", "def roll_dice(check_double=True):\n\n roll = np.random.choice(np.arange(1, 7), 2)\n\n if check_double:\n return roll.sum(), roll[0] == roll[1]\n else:\n return roll.sum()", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def chance_points(dice_list):\n return sum(dice_list)", "def testRoll(self):\n \n nsides=3\n die = BaseDie(nsides)\n lighted_die = LightedDie(nsides,colors={1:'blue',2:'yellow',3:'gold'})\n\n self.assertEqual(die.last_roll,None)\n\n die.roll()\n lighted_die.roll()\n\n for d in [die,lighted_die]:\n self.assertTrue(d.last_roll>0 and d.last_roll <= nsides)", "def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()", "def big_straight(dice):\n if sorted(dice) 
== [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0", "def fives(dice):\n return sum([x for x in dice if x == 5])", "def calculate_score(dice):\n # version_1\n\n if len(dice) > 6:\n raise Exception(\"Cheating Cheater!\")\n\n counts = Counter(dice)\n\n if len(counts) == 6:\n return 1500\n\n if len(counts) == 3 and all(val == 2 for val in counts.values()):\n return 1500\n\n score = 0\n\n ones_used = fives_used = False\n\n for num in range(1, 6 + 1):\n\n pip_count = counts[num]\n\n if pip_count >= 3:\n\n if num == 1:\n\n ones_used = True\n\n elif num == 5:\n\n fives_used = True\n\n score += num * 100\n\n # handle 4,5,6 of a kind\n pips_beyond_3 = pip_count - 3\n\n score += score * pips_beyond_3\n\n # bug if 2 threesomes? Let's test it\n\n # 1s are worth 10x\n if num == 1:\n score *= 10\n\n if not ones_used:\n score += counts.get(1, 0) * 100\n\n if not fives_used:\n score += counts.get(5, 0) * 50\n\n return score", "def select_dice(score, opponent_score, dice_swapped):\n # BEGIN PROBLEM 4\n dice = six_sided\n if dice_swapped == True:\n dice = four_sided\n # END PROBLEM 3\n if (score + opponent_score) % 7 == 0:\n dice = reroll(dice)\n return dice", "def roll(dice):\n\n dice = str(dice).upper().strip()\n dice_mod = 0\n if dice == 'FLUX':\n return randint(1, 6) - randint(1, 6)\n else:\n if dice == 'GOODFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 < flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n else:\n if dice == 'BADFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 > flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n \n ichar1 = dice.find('DD')\n if ichar1 == -1:\n ichar1 = dice.find('D')\n if ichar1 == 0:\n num_dice = 1\n\n if ichar1 <> -1:\n if ichar1 <> 0:\n num_dice = int(dice[0:ichar1])\n# print 'Number of dice =', num_dice\n ichar2 = dice.find('+')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n else:\n ichar2 = dice.find('-')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n\n if ichar2 <> -1:\n dice_type = dice[ichar1: ichar2]\n dice_type = dice_type.rstrip()\n else:\n dice_type = dice[ichar1: len(dice)]\n# print 'dice type =', dice_type, 'Len = ', len(dice_type)\n\n if dice_type == 'D6': \n return die_rolls(6, num_dice) + dice_mod\n else:\n if dice_type == 'D66' and num_dice == 1 and dice_mod == 0:\n return randint(1, 6) * 10 + randint(1, 6)\n else:\n if dice_type == 'D100' and num_dice == 1: \n return (randint(1, 10) - 1) * 10 + randint(1, 10) + dice_mod \n else:\n if dice_type == 'D10': \n return die_rolls(10, num_dice) + dice_mod\n else: \n if dice_type == 'D20': \n return die_rolls(20, num_dice) + dice_mod\n else:\n if dice_type == 'D30': \n return die_rolls(30, num_dice) + dice_mod\n else:\n if dice_type == 'D12': \n return die_rolls(12, num_dice) + dice_mod\n else:\n if dice_type == 'D8': \n return die_rolls(8, num_dice) + dice_mod\n else:\n if dice_type == 'D4': \n return die_rolls(4, num_dice) + dice_mod\n else:\n if dice_type == 'D9': \n return die_rolls(9, num_dice) + dice_mod\n else:\n if dice_type == 'D3': \n return die_rolls(3, num_dice) + dice_mod\n else:\n if dice_type == 'DD':\n return (die_rolls(6, num_dice) + dice_mod) * 10\n \n print\n print \"** DICE ERROR! 
'%s' is unknown **\" % dice\n print \n print \"roll() is a dice rolling program.\"\n print\n print \"The types of dice to roll are (in string values):\"\n print \"roll('D6') -- roll one 6-sided die\"\n print \"roll('1D6') -- roll one 6-sided die\"\n print \"roll('2D6') -- roll two 6-sided dice\"\n print \"roll('D10') -- roll a 10-sided die\"\n print \"roll('D100') -- roll a 100-sided die (1 - 100)\"\n print \"roll('D66') -- roll for a D66 chart\"\n print \"roll('2DD+3') -- roll (2D6+3) x 10\"\n print\n print \"-/+ DMs can be added to rolls:\"\n print \"roll('3D6+6') -- add +6 DM to roll\"\n print \"roll('4D4-4') -- add -4 DM to roll\"\n print\n return 0", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.sides)", "def roll_dice():\n die1 = random.randrange(1, 7)\n die2 = random.randrange(1, 7)\n return (die1, die2) # pack die face values into a tuple", "def score_full(dices):\n three_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n threes = count_equal(dices, dice_num) == 3\n if threes:\n three_matched_num = dice_num\n break\n\n two_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if dice_num != three_matched_num:\n twos = count_equal(dices, dice_num)\n if twos == 2:\n two_matched_num = twos\n break\n\n if three_matched_num > 0 and two_matched_num > 0:\n return 25\n else:\n return 0", "def ability(self):\n random.seed()\n rolls = [random.randint(1,6) for i in range(4)]\n return sum(sorted(rolls)[1:4])", "def yatzy_rule(n):\n def ones(dice):\n \"\"\" Count ones in list. \"\"\"\n return sum([x for x in dice if x == 1])\n\n def twos(dice):\n \"\"\" Count twos in list. \"\"\"\n return sum([x for x in dice if x == 2])\n\n def threes(dice):\n \"\"\" Count threes in list. \"\"\"\n return sum([x for x in dice if x == 3])\n\n def fours(dice):\n \"\"\" Count fours in list. \"\"\"\n return sum([x for x in dice if x == 4])\n\n def fives(dice):\n \"\"\" Count fives in list. \"\"\"\n return sum([x for x in dice if x == 5])\n\n def sixes(dice):\n \"\"\" Count sixes in list. \"\"\"\n return sum([x for x in dice if x == 6])\n\n def pair(dice):\n \"\"\" Return sum of highest pair in list. \"\"\"\n\n def max_or_zero(list):\n \"\"\" Returns maximum value of a list; 0 if list is empty. \"\"\"\n try:\n return max(list)\n except ValueError:\n return 0\n\n return 2 * max_or_zero([i for i, j in combinations(dice, 2) if i == j])\n \n def double_pair(dice):\n \"\"\" TODO! \"\"\"\n\n # Sentinel value.\n return 1\n\n def threes(dice):\n \"\"\" Find a set of three equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k in combinations(dice, 3):\n if i == j == k:\n return 3 * i\n\n return 0\n\n def fours(dice):\n \"\"\" Find a set of four equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0\n\n def small_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [1, 2, 3, 4, 5] (the small straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [1, 2, 3, 4, 5]:\n return sum(dice)\n return 0\n\n def big_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [2, 3, 4, 5, 6] (the large straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0\n\n def house(dice):\n \"\"\" Try to find a house in the list of cards\n i.e. 
[2, 2, 2, 3, 3] or [5, 5, 4, 4, 4] and\n return its sum. Returns 0 if nothing found.\"\"\"\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0\n\n def chance(dice):\n \"\"\" Returns the sum of dice. \"\"\"\n return sum(dice)\n\n def yatzy(dice):\n \"\"\" If every value in list dice is equal, return its sum.\n Else, return 0. \"\"\"\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0\n\n return [ones, twos, threes, fours, fives, sixes, pair, double_pair,\n threes, fours, small_straight, big_straight, house, chance, yatzy][n]", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "def test_reroll_dice(self):\n self.roll.current_dice_list = [1, 2, 3, ]\n self.roll.keeper_dice_list = [1, 2]\n\n self.roll.reroll_dice(self.roll.current_dice_list)\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n self.assertEqual(self.roll.current_dice_list[3], 1)\n self.assertEqual(self.roll.current_dice_list[4], 2)", "def roll(self):\n\t\treturn randint(1, self.num_sides)", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def roll(self):\r\n import random as _random\r\n return _random.randint(1, self.__sides_count)", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def roll(self):\n return cbrandom.throwDices(\"1d20\")", "def roll(self):\n total = 0\n\n if self.num_dice is not None and self.dice_type is not None:\n for _ in range(self.num_dice):\n total += randint(1, self.dice_type)\n elif self.min_value is not None and self.max_value is not None:\n total = randint(self.min_value, self.max_value)\n\n return total + self.plus", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def throw_dice(N: int, faces: int, total: int) -> int:\n if total == 0:\n return 1\n\n # dp[i][j] returns the number of ways to get to the sum `i` using `j` dice\n dp = [[0 for _ in range(total + 1)] for _ in range(N)]\n\n # Initialize the array for the first die, which can only achieve the total for each face it\n # rolls\n for curr_roll in range(1, min(faces + 1, total + 1)):\n dp[0][curr_roll] = 1\n\n # For each die, iterate through each potential total and simulate a roll from the die. 
We can\n # add the number of ways to reach `current_total - current_roll` using n - 1 die (if we are\n # currently using n die).\n for die in range(1, N):\n for curr_total in range(1, total + 1):\n for curr_roll in range(1, min(curr_total, faces + 1)):\n dp[die][curr_total] += dp[die - 1][curr_total - curr_roll]\n return dp[-1][-1]", "def die_roll():\n roll = random.randint(1,6)\n return roll", "def house(dice):\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0", "def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)", "def roll_dice(num_dice, die_type):\n result = 0\n for i in range(num_dice):\n result += random.randint(1, die_type)\n\n return result", "def rolldie():\n return int(random.random()*6)+1 # or use randrange()", "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "def _test_misc(self, state):\n state.result = 0\n dice_count = state.selection.dice_count\n dice_eyes = state.selection.dice_eyes\n\n if dice_count > 200:\n print(self._lang[\"too_many_dice\"])\n return state\n\n if state.dice == \"auto\":\n state.rolls = self._roll_dice(dice_count, 1, dice_eyes)\n\n # create sum of all rolled dice and modifier\n for _, value in enumerate(state.rolls):\n state.result += value\n\n state.result += state.mod\n return state", "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def roll_2_dice():\n return random.randint(2, 13)", "def fours(dice):\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0", "def roll_the_dices(num_of_iterations: int) -> None:\n # initial variables\n player_wins: int = 0\n theoretical_win_chance: float = round(15/36, 4)\n\n # main loop\n for _ in range(num_of_iterations):\n croupier_roll = random.randint(1, 6)\n player_roll = random.randint(1, 6)\n if player_roll < croupier_roll:\n player_wins += 1\n\n experimental_win_chance = round(player_wins / num_of_iterations, 4)\n print(f\"Results: \\n\"\n f\"Theoretical probability of winning a single game: {theoretical_win_chance:.2%}\\n\"\n f\"Experimental probability of winning a single game: {experimental_win_chance:.2%}\")", "def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n roll_sum = 0 # sums values of rolled dice\n ones_total = 0 # counts 
number of times the value 1 is rolled\n while num_rolls>0:\n current_roll = dice()\n if current_roll==1:\n ones_total += 1\n roll_sum += current_roll\n num_rolls -= 1\n if ones_total > 0:\n return ones_total\n else:\n return roll_sum\n # END PROBLEM 1", "def evaluate_dice(\n\tpreds: np.ndarray,\n\tlabels: np.ndarray,\n\ttolerance = 0.95\n\t):\n\n\tAVG_DICE = 0.0\n\tempty = 0.0\n\tfor true, pred in zip(labels, preds):\n\t\tif not np.sum(true):\n\t\t\tempty += 1.\n\t\tAVG_DICE += dice(true[0], pred[0] > tolerance)\n\n\treturn AVG_DICE / (preds.shape[0] - empty) if empty != preds.shape[0] else 0.0", "def maximum_roll(self):\n if self.dice_array is None:\n return self.number * self.sides\n else:\n return np.sum(self.dice_array)", "def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue", "def roll(number, sides):\n total = 0\n for _ in range(number):\n total += random.randint(1, sides + 1)\n return total", "def fours(dice):\n return sum([x for x in dice if x == 4])", "def select_dice(score, opponent_score):\r\n if (score+opponent_score)%7 == 0:\r\n return four_sided\r\n return six_sided", "def cube(user_dice):\n for dice in dice_list:\n if dice in user_dice:\n x = user_dice.split(dice)\n throw = random.randint(1, int(dice[1:]))\n try:\n if x[0]:\n multiply = x[0]\n multiply = int(multiply)\n else:\n multiply = 1\n except ValueError:\n print(\"Niepoprawne dane\")\n break\n try:\n if x[1]:\n modifier = x[1]\n modifier = int(modifier)\n else:\n modifier = 0\n except ValueError:\n print(\"Niepoprawne dane\")\n break\n my_list = []\n for i in range(multiply):\n my_list.append(throw)\n return sum(my_list) + modifier", "def roll(self):\n return random.randrange(1, sides + 1)", "def _rollOneDie(self):\n return random.randint(1, 6)", "def yatzy_dice():\n return [random_die() for _ in range(5)]", "def dice(x, y):\n return 2 * np.sum(x * y) / (np.sum(x) + np.sum(y))", "def fives_points(dice_list):\n return dice_list.count(5)* 5", "def temporary_score(self, dice_roll):\n\n temporary_score = 0\n if dice_roll > 1:\n temporary_score += dice_roll\n else:\n temporary_score = 0\n return temporary_score", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it.", "def simple_roll(dice):\n return roll(dice).total", "def roll_dice(player: int) -> int:\n sides = 6\n roll_again = input(\"Player {}: Press ENTER to roll your dice...\".format(player))\n num_rolled = roll(sides)\n print(\"You rolled {}.\".format(num_rolled))\n return num_rolled", "def roll_cheating_dice(number,faces):\n \n dice_number = 0\n cheating_list = list(range(1,faces+1))\n cheating_list.append(3)\n for i in range(number):\n dice_number += random.choice(cheating_list)\n return dice_number", "def yahtzee_points(dice_list):\n if of_a_kind_size(dice_list) >= 5:\n return 50\n else:\n return 0", "def roll_dice():\n result = random.randint(1, 101)\n if result <= 5:\n return True\n else:\n return False", "def roll(self):\n return random.choice(self.sides)", "def diceRoll():\n return randint(1,6)", "def roll(self):\n #dieValue = [] \n self._value = random.randrange(Die.SIDES) + 1\n self._update()\n #dieValue.append(self._value)\n #print(dieValue)\n #print(self._value)\n self._valueA = random.randrange(Die.SIDES) + 1\n #self._update2()\n #print(self._valueA)", "def Die(sides=6, symbol=None):\n\n return DiePSpace(sides, symbol).value" ]
[ "0.8766034", "0.8516424", "0.84216785", "0.8412181", "0.82945627", "0.8200519", "0.8118519", "0.7291412", "0.7017441", "0.6975462", "0.6942069", "0.6937762", "0.69110143", "0.6777357", "0.67685777", "0.67684424", "0.67683524", "0.6656086", "0.6635097", "0.6356114", "0.6343377", "0.6300391", "0.62937456", "0.62922883", "0.6267302", "0.6256338", "0.6249873", "0.62396353", "0.6210527", "0.6209155", "0.62018657", "0.61841923", "0.61817795", "0.61554176", "0.61409914", "0.6138191", "0.60956335", "0.60917264", "0.6080854", "0.60622203", "0.60615504", "0.60286", "0.60286", "0.6025406", "0.60250324", "0.6022283", "0.6020633", "0.60200185", "0.59929466", "0.5992048", "0.5972981", "0.5964438", "0.59580517", "0.5951374", "0.5948847", "0.5948493", "0.5948301", "0.59450126", "0.5928877", "0.5928877", "0.5928877", "0.5928877", "0.5926391", "0.5921797", "0.59187657", "0.591855", "0.59139454", "0.5913241", "0.58976614", "0.5871159", "0.58506185", "0.58476853", "0.58474034", "0.5844794", "0.58437645", "0.58423996", "0.58378226", "0.5837416", "0.5815352", "0.58144456", "0.58081096", "0.579554", "0.57877195", "0.5779064", "0.5772503", "0.5731563", "0.57228816", "0.5721693", "0.5717823", "0.5716926", "0.57117385", "0.57049716", "0.56977934", "0.5696818", "0.5696509", "0.569244", "0.5685737", "0.5681744", "0.5678933", "0.5671875" ]
0.8236513
5
Generate all possible choices of dice from hand to hold.
def gen_all_holds(hand):
    held_dice = [()]
    for dice in hand:
        for dummy_dice in held_dice:
            held_dice = held_dice + [tuple(dummy_dice) + (dice, )]
    return set(held_dice)
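Illustrative usage sketch (not part of the dataset record), assuming the gen_all_holds definition above is in scope; the two-die hand (1, 2) is an assumed example value.

# Minimal check of gen_all_holds on an assumed two-die hand.
hand = (1, 2)
print(gen_all_holds(hand))
# A set containing every subset of the hand: {(), (1,), (2,), (1, 2)}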
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_all_holds(hand):\r\n possible_holds = set([()])\r\n \r\n for dice in hand:\r\n temp_holds = possible_holds.copy()\r\n for hold in temp_holds:\r\n temp_seq = list(hold)\r\n temp_seq.append(dice)\r\n possible_holds.add(tuple(temp_seq))\r\n \r\n return possible_holds", "def yatzy_dice():\n return [random_die() for _ in range(5)]", "def dice():\n return random.randrange(1, 7)", "def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()", "def dealHand(n):\n hand={}\n numVowels = n / 3\n \n for i in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n \n for i in range(numVowels, n): \n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n \n return hand", "def deal_hand(n):\n hand = {}\n num_vowels = n // 3\n\n for i in range(num_vowels):\n x = VOWELS[random.randrange(0, len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n\n for i in range(num_vowels, n):\n x = CONSONANTS[random.randrange(0, len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n\n return hand", "def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)", "def dealHand(n):\r\n hand={}\r\n numVowels = n // 3\r\n \r\n for i in range(numVowels):\r\n x = VOWELS[random.randrange(0,len(VOWELS))]\r\n hand[x] = hand.get(x, 0) + 1\r\n \r\n for i in range(numVowels, n): \r\n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\r\n hand[x] = hand.get(x, 0) + 1\r\n \r\n return hand", "def slot_choke(self):\n if self.choke:\n _choke = [1 for x in range(8)]\n else:\n _choke = [random.randint(0,4) for x in range(8)]\n \n return _choke", "def roll_dices():\n dices = []\n\n for i in range(DICE_COUNT):\n dice = random.randrange(MIN_DICE, MAX_DICE + 1)\n dices.append(dice)\n\n return dices", "def get_outcomes(num_die_sides):\n outcomes = []\n\n for value in range(1, num_die_sides + 1):\n outcomes.append(value)\n\n return outcomes\n\n\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n DO NOT MODIFY.\n\n outcomes: possible values of a roll (ex. 
-- [1,2,3,4,5,6] for a 6-sided die)\n \"\"\"\n\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set", "def dealHand(n: int) -> d_si:\n hand = {} # type: Dict [str, int]\n numVowels = n // 3\n\n for _ in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n\n for _ in range(numVowels, n): # Or (n - numVowels)\n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n\n return hand", "def select_hands_for_players(self):\n return [random.choice(h.possible_hands) for h in self.holdem_ranges]", "def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)", "def throw_table(n, d=6, type='classical'):\n table = None\n roll = range(1, d+1)\n \n if type == 'classical':\n table = list(itertools.product(roll, repeat=n))\n else:\n table = list(itertools.combinations(roll, n))\n if type == 'bosonic':\n # TODO: This only works for 2 dice!!!!\n for i in roll:\n table.append((i,i))\n\n return table", "def pick_dice(sorted_dice):\n print(f'\\nYour sorted dice result is: {sorted_dice}')\n player_picks = input(fill('Here is your sorted dice result. Please enter 1-4 unique numbers in the range of 1-5 to'\n ' represent the selection of dice you want to hold. the numbers represents the location '\n 'of die in the dice list from left to right. For example if you want to hold 2 dice that '\n 'are on the left of the sorted dice list, you will enter \"12\". Warning: if you enter '\n 'anything else, the system will treat it as if you choose not to hold any dice: ',\n TXT_WIDTH()))\n dice = [[], []]\n if re.match(r'^(?!.*(.).*\\1)[1-5]{1,4}$', player_picks):\n picks_list = [int(pick) for pick in player_picks]\n index_list = [pick - 1 for pick in picks_list]\n for index in index_list:\n dice[0].append(sorted_dice[index])\n for die in range(TOTAL_NUMBER_OF_DICE() - len(dice[0])):\n dice[1].append(0)\n else:\n for die in sorted_dice:\n dice[1].append(0)\n return dice", "def roll_dice():\n numbers = ['1', '2', '3', '4', '5', '6']\n return random.choice(numbers)", "def shuffle_choices(self, choices, rng):\r\n # Separate out a list of the stuff to be shuffled\r\n # vs. 
the head/tail of fixed==true choices to be held back from the shuffle.\r\n # Rare corner case: A fixed==true choice \"island\" in the middle is lumped in\r\n # with the tail group of fixed choices.\r\n # Slightly tricky one-pass implementation using a state machine\r\n head = []\r\n middle = [] # only this one gets shuffled\r\n tail = []\r\n at_head = True\r\n for choice in choices:\r\n if at_head and choice.get('fixed') == 'true':\r\n head.append(choice)\r\n continue\r\n at_head = False\r\n if choice.get('fixed') == 'true':\r\n tail.append(choice)\r\n else:\r\n middle.append(choice)\r\n rng.shuffle(middle)\r\n return head + middle + tail", "def dice(name):", "def throw_dice():\n dice_1 = random.randrange(1,7)\n dice_2 = random.randrange(1,7)\n return sorted((dice_1,dice_2))", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def gen_all_holds(hand):\n \n answer_set = set([()])\n for dummy_idx in range(len(hand)):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in range(1,len(hand)+1):\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n if set(tuple(new_sequence)).issubset(set(range(1,len(hand)+1))):\n temp_set.add(tuple(set(new_sequence)))\n answer_set = answer_set.union(temp_set)\n answer_set2 = set([()])\n for seq in answer_set:\n temp_seq = []\n for element in seq: \n temp_el = hand[element -1]\n temp_seq.append(temp_el)\n answer_set2.add(tuple(temp_seq))\n return answer_set2", "def gen_all_holds(hand):\n without_repeat = []\n mask_seque = list(gen_all_sequences([0,1], len(hand)))\n for dum_i in mask_seque:\n without_repeat.append(())\n \n for dum_i in range(len(mask_seque)):\n for dum_j in range(len(mask_seque[dum_i])):\n if (mask_seque[dum_i][dum_j]==1):\n without_repeat[dum_i]=list(without_repeat[dum_i])\n without_repeat[dum_i].append(hand[dum_j])\n without_repeat[dum_i]=tuple(without_repeat[dum_i])\n \n without_repeat = set(tuple(without_repeat))\n return without_repeat", "def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices", "def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices", "def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices", "def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n 
expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))", "def determine_roll(self):\n dice_to_roll = []\n to_roll = input(\"Roll dice: \")\n if 'a' in to_roll:\n dice_to_roll.append(self.die_a)\n\n if 'b' in to_roll:\n dice_to_roll.append(self.die_b)\n\n return dice_to_roll", "def roll_dice():\n die1 = random.randrange(1, 7)\n die2 = random.randrange(1, 7)\n return (die1, die2) # pack die face values into a tuple", "def deal(deck): \r\n hand = []\r\n for n in range(2): \r\n deck, hand = draw(deck, hand)\r\n \r\n return deck, hand", "def challenge() : \n\treturn [random.randint(1,9) for i in range(5)]", "def roll_dice(num_of_dice=1):\r\n sides = 6\r\n return [random.randrange(1, sides+1) for _ in xrange(num_of_dice)]", "def deal_hands( self ):\n \tself.shuffle()\n \thand_one = []\n \thand_two = []\n\n \tfor counter in range(5):\n \t\thand_one.append(self.deal())\n \t\thand_two.append(self.deal())\n\n \treturn hand_one, hand_two", "def hand_throw(hand: list, choice_to_roll=None) -> list:\n\n if choice_to_roll is None:\n choice_to_roll = range(len(hand))\n\n new_throw = dice_throw(len(choice_to_roll))\n\n dice_index_in_new_throw = 0\n for dice_number in choice_to_roll:\n hand[dice_number] = new_throw[dice_index_in_new_throw]\n dice_index_in_new_throw += 1\n\n return hand", "def yatzy_rule(n):\n def ones(dice):\n \"\"\" Count ones in list. \"\"\"\n return sum([x for x in dice if x == 1])\n\n def twos(dice):\n \"\"\" Count twos in list. \"\"\"\n return sum([x for x in dice if x == 2])\n\n def threes(dice):\n \"\"\" Count threes in list. \"\"\"\n return sum([x for x in dice if x == 3])\n\n def fours(dice):\n \"\"\" Count fours in list. \"\"\"\n return sum([x for x in dice if x == 4])\n\n def fives(dice):\n \"\"\" Count fives in list. \"\"\"\n return sum([x for x in dice if x == 5])\n\n def sixes(dice):\n \"\"\" Count sixes in list. \"\"\"\n return sum([x for x in dice if x == 6])\n\n def pair(dice):\n \"\"\" Return sum of highest pair in list. \"\"\"\n\n def max_or_zero(list):\n \"\"\" Returns maximum value of a list; 0 if list is empty. \"\"\"\n try:\n return max(list)\n except ValueError:\n return 0\n\n return 2 * max_or_zero([i for i, j in combinations(dice, 2) if i == j])\n \n def double_pair(dice):\n \"\"\" TODO! \"\"\"\n\n # Sentinel value.\n return 1\n\n def threes(dice):\n \"\"\" Find a set of three equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k in combinations(dice, 3):\n if i == j == k:\n return 3 * i\n\n return 0\n\n def fours(dice):\n \"\"\" Find a set of four equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0\n\n def small_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [1, 2, 3, 4, 5] (the small straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [1, 2, 3, 4, 5]:\n return sum(dice)\n return 0\n\n def big_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [2, 3, 4, 5, 6] (the large straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0\n\n def house(dice):\n \"\"\" Try to find a house in the list of cards\n i.e. [2, 2, 2, 3, 3] or [5, 5, 4, 4, 4] and\n return its sum. 
Returns 0 if nothing found.\"\"\"\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0\n\n def chance(dice):\n \"\"\" Returns the sum of dice. \"\"\"\n return sum(dice)\n\n def yatzy(dice):\n \"\"\" If every value in list dice is equal, return its sum.\n Else, return 0. \"\"\"\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0\n\n return [ones, twos, threes, fours, fives, sixes, pair, double_pair,\n threes, fours, small_straight, big_straight, house, chance, yatzy][n]", "def dice_throw(number_of_dices: int, dice_size=6) -> list:\n\n result = []\n for i in range(number_of_dices):\n result.append(randint(1, dice_size))\n\n return result", "def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected_values = {}\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n current_expexted_value = expected_value(hold, num_die_sides, num_free_dice)\n expected_values[current_expexted_value] = hold\n\n max_value = max(expected_values.keys())\n return tuple((max_value, expected_values[max_value]))", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def random_moves(length):\n ans = \"\"\n for dummy_num in range(length):\n ans += random.choice([\"u\",\"d\",\"l\",\"r\"])\n return ans", "def roll(self):\n return random.choice(self.sides)", "def get_outcomes(num_die_sides):\n outcomes = []\n\n for value in range(1, num_die_sides + 1):\n outcomes.append(value)\n\n return outcomes", "def dealHand():\n import random\n import string\n \n vowels = 'aeiou'\n constant = 'bcdfghjklmnpqrstvwxyz'\n \n maxint = max(list(map(len, wordlist)))\n \n n = random.randint(5, maxint) \n \n # 1/3 vowls\n n_vowl = n // 3\n n_constant = n - n//3\n \n get_vowl = random.choices(vowels, k = n_vowl)\n get_constant = random.choices(constant, k = n_constant)\n \n strings = ''.join(get_vowl + get_constant)\n \n hand = getFreqDict(strings)\n \n return hand", "def gen_all_holds(hand):\n from_hand = [()]\n for item in hand:\n for subset in from_hand:\n from_hand = from_hand + [tuple(subset) + (item, )]\n \n return set(from_hand)", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def gen_all_holds(hand):\n\n mask = sorted(gen_all_sequences((1,0), len(hand)))\n answer_set = []\n for current_mask in mask:\n temp = []\n for indx in range(len(current_mask)):\n if current_mask[indx] == 1:\n temp.append(hand[indx]);\n answer_set.append(tuple(temp))\n return set(answer_set)", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def new_dice():\n dice = [[], [0, 0, 0, 0, 0]]\n return dice", "def run_example():\n num_die_sides = 6\n hand = (1,2,5,5,5)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def strategy(hand, num_die_sides):\r\n \r\n best_hold = (0.0, ())\r\n 
current_score = 0\r\n \r\n for held_dice in gen_all_holds(hand):\r\n score = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))\r\n if score > current_score:\r\n current_score = score\r\n best_hold = (current_score, held_dice)\r\n \r\n return best_hold", "def make_hands(names):\n\tdeck = []\n\tfor face in FACES:\n\t\tfor suit in SUITS:\n\t\t\tdeck.append([face, suit])\n\trandom.shuffle(deck)\n\tdealt = 0\n\tplayer = 0\n\thands = []\n\twhile dealt < (len(names)*5):\n\t\thand = []\n\t\ti = 0\n\t\twhile i < 5:\n\t\t\thand.append(deck.pop())\n\t\t\ti += 1\n\t\tdealt += 5\n\t\tplayer += 1\n\t\thands.append(hand)\n\treturn hands", "def roll_cheating_dice(number,faces):\n \n dice_number = 0\n cheating_list = list(range(1,faces+1))\n cheating_list.append(3)\n for i in range(number):\n dice_number += random.choice(cheating_list)\n return dice_number", "def roll_dice_logic(d_roll: str) -> list[int]:\n\n escaped_d_roll = re.sub(r'[-+]', '', d_roll)\n d_number, d_face = re.split(r'[dD]', escaped_d_roll)\n d_face = int(d_face)\n d_number = int(d_number) if d_number else 1\n return [random.randint(1, d_face) for _n in range(0, d_number)]", "def generate_all_hands(cards):\n if len(cards) < 5:\n raise ValueError('Too few cards')\n card_arrays = itertools.combinations(cards, 5)\n hands = []\n for card_array in card_arrays:\n new_hand = Hand(card_array)\n hands.append(new_hand)\n return hands", "def make_passphrase(self):\n rolls = self.rawroll.replace(\" \", \"\")\n \n # Cut last 4 rolls from given string for special characters\n #\n # They aren't present when rolls are generated, we \"roll the dice\" for it\n # on demand\n if(self.special):\n if(self.generate):\n special_roll = []\n else:\n special_roll = rolls[-4:]\n rolls = rolls[:-4]\n \n if(len(rolls) <= 0):\n stderr.write(\"You must specify your dice rolls\\n\")\n exit(1)\n \n if(not rolls.isnumeric() or (not self.generate and ('0' in rolls or '7' in rolls or '8' in rolls or '9' in rolls))):\n stderr.write(\"Dice roll can be only numbers on dice\\n\")\n exit(3)\n \n if(len(rolls) % 5 != 0):\n stderr.write(\"Number of dice rolls must be multiple of 5\")\n if(self.special):\n stderr.write(\" + 4 rolls for special character\")\n stderr.write(\"\\n\")\n exit(2)\n \n while(rolls):\n roll = rolls[:5]\n rolls = rolls[5:]\n \n self.get_word(roll)\n \n if(self.special):\n self.add_special_character(special_roll)", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def deal(self):\n hands = sample(self.deck, 13) #random sample so no need to shuffle\n hand1, hand2, flip = hands[:6], hands[6:-1], hands[-1]\n return hand1, hand2, flip", "def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)", "def chance(dice):\n return sum(dice)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def __dice_generator(self):\n self.current_dice = 
np.random.randint(1, 6 + 1)", "def draw_random_sample(choices, probabilities, n):\n values = np.array(range(len(choices)))\n probs = np.array(probabilities)\n bins = np.add.accumulate(probs)\n inds = values[np.digitize(random_sample(n), bins)]\n samples = []\n for i in inds:\n samples.append(deepcopy(choices[int(i)]))\n return samples", "def throw_dice(self):\n self.dice = []\n for i in range (6):\n die = random.randint(1, 6)\n self.dice.append(die)\n self.num_throws += 1", "def touching_choice(self,p):\n choose = []\n while len(choose) < 2:\n #for i in range(2):\n poss = random.randint(0,sum(self.tendency))\n\n if poss<= self.tendency[0]:\n if 'head' not in choose:\n choose.append('head')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]:\n if 'foot1' not in choose:\n choose.append('foot1')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]+self.tendency[2]:\n if 'foot2' not in choose:\n choose.append('foot2')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]+self.tendency[2]+self.tendency[3]:\n if 'foot3' not in choose:\n choose.append('foot3')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]+self.tendency[2]+self.tendency[3]+self.tendency[4]:\n if 'foot4' not in choose:\n choose.append('foot4')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]+self.tendency[2]+self.tendency[3]+self.tendency[4]+self.tendency[5]:\n if 'back' not in choose:\n choose.append('back')\n continue\n elif poss<= self.tendency[0]+self.tendency[1]+self.tendency[2]+self.tendency[3]+self.tendency[4]+self.tendency[5]+self.tendency[6]:\n if 'stomach' not in choose:\n choose.append('stomach')\n continue\n else:\n if 'tail' not in choose:\n choose.append('tail')\n continue\n\n return choose", "def deal_udacity(numhands, n=5, deck = [r+s for r in '23456789TJQKA' for s in 'SHDC']):\n random.shuffle(deck)\n return [deck[n*i:n*(i+1)] for i in range(numhands)]", "def makeChrom(length):\n output = []\n for i in range(length):\n output.append(randrange(14))\n return output", "def generate_deck(suits=4, type_cards=13):\n cards = []\n for suite in range(suits):\n for type_card in range(1, type_cards+1):\n # Setting the key-value pair for every card\n if (type_card == 1):\n cards.append({'A':type_cards+1})\n elif (type_card == 11):\n cards.append({'J': type_card})\n elif (type_card == 12):\n cards.append({'Q': type_card})\n elif (type_card == 13):\n cards.append({'K': type_card})\n else:\n cards.append({type_card:type_card})\n # Randomize the set of cards in the deck\n random.shuffle(cards)\n return cards", "def select_dice(dice_swapped):\n # BEGIN PROBLEM 3\n if dice_swapped:\n return four_sided\n else:\n return six_sided\n # END PROBLEM 3", "def multiple_choice(correct_choice, all_choices):\r\n # format for character is {'あ': 'ah'}\r\n # format for character is {'japanese character': 'english sound'}\r\n\r\n # get 3 different characters from all_choices, randomly\r\n # add all 3 'values', of the k:v pair, to the choices\r\n # if the input from the user != the 'key' of the correct character then it is wrong\r\n # if wrong, try again.\r", "def handDecision(handIn):", "def choosePiece(pieceList):\n dice = [1, 2, 3, 4, 5, 6]\n if len(pieceList) > 1:\n diceRoll = random.choice(dice)\n print(\"Dice Roll:\", diceRoll)\n if not any(piece for piece in pieceList if piece.value == diceRoll):\n # Piece is dead, finds next highest/lowest\n nextUp = -1\n nextDown = -1\n for i in range(diceRoll + 1,6):\n if any(piece for piece in pieceList if piece.value == i):\n nextUp = i\n break\n for i in 
range(diceRoll - 1, -1, -1):\n if any(piece for piece in pieceList if piece.value == i):\n nextDown = i\n break\n if nextUp == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextDown\n elif nextDown == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextUp\n else:\n print(\"Piece \", diceRoll, \" is dead. Choose \", nextDown, \" or \", nextUp, \".\", sep = '')\n diceRoll = input(\"> \")\n # Obtains user input\n while(diceRoll != str(nextUp) and diceRoll != str(nextDown)):\n diceRoll = input(\"Invalid choice. Please try again.\\n> \")\n diceRoll = int(diceRoll, base = 10)\n else:\n diceRoll = pieceList[0].value\n print(\"Only 1 piece left.\")\n\n return [piece for piece in pieceList if piece.value == diceRoll][0]", "def chance_outcomes(self):\n\n assert self.is_chance()\n\n action_list = self.legal_actions()\n dealed_card = [*self.hand[0], *self.hand[1], *self.pub] # dealed cards\n\n if self.phase == PREFLOP: # preflop\n prob_list = [1 for _ in action_list]\n elif self.phase == FLOP: # flop\n prob_list = [1 if all([d not in dealed_card for d in a.deal])\n else 0 for a in action_list]\n elif self.phase == TURN: # turn\n prob_list = [1 if all([d not in dealed_card for d in a.deal])\n else 0 for a in action_list]\n else: # river\n prob_list = [1 if all([d not in dealed_card for d in a.deal])\n else 0 for a in action_list]\n prob_list = np.array(prob_list)\n prob_list = prob_list / np.sum(prob_list)\n\n return action_list, prob_list", "def dice_gen():\r\n import random\r\n\r\n rdm = random.Random()\r\n rdm_gen = []\r\n\r\n table_txt = open(\"dice-table.txt\", \"r\")\r\n while True:\r\n line = table_txt.readline()\r\n if len(line) == 0:\r\n break\r\n line = list(map(int, line.split()))\r\n rdm_gen.append(line[rdm.randint(0,10)])\r\n\r\n table_txt.close()\r\n return(rdm_gen)", "def hunt_choices(self,\n round_number,\n my_food,\n my_reputation,\n m,\n reps):\n\n # update with current data\n self.n_players.append(len(reps))\n self.g_rep.append(global_rep(reps))\n\n # NB. I just noticed that we are given all the player_reputations all at\n # once.. This allows us to plan for the whole round all together. This\n # allows us to equilibrate our own reputation with more freedom. For\n # example, let's assume that we go for a strategy which allows us to\n # pre-determine how many hunts/slacks to perform to maintain our\n # reputation. 
Then we can freely distribute the hunts/slacks so as to\n # maximize the expected gain.\n\n # initial strategy\n actions = [ 's' for r in reps ]\n\n self.actions.append(actions)\n return actions", "def part_1():\n return itertools.permutations(range(5))", "def play():\n decks = make_decks()\n\n deck_pulls = {deck: [] for deck in decks}\n for i in range(100):\n deck = random.choice(decks)\n deck_pulls[deck].append(deck.pull())\n\n return decks, deck_pulls", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def main():\n\n args = get_args()\n random.seed(args.seed)\n wod = []\n\n for name, low, high in read_csv(args.file):\n reps = random.randint(low, high)\n if args.easy:\n reps = int(reps / 2)\n wod.append((name, reps))\n\n wod = random.sample(wod, k=args.num_exercises)\n print(tabulate(wod, headers=('Exercise', 'Reps')))", "def roll(self):\n return cbrandom.throwDices(\"1d20\")", "def diceRoll():\n return randint(1,6)", "def possible_rolls(D1,n):\n possibilities = []\n for D2 in range(1,7):\n for D3 in range(1,7):\n if D1+D2+D3 == n:\n possibilities.append((D1,D2,D3))\n return possibilities", "def test(self, state):\n\n # manual dice should have been typed in by this point, if they don't\n # exist exit\n if state.dice == \"manual\" and (\n state.rolls is None or state.rolls == []):\n return state\n\n test_dict = {\"attr\": self._test_1dice,\n \"fight_talent\": self._test_1dice,\n \"advantage\": self._test_1dice,\n \"skill\": self._test_3dice,\n \"spell\": self._test_3dice,\n \"misc\": self._test_misc}\n\n state = test_dict[state.selection.category](state)\n\n return state", "def draw_hands(n_players=1):\n if n_players > 6:\n assert \"too many players. someone can't play.\"\n\n deck = make_deck()\n\n random.shuffle(deck)\n\n hands = []\n\n for i in range(n_players):\n hands.append(deck[15*i:15*(i+1)])\n\n bag = deck[n_players*15:]\n\n return hands, bag", "def _test_misc(self, state):\n state.result = 0\n dice_count = state.selection.dice_count\n dice_eyes = state.selection.dice_eyes\n\n if dice_count > 200:\n print(self._lang[\"too_many_dice\"])\n return state\n\n if state.dice == \"auto\":\n state.rolls = self._roll_dice(dice_count, 1, dice_eyes)\n\n # create sum of all rolled dice and modifier\n for _, value in enumerate(state.rolls):\n state.result += value\n\n state.result += state.mod\n return state", "def generate_list_of_wrong_choices(possible_wrong_choices, no_of_wrong_choices):\n\n wrong_choices = random.sample(list(possible_wrong_choices), min(len(possible_wrong_choices), no_of_wrong_choices))\n\n # Return a list of the text of the wrong choices\n return [wrong_choice.text for wrong_choice in wrong_choices]", "def com_pick_normal(self):\n global num_dots\n # Because the computer decision making is based on code alone, it will be\n # more complex than the user's.\n a = (num_dots - 1)\n b = (num_dots - 2)\n c = (num_dots - 3)\n d = (num_dots - 4)\n # It is the computers best strategy to leave a number of balls divisible\n # by 5 when possible. This code checks for that possibility and chooses\n # thus, otherwise picks randomly as it will not matter until the user\n # makes a mistake. 
Credit: Scott Heggen's tip in A5\n if a % 5 == 0:\n com_take = 1\n elif b % 5 == 0:\n com_take = 2\n elif c % 5 == 0:\n com_take = 3\n elif d % 5 == 0:\n com_take = 4\n else:\n com_take = random.randint(1, 4)\n return com_take", "def puzzle_generator():\r\n print(\"Generating puzzles...\")\r\n puzzle_container = []\r\n while len(puzzle_container) < 25:\r\n next_state_tuple = ()\r\n check_dict = {}\r\n \r\n initial_state_tuple = ([[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]], (0, 0))\r\n for i in range(20):\r\n state_container = next_possible_states([initial_state_tuple], check_dict, True)\r\n try:\r\n next_state_tuple = random.choice(state_container)\r\n initial_state_tuple = next_state_tuple\r\n except IndexError:\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n break\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n \r\n if len(puzzle_container) == 25:\r\n print(\"25 distinct puzzles are succesfully generated!\")\r\n return puzzle_container\r\n else:\r\n print(\"Puzzle generation failed!\")", "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "def occupy_huts():\n huts = []\n occupants = ['enemy', 'friend', 'unoccupied']\n while len(huts) < 5:\n computer_choice = random.choice(occupants)\n huts.append(computer_choice)\n return huts", "def gen_all_holds(hand):\n all_holds_set = [()]\n for entry in hand:\n for subset in all_holds_set:\n # create subsets of hand set\n all_holds_set = all_holds_set + [tuple(subset) + (entry,)]\n return set(sorted(all_holds_set))", "def CallSuitLogic(hand): #FIXME\r\n\r\n call = 0\r\n suit = 1\r\n\r\n return [call, suit]", "def default_variation(random, candidates, args):\r\n return candidates", "def default_variation(random, candidates, args):\r\n return candidates", "def drink_make(drink_styles): \n # Define empty list for drink\n drink_ingredients = [] \n # Loop through styles and add random ingredient to drink list\n for style, selected in drink_styles.iteritems():\n # Test whether style selected by user\n if selected == True:\n drink_ingredients.append(random.choice(ingredients[style]))\n # Return drink\n return drink_ingredients", "def run_example1():\r\n #outcomes = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n #outcomes = set(['Heads','Tails'])\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n outcomes = set([\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"])\r\n \r\n length = 7\r\n seq_outcomes = gen_permutations(outcomes,length)\r\n print \"Computed\", len(seq_outcomes), \"sequences of\", str(length), \"outcomes\"\r\n #print \"Sequences were\", seq_outcomes\r", "def _gen_questions_by_goals(self):\n\n try:\n num_questions_per_goal = int(floor(self.numQuest / len(self.ILOUsed)))\n\n # Ensure that the number of questions requested are less than unique ILO's to be used.\n assert ((self.numQuest / len(self.ILOUsed)) >= 1)\n\n except ZeroDivisionError:\n print(\"No ILO's selected, or number of question in exam is set to 0\")\n return\n\n except AssertionError:\n print(\"There aren't enough questions for the number of ILO's chosen, increase the number of questions \" \\\n + \"or reduce the number of ILO's covered in this exam\")\n return\n\n rest = self.numQuest % len(self.ILOUsed)\n\n for ilo in self.ILOUsed:\n # Retrieve all questions that belongs to ilo\n self._get_questions_for_ilo(ilo[0], 
num_questions_per_goal)\n\n while rest > 0:\n ilo = random.choice(self.ILOUsed)\n self._get_questions_for_ilo(ilo[0], 1)\n rest -= 1\n\n return" ]
[ "0.66850966", "0.6451011", "0.6337711", "0.63297033", "0.63039804", "0.6226844", "0.6226421", "0.62156814", "0.6166249", "0.61634064", "0.61588705", "0.61474586", "0.6074418", "0.60378057", "0.6029834", "0.60106313", "0.598691", "0.59858495", "0.594391", "0.59296936", "0.5920476", "0.59076583", "0.58997256", "0.589188", "0.589188", "0.589188", "0.589188", "0.5879852", "0.5872513", "0.58707523", "0.5868509", "0.58283466", "0.58247423", "0.57830155", "0.57805836", "0.5748503", "0.57419497", "0.5710496", "0.5707563", "0.569996", "0.56699187", "0.5666528", "0.56537694", "0.56474596", "0.5636332", "0.5622441", "0.5622441", "0.5622441", "0.5610702", "0.56075937", "0.5606999", "0.56058925", "0.5593364", "0.5586542", "0.5584487", "0.55836725", "0.55765283", "0.55610555", "0.55529153", "0.55529153", "0.5552747", "0.55521595", "0.55358934", "0.55321664", "0.55211014", "0.5518272", "0.54919976", "0.54890734", "0.5478224", "0.54705316", "0.54683816", "0.54640573", "0.5462508", "0.5459691", "0.54580194", "0.5455911", "0.54514855", "0.5441697", "0.5435112", "0.54254067", "0.54195666", "0.54160243", "0.5410128", "0.54070497", "0.54067105", "0.54059935", "0.540057", "0.5396971", "0.53893083", "0.53892195", "0.5384812", "0.5382951", "0.5364451", "0.53486085", "0.5346411", "0.5343121", "0.5343121", "0.53304374", "0.53290915", "0.5325266" ]
0.7070763
0
Compute the hold that maximizes the expected value when the discarded dice are rolled.
def strategy(hand, num_die_sides):
    best_hold = (0.0, ())
    current_score = 0
    for held_dice in gen_all_holds(hand):
        score = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))
        if score > current_score:
            current_score = score
            best_hold = (current_score, held_dice)
    return best_hold
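A minimal, self-contained sketch (not part of the dataset record) of the score and expected_value helpers that strategy relies on, assuming the Yahtzee-style scoring used throughout these snippets (the highest total obtainable from a single die value, count × value); the helper bodies and the sample hold (5, 5) are illustrative assumptions, not the record's own implementation.

from itertools import product

def score(hand):
    # Highest single-value total in the hand, e.g. score((5, 5, 3)) == 10.
    return max(hand.count(value) * value for value in set(hand))

def expected_value(held_dice, num_die_sides, num_free_dice):
    # Brute-force average score over every possible roll of the free dice.
    outcomes = range(1, num_die_sides + 1)
    rolls = list(product(outcomes, repeat=num_free_dice))
    return sum(score(held_dice + roll) for roll in rolls) / len(rolls)

print(expected_value((5, 5), 6, 1))  # hold two fives, re-roll one six-sided die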
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected_values = {}\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n current_expexted_value = expected_value(hold, num_die_sides, num_free_dice)\n expected_values[current_expexted_value] = hold\n\n max_value = max(expected_values.keys())\n return tuple((max_value, expected_values[max_value]))", "def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))", "def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)", "def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, hold)\n return best_move", "def expected_value(held_dice, num_die_sides, num_free_dice):\n\n outcome = ()\n for die in range(1, num_die_sides + 1):\n outcome +=(die, )\n possible_outcomes = gen_all_sequences(outcome, num_free_dice)\n output = 0\n for single_output in possible_outcomes:\n current_score = score(single_output + held_dice)\n output += current_score\n\n return output/(len(possible_outcomes)*1.0)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n all_sequences = gen_all_sequences(range(1,num_die_sides+1), num_free_dice)\n iter_seque=[]\n score_seque=[]\n for seq in all_sequences:\n iter_seque.append(list(seq)+list(held_dice))\n score_seque.append(score(iter_seque[-1]))\n return float(sum(score_seque))/float(len(score_seque))", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = 
set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def maximum_roll(self):\n if self.dice_array is None:\n return self.number * self.sides\n else:\n return np.sum(self.dice_array)", "def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 8\n\n \"\"\"maxi, number_of_dice, ret = 0, 10, 0\n while number_of_dice > 0:\n avg = make_averaged(roll_dice)(number_of_dice, dice)\n maxi = max(maxi, avg)\n if avg >= maxi:\n ret = number_of_dice\n number_of_dice -= 1\n return ret\"\"\"\n\n\n\n counterA = 1\n num_rolls=1\n max_value = 0\n best_num_rolls = 0\n while counterA <= 10:\n num_rolls = counterA\n average_function = make_averaged(roll_dice)(counterA, dice)\n if average_function > max_value:\n max_value = average_function\n best_num_rolls = counterA\n counterA +=1\n return best_num_rolls\n\n \"\"\"counterA = 1\n maxvalue = 0\n maxvaluenumber = 0\n while(counterA<=10):\n num_rolls = counterA\n average_for_roll = make_averaged(roll_dice(num_rolls, dice), num_samples)\n counterB = average_for_roll(roll_dice(counterA, dice))\n if(counterB>maxvalue):\n maxvalue = counterB\n maxvaluenumber = counterA\n counterA +=1\n return maxvaluenumber\"\"\"\n # END PROBLEM 8", "def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d", "def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue", "def big_straight(dice):\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0", "def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return 
max(sum((dice * self.level).roll()), 1)", "def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score", "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def roll_dice(roll, modifiers):\n try:\n if modifiers[\"Advantage\"] and not modifiers[\"Disadvantage\"]:\n modifiers[\"Advantage\"] = False\n return max(roll_dice(roll, modifiers), roll_dice(roll,modifiers))\n if modifiers[\"Disadvantage\"] and not modifiers[\"Advantage\"]:\n modifiers[\"Disadvantage\"] = False\n return min(roll_dice(roll, modifiers), roll_dice(roll, modifiers))\n num_dice = int(roll.split(\"D\")[0])\n if modifiers[\"Critical\"]:\n num_dice*=2\n num_dice+=modifiers[\"Brutal\"]\n die_type = roll.split(\"D\")[1]\n if die_type[0] == \"4\" or die_type[0] == \"6\" or die_type[0] == \"8\":\n die_type = int(die_type[0])\n elif die_type[:3] == \"100\" or die_type[0] == \"%\":\n die_type = 100\n elif die_type[:2] == \"10\" or die_type[:2] == \"12\" or die_type[:2] == \"20\":\n die_type = int(die_type[:2])\n else:\n die_type = 6\n roll_total = 0\n critical_success = False\n critical_failure = False\n for die in range(num_dice):\n die_result = random.randint(1,die_type)\n if die_result == 1 and modifiers[\"Lucky\"] or die_result <= 2 and modifiers[\"Great Weapon\"]:\n die_result = random.randint(1,die_type)\n if die_result < modifiers[\"Minimum Roll\"]:\n die_result = modifiers[\"Minimum Roll\"]\n if die_result == 20 and die_type == 20:\n critical_success = True\n if die_result == 1 and die_type == 20:\n critical_failure = True\n roll_total += die_result\n return roll_total\n except ValueError:\n return \"Error\"", "def roll(self):\n roll = random.random()\n sum = 0\n for item in self.mask:\n sum += item.prob\n if sum >= roll: return item.elem\n return None", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n \"*** YOUR CODE HERE ***\"\n k, max_value, max_num = 1, 0, 0\n roll = make_averaged(roll_dice, num_samples)\n while k <= 10:\n current_value = roll(k, dice)\n #print('k: ' + str(k) + ' current_value: ' + str(current_value))\n if current_value > max_value:\n max_value, max_num = current_value, k\n k += 1\n return max_num\n # END PROBLEM 9", "def temporary_score(self, dice_roll):\n\n temporary_score = 0\n if dice_roll > 1:\n temporary_score += dice_roll\n else:\n temporary_score = 0\n return temporary_score", "def roll(self):\n return cbrandom.throwDices(\"1d20\")", "def chance(dice):\n return sum(dice)", "def remaining_space_in_hold(self):\n balls = self.config['balls_to_hold'] - self.balls_held\n if balls < 0:\n balls = 0\n return balls", "def roll_all(bank):\n global best_earning, best_round\n round_ct = 0\n\n while bank > 0:\n round_ct += 1\n round_roll = roll_2_dice()\n bank = evaluate_roll(round_roll, bank)\n\n # print(\"Round \", round_ct, \" | Rolled \", round_roll, \" | Pot: $\", format(pot, ',.2f'), sep='') # debug\n\n best_earning, best_round = track_best(bank, round_ct)\n\n return round_ct", "def score(hand):\n current_hand = {}\n for dice in hand:\n if not current_hand.get(dice):\n current_hand[dice] = dice\n else:\n current_hand[dice] += dice\n\n #compute the current score for each dice\n\n return max(current_hand.values())", "def 
max_duffel_bag_value(cakes, capacity):\n curr_w = 0 # current weight of the bag\n price = 0\n cakes.sort(reverse=True, key=lambda c: c[1]/c[0] if c[0] else sys.maxsize)\n for cake in cakes:\n if cake[0] == 0: return sys.maxsize # infinite number of cakes can be taken\n while (curr_w + cake[0]) <= capacity:\n curr_w += cake[0]\n price += cake[1]\n return price", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def holding_returns(self):\n buy_prices = dict_to_df(self._last_buy_prices) \\\n .reindex(self._trading_days, method='ffill')\n holding_returns = (self.holdings * self._close_prices / buy_prices) - 1\n return valid_range(holding_returns)[buy_prices.columns]", "def score_yamp(dices):\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if count_equal(dices, dice_num) == DICE_COUNT:\n return 50\n\n return 0", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def get_damage_roll(self):\n\t\tif self.difficulty == 1:\n\t\t\treturn 4\n\t\tif self.difficulty == 2:\n\t\t\treturn 6\n\t\tif self.difficulty == 3:\n\t\t\treturn 8\n\t\tif self.difficulty > 3:\n\t\t\treturn 10", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)", "def roll(self):\n self.rolled = random.randint(1, 6)\n return self.rolled", "def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob", "def highestBetNotFold(self):\n return max([0]+[p._bet for p in self.players.values() if p.serial in self.in_game and p.notFold()])", "def get_big_joker_value(deck):\n \n return max(deck)", "def ability(self):\n random.seed()\n rolls = [random.randint(1,6) for i in range(4)]\n return sum(sorted(rolls)[1:4])", "def playRound(budget: int) -> tuple:\n sum = sumOfDice(random.randint(1,6), random.randint(1,6))\n if sum == 7:\n budget += 4\n return (\"Win\",budget)\n else:\n budget -= 1\n return (\"Loss\",budget)", "def returns_over_max_drawdown(tot_returns_dict, year, lifetime_maximum_drawdown):\n\n return round(tot_returns_dict[year] / abs(lifetime_maximum_drawdown), 2)", "def house(dice):\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.sides)", "def bacon_strategy(score, opponent_score, margin=8, num_rolls=4):\n # BEGIN PROBLEM 10\n if free_bacon(opponent_score) >= margin:\n return 0\n return num_rolls\n # END PROBLEM 10", "def roll(self):\n return randint(1,6)", "def roll(self):\r\n import random as _random\r\n return _random.randint(1, self.__sides_count)", "def small_straight(dice):\n if sorted(dice) == [1, 2, 3, 4, 5]:\n return sum(dice)\n return 0", "def yatzy(dice):\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0", "def calculate_pool_reward(height: uint32) -> uint64:\n\n if height == 0:\n 
return uint64(int((7 / 8) * 21000000 * _mojo_per_chia))\n elif height < 3 * _blocks_per_year:\n return uint64(int((7 / 8) * 2 * _mojo_per_chia))\n elif height < 6 * _blocks_per_year:\n return uint64(int((7 / 8) * 1 * _mojo_per_chia))\n elif height < 9 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.5 * _mojo_per_chia))\n elif height < 12 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.25 * _mojo_per_chia))\n else:\n return uint64(int((7 / 8) * 0.125 * _mojo_per_chia))", "def evaluate_roll(roll, pot):\n if roll == 7:\n pot += 4\n else:\n pot -= 1\n\n return pot", "def rollDie(self):\n return random.randint(1, self.sides)", "def easy_solve(n=30):\n # Use p1 and p2 to store expected payoffs per player\n p_1 = []\n p_2 = []\n # Get each possible dice roll in order\n arr = list(range(1, n + 1))\n x = (1/2)*(np.sqrt(2*n**2 + 2*n - 3) - 1)\n for i in range(n):\n # Let player 1 choose ith possible integer\n player_1 = arr[i]\n # Use aforementioned strategy\n if arr[i] <= int(x):\n player_2 = arr[i] + 1\n # Append expected value per player according to which integer is larger\n p_1.append((1/n)*sum(arr[:player_1]))\n p_2.append((1/n)*sum(arr[player_2-1:]))\n else:\n player_2 = arr[i] - 1\n p_2.append((1/n)*sum(arr[:player_2]))\n p_1.append((1/n)*sum(arr[player_1-1:]))\n return p_1, p_2", "def __play_delear(self, state : State):\n # print (\"Playing as dealer\")\n dealer_sum = state.max_safe_sum(dealer=True)\n assert (-1 <= dealer_sum <= 31)\n while (0 <= dealer_sum < 25):\n # Keep hitting\n card, suite = self.draw()\n state.update_state (card, suite, dealer=True)\n dealer_sum = state.max_safe_sum(dealer=True)\n assert (-1 <= dealer_sum <= 31)\n\n return dealer_sum", "def score_full(dices):\n three_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n threes = count_equal(dices, dice_num) == 3\n if threes:\n three_matched_num = dice_num\n break\n\n two_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if dice_num != three_matched_num:\n twos = count_equal(dices, dice_num)\n if twos == 2:\n two_matched_num = twos\n break\n\n if three_matched_num > 0 and two_matched_num > 0:\n return 25\n else:\n return 0", "def calcul_max_loss(self, percent_allowable_loss):\n if self.capital * percent_allowable_loss / 100 > self.minimal_buy:\n return self.capital * percent_allowable_loss / 100\n else:\n return self.minimal_buy", "def four_d6_drop_lowest() -> list:\n rolls: List[int] = []\n for x in range(1, 7):\n new_val: int = 0\n i: int = 0\n while i < 7:\n roll: int = multi_die(3, 6)\n if roll >= new_val:\n new_val = roll\n i += 1\n rolls.append(new_val)\n return rolls", "def roll(self):\n total = 0\n\n if self.num_dice is not None and self.dice_type is not None:\n for _ in range(self.num_dice):\n total += randint(1, self.dice_type)\n elif self.min_value is not None and self.max_value is not None:\n total = randint(self.min_value, self.max_value)\n\n return total + self.plus", "def score(hand):\r\n \r\n max_score = []\r\n \r\n for dice in hand:\r\n max_score.append(hand.count(dice) * dice)\r\n \r\n return max(max_score)", "def roll(self):\n self.current_roll = random.randint(self.min, self.max)\n return self.current_roll", "def get_roll_value_for_knack(self) -> int:\n # get stats and skills for our check\n try:\n mods: ModifierHandler = self.character.mods\n base = mods.get_total_roll_modifiers(\n self.check.get_stats_list(self.character),\n self.check.get_skills_list(self.character),\n )\n except AttributeError:\n return 0\n return 
StatWeight.get_weighted_value_for_knack(base)", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def holding(self) -> float:\n return self._holding", "def calculate_pool_reward(height: uint32) -> uint64:\n\n if height == 0:\n return uint64(int((9 / 10) * 21000062 * _mojo_per_ethgreen))\n elif height < 3 * _blocks_per_year:\n return uint64(int((8 / 10) * 20 * _mojo_per_ethgreen))\n elif height < 6 * _blocks_per_year:\n return uint64(int((8 / 10) * 10 * _mojo_per_ethgreen))\n elif height < 9 * _blocks_per_year:\n return uint64(int((8 / 10) * 5 * _mojo_per_ethgreen))\n elif height < 12 * _blocks_per_year:\n return uint64(int((8 / 10) * 2.5 * _mojo_per_ethgreen))\n else:\n return uint64(int((8 / 10) * 1.25 * _mojo_per_ethgreen))", "def calculate_upper_boundary(self, divisor):\n\n # see how high you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n highest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = highest_divisor + estimator\n else:\n highest_divisor = divisor\n divisor = prev_divisor + estimator\n if highest_divisor == divisor:\n break\n counter += 1\n return math.floor(highest_divisor * 1000) / 1000", "def score(hand):\n if (hand==()):\n return 0\n score_board=[0,0,0,0,0,0,0,0,0,0,0,0]\n for dice in hand:\n score_board[dice-1]+=dice\n max_score=max(score_board)\n return max_score", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? 
what the fuck does this mean\n # look two states forward\n pass", "def large_straight_points(dice_list):\n if straight_size(dice_list) >= 5 or check_yahtzee(dice_list):\n return 40\n else:\n return 0", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def roll(self):\n\t\treturn randint(1, self.num_sides)", "def do_praise_roll(self, base=0):\n roll = do_dice_check(self.caller.char_ob, stat=\"charm\", skill=\"propaganda\")\n roll *= int(self.caller.Dominion.assets.prestige_mod)\n roll += base\n return max(roll, self.MIN_VALUE)", "def get_small_joker_value(deck):\n \n return max(deck) - 1", "def d(qty, sides):\r\n value = 0\r\n while qty > 0:\r\n value = value + random.randint(1, sides)\r\n qty = qty - 1\r\n return value", "def calculateFrequentRolls():\n\n # initialize outcomeCounts to all 0s. The index corresponds to the outcome\n # NOTE: index positions 0 and 1 are not possible\n outcomeCounts = dict()\n for count in range(DIE_SIDES*2+1):\n outcomeCounts[count] = 0\n\n rollAndTallyOutcomes(outcomeCounts)\n\n print(\"outcomeCounts:\",outcomeCounts) # For debugging\n\n highestCount = max(outcomeCounts.values())\n\n mostFrequentRolls = findOutcomes(outcomeCounts, highestCount)\n\n print(\"mostFrequentRolls:\", mostFrequentRolls,\n \"and highestCount:\",highestCount) # For debugging\n\n return mostFrequentRolls, highestCount", "def calculate_score(dice):\n # version_1\n\n if len(dice) > 6:\n raise Exception(\"Cheating Cheater!\")\n\n counts = Counter(dice)\n\n if len(counts) == 6:\n return 1500\n\n if len(counts) == 3 and all(val == 2 for val in counts.values()):\n return 1500\n\n score = 0\n\n ones_used = fives_used = False\n\n for num in range(1, 6 + 1):\n\n pip_count = counts[num]\n\n if pip_count >= 3:\n\n if num == 1:\n\n ones_used = True\n\n elif num == 5:\n\n fives_used = True\n\n score += num * 100\n\n # handle 4,5,6 of a kind\n pips_beyond_3 = pip_count - 3\n\n score += score * pips_beyond_3\n\n # bug if 2 threesomes? 
Let's test it\n\n # 1s are worth 10x\n if num == 1:\n score *= 10\n\n if not ones_used:\n score += counts.get(1, 0) * 100\n\n if not fives_used:\n score += counts.get(5, 0) * 50\n\n return score", "def simple_roll(dice):\n return roll(dice).total", "def drawdown_calculator(excess_returns):\r\n\r\n df = pd.DataFrame(excess_returns, columns=['net'])\r\n df['cum_ret'] = (1 + df['net']).cumprod() - 1\r\n df['high_mark'] = np.maximum.accumulate(df['cum_ret'].fillna(0))\r\n df.loc[0, 'high_mark'] = np.nan\r\n df['drawdown'] = (1 + df['cum_ret']) / (1 + df['high_mark']) - 1\r\n max_drawdown = np.min(df['drawdown'])\r\n\r\n df.loc[0, 'duration'] = 0.\r\n for i in range(1, len(df)):\r\n df.loc[i, 'duration'] = 0 if df.loc[i, 'drawdown'] == 0 else 1 + df.loc[i-1, 'duration']\r\n max_drawdown_duration = np.max(df['duration'])\r\n\r\n return max_drawdown, max_drawdown_duration", "def roll_dice(self):\r\n return randint(1,self.sides)", "def calc_stay_prob(rollouts):\n states = rollouts.states\n actions = rollouts.actions\n rewards = rollouts.rewards\n\n num_test_episodes = states.shape[0]\n num_trials = states.shape[1]\n count_trial_stayed = 0.01 + np.zeros((2, 2, num_test_episodes)) # [common/uncommon, reward/unrewarded]\n count_trial_all = 0.01 + np.zeros((2, 2, num_test_episodes))\n for epi in range(num_test_episodes):\n for t in range(0, num_trials-2, 2):\n uncommon_transition = int(actions[epi, t] != states[epi, t+1]-1)\n count_trial_all[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += 1\n count_trial_stayed[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += \\\n int(actions[epi, t+2] == actions[epi, t])\n return np.divide(count_trial_stayed, count_trial_all), count_trial_stayed, count_trial_all", "def small_straight(dice):\n \n if dice ==[1,2,3,4,5]:\n return sum(dice)\n else:\n return 0", "def max_profit(prices: List[int]) -> int:", "def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def rolldie():\n return int(random.random()*6)+1 # or use randrange()", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n averaged_dice = make_averaged(roll_dice, num_samples)\n max_score = 0\n result = 0\n for num_rolls in range(1, 11):\n average_turn_score = averaged_dice(num_rolls, dice)\n if average_turn_score > max_score:\n max_score = average_turn_score\n result = num_rolls\n elif average_turn_score == max_score: # if tied, lower num rolls\n if num_rolls < result:\n max_score = average_turn_score\n result = num_rolls\n return result\n # END PROBLEM 9", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result" ]
[ "0.7388774", "0.70694953", "0.69726455", "0.696321", "0.69587135", "0.6900279", "0.6876599", "0.6836615", "0.67820185", "0.67149234", "0.67129946", "0.6587366", "0.65404105", "0.6535527", "0.65227497", "0.6499015", "0.62842536", "0.6144699", "0.60962355", "0.6076315", "0.5911923", "0.58954316", "0.5892068", "0.58653563", "0.5827538", "0.5795526", "0.57942176", "0.5789509", "0.57706076", "0.57664585", "0.5756134", "0.5752197", "0.5729279", "0.5718807", "0.5716193", "0.5713704", "0.57077897", "0.56749356", "0.56733793", "0.5671847", "0.56642306", "0.56617206", "0.5658829", "0.56530786", "0.5647515", "0.5647154", "0.56465864", "0.56465", "0.56355286", "0.56286466", "0.56286466", "0.5626308", "0.5623226", "0.5619804", "0.5612423", "0.56068534", "0.5604187", "0.5599037", "0.55952644", "0.5588775", "0.5588026", "0.5587488", "0.5583039", "0.55668724", "0.55651313", "0.5560642", "0.55596125", "0.5548771", "0.554371", "0.55357605", "0.5532735", "0.55312294", "0.55287105", "0.551831", "0.55176115", "0.5513712", "0.55089456", "0.5499304", "0.54967684", "0.5495124", "0.54918903", "0.54841083", "0.54703593", "0.5468928", "0.5461104", "0.5460204", "0.5459321", "0.5451461", "0.54512775", "0.54494274", "0.54465795", "0.54457545", "0.54457545", "0.54457545", "0.54457545", "0.5442862", "0.5426945", "0.54261416", "0.54041576", "0.53926754" ]
0.7348299
1
Compute the dice to hold and expected score for an example hand
def run_example():
    num_die_sides = 6
    hand = (1, 1, 1, 5, 6)
    hand_score, hold = strategy(hand, num_die_sides)
    print "Best strategy for hand", hand, "is to hold", hold, "with expected score", hand_score
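For context: run_example above relies on helper functions (strategy, expected_value, score, gen_all_holds, gen_all_sequences) that are not part of this record's document field. The sketch below is a minimal, self-contained reconstruction of those helpers, assuming the interfaces shown in the companion snippets listed under negatives; it is illustrative only, not this dataset's reference implementation, and is written in Python 3 (the print statement above is Python 2 syntax).

from itertools import product

def gen_all_sequences(outcomes, length):
    # All ordered sequences of the given length drawn from outcomes.
    return set(product(outcomes, repeat=length))

def gen_all_holds(hand):
    # All sub-multisets of the hand, each as a sorted tuple (includes the empty hold).
    holds = set([()])
    for die in hand:
        for held in list(holds):
            holds.add(tuple(sorted(held + (die,))))
    return holds

def score(hand):
    # Highest value of (die value * number of times it appears in the hand).
    return max(hand.count(die) * die for die in hand) if hand else 0

def expected_value(held_dice, num_die_sides, num_free_dice):
    # Average score over every possible roll of the free dice, keeping held_dice.
    outcomes = range(1, num_die_sides + 1)
    rolls = gen_all_sequences(outcomes, num_free_dice)
    total = sum(score(held_dice + roll) for roll in rolls)
    return total / len(rolls)

def strategy(hand, num_die_sides):
    # Return (best expected score, dice to hold) over all possible holds.
    best_value, best_hold = 0.0, ()
    for hold in gen_all_holds(hand):
        value = expected_value(hold, num_die_sides, len(hand) - len(hold))
        if value > best_value:
            best_value, best_hold = value, hold
    return best_value, best_hold

With these helpers, strategy((1, 1, 1, 5, 6), 6) returns an (expected score, hold) pair, which is exactly what run_example unpacks and prints.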
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)", "def strategy(hand, num_die_sides):\r\n \r\n best_hold = (0.0, ())\r\n current_score = 0\r\n \r\n for held_dice in gen_all_holds(hand):\r\n score = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))\r\n if score > current_score:\r\n current_score = score\r\n best_hold = (current_score, held_dice)\r\n \r\n return best_hold", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n\n outcome = ()\n for die in range(1, num_die_sides + 1):\n outcome +=(die, )\n possible_outcomes = gen_all_sequences(outcome, num_free_dice)\n output = 0\n for single_output in possible_outcomes:\n current_score = score(single_output + held_dice)\n output += current_score\n\n return output/(len(possible_outcomes)*1.0)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, 
num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1,2,5,5,5)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def expected_value(held_dice, num_die_sides, num_free_dice):\n all_sequences = gen_all_sequences(range(1,num_die_sides+1), num_free_dice)\n iter_seque=[]\n score_seque=[]\n for seq in all_sequences:\n iter_seque.append(list(seq)+list(held_dice))\n score_seque.append(score(iter_seque[-1]))\n return float(sum(score_seque))/float(len(score_seque))", "def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)", "def score_hands():\n #\"http://projecteuler.net/project/poker.txt\"\n poker_txt= urllib.request.urlopen( \"file:poker.txt\" ).read().decode(\"ASCII\")\n outcome= collections.Counter()\n for line in poker_txt.splitlines():\n if not line: continue\n cards= line.split()\n assert len(cards) == 10\n h_1 = to_hand(cards[:5])\n h_2 = to_hand(cards[5:])\n s_1= score_tuple(h_1)\n s_2= score_tuple(h_2)\n assert s_1 != s_2, \"Problem scoring {0!r} {1!r}\".format(h_1,h_2)\n winner= 1 if s_1 > s_2 else 2\n # The most obscure case:\n # if s_1[:2] == s_2[:2]:\n # print( \"Close\", cards[:5], s_1, cards[5:], s_2, winner )\n outcome[winner] += 1\n # Paranoid double check on two scoring systems.\n if score_int(h_1) > score_int(h_2) if winner == 1 else score_int(h_1) < score_int(h_2):\n pass\n else:\n print( \"{!r} {!r} Player {:d}\".format(cards[:5],cards[5:],winner) )\n print( s_1, \":\", s_2 )\n print( score_int(h_1), score_int(h_2) )\n raise AssertionError( \"Logic Problem\" )\n return outcome", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def dice_score(ground_truth, prediction):\r\n\r\n # Normalize\r\n prediction /= np.amax(prediction)\r\n ground_truth /= np.amax(ground_truth)\r\n\r\n true_positive_mask = np.logical_and(ground_truth==1, prediction==1)\r\n false_positive_mask = np.logical_and(ground_truth==0, prediction==1)\r\n false_negative_mask = np.logical_and(ground_truth==1, prediction==0)\r\n\r\n TP = np.count_nonzero(true_positive_mask)\r\n FP = np.count_nonzero(false_positive_mask)\r\n FN = 
np.count_nonzero(false_negative_mask)\r\n\r\n DSC = 2*TP / (2*TP + FP + FN)\r\n\r\n return DSC", "def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, hold)\n return best_move", "def chance(dice):\n return sum(dice)", "def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected_values = {}\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n current_expexted_value = expected_value(hold, num_die_sides, num_free_dice)\n expected_values[current_expexted_value] = hold\n\n max_value = max(expected_values.keys())\n return tuple((max_value, expected_values[max_value]))", "def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score", "def score_yamp(dices):\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if count_equal(dices, dice_num) == DICE_COUNT:\n return 50\n\n return 0", "def dice_score(binary_image, binary_control):\n # figure_of_control(binary_control, 'Optimal given threshold')\n match = creation_of_match_array(binary_image, binary_control)\n # figure_of_control(match, 'deviation of optimal threshold and otsu')\n true = sum(sum(match))\n false = np.size(match) - true\n score = 2 * true / (2 * true + false)\n # print(\"True hits: \", true)\n # print(\"False hits: \", false)\n # print('Dice score: ', score)\n return score", "def score(hand):\n current_hand = {}\n for dice in hand:\n if not current_hand.get(dice):\n current_hand[dice] = dice\n else:\n current_hand[dice] += dice\n\n #compute the current score for each dice\n\n return max(current_hand.values())", "def calculate_score(dice):\n # version_1\n\n if len(dice) > 6:\n raise Exception(\"Cheating Cheater!\")\n\n counts = Counter(dice)\n\n if len(counts) == 6:\n return 1500\n\n if len(counts) == 3 and all(val == 2 for val in counts.values()):\n return 1500\n\n score = 0\n\n ones_used = fives_used = False\n\n for num in range(1, 6 + 1):\n\n pip_count = counts[num]\n\n if pip_count >= 3:\n\n if num == 1:\n\n ones_used = True\n\n elif num == 5:\n\n fives_used = True\n\n score += num * 100\n\n # handle 4,5,6 of a kind\n pips_beyond_3 = pip_count - 3\n\n score += score * pips_beyond_3\n\n # bug if 2 threesomes? 
Let's test it\n\n # 1s are worth 10x\n if num == 1:\n score *= 10\n\n if not ones_used:\n score += counts.get(1, 0) * 100\n\n if not fives_used:\n score += counts.get(5, 0) * 50\n\n return score", "def expected_score(hand, deck, verbose=False):\n remaining = list(set(deck) - set(hand))\n Ex_scores = []\n for flip in remaining:\n Ex_scores.append(Cribbage.score_hand(hand, flip, verbose))\n\n return mean(Ex_scores)", "def score(hand):\r\n \r\n max_score = []\r\n \r\n for dice in hand:\r\n max_score.append(hand.count(dice) * dice)\r\n \r\n return max(max_score)", "def yatzy(dice):\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0", "def test_get_score(self):\n hand = dice_hand.Dicehand()\n res = hand.get_round_score()\n exp = res == hand.current_score\n self.assertTrue(exp)", "def evaluate_dice(\n\tpreds: np.ndarray,\n\tlabels: np.ndarray,\n\ttolerance = 0.95\n\t):\n\n\tAVG_DICE = 0.0\n\tempty = 0.0\n\tfor true, pred in zip(labels, preds):\n\t\tif not np.sum(true):\n\t\t\tempty += 1.\n\t\tAVG_DICE += dice(true[0], pred[0] > tolerance)\n\n\treturn AVG_DICE / (preds.shape[0] - empty) if empty != preds.shape[0] else 0.0", "def handDecision(handIn):", "def yatzy_rule(n):\n def ones(dice):\n \"\"\" Count ones in list. \"\"\"\n return sum([x for x in dice if x == 1])\n\n def twos(dice):\n \"\"\" Count twos in list. \"\"\"\n return sum([x for x in dice if x == 2])\n\n def threes(dice):\n \"\"\" Count threes in list. \"\"\"\n return sum([x for x in dice if x == 3])\n\n def fours(dice):\n \"\"\" Count fours in list. \"\"\"\n return sum([x for x in dice if x == 4])\n\n def fives(dice):\n \"\"\" Count fives in list. \"\"\"\n return sum([x for x in dice if x == 5])\n\n def sixes(dice):\n \"\"\" Count sixes in list. \"\"\"\n return sum([x for x in dice if x == 6])\n\n def pair(dice):\n \"\"\" Return sum of highest pair in list. \"\"\"\n\n def max_or_zero(list):\n \"\"\" Returns maximum value of a list; 0 if list is empty. \"\"\"\n try:\n return max(list)\n except ValueError:\n return 0\n\n return 2 * max_or_zero([i for i, j in combinations(dice, 2) if i == j])\n \n def double_pair(dice):\n \"\"\" TODO! \"\"\"\n\n # Sentinel value.\n return 1\n\n def threes(dice):\n \"\"\" Find a set of three equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k in combinations(dice, 3):\n if i == j == k:\n return 3 * i\n\n return 0\n\n def fours(dice):\n \"\"\" Find a set of four equal values in list dice\n and return its sum. Returns 0 if nothing found.\"\"\"\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0\n\n def small_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [1, 2, 3, 4, 5] (the small straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [1, 2, 3, 4, 5]:\n return sum(dice)\n return 0\n\n def big_straight(dice):\n \"\"\" Checks the list dice for the exact combination\n [2, 3, 4, 5, 6] (the large straight) and returns\n its sum. Returns 0 if nothing found.\"\"\"\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0\n\n def house(dice):\n \"\"\" Try to find a house in the list of cards\n i.e. [2, 2, 2, 3, 3] or [5, 5, 4, 4, 4] and\n return its sum. Returns 0 if nothing found.\"\"\"\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0\n\n def chance(dice):\n \"\"\" Returns the sum of dice. 
\"\"\"\n return sum(dice)\n\n def yatzy(dice):\n \"\"\" If every value in list dice is equal, return its sum.\n Else, return 0. \"\"\"\n if (dice[0] == dice[1] == dice[2] == dice[3] == dice[4]):\n return 50\n return 0\n\n return [ones, twos, threes, fours, fives, sixes, pair, double_pair,\n threes, fours, small_straight, big_straight, house, chance, yatzy][n]", "def evaluate_official(y_true, y_pred):\n try:\n # Compute tumor+kidney Dice\n tk_pd = np.greater(y_pred, 0)\n tk_gt = np.greater(y_true, 0)\n intersection = np.logical_and(tk_pd, tk_gt).sum()\n tk_dice = 2*intersection/(tk_pd.sum() + tk_gt.sum())\n except ZeroDivisionError:\n return 0.0, 0.0\n\n try:\n # Compute tumor Dice\n tu_pd = np.greater(y_pred, 1)\n tu_gt = np.greater(y_true, 1)\n intersection = np.logical_and(tu_pd, tu_gt).sum()\n tu_dice = 2*intersection/(tu_pd.sum() + tu_gt.sum())\n except ZeroDivisionError:\n return tk_dice, 0.0\n\n return tk_dice, tu_dice", "def temporary_score(self, dice_roll):\n\n temporary_score = 0\n if dice_roll > 1:\n temporary_score += dice_roll\n else:\n temporary_score = 0\n return temporary_score", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)", "def score_full(dices):\n three_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n threes = count_equal(dices, dice_num) == 3\n if threes:\n three_matched_num = dice_num\n break\n\n two_matched_num = 0\n for dice_num in range(MIN_DICE, MAX_DICE + 1):\n if dice_num != three_matched_num:\n twos = count_equal(dices, dice_num)\n if twos == 2:\n two_matched_num = twos\n break\n\n if three_matched_num > 0 and two_matched_num > 0:\n return 25\n else:\n return 0", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in 
card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def get_metrics(self, phase):\n dice = {}\n l = len(self.base_dice_scores[phase])\n for i, d in enumerate(self.base_dice_scores[phase]):\n for k in d:\n if k not in dice:\n dice[k] = 0\n dice[k] += d[k] / l\n \n dice_neg = np.mean(self.dice_neg_scores[phase])\n dice_pos = np.mean(self.dice_pos_scores[phase])\n dices = [dice, dice_neg, dice_pos]\n iou = np.nanmean(self.iou_scores[phase])\n return dices, iou", "def dice(hm, hf):\n return 2 * np.count_nonzero(hm & hf) / float(np.count_nonzero(hm) + np.count_nonzero(hf))", "def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return max(sum((dice * self.level).roll()), 1)", "def calculate_score(hand,hand_value):\n first,second,third,fourth,fifth,*_=[rank for rank,suit in hand]\n if fifth==12:\n fifth=-1\n return calculate_score_pairs(hand_value,first,second,third,fourth,fifth)", "def score(self, hand, position):\n\n try:\n assert self.grid[position][1] == \"---\"\n except AssertionError:\n print self\n print position\n raise FilledInError\n except KeyError:\n print \"\\nCheck your code. 
This is not a valid position:\", position, \"\\n\"\n raise\n\n if position.startswith(\"n\"): # Return sum of relevant number\n n = int(position[1])\n return sum(d for d in hand.dice if d == n)\n\n elif position in [\"k3\", \"k4\", \"ch\"]: # Return total sum\n if position == \"k3\" and hand.max_tally()[0] < 3:\n return 0 # The is not a three of a kind\n elif position == \"k4\" and hand.max_tally()[0] < 4:\n return 0 # The is not a four of a kind\n return sum(hand.dice)\n\n elif position in [\"fh\", \"ss\", \"ls\", \"yz\", \"yb\"]: # Return fixed score\n if position == \"fh\":\n tallies = hand.get_dicedict().values()\n if 1 in tallies:\n return 0 # This is not a full house\n\n elif position in [\"ss\", \"ls\"]:\n ds = \"\".join(str(x) for x in hand.sort_by_value())\n if position == [\"ss\"]:\n if \"1234\" not in ds and \"2345\" not in ds and \"3456\" not in ds:\n return 0\n else:\n if \"12345\" not in ds and \"23456\" not in ds:\n return 0\n\n else:\n if hand.max_tally()[0] < 5:\n return 0 # This is not a yahtzee\n if position == \"yb\" and self.grid[\"yz\"] == \"---\":\n return 0 # YB only scores points if there already is a YZ\n\n return fixed_scores[position]\n\n else:\n raise InvalidPositionError", "def roll_dice(roll, modifiers):\n try:\n if modifiers[\"Advantage\"] and not modifiers[\"Disadvantage\"]:\n modifiers[\"Advantage\"] = False\n return max(roll_dice(roll, modifiers), roll_dice(roll,modifiers))\n if modifiers[\"Disadvantage\"] and not modifiers[\"Advantage\"]:\n modifiers[\"Disadvantage\"] = False\n return min(roll_dice(roll, modifiers), roll_dice(roll, modifiers))\n num_dice = int(roll.split(\"D\")[0])\n if modifiers[\"Critical\"]:\n num_dice*=2\n num_dice+=modifiers[\"Brutal\"]\n die_type = roll.split(\"D\")[1]\n if die_type[0] == \"4\" or die_type[0] == \"6\" or die_type[0] == \"8\":\n die_type = int(die_type[0])\n elif die_type[:3] == \"100\" or die_type[0] == \"%\":\n die_type = 100\n elif die_type[:2] == \"10\" or die_type[:2] == \"12\" or die_type[:2] == \"20\":\n die_type = int(die_type[:2])\n else:\n die_type = 6\n roll_total = 0\n critical_success = False\n critical_failure = False\n for die in range(num_dice):\n die_result = random.randint(1,die_type)\n if die_result == 1 and modifiers[\"Lucky\"] or die_result <= 2 and modifiers[\"Great Weapon\"]:\n die_result = random.randint(1,die_type)\n if die_result < modifiers[\"Minimum Roll\"]:\n die_result = modifiers[\"Minimum Roll\"]\n if die_result == 20 and die_type == 20:\n critical_success = True\n if die_result == 1 and die_type == 20:\n critical_failure = True\n roll_total += die_result\n return roll_total\n except ValueError:\n return \"Error\"", "def house(dice):\n s = sorted(dice)\n if ((s[0] == s[1] == s[2] and s[3] == s[4]) or\n (s[0] == s[1] and s[2] == s[3] == s[4])):\n return sum(dice)\n return 0", "def score(hand):\n occurrences = [] \n for die in hand:\n if die > len(occurrences):\n occurrences.extend([0 for dummy_idx in range(len(occurrences) ,die)]) \n occurrences[die - 1] += 1\n maxi = 0\n for idx in range(len(occurrences)):\n if (idx+1) * occurrences[idx] > maxi:\n maxi = (idx + 1) * occurrences[idx]\n return maxi", "def score(hand):\n if (hand==()):\n return 0\n score_board=[0,0,0,0,0,0,0,0,0,0,0,0]\n for dice in hand:\n score_board[dice-1]+=dice\n max_score=max(score_board)\n return max_score", "def score(self):\n hand = sorted(self.hand)\n score = -self.chips\n index = 0\n while index < len(hand):\n if index == 0 or hand[index-1] != hand[index]-1:\n score += hand[index]\n index += 1\n return score", 
"def _test_misc(self, state):\n state.result = 0\n dice_count = state.selection.dice_count\n dice_eyes = state.selection.dice_eyes\n\n if dice_count > 200:\n print(self._lang[\"too_many_dice\"])\n return state\n\n if state.dice == \"auto\":\n state.rolls = self._roll_dice(dice_count, 1, dice_eyes)\n\n # create sum of all rolled dice and modifier\n for _, value in enumerate(state.rolls):\n state.result += value\n\n state.result += state.mod\n return state", "def calculate_score(sorted_dice, choice):\n score = NO_MATCH_VALUE()\n str_d = \"\".join(map(str, sorted_dice))\n if choice in range(1, 6+1):\n score = sorted_dice.count(choice) * choice\n elif choice == REC_THREE_OF_A_KIND() and re.search(r'([\\d])\\1{2}', str_d):\n score = sum(sorted_dice)\n elif choice == REC_FOUR_OF_A_KIND() and re.search(r'([\\d])\\1{3}', str_d):\n score = sum(sorted_dice)\n elif choice == REC_FULL_HOUSE():\n if re.search(r'([\\d])\\1{2}([\\d])\\2', str_d) or re.search(r'([\\d])\\1([\\d])\\2{2}', str_d):\n score = FULL_HOUSE_VALUE()\n elif choice == REC_S_STRAIGHT() and re.search(r'(1234|2345|3456|12234|23345|34456|12334|23445|34556)', str_d):\n score = SMALL_STRAIGHT_VALUE()\n elif choice == REC_L_STRAIGHT() and re.match(r'(12345|23456)', str_d):\n score = LARGE_STRAIGHT_VALUE()\n elif choice == REC_YAHTZEE() and re.search(r'([\\d])\\1{4}', str_d):\n score = FIRST_YAHTZEE_VALUE()\n elif choice == REC_CHANCE():\n score = sum(sorted_dice)\n return score", "def stand(p_hand, d_hand):\n\n PLAYER_WIN = \"Player Wins!\\n\\n\\n\"\n DEALER_WIN = \"Dealer Wins!\\n\\n\\n\"\n DRAW = \"Game a draw\\n\\n\\n\"\n MAX = 22\n\n # Tie Game\n if p_hand.handSum() >= MAX and d_hand.handSum() >= MAX:\n return \"Both lose. Both hands over 21.\"\n\n if p_hand.handSum() == d_hand.handSum():\n return DRAW\n\n # Player Wins\n if p_hand.handSum() > d_hand.handSum() and (\n p_hand.handSum() < MAX):\n return PLAYER_WIN\n\n if p_hand.handSum() < MAX and d_hand.handSum() >= MAX:\n return PLAYER_WIN\n\n # Dealer Wins\n if d_hand.handSum() > p_hand.handSum() and (\n d_hand.handSum() < MAX):\n return DEALER_WIN\n\n if p_hand.handSum() > d_hand.handSum() and (\n p_hand.handSum() >= MAX):\n return DEALER_WIN", "def score(hand):\n max_score = []\n for die in hand:\n max_score.append(hand.count(die) * die)\n return max(max_score)", "def chance_points(dice_list):\n return sum(dice_list)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def get_points(self):\n self.round_points = 0\n for die in self.dice:\n if die == 1:\n self.round_points += 100\n elif die == 5:\n self.round_points += 50\n return self.round_points", "def take_turn(num_rolls, opponent_score, dice=six_sided):\n # Leave these assert statements here; they help check for errors.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\n assert opponent_score < 100, 'The game should be over.'\n # BEGIN PROBLEM 2\n score = 0\n # free bacon rule implementation\n if num_rolls == 0:\n score = free_bacon(opponent_score)\n else:\n score = roll_dice(num_rolls, dice)\n # hogtimus prime rule implementation\n if score == 19:\n score = 23\n if score == 17:\n score = 19\n if score == 13:\n score = 17\n if score == 11:\n score = 13\n if score == 7:\n score = 11\n if score == 5:\n score = 7\n if score == 3:\n score = 5\n if score == 2:\n score = 3\n # when pigs fly rule implementation\n if 
score > 25 - num_rolls:\n score = 25\n score -= num_rolls\n return score\n\n # END PROBLEM 2", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def ability(self):\n random.seed()\n rolls = [random.randint(1,6) for i in range(4)]\n return sum(sorted(rolls)[1:4])", "def roll_dice(check_double=True):\n\n roll = np.random.choice(np.arange(1, 7), 2)\n\n if check_double:\n return roll.sum(), roll[0] == roll[1]\n else:\n return roll.sum()", "def dice_loss(yhat, ytrue, epsilon=1e-6):\n # compute Dice components\n intersection = torch.sum(yhat * ytrue, (1,2,3))\n cardinal = torch.sum(yhat + ytrue, (1,2,3))\n\n return torch.mean(1. - (2 * intersection / (cardinal + epsilon)))", "def select_dice(score, opponent_score, dice_swapped):\n # BEGIN PROBLEM 4\n dice = six_sided\n if dice_swapped == True:\n dice = four_sided\n # END PROBLEM 3\n if (score + opponent_score) % 7 == 0:\n dice = reroll(dice)\n return dice", "def test_add_chance(self):\n chance_fixtures = [[1, 2, 3, 4, 5],\n [1, 1, 1, 1, 1],\n [6, 6, 6, 6, 6],\n [1, 1, 1, 1, 2],\n [1, 1, 1, 3, 3],\n [1, 2, 3, 4, 6],\n ]\n\n for fixture in chance_fixtures:\n score = self.roll.add_chance(fixture)\n\n self.assertEqual(score, sum(fixture))\n self.assertNotEqual(score, 0)\n self.assertEqual(len(fixture), 5)", "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def test_hand_values(hand, result):\n from poker_rankings import PokerHand\n from collections import defaultdict\n heroes_hand = PokerHand(hand)\n assert heroes_hand._hand_value == result", "def ards_score(self):\n if self.ards_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.ards_runs, var1)\n else:\n var1 = self.ards_wickets\n return str('{0}-{1}').format(self.ards_runs, var1)", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def pre_flop_strength(hand):\n highs = {}\n highs[4] = [\n \"AA\", \"AKs\", \"AQs\", \"AJs\", \"ATs\", \"AKo\", \"KK\", \"KQs\", \"KJs\", \"AQo\",\n \"QQ\", \"QJs\", \"JJ\", \"TT\"\n ]\n highs[3] = [\n \"A5s\", \"A4s\", \"A3s\", \"KTs\", \"KQo\", \"QTs\", \"AJo\", \"JTs\", \"T9s\", \"99\",\n \"98s\", \"88\", \"87s\", \"77\", \"66\"\n ]\n highs[2] = [\n \"A9s\", \"A8s\", \"A7s\", \"A6s\", \"A2s\", \"K9s\", \"K8s\", \"Q9s\", \"KJo\", \"QJo\",\n \"J9s\", \"ATo\", \"KTo\", \"QTo\", \"JTo\", \"T8s\", \"A9o\", \"J9o\", \"T9o\", \"97s\",\n \"98o\", \"86s\", \"76s\", \"75s\", \"65s\", \"55\", \"44\", \"33\", \"22\"\n ]\n highs[1] = [\n \"K7s\", \"K6s\", \"K5s\", \"K4s\", \"K3s\", \"Q8s\", \"Q7s\", \"Q6s\", \"Q5s\", \"Q4s\",\n \"J8s\", \"J7s\", \"J6s\", \"J5s\", \"T7s\", \"T6s\", \"K9o\", \"Q9o\", \"96s\", \"A8o\",\n \"K8o\", \"Q8o\", \"J8o\", \"T8o\", \"85s\", \"A7o\", \"K7o\", \"Q7o\", \"T7o\", \"97o\",\n \"87o\", \"74s\", \"A6o\", \"K6o\", \"86o\", \"76o\", \"64s\", \"63s\", \"A5o\", \"75o\",\n \"65o\", \"54s\", \"53s\", \"A4o\", \"43s\", \"A3o\"\n ]\n card0, card1 = hand\n if card0[0] == card1[0]:\n pair = \"\".join([card0[0], card1[0]])\n elif card0[1] == card1[1]:\n pair = \"\".join([card0[0], card1[0], \"s\"])\n else:\n pair = \"\".join([card0[0], card1[0], \"o\"])\n for strenght in highs:\n if pair in highs[strenght]:\n return strenght\n return 0", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S 
NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? what the fuck does this mean\n # look two states forward\n pass", "def rollDie(self):\n return random.randint(1, self.sides)", "def test_int():\n cards= \"5H 5C 6S 7S KD 2C 3S 8S 8D TD\".split()\n h1, h2 = to_hand(cards[:5]), to_hand(cards[5:])\n s_h1, s_h2 = score_int(h1), score_int(h2)\n assert s_h1 == 10000+100*(5)+(13)\n assert s_h2 == 10000+100*(8)+(10)\n assert s_h1 < s_h2\n\n cards= \"5D 8C 9S JS AC\t 2C 5C 7D 8S QH\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 100*(14)\n assert s_h2 == 100*(12)\n assert s_h1 > s_h2\n\n cards= \"2D 9C AS AH AC 3D 6D 7D TD QD\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n #print( h1, \"=\", s_h1, \":\", h2, \"=\", s_h2 )\n assert s_h1 == 30000+100*(14)\n assert s_h2 == 50000+100*(12)\n assert s_h1 < s_h2\n\n cards= \"4D 6S 9H QH QC 3D 6D 7H QD QS\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 10000+100*(12)+(9)\n assert s_h2 == 10000+100*(12)+(7)\n assert s_h1 > s_h2\n\n cards= \"2H 2D 4C 4D 4S 3C 3D 3S 9S 9D\".split()\n s_h1, s_h2 = score_int(to_hand(cards[:5])), score_int(to_hand(cards[5:]))\n assert s_h1 == 60000+100*(4)+(2)\n assert s_h2 == 60000+100*(3)+(9)\n assert s_h1 > s_h2", "def roll_dice():\n die1 = random.randrange(1, 7)\n die2 = random.randrange(1, 7)\n return (die1, die2) # pack die face values into a tuple", "def test_strategy(self):\n self.first_play_test(C)\n for i in range(10):\n history_1 = [C] * i\n history_2 = [C] * i\n self.responses_test(history_1, history_2, [C])\n # Now cooperate 10% less than opponent\n history_1 = [C] * 11\n history_2 = [D] * 11\n self.responses_test(history_1, history_2, [D], random_seed=10)\n history_1 = [C] * 11\n history_2 = [D] * 10 + [C]\n self.responses_test(history_1, history_2, [D], random_seed=10)\n # Test beyond 10 rounds\n history_1 = [C] * 11\n history_2 = [D] * 5 + [C] * 6\n self.responses_test(history_1, history_2, [D, D, D, D], random_seed=20)\n history_1 = [C] * 11\n history_2 = [C] * 9 + [D] * 2\n self.responses_test(history_1, history_2, [C, D, D, C], random_seed=25)", "def hit_stand_ev_diff(hand, shoe, dealer_hand, dealer_probabilities):\n dealer_end_probs = dealer_probabilities[dealer_hand]\n # maps a player's hand to his or her (hit_ev, stand_ev, max_ev)\n player_payoffs = get_player_payoff(dealer_end_probs)\n\n ev = 0 # contains weighted ev\n total = 0 # contains total weights, to normalized at the end\n val, hard = hand\n for card in shoe:\n weight = shoe[card] # number of a card in the shoe\n total += weight\n if hard and 11 <= val <= 21:\n new_hand = (val + card, hard)\n if new_hand[0] > 21:\n ev -= weight # default loss\n else:\n ev += weight * player_payoffs[new_hand][-1]\n elif not hard and 12 <= val <= 21:\n new_val = val + card\n new_hard = False\n if new_val > 21: # go back to hard value, take A = 1\n new_val -= 10\n new_hard = True\n ev += weight * player_payoffs[(new_val, new_hard)][-1]\n elif hard and 4 <= val <= 10:\n new_val = val + card\n new_hard = True\n if card == 1: # go to soft value, take A = 11\n new_val += 10\n new_hard = False\n ev += weight * player_payoffs[(new_val, new_hard)][-1]\n else:\n raise RuntimeError(\"Should not get here: \" + str(hand))\n return (1.0 * ev / total) - 
player_payoffs[hand][1] # hit ev - stand ev", "def _score_hand(hand):\n\n score = 0\n ace = False\n\n for next_card in hand:\n\n # get the value of the card\n card_value = next_card[0]\n\n # if it is an ace and we do not hold one, the value is 11 instead of 1\n if card_value == 1 and not ace:\n ace = True\n card_value = 11\n\n # add up the value to the score\n score += card_value\n\n # if we would bust, check if there is an ace and substract\n # 10 from the value (11 - 1). Also, set the ace variable to False.\n if score > 21 and ace:\n score -= 10\n ace = False\n\n return score", "def dice(x, y):\n return 2 * np.sum(x * y) / (np.sum(x) + np.sum(y))", "def compare_hands(self):\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.25)\r\n\r\n # If the round ends in a tie, the try_again will be set to true so that the program knows\r\n # to restart the round without incrementing the round number or changing the win/lose record\r\n if (self.player_rock is True and self.opp_rock is True) or (\r\n self.player_paper is True and self.opp_paper is True) or (\r\n self.player_scissors is True and self.opp_scissors is True):\r\n\r\n self.try_again = True\r\n\r\n self.player_tie()\r\n\r\n else:\r\n\r\n # If there is no draw, then the code proceeds to determine the winner and the loser.\r\n self.try_again = False\r\n\r\n if self.player_rock is True and self.opp_scissors is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_rock is True and self.opp_paper is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_paper is True and self.opp_rock is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_paper is True and self.opp_scissors is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_scissors is True and self.opp_paper is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_scissors is True and self. 
opp_rock is True:\r\n\r\n self.player_lose()\r\n\r\n # Clear the summary entry box\r\n self.summary_entry.delete(0, \"end\")\r\n\r\n # Insert a new value which lets the player know if they won that round\r\n self.summary_entry.insert(0, self.summary)", "def dice_metric(y_true, y_pred):\n y_true_f = K.cast(K.greater(y_true, 0.5), 'float32')\n y_pred_f = K.cast(K.greater(y_pred, 0.5), 'float32')\n return dice(y_true_f, y_pred_f)", "def calculate_score_pairs(hand_value,*args):\n # ratios=[1,10,100,1000,10000]\n ratios = CONST.RATIOS[:]\n return sum(map(lambda a,b:a/b, args, ratios))+hand_value", "def duck_shooting1():\r\n score = 0\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. Now you have to start over!\")\r\n start()\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. 
Now you have to start over!\")\r\n start()\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. Now you have to start over!\")\r\n start()\r\n return score", "def yahtzee_points(dice_list):\n if of_a_kind_size(dice_list) >= 5:\n return 50\n else:\n return 0", "def roll_die(self):\n number = randint(1, self.sides) \n print(number)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def get_win_prob(self, state, playerid):\n evaluator = Evaluator()\n\n def get_card_class(card_int_list):\n res = [Card.new(Card.int_to_str(c)) for c in card_int_list if c != -1]\n return res\n\n def WinProbability(hand, board):\n rank = evaluator.evaluate(board, hand)\n percentage = 1.0 - evaluator.get_five_card_rank_percentage(rank)\n return percentage\n\n hand_cards = get_card_class(state.player_states[playerid].hand)\n board_cards = get_card_class(state.community_card)\n if any([True for h in hand_cards if h in board_cards]):\n Card.print_pretty_cards(hand_cards)\n Card.print_pretty_cards(board_cards)\n num_players = len([ p for p in state.player_states if not p.emptyplayer])\n\n win = 0\n round = 0\n\n board_cards_to_draw = 5 - len(board_cards) # 2\n rest_cards = self._pick_unused_card(board_cards + hand_cards)\n #print(\"rest cards\")\n #Card.print_pretty_cards(rest_cards)\n \n #choiced = random.sample(unused, card_num)\n \n for i in range(self.simulation_number):\n\n unused_cards = random.sample(rest_cards, (num_players - 1) * 2 + board_cards_to_draw)\n board_sample = unused_cards[len(unused_cards)-board_cards_to_draw:]\n unused_cards = unused_cards[:len(unused_cards)-board_cards_to_draw]\n\n opponents_hole = [unused_cards[2 * i:2 * i + 2] for i in range(num_players - 1)]\n\n try:\n opponents_score = [WinProbability(hole, board_sample) for hole in opponents_hole]\n my_rank = WinProbability(hand_cards, board_sample)\n if my_rank >= max(opponents_score):\n win += 1\n round+=1\n except Exception as inst:# Exception, e:\n #print e.message\n continue\n #print(\"Win:{}\".format(win))\n #print('round:{}'.format(round))\n if round == 0: \n if len(board_cards) > 1:\n try:\n return WinProbability(board_cards, hand_cards)\n except:\n return 0.6\n else: \n return 0.6\n win_prob = win / float(round)\n return win_prob", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if 
rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def getScoreGivenDice(scoreEnum, dice):\n\n if not validateScore(scoreEnum, dice):\n return 0\n else:\n if scoreEnum.value in range(0,6):\n return sum(map(lambda x : x if x == (scoreEnum.value + 1) else 0, dice.view()))\n elif scoreEnum.name == 'FULL_H':\n return 25\n elif scoreEnum.name == 'SM_STRT':\n return 30\n elif scoreEnum.name == 'LG_STRT':\n return 40\n elif scoreEnum.name == 'YAHTZEE':\n return 50\n else:\n return sum(dice.view())", "def select_dice(score, opponent_score):\r\n if (score+opponent_score)%7 == 0:\r\n return four_sided\r\n return six_sided", "def fives(dice):\n return sum([x for x in dice if x == 5])", "def roll(dice):\n\n dice = str(dice).upper().strip()\n dice_mod = 0\n if dice == 'FLUX':\n return randint(1, 6) - randint(1, 6)\n else:\n if dice == 'GOODFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 < flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n else:\n if dice == 'BADFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 > flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n \n ichar1 = dice.find('DD')\n if ichar1 == -1:\n ichar1 = dice.find('D')\n if ichar1 == 0:\n num_dice = 1\n\n if ichar1 <> -1:\n if ichar1 <> 0:\n num_dice = int(dice[0:ichar1])\n# print 'Number of dice =', num_dice\n ichar2 = dice.find('+')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n else:\n ichar2 = dice.find('-')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n\n if ichar2 <> -1:\n dice_type = dice[ichar1: ichar2]\n dice_type = dice_type.rstrip()\n else:\n dice_type = dice[ichar1: len(dice)]\n# print 'dice type =', dice_type, 'Len = ', len(dice_type)\n\n if dice_type == 'D6': \n return die_rolls(6, num_dice) + dice_mod\n else:\n if dice_type == 'D66' and num_dice == 1 and dice_mod == 0:\n return randint(1, 6) * 10 + randint(1, 6)\n else:\n if dice_type == 'D100' and num_dice == 1: \n return (randint(1, 10) - 1) * 10 + randint(1, 10) + dice_mod \n else:\n if dice_type == 'D10': \n return die_rolls(10, num_dice) + dice_mod\n else: \n if dice_type == 'D20': \n return die_rolls(20, num_dice) + dice_mod\n else:\n if dice_type == 'D30': \n return die_rolls(30, num_dice) + dice_mod\n else:\n if dice_type == 'D12': \n 
return die_rolls(12, num_dice) + dice_mod\n else:\n if dice_type == 'D8': \n return die_rolls(8, num_dice) + dice_mod\n else:\n if dice_type == 'D4': \n return die_rolls(4, num_dice) + dice_mod\n else:\n if dice_type == 'D9': \n return die_rolls(9, num_dice) + dice_mod\n else:\n if dice_type == 'D3': \n return die_rolls(3, num_dice) + dice_mod\n else:\n if dice_type == 'DD':\n return (die_rolls(6, num_dice) + dice_mod) * 10\n \n print\n print \"** DICE ERROR! '%s' is unknown **\" % dice\n print \n print \"roll() is a dice rolling program.\"\n print\n print \"The types of dice to roll are (in string values):\"\n print \"roll('D6') -- roll one 6-sided die\"\n print \"roll('1D6') -- roll one 6-sided die\"\n print \"roll('2D6') -- roll two 6-sided dice\"\n print \"roll('D10') -- roll a 10-sided die\"\n print \"roll('D100') -- roll a 100-sided die (1 - 100)\"\n print \"roll('D66') -- roll for a D66 chart\"\n print \"roll('2DD+3') -- roll (2D6+3) x 10\"\n print\n print \"-/+ DMs can be added to rolls:\"\n print \"roll('3D6+6') -- add +6 DM to roll\"\n print \"roll('4D4-4') -- add -4 DM to roll\"\n print\n return 0", "def calc_dice(self, groundTruth, prediction, non_seg_score=1.0):\n assert groundTruth.shape == prediction.shape \n groundTruth = np.asarray(groundTruth).astype(np.bool)\n prediction = np.asarray(prediction).astype(np.bool) \n # If both segmentations are all zero, the dice will be 1.\n im_sum = groundTruth.sum() + prediction.sum()\n if im_sum == 0:\n return non_seg_score\n # Compute Dice coefficient\n intersection = np.logical_and(groundTruth, prediction)\n return 2. * intersection.sum() / im_sum", "def num_allowed_dice(score, opponent_score):\r\n k = score + opponent_score\r\n if k%10 == 7:\r\n return 1\r\n else:\r\n return 10", "def select_dice(score, opponent_score):\r\n k = score + opponent_score\r\n if k%7 == 0:\r\n return four_sided_dice\r\n else:\r\n return six_sided_dice", "def die_roll():\n roll = random.randint(1,6)\n return roll", "def test_affect_of_strategy(self):\n self.responses_test([C, C, C], [C, C, C], [C, C, C])\n # Make sure that the retaliations are increasing\n # Retaliate once and forgive\n self.responses_test([C], [D], [D])\n self.responses_test([C, D], [D, C], [C])\n self.responses_test([C, D, C], [D, C, C], [C])\n # Retaliate twice and forgive\n self.responses_test([C, D, C], [D, C, D], [D, D])\n self.responses_test([C, D, C, D, D], [D, C, D, C, C], [C])\n # Opponent defection during retaliation doesn't increase retaliation period\n self.responses_test([C, D, C, D, D], [D, C, D, D, C], [C])\n # Retaliate thrice and forgive\n self.responses_test([C, D, C, D, D, C], [D, C, D, C, C, D], [D, D, D])\n history_1 = [C, D, C, D, D, C, D, D, D]\n history_2 = [D, C, D, C, C, D, C, C, C]\n self.responses_test(history_1, history_2, [C])", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it." ]
[ "0.7691115", "0.76907384", "0.7485615", "0.7483924", "0.7410487", "0.740706", "0.7378829", "0.72477937", "0.7241062", "0.72278166", "0.72267485", "0.72267485", "0.72267485", "0.7205913", "0.71917546", "0.71524763", "0.70465297", "0.70109093", "0.69958997", "0.6902083", "0.6896336", "0.6862049", "0.68480587", "0.6795796", "0.67722243", "0.67636114", "0.6745681", "0.66115", "0.6567821", "0.6548723", "0.6537222", "0.64732265", "0.64526", "0.64460146", "0.6439945", "0.6439163", "0.6421482", "0.64186186", "0.64046633", "0.6386598", "0.6358354", "0.6304807", "0.6300256", "0.62916213", "0.62843144", "0.627843", "0.6272544", "0.626899", "0.6247453", "0.6243436", "0.62396204", "0.62231207", "0.62031996", "0.61850244", "0.618462", "0.61778617", "0.6166714", "0.61431974", "0.6131671", "0.6131588", "0.613108", "0.6120053", "0.6090321", "0.60861707", "0.6084349", "0.60824573", "0.6079261", "0.60403967", "0.6035357", "0.603041", "0.6023164", "0.6021712", "0.6017582", "0.60141605", "0.60050815", "0.5998448", "0.5997385", "0.59885824", "0.598526", "0.5981604", "0.5974637", "0.59723294", "0.597034", "0.5937741", "0.59331304", "0.59308827", "0.59222937", "0.5905678", "0.5903502", "0.5902581", "0.58989406", "0.58985865", "0.58946073", "0.58714175", "0.5860031", "0.5859566", "0.58588535", "0.5858247", "0.585197" ]
0.7173642
16
Find names in a sentence based on a FIRST_NAMES file
def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):
    if not sentence:
        raise Exception(ParameterMissing, "This method requires sentence as input")

    if not isinstance(sentence, str):
        raise Exception(TypeError, "This method requires string as input")

    first_names = get_first_names_pack()
    if not first_names:
        raise Exception(VariableNotSet, "Variable FIRST_NAMES is not set in settings.py")

    if last_names_enabled:
        last_names = get_last_names_pack()
        if not last_names:
            raise Exception(VariableNotSet, "Variable LAST_NAMES is not set in settings.py")
        first_names = list(set(first_names).union(set(last_names)))

    if no_names_enabled:
        no_names = get_no_names_pack()
        if not no_names:
            raise Exception(VariableNotSet, "Variable NO_NAMES is not set in settings.py")
        first_names = list(set(first_names).difference(set(no_names)))

    punctuation = '!@#$%^&*()_+<>?:.,;'
    for c in sentence:
        if c in punctuation:
            sentence = sentence.replace(c, " ")

    words = sentence.lower().split()
    res = set(words).intersection(first_names)
    to_return = [w.title() for w in res]
    return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names", "def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)\n\n to_return = []\n for name in names_found:\n begin_positions = [m.start() for m in re.finditer(name, sentence)]\n for begin in begin_positions:\n to_return.append((begin, begin + len(name)))\n # begin = sentence.lower().index(name.lower())\n # end = begin + len(name)\n # to_return.append((begin, end))\n\n return to_return", "def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"", "def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names", "def find_names(s):\n \"*** YOUR CODE HERE ***\"", "def process_names():\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = 
temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...", "def match_names(s):\n result = re.findall(r'^[A-Z][a-z]+','name: Bob, age: 14, name: Amanda, age: 17, name: Tim, age: 30')\n print result", "def sample_first_name(first_name_file, num_samples):\n\n df = pd.read_csv(first_name_file, header=None)\n df.columns = [\"name\", \"gender\", \"count\"]\n df = df[(df[\"count\"] > 10)]\n names = df[\"name\"].sample(n=num_samples, random_state=2021, replace=True).apply(str.title)\n\n return list(names.values)", "def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name", "def _get_names(self):\n if len(self.firstnames):\n return self.firstnames, self.lastnames\n\n if os.path.exists(\"/code/api/app/utils/names.txt\"):\n with open(\"/code/api/app/utils/names.txt\") as file_with_names:\n names = file_with_names.readlines()\n else:\n # why yes, these are names of African Hollywood actors (according to Wikipedia)\n names = [\"Mehcad Brooks\", \"Malcolm Barrett\", \"Nick Cannon\", \"Lamorne Morris\", \"Neil Brown Jr.\",\n \"William Jackson Harper\", \"Marques Houston\", \"Jennifer Hudson\", \"Alicia Keys\", \"Meghan Markle\",\n \"Beyonce Knowles\", \"Jesse Williams\", \"Lance Gross\", \"Hosea Chanchez\", \"Daveed Diggs\",\n \"Damon Wayans Jr.\", \"Columbus Short\", \"Terrence Jenkins\", \"Ron Funches\", \"Jussie Smollett\",\n \"Donald Glover\", \"Brian Tyree Henry\", \"Gabourey Sidibe\", \"Trai Byers\", \"Robert Ri'chard\",\n \"Arjay Smith\", \"Tessa Thompson\", \"J.Lee\", \"Lauren London\", \"DeVaughn Nixon\", \"Rob Brown\", ]\n for _name in names:\n split_name = _name.strip().split(\" \")\n self.firstnames.append(split_name[0])\n lastname = \" \".join(split_name[1:]) if len(split_name) > 1 else \"\"\n self.lastnames.append(lastname)\n return self.firstnames, self.lastnames", "def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n 
full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname", "def extract_names(pages: Iterable[tuple[int, list[str]]]) -> DataT:\n found_first = False\n current_name: dict[str, Any] | None = None\n current_label: str | None = None\n current_lines: list[str] = []\n in_headings = True\n\n def start_label(label: str, line: str) -> None:\n nonlocal current_label, current_lines\n assert current_name is not None\n assert current_label is not None\n if label in current_name:\n if label in (\"Syntype\", \"Type Locality\"):\n label = f\"Syntype {line}\"\n assert (\n label not in current_name\n ), f\"duplicate label {label} in {current_name}\"\n current_name[current_label] = current_lines\n current_label = label\n current_lines = [line]\n\n for page, lines in pages:\n if current_name is not None:\n current_name[\"pages\"].append(page)\n for line in lines:\n if not found_first:\n if line.strip() in (\"TYPE SPECIMENS\", \"SPECIMENS\"):\n found_first = True\n continue\n # ignore family/genus headers\n if re.match(\n (\n r\"^\\s*(Genus|Family|Subfamily|Suborder|Order) [A-Z][a-zA-Z]+\"\n r\" [a-zA-Z\\.’, \\-]+(, \\d{4})?$\"\n ),\n line,\n ):\n in_headings = True\n continue\n # ignore blank lines\n if not line:\n continue\n if in_headings:\n if line.startswith(\" \"):\n continue\n else:\n in_headings = False\n if line.startswith(\" \"):\n current_lines.append(line)\n elif re.match(r\"^[A-Z][A-Z a-z-]+: \", line):\n start_label(line.split(\":\")[0], line)\n elif line.startswith(\"Lectotype as designated\"):\n start_label(\"Lectotype\", line)\n elif line.startswith(\"Neotype as designated\"):\n start_label(\"Neotype\", line)\n elif line.startswith(\n (\n \"This specimen\",\n \"Type \",\n \"No type\",\n \"There are\",\n \"No additional\",\n \"All \",\n \"Subspecies of \",\n \"Neotype designated \",\n \"Padre Island\",\n )\n ):\n start_label(\"comments\", line)\n elif line.startswith(\n (\"Secondary junior\", \"Primary junior\", \"Junior primary\")\n ):\n start_label(\"homonymy\", line)\n elif re.match(r\"^[\\d/]+\\. 
\", line):\n start_label(line.split(\".\")[0], line)\n elif line.startswith(\"USNM\"):\n start_label(line.split(\".\")[0], line)\n elif (\n current_label not in (\"name\", \"verbatim_citation\", \"homonymy\")\n and \":\" not in line\n ):\n # new name\n if current_name is not None:\n assert current_label is not None\n current_name[current_label] = current_lines\n assert any(\n field in current_name\n for field in (\n \"Holotype\",\n \"Type Locality\",\n \"Lectotype\",\n \"Syntype\",\n \"Syntypes\",\n \"No name-bearing status\",\n \"Neotype\",\n )\n ), current_name\n yield current_name\n current_name = {\"pages\": [page]}\n current_label = \"name\"\n current_lines = [line]\n elif current_label == \"name\":\n if re.search(\n r\"\\d|\\b[A-Z][a-z]+\\.|\\baus\\b|\\bDas\\b|\\bPreliminary\\b|\\., \", line\n ):\n start_label(\"verbatim_citation\", line)\n else:\n # probably continuation of the author\n current_lines.append(line)\n elif (\n current_label == \"verbatim_citation\"\n or current_label == \"homonymy\"\n or line.startswith(\"= \")\n ):\n start_label(\"synonymy\", line)\n else:\n assert False, f\"{line!r} with label {current_label}\"\n assert current_label is not None\n assert current_name is not None\n current_name[current_label] = current_lines\n yield current_name", "def get_surnames(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n surname = line.split('\\t')[1]\n result.append(surname)\n return result", "def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence", "def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)", "def read_names(male_names_file_path, female_names_file_path):\n\n names = set()\n\n with open(male_names_file_path, \"r\") as f1:\n for name in f1:\n names.add(name.strip().lower())\n\n with open(female_names_file_path, \"r\") as f2:\n for name in f2:\n names.add(name.strip().lower())\n\n return names", "def find_pseudonyms(original_name, gender, topk):\n firstnames = load_firstnames(gender)\n model = load_model()\n whitelist = LetterBag(slugify.slugify(\n WORD_SPLIT_PATTERN.sub(\"\", original_name)))\n for firstname in firstnames:\n if not whitelist.includes(firstname):\n continue\n for lastname, proba in generate_word(model, whitelist.sub(firstname), topk):\n yield firstname.surface, lastname, proba", "def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)", "def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)", "def _first_name_sql(self, first_name, tolerance=1):\n nicknames = 
self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions", "def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict", "def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname", "def extract_subject_names(file_names):\n return file_names.apply(lambda name: name.split('_')[1])", "def countByName(lastName, firstName, filename):\r\n\r\n nameCounter = 1 #This variable serves as a counter and it ranges from 0 to 5, which accounts to the line numbers.\r\n isCorrectName = False #This variable evaluates whether the names compare to the names on the text.\r\n gmedals = 0 #Counts the amount of gold medals\r\n smedals = 0 #Counts the amount of silver medals\r\n bmedals = 0 #Counts the amount of bronze medals\r\n\r\n with open(filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n line = line.strip().upper()\r\n if nameCounter == 1:\r\n if line == lastName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 2 and isCorrectName is True:\r\n if line == firstName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 4:\r\n if isCorrectName is True and line == '1':\r\n gmedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '2':\r\n smedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '3':\r\n bmedals += 1\r\n\r\n if nameCounter == 5:\r\n nameCounter = 0\r\n isCorrectName = False\r\n\r\n nameCounter += 1\r\n\r\n return gmedals, smedals, bmedals", "def find_feature_titles_in_file(feature_index, feature_names, file):\n\n dict_of_features_in_this_file = {}\n for feature_name, feature_titles in feature_names.items():\n try:\n features_found = [feature for feature in feature_titles if feature in feature_index]\n if len(features_found) == 1:\n dict_of_features_in_this_file[feature_name] = features_found[0]\n else:\n raise FeatureNotFoundError\n\n except FeatureNotFoundError:\n sys.exit(\n 'ERROR: Finding zero or more than one occurrence of feature {} in the header of input file'\n 'file {}! 
Please check variable feature_names in the function main().'\n 'Running the code is terminated.'.format(feature_titles, file))\n return dict_of_features_in_this_file", "def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"", "def starts_with(self, matchstr, **kwargs):\r\n \r\n valid_kwargs = ['num_results', 'case_sensitive']\r\n validator.validate(kwargs.keys(), valid_kwargs)\r\n\r\n final_list = []\r\n case_sensitive = False\r\n num_results = 0\r\n \r\n if 'num_results' in kwargs:\r\n num_results = int(kwargs['num_results'])\r\n \r\n if len(matchstr) == 0:\r\n if num_results:\r\n return self.__sorted_names[0:num_results]\r\n return self.__sorted_names[:]\r\n\r\n if 'case_sensitive' in kwargs:\r\n if kwargs['case_sensitive']:\r\n case_sensitive = True\r\n\r\n tag_names_that_start_with_char = []\r\n \r\n if case_sensitive:\r\n if matchstr[0] not in self.__name_index:\r\n return []\r\n else:\r\n if matchstr[0].lower() not in self.__name_index and matchstr[0].upper() not in self.__name_index:\r\n return []\r\n \r\n if case_sensitive:\r\n idxs = self.__name_index[matchstr[0]]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n else:\r\n if matchstr[0].lower() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].lower()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n\r\n if matchstr[0].upper() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].upper()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char += [self.__sorted_names[idxs['first']]]\r\n else:\r\n tag_names_that_start_with_char += self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n if len(matchstr) == 1:\r\n if num_results == 0:\r\n return tag_names_that_start_with_char[:]\r\n else:\r\n return tag_names_that_start_with_char[0:num_results]\r\n \r\n if case_sensitive:\r\n for t in tag_names_that_start_with_char:\r\n if (t.find(matchstr) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n else:\r\n for t in tag_names_that_start_with_char:\r\n if (t.lower().find(matchstr.lower()) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n\r\n return final_list", "def check_named_entity(check):\r\n\tglobal word_buffer\r\n\tglobal temp\r\n\t\r\n\t\r\n\tif check == \"All\": \r\n\t# @return - Return Named Entities identified from the begining of the sentence except for the Named Entity at the end\r\n \r\n\t if temp == 1: \r\n \r\n\t\tnamed_entity = join_named_entity(word_buffer)\r\n\r\n\t\tword_buffer = []\r\n\t\t\r\n\t\ttemp = 0\r\n\r\n\t\treturn named_entity\r\n\telse:\r\n\t# @ return - Return Named Entity present at the end of the sentence, if available\r\n\r\n\t if len(word_buffer)>1: \r\n\t \r\n named_entity = join_named_entity(word_buffer)\r\n \r\n\t\treturn named_entity", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = 
ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def main(files: List[Path]):\n show_filenames = len(files) > 1\n for file in files:\n with file.open() as f:\n for m in find_camel(f):\n print(pretty_match(m, filename=file if show_filenames else None))", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names", "def extract_names(filename):\n # +++your code here+++\n # Opening the file\n f = open(filename, 'rU')\n # Reading all of the lines\n lines = f.readlines()\n # Empty list to hold the year, names, and ranks\n ranks_names = []\n for line in lines:\n # search for the year\n year = re.search(r'\\s(\\d\\d\\d\\d)</h3>', line)\n # if the year is found, append it to the list\n if year: \n ranks_names.append(year.group(1))\n # search for the rank, male name, and female name\n rank_male_female = re.search(r'(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>', line)\n # If they are found then append the male name plus its rank, as 
well as the \n # female name plus its rank\n if rank_male_female:\n ranks_names.append(rank_male_female.group(2) + ' ' + rank_male_female.group(1))\n ranks_names.append(rank_male_female.group(3) + ' ' + rank_male_female.group(1))\n # Sort the list alphabetically\n ranks_names.sort()\n # Return the list\n return ranks_names", "def name_list(file_name):\n \n li = open(file_name)\n list_of_names = []\n\n for name in li:\n (first,last) = str.split(name,' ')\n list_of_names.append(Name(first,last))\n return list_of_names", "def match_specific_name(name: str, specific_names: list) -> str:\n c = clean_specific_name(name)\n if c == \"\":\n return c\n else:\n y = \"\"\n for x in specific_names:\n matchlist = x.variations.split(\";\")\n if c in matchlist:\n y = x.name\n return y", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names", "def getlistofpossibletitles(fileitem,fname):\n title = []\n oddtitles = open(\"oddtitles.txt\", 'r')\n content = oddtitles.read()\n oddtitles.close()\n\n content = content.split(\"\\n\")\n for line in content:\n elements = line.split(',')\n if fileitem in elements[0]:\n #print(elements[1])\n title.append(elements[1].title())\n\n \n title.append(fileitem)\n title.append(fileitem.title())\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n with open(fname, \"r\") as dataf:\n for line in dataf:\n if lookfor.upper() in line.upper():\n line = line.replace(\"\\n\",\"\")\n title.append(line)\n title.append(line.title())\n return title", "def initialise_first_name(name):\n\t# Split name on the comma and space\n\ttokens = name.split(\", \")\n\tlast_name = tokens[0]\n\t# Split tokens[1] which is a \"Title First Name\" string\n\tfirst_tokens = tokens[1].split(\" \")\n\t# Replace the first name with just the first initial\n\tfirst_tokens[1] = first_tokens[1][0]\n\t# Concatenate everything back together and return\n\treturn last_name + ', ' + \" \".join(first_tokens)", "def test_first_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_first_name(input_val)\n self.assertEqual(output_val, self.line.first_name)", "def parse_infile_names(self):\n\n rv, slist = UTIL.list_minus_pref_suf(self.infiles,'out.ss_review.','.txt')\n if rv < 0: return\n if rv > 0:\n if self.verb > 1: print('++ trying to get SID from glob form')\n slist = UTIL.list_minus_glob_form(self.infiles, strip='dir')\n else:\n if self.verb > 1: print(\"++ have SIDs from 'out.ss_reiview' form\")\n\n if len(slist) == 0:\n if self.verb > 1: print(\"-- empty SID list\")\n return\n\n # make sure names are unique and not empty\n if not UTIL.vals_are_unique(slist):\n if self.verb > 1: print('-- SIDs not detected: not unique')\n return\n minlen = min([len(ss) for ss in slist])\n if minlen < 1:\n if self.verb > 1: print('-- SIDs not detected: some would be empty')\n return\n\n # we have a subject list\n self.snames = slist\n\n # now go for GID, start by replacing SIDs in infiles\n newfiles = [fname.replace(slist[ind], 'SUBJ') for ind, fname in\n 
enumerate(self.infiles)]\n\n if UTIL.vals_are_constant(newfiles):\n print('-- no groups detected from filenames')\n return\n\n # okay, try to make a group list\n glist = UTIL.list_minus_glob_form(newfiles)\n\n # cannot have dirs in result\n for gid in glist:\n if gid.find('/') >= 0:\n if self.verb>1: print('-- no GIDs, dirs vary in multiple places')\n return\n\n minlen = min([len(ss) for ss in glist])\n if minlen < 1:\n if self.verb > 1: print('-- GIDs not detected: some would be empty')\n return\n\n if self.verb > 1: print(\"++ have GIDs from infiles\")\n self.gnames = glist", "def test_find_first_author_initial(self):\n inv_search = 'firstauthor:\"ellis, j*\"'\n spi_search = 'find fa j ellis'\n self._compare_searches(inv_search, spi_search)", "def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx", "def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def extract_names(filename):\n # +++your code here+++\n f = open(filename, 'r')\n fl = read_file(filename)\n\n l = []\n lFiltFinal = []\n\n year_match = re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', f.read())\n year = year_match.group(1)\n\n for line in fl:\n #if '<h3 align=\"center\">Popularity in' in line:\n #year = line[-10:-6]\n if '<tr align=\"right\"><td>' in line:\n rank = line[line.find('<td>')+len('<td>'):line.find('</td>')]\n boys = line[line.index('</td><td>')+len('</td><td>'):line.index('</td><td>',line.index('</td><td>')+1)]\n girls = line[line.index('</td><td>',line.index('</td><td>')+1)+len('</td><td>'):-6]\n l.append([boys,rank])\n l.append([girls,rank])\n\n lFilt = list(unique_by_first_n(1, l))\n\n lFiltFinal.append(year)\n for key in lFilt:\n lFiltFinal.append( key[0] + ' ' + key[1])\n\n lFiltFinal.sort()\n return lFiltFinal", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n return sorted([name.split()[0] for name in names], key=len)[0]", "def _find_names(place):\n tags = place['tags']\n tags_names = ['name', 'place_name', 'alt_name']\n names = []\n for tag in tags_names:\n try:\n names.extend(tags[tag].split(';'))\n except KeyError:\n pass\n if not names:\n print \"Place has no name (#{})\".format(place['id'])\n return names", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n 
first_name = [first for first, last in split_names]\n shortest = first_name[0]\n for name in first_name:\n if len(name) < len(shortest):\n shortest = name\n\n return shortest", "def persons_from_names(self, name_table, known_persons=None, output_file=None, output_file_format=None, status_messages=True):\r\n\r\n\t\t# Save start time:\r\n\t\tzeit=int(time.time())\r\n\r\n\t\t####\r\n\t\t## Prepare input table\r\n\t\t####\r\n\r\n\t\t# Recognize input format\r\n\t\tif \"pandas\" in str(type(name_table)):\r\n\t\t\tinput_format = \"pandas\"\r\n\t\telif \"list\" in str(type(name_table)):\r\n\t\t\tinput_format = \"records\"\r\n\t\telif \"str\" in str(type(name_table)):\r\n\t\t\tif \".csv\" in name_table:\r\n\t\t\t\tinput_format = \"csv\"\r\n\t\t\telif \"xls\" in name_table:\r\n\t\t\t\tinput_format = \"xls\"\r\n\r\n\t\t# Convert table to internal data format\r\n\t\tif input_format != \"records\":\r\n\t\t\tname_table = self._convert_table_to_records(name_table, input_format)\r\n\r\n\t\t# Identify forename col\r\n\t\tname_table_format = self._identify_cols(name_table, \"default table\")\r\n\r\n\t\t# Add id column if missing\r\n\t\tif name_table_format[\"columns\"][\"id_column\"] is None:\r\n\t\t\tself._add_id_col(name_table)\r\n\t\t\tname_table_format[\"columns\"][\"id_column\"]=\"name_id\"\r\n\r\n\t\t# Same as above for known persons table\r\n\t\tif known_persons is not None:\r\n\t\t\t# Convert table to internal data format\r\n\t\t\tif input_format != \"records\":\r\n\t\t\t\tknown_persons = self._convert_table_to_records(known_persons, input_format)\r\n\r\n\t\t\t# Identify forename col\r\n\t\t\tknown_persons_format = self._identify_cols(known_persons, self._table_with_unique_names)\r\n\r\n\t\t\tif known_persons_format[\"columns\"][\"id_column\"] is None:\r\n\t\t\t\tself._add_id_col(known_persons)\r\n\t\t\t\tknown_persons_format[\"columns\"][\"id_column\"]=\"name_id\"\r\n\r\n\t\t\tif known_persons_format[\"columns\"][\"year_column\"] is None:\r\n\t\t\t\tself._add_empty_col(known_persons, \"year\")\r\n\t\t\t\tknown_persons_format[\"columns\"][\"year_column\"]=\"year\"\r\n\r\n\t\t####\r\n\t\t## Sort input data into a tree structure according to surname\r\n\t\t####\r\n\r\n\t\t# Internal data structure by surname\r\n\t\tself._flat_tree=collections.OrderedDict()\r\n\r\n\t\tif status_messages:\r\n\t\t\tprint(\"Tree creation in progress...\")\r\n\t\tself._make_flat_tree(name_table, self._flat_tree, name_table_format)\r\n\t\tif known_persons is not None:\r\n\t\t\t# Identify forename col\r\n\t\t\tself._make_flat_tree(known_persons, self._flat_tree, known_persons_format)\r\n\r\n\t\t####\r\n\t\t## Person identification from forename\r\n\t\t####\r\n\r\n\t\tif status_messages:\r\n\t\t\tprint(\"Clustering in progress...\")\r\n\t\tcluster_list={}\r\n\t\tself._cluster_number = 0\r\n\r\n\t\t# to record in which clusters the original records and their virtual ones are assigned \r\n\t\tcluster_number_list = {}\r\n\t\tself._cluster(self._flat_tree, cluster_list, cluster_number_list)\r\n\r\n\t\tif self._split_by_time_gap and name_table_format[\"columns\"][\"year_column\"] is not None:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(format(\"Splitting entries with more than {} years between chronologically succeeding entries...\", self._maximum_time_gap))\r\n\t\t\tself._time_gap(cluster_list, self._maximum_time_gap, cluster_number_list, action=\"split\")\r\n\r\n\t\tif self._detect_marriages:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Detecting marriages and combining entries with marriage-related surname 
change...\")\r\n\t\t\tself._rework_for_marriages(cluster_list, cluster_number_list)\r\n\r\n\t\tif self._empty_clusters_remove:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Tidying up...\")\r\n\t\t\tself._remove_empty_cluster(cluster_list)\r\n\r\n\t\t####\r\n\t\t## Processing results\r\n\t\t####\r\n\r\n\t\t# Save authors to file \r\n\t\tif output_file is not None:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Saving the results\")\r\n\t\t\tself._save_to_file(cluster_list, output_file_format, output_file, name_table_format)\r\n\r\n\t\t# if status_messages:\r\n\t\t# \tprint( \"Name matching completed in {} seconds. Identified {} persons.\".format( str( int(time.time()) - zeit ) , str(len(cluster_list)) ) )\r\n\r\n\t\tif input_format==\"pandas\":\r\n\t\t\treturn self._convert_records_to_pandas(self._make_flat_result(cluster_list, name_table_format))\r\n\t\telif input_format==\"records\" and \"dict\" in str(type(name_table[0])):\r\n\t\t\treturn [ dict(record) for record in self._make_flat_result(cluster_list, name_table_format) ]\r\n\t\telse:\r\n\t\t\treturn self._make_flat_result(cluster_list, name_table_format)", "def extract_names_from_metadata_sheet(self):\n\n # parse all of the names (not orgs) and add them to a counter\n names_counter = Counter()\n with open(METADATA_CSV, encoding='utf-8') as file:\n csv_file = csv.DictReader(file)\n\n for line in csv_file:\n for element in ['author', 'recipients', 'cced']:\n for person_or_org in [p.strip() for p in line[element].split(';')]:\n # if at least a comma -> most likely a person\n if len(person_or_org.split(',')) > 1:\n names_counter[person_or_org] += 1\n\n # for each element in the counter, add them to the people set.\n for name in names_counter:\n self.people.add(Person(name_raw=name, count=names_counter[name], aliases=[name]))\n self.merge_all_duplicates()", "def readSurnames():\n surnamesRead = []\n with open(\"Files/Surnames.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n surnamesRead.append(line.rstrip('\\n').rstrip().lstrip())\n f.close()\n return surnamesRead", "def first_word_of_each_line(filepath):\n with open(filepath, 'r') as my_file:\n for line in my_file:\n line = line.strip()\n words = line.split()\n word = words[0]\n yield word", "def test_author_full_first(self):\n invenio_search = 'author:\"ellis, john*\" or exactauthor:\"ellis, j *\" or exactauthor:\"ellis, j\" or exactauthor:\"ellis, jo\" or exactauthor:\"ellis, joh\" or author:\"ellis, john, *\"'\n spires_search = 'find a ellis, john'\n self._compare_searches(invenio_search, spires_search)", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def _read_names_file(self):\n filename = os.path.join(self.path, 'names.csv')\n lookup = collections.defaultdict(list)\n with open(filename) as f:\n reader = csv.reader(f)\n for line in reader:\n matches = set(line)\n for match in matches:\n lookup[match].append(matches)\n return lookup", "def name_extractor(file):\n \n import os\n import re\n \n name_list = []\n rank_dict = {}\n \n year = re.search(r'(\\d+)\\.html$', file) \n current = open(file) \n match = re.findall(r'<tr\\salign=\"right\"><td>(\\d+).*?>(\\w+).*?>(\\w+)', current.read())\n current.close\n\n \n for one_touple in match: #Check for existing match, only accept lower rank value into dictionary\n \n for index in range(1,2):\n \n if 
one_touple[index] in rank_dict:\n if rank_dict[one_touple[index]] < one_touple[0]:\n continue\n rank_dict[one_touple[index]] = one_touple[0]\n \n for one_item in rank_dict:\n \n ranking = rank_dict[one_item] #Build target list from dictionary formatted as \"Name rank\"\n name_list.append(f\"{one_item} {ranking}\") \n \n name_list = sorted(name_list)\n name_list.insert(0,year.group(1))\n \n return name_list", "def find_student_by_full_name(self, full_name):\n name_list = full_name.split(\" \")\n for student in self.students:\n if student.first_name == name_list[0]:\n if student.last_name == name_list[1]:\n print(student.first_name, student.last_name + \" found in our class.\")\n return student\n print(full_name + \" student is not in our class.\")\n return False", "def extract_names(filename):\n f = open(filename,'rU') \n name_data = f.read()\n year_data= re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', name_data)\n if not year_data :\n print ' no year found '\n sys.exit(1)\n name_year=year_data.group(1) \n #print 'year :'\n #print name_year\n tuples=re.findall(r'<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',name_data)\n #print 'tuples'\n #print tuples\n dict_name = {}\n for a,b,c in tuples :\n #print a + ' boy name: ' + b + ' , girl name : ' + c\n if b not in dict_name :\n dict_name[b] = a\n if c not in dict_name :\n dict_name[c] = a \n #print dict_name \n lst_names = sorted(dict_name.keys()) \n result_names_sorted = []\n result_names_sorted.append(name_year)\n for name in lst_names :\n #print name + \" : \" + dict_name[name]\n result_names_sorted.append(name + ' ' + dict_name[name])\n #print result_names_sorted \n\n return result_names_sorted", "def test_named_entities(self) -> None:\n for named_entitity_rule in self.rules.named_entities:\n identity: str = named_entitity_rule[\"identity\"]\n type: Optional[str] = named_entitity_rule.get(\"type\")\n subtype: Optional[str] = named_entitity_rule.get(\"subtype\")\n invalid: Optional[str] = named_entitity_rule.get(\"invalid\")\n valid: Optional[str] = named_entitity_rule.get(\"valid\")\n\n for named_entity in self.report.get_named_entities(identity, type, subtype):\n text: str = \" \".join([w.text for w in named_entity.words])\n if valid and (not re.search(valid, text, re.I)):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )\n elif invalid and re.search(invalid, text, re.I):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )", "def extract_names(filename):\n\n # Extracting the year\n year_match = re.search(r'\\d\\d\\d\\d', filename)\n if not year_match:\n sys.stderr.write('Could not find a year!\\n')\n sys.exit()\n year = year_match.group()\n\n # Opening the file\n try:\n with open(filename) as file:\n data = file.read()\n except FileNotFoundError:\n sys.stderr.write('There is no such file in the directory!\\n')\n sys.exit()\n\n # Finding patterns\n regex = re.compile(r'<td>\\w+')\n names = regex.findall(data)\n for i in range(len(names)):\n names[i] = names[i].replace('<td>', '')\n\n # Creating a dictionary with names data\n names_dict = {}\n for i in range(0, len(names) - 2, 3):\n key = names[i]\n names_dict[key] = [names[i + 1], names[i + 2]]\n\n # Creating a list with result\n boy_names = []\n girl_names = []\n result = [year]\n for key, value in names_dict.items():\n if value[0] not in boy_names:\n result.append(value[0] + ' ' + key)\n boy_names.append(value[0])\n if value[1] not in girl_names:\n 
result.append(value[1] + ' ' + key)\n girl_names.append(value[1])\n\n result.sort()\n # result.insert(0, year)\n\n return result", "def get_last_name(first_name):\n \"\"\"Order last names so that names closest to first name are first.\n For example first name \"Kikke\" -> last names should be \"Kilari\",\n \"Kolari\", [all other names by random]\"\"\"\n def name_comparator(last_name):\n \"\"\"Return a number describing how close the two names are.\"\"\"\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score\n\n last_names_random = sample(last_names, len(last_names))\n last_names_sorted = sorted(last_names_random, key=name_comparator)\n\n \"\"\"Walk through names and check on each name if you should stop. Since\n the matching names are first they are more likely to be selected.\"\"\"\n for i in range(0, 10):\n if random() >= 0.7:\n return last_names_sorted[i]\n\n return last_names_sorted[0]", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def find_match(second_file, title):\r\n # Initialize variables/ open files\r\n seq2 = \"\"\r\n header2 = \"\"\r\n match_fh = open(second_file, \"r\")\r\n # parse through lines of file\r\n for lines in match_fh:\r\n # If > found assume its header\r\n if lines[0] == \">\":\r\n # header2 = lines\r\n # If a header has been found, pull strain name, orgainism and subtype for new header\r\n if len(header2) > 0:\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n # if new header equals input header then return it and the sequence\r\n if header2 == title:\r\n match_fh.close()\r\n print(\"match\")\r\n return header2, seq2\r\n # Reset the header and seq\r\n header2 = lines\r\n seq2 = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n seq2 = seq2 + 
lines\r\n\r\n # to return the last entry in the file, since loop won't be able to return it\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n match_fh.close()\r\n return header2, seq2", "def sample_names(self):\n with open(self.sample_sheet) as sample_sheet:\n for line in sample_sheet:\n if 'Sample_ID' in line:\n for subline in sample_sheet:\n data = subline.split(',')\n self.samples.append(data[0])", "def search(words):\n newlist = [w for w in words if 'son' in w]\n return newlist", "def test_find_first_author(self):\n inv_search = 'firstauthor:ellis'\n spi_search = 'find fa ellis'\n self._compare_searches(inv_search, spi_search)", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n names_splitted = [name.split() for name in names]\n names_splitted.sort(key=sort_length)\n names_sorted = [\" \".join(name) for name in names_splitted]\n return names_sorted[0]", "def load_names(self):\n temp_names = []\n\n with open(self.NAMES_FILE) as f:\n for line in f:\n if len(line.strip()) > 0:\n temp_names.append(line.strip())\n\n return temp_names", "def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid", "def load_first_names(data):\n first_name_objects = [FirstName(data=first_name) for first_name in data]\n FirstName.objects.bulk_create(first_name_objects)", "def find_possible(search_string):\n codes = []; names = []\n search_string = search_string.lower()\n for c,n in name_given_code.items():\n\n if (search_string in n):\n codes.append(c)\n names.append(n)\n\n return codes, names", "def shortest_first_name(names):\n # input: ['Julian Sequeira', 'Arnold Schwarzenegger', 'Keanu Reeves', 'Julbob Pybites', 'Brad Pitt', 'Al Pacino', 'Matt Damon', 'Sandra Bullock', 'Bob Belderbos', 'Alec Baldwin']\n names = dedup_and_title_case_names(names)\n\n # fname_lname = [('Keanu', 'Reeves'), ('Julbob', 'Pybites'), ('Julian', 'Sequeira'), ('Matt', 'Damon'), ('Arnold', 'Schwarzenegger'), ('Brad', 'Pitt'), ('Alec', 'Baldwin'), ('Bob', 'Belderbos'), ('Sandra', 'Bullock'), ('Al', 'Pacino')]\n fname_lname = [tuple(_.split()) for _ in names]\n \n # iterate through [(fname_lname)] and assign 'first name' to [f_name]\n f_name = [x[0] for x in fname_lname]\n\n # grab min [f_name] using len()\n f_name = min(f_name, 
key=len)\n\n return f_name", "def extract_names(register):\n names = []\n for i in range(len(register) - 1): # len() -> no of columns\n first_name = str(register.iloc[i][2]).capitalize()\n last_name = str(register.iloc[i][1]).upper()\n name = last_name + ' ' + first_name\n names.append(name)\n names = list(set(names))\n return names", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')", "def search(self, text, location=\"\"):\n return [\n obj.name[len(location) + 1 : -3] for obj in self._file_model.search(text) if obj.name.startswith(location)\n ]", "def test_first_author_full_initial(self):\n inv_search = 'firstauthor:\"klebanov, ig* r*\" or exactfirstauthor:\"klebanov, i r\"'\n spi_search = \"find fa klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def extract_names_TEXT(inputTEXT, source=\"gnrd\", sEngine=0):\r\n start_time = time.time()\r\n if source == \"gnrd\":\r\n final_result = get_sn_text(inputTEXT, sEngine)\r\n src_urls = [\"http://gnrd.globalnames.org/\"]\r\n elif source == \"taxonfinder\":\r\n final_result = get_tf_sn_text(inputTEXT)\r\n src_urls = [\"http://taxonfinder.org/\"]\r\n else:\r\n return json.dumps({'status_code': 400, 'message': \"Error: Invalid source name\"}) \r\n \r\n end_time = time.time()\r\n execution_time = end_time-start_time\r\n \r\n #service result creation time\r\n creation_time = datetime.datetime.now().isoformat()\r\n meta_data = {'creation_time': creation_time, 'execution_time': float(\"{:4.2f}\".format(execution_time)), 'source_urls': src_urls }\r\n\r\n final_result['meta_data'] = meta_data\r\n\r\n final_result['total_names'] = len(final_result['scientificNames'])\r\n \r\n return json.dumps(final_result)", "def appear_only_at_sentence_beginning(word, title, sents):\n assert (word in title), \"The word should be a title word\"\n appear_at_sentence_beginning = False\n \n for sent in sents:\n sent_start = True\n for w in sent:\n if sent_start and w == word and word[0].isupper():\n appear_at_sentence_beginning = True\n elif w == word: # appeared cap in the middle of sentence\n return False\n sent_start = False\n \n if appear_at_sentence_beginning:\n return True\n else:\n return False", "def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad 
Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')", "def obtain_patient_names():\r\n split_names = ['train', 'valid', 'test']\r\n for split_name in split_names:\r\n csv_file = 'config/fetal_hc_train_{0:}.csv'.format(split_name)\r\n with open(csv_file, 'r') as f:\r\n lines = f.readlines()\r\n data_lines = lines[1:]\r\n patient_names = []\r\n for data_line in data_lines:\r\n patient_name = data_line.split(',')[0]\r\n patient_name = patient_name.split('/')[-1][:-4]\r\n print(patient_name)\r\n patient_names.append(patient_name)\r\n output_filename = 'config/fetal_hc_train_{0:}_patient.txt'.format(split_name)\r\n with open(output_filename, 'w') as f:\r\n for patient_name in patient_names:\r\n f.write('{0:}\\n'.format(patient_name))", "def find_by_name(command, name): # fine\r\n if command == 'FindByFName':\r\n for student in StudentRoster:\r\n if name == student.first:\r\n print(student_format(student))\r\n elif command == 'FindByLName':\r\n for student in StudentRoster:\r\n if name == student.last:\r\n print(student_format(student))", "def get_initials(the_fullname):\n my_initials = ''\n for name in the_fullname.split():\n my_initials += name[0].upper()\n return my_initials", "def populate_proper_names():\n in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'proper_names')\n in_file = os.path.join(in_dir, 'all.txt')\n names = []\n counter = 0\n with open(in_file) as filehandle:\n for line in filehandle:\n data = line.strip().split('\\t')\n if len(data) == 3:\n counter += 1\n sortable, name, common = data\n if common.lower() == 'true':\n common = True\n else:\n common = False\n\n names.append(ProperName(lemma=name,\n sort=sortable,\n common=common))\n if counter % 1000 == 0:\n ProperName.objects.bulk_create(names)\n names = []\n\n ProperName.objects.bulk_create(names)", "def main(name_num, input_one, input_two, input_three, output):\n\n # Make and clean up corpus\n corpus = make_source_text(input_one, input_two, input_three, output)\n clean_text = cleanup(corpus)\n text_list = tokenize(clean_text)\n source_names = list_names(corpus)\n name_list = []\n\n # Loop until a unique name has been created\n while name_num > len(name_list):\n dictionary = nth_order_markov(2, text_list)\n first_letter = start_tokens(dictionary)\n markov_list = create_name(first_letter, dictionary)\n # Make word and remove whitespace\n first_name = \"\".join(markov_list).strip()\n # Check if name is match for any name in list; if so, start over\n # Name list contains the source names; ensures generated names are unique\n if first_name in source_names or len(first_name) < 2:\n print(\"duplicate name\", first_name)\n continue\n else:\n # Add valid names to name list\n name_list.append(first_name)\n # Return the list of valid names\n return name_list", "def loadFirsts(self, names):\n\n if os.path.exists(names):\n self.firsts, self.w_firsts = self.load(names)\n else:\n self.firsts = [names]\n self.w_firsts = None\n\n return", "def split_name(fullname):", "def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"", "def grep_words(words, file_name):\n data = read_file(file_name, split_to_lines=False)\n for word in words:\n if word in data:\n return True\n return False", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def 
test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def __find_string_in_response(self, fullResponse, searchFor):\n check = True\n rawResponse = fullResponse;\n if \"result\" not in rawResponse.text:\n check = False\n else:\n responseJSON = rawResponse.json()\n length_responseJSON = len(responseJSON[\"result\"])\n for i in range(0,length_responseJSON,1):\n check = searchFor in responseJSON[\"result\"][i][\"first_name\"]\n if check == False:\n return check\n return check", "def extract_name(sentence):\n pos = sentence.find(' ')\n return sentence[:pos]", "def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names" ]
[ "0.6360774", "0.6280207", "0.6206798", "0.6189551", "0.6042509", "0.5802803", "0.5793913", "0.5733172", "0.57170224", "0.5716283", "0.56726676", "0.56491053", "0.563351", "0.5597997", "0.5563706", "0.5563537", "0.55441076", "0.55139315", "0.5505184", "0.5496029", "0.54784536", "0.54717904", "0.5466944", "0.54575783", "0.5425145", "0.5419574", "0.5407387", "0.538537", "0.5381204", "0.536705", "0.53645355", "0.53440154", "0.53412294", "0.53404486", "0.5337018", "0.5335769", "0.5332277", "0.5329058", "0.5328006", "0.5327221", "0.53262013", "0.5317711", "0.5311422", "0.5303261", "0.52784574", "0.5276857", "0.5266361", "0.52429813", "0.5242717", "0.5238023", "0.5220579", "0.52183896", "0.5213432", "0.521274", "0.5209633", "0.5207647", "0.520539", "0.5181693", "0.517622", "0.516231", "0.5160491", "0.515689", "0.51560736", "0.5146956", "0.5140575", "0.51405215", "0.5131145", "0.5116773", "0.5114191", "0.5108391", "0.5107323", "0.5100554", "0.50955546", "0.5094906", "0.50913316", "0.50873524", "0.5086348", "0.50781256", "0.50736445", "0.50651133", "0.5060662", "0.5058471", "0.50553733", "0.50454855", "0.5031856", "0.50278354", "0.50261515", "0.5024651", "0.5024639", "0.50225484", "0.5017875", "0.5012255", "0.50113916", "0.5010415", "0.50049376", "0.50049376", "0.49969283", "0.49965987", "0.49919403", "0.49867582" ]
0.6673137
0
Find names position in a sentence based on a FIRST_NAMES file
def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):
    if not sentence:
        raise Exception(ParameterMissing, "This method requires sentence as input")

    if not isinstance(sentence, str):
        raise Exception(TypeError, "This method requires string as input")

    names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)

    to_return = []

    for name in names_found:
        begin_positions = [m.start() for m in re.finditer(name, sentence)]
        for begin in begin_positions:
            to_return.append((begin, begin + len(name)))
        # begin = sentence.lower().index(name.lower())
        # end = begin + len(name)
        # to_return.append((begin, end))

    return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names", "def find_names(s):\n \"*** YOUR CODE HERE ***\"", "def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"", "def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names", "def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n first_names = get_first_names_pack()\n if not first_names:\n raise Exception(VariableNotSet, \"Variable FIRST_NAMES is not set in settings.py\")\n\n if last_names_enabled:\n last_names = get_last_names_pack()\n if not last_names:\n raise Exception(VariableNotSet, \"Variable LAST_NAMES is not set in settings.py\")\n first_names = list(set(first_names).union(set(last_names)))\n\n if no_names_enabled:\n no_names = get_no_names_pack()\n if not no_names:\n raise Exception(VariableNotSet, \"Variable NO_NAMES is not set in settings.py\")\n first_names = list(set(first_names).difference(set(no_names)))\n\n punctuation = '!@#$%^&*()_+<>?:.,;'\n\n for c in sentence:\n if c in punctuation:\n sentence = sentence.replace(c, \" \")\n\n words = sentence.lower().split()\n res = set(words).intersection(first_names)\n\n to_return = [w.title() for w in res]\n\n return to_return", "def test_word_positions_in_file(self):\n pass", "def countByName(lastName, firstName, filename):\r\n\r\n nameCounter = 1 #This variable serves as a counter and it ranges from 0 to 5, which accounts to the line numbers.\r\n isCorrectName = False #This variable evaluates whether the names compare to the 
names on the text.\r\n gmedals = 0 #Counts the amount of gold medals\r\n smedals = 0 #Counts the amount of silver medals\r\n bmedals = 0 #Counts the amount of bronze medals\r\n\r\n with open(filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n line = line.strip().upper()\r\n if nameCounter == 1:\r\n if line == lastName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 2 and isCorrectName is True:\r\n if line == firstName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 4:\r\n if isCorrectName is True and line == '1':\r\n gmedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '2':\r\n smedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '3':\r\n bmedals += 1\r\n\r\n if nameCounter == 5:\r\n nameCounter = 0\r\n isCorrectName = False\r\n\r\n nameCounter += 1\r\n\r\n return gmedals, smedals, bmedals", "def index_of(self, last_name, first_name):\n self.is_at_with_exception()\n self.refresh_table()\n i = 0\n for item in self._table['first_name_column']:\n if item.text == first_name:\n if self._table['last_name_column'][i].text == last_name:\n return i\n else:\n i = i + 1\n return -1", "def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...", "def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return", "def _first_name_sql(self, first_name, tolerance=1):\n nicknames = self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions", "def name_extractor(file):\n \n import os\n import re\n \n name_list = []\n rank_dict = {}\n \n year = re.search(r'(\\d+)\\.html$', file) \n current = open(file) \n match = re.findall(r'<tr\\salign=\"right\"><td>(\\d+).*?>(\\w+).*?>(\\w+)', current.read())\n current.close\n\n \n for one_touple in match: 
#Check for existing match, only accept lower rank value into dictionary\n \n for index in range(1,2):\n \n if one_touple[index] in rank_dict:\n if rank_dict[one_touple[index]] < one_touple[0]:\n continue\n rank_dict[one_touple[index]] = one_touple[0]\n \n for one_item in rank_dict:\n \n ranking = rank_dict[one_item] #Build target list from dictionary formatted as \"Name rank\"\n name_list.append(f\"{one_item} {ranking}\") \n \n name_list = sorted(name_list)\n name_list.insert(0,year.group(1))\n \n return name_list", "def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname", "def extract_names(filename):\n # +++your code here+++\n # Opening the file\n f = open(filename, 'rU')\n # Reading all of the lines\n lines = f.readlines()\n # Empty list to hold the year, names, and ranks\n ranks_names = []\n for line in lines:\n # search for the year\n year = re.search(r'\\s(\\d\\d\\d\\d)</h3>', line)\n # if the year is found, append it to the list\n if year: \n ranks_names.append(year.group(1))\n # search for the rank, male name, and female name\n rank_male_female = re.search(r'(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>', line)\n # If they are found then append the male name plus its rank, as well as the \n # female name plus its rank\n if rank_male_female:\n ranks_names.append(rank_male_female.group(2) + ' ' + rank_male_female.group(1))\n ranks_names.append(rank_male_female.group(3) + ' ' + rank_male_female.group(1))\n # Sort the list alphabetically\n ranks_names.sort()\n # Return the list\n return ranks_names", "def test_first_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_first_name(input_val)\n self.assertEqual(output_val, self.line.first_name)", "def test_find_first_author_initial(self):\n inv_search = 'firstauthor:\"ellis, j*\"'\n spi_search = 'find fa j ellis'\n self._compare_searches(inv_search, spi_search)", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA 
file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def find_feature_titles_in_file(feature_index, feature_names, file):\n\n dict_of_features_in_this_file = {}\n for feature_name, feature_titles in feature_names.items():\n try:\n features_found = [feature for feature in feature_titles if feature in feature_index]\n if len(features_found) == 1:\n dict_of_features_in_this_file[feature_name] = features_found[0]\n else:\n raise FeatureNotFoundError\n\n except FeatureNotFoundError:\n sys.exit(\n 'ERROR: Finding zero or more than one occurrence of feature {} in the header of input file'\n 'file {}! 
Please check variable feature_names in the function main().'\n 'Running the code is terminated.'.format(feature_titles, file))\n return dict_of_features_in_this_file", "def process_names():\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n first_name = [first for first, last in split_names]\n shortest = first_name[0]\n for name in first_name:\n if len(name) < len(shortest):\n shortest = name\n\n return shortest", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n return sorted([name.split()[0] for name in names], key=len)[0]", "def count_name(text, adj):\n for x in re.finditer(r'[A-Z][a-z]*[\\s][A-Z][a-z]*',text):\n adj[x.group()] += 1\n return", "def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))", "def find_match(second_file, title):\r\n # Initialize variables/ open files\r\n seq2 = \"\"\r\n header2 = \"\"\r\n match_fh = open(second_file, \"r\")\r\n # parse through lines of file\r\n for lines in match_fh:\r\n # If > found assume its header\r\n if lines[0] == \">\":\r\n # header2 = lines\r\n # If a header has been found, pull strain name, orgainism and subtype for new header\r\n if len(header2) > 0:\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n # if new header equals input header then return it and the sequence\r\n if header2 == title:\r\n match_fh.close()\r\n print(\"match\")\r\n return header2, seq2\r\n # Reset the header and seq\r\n header2 = lines\r\n seq2 = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n seq2 = seq2 + lines\r\n\r\n # to return the last entry in the file, since loop won't be able to return it\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n 
header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n match_fh.close()\r\n return header2, seq2", "def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname", "def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names", "def getPosition(fname, pos):\n count = 0\n infile = open(fname, \"r\")\n n_line = infile.readline()\n temp = []\n for line in infile:\n words = line.split(',')\n temp.append(Player(words[0], int(words[1]), int(words[2]), int(words[3]),\n int(words[4]), float(words[5]), pos, 1))\n count += 1\n infile.close()\n return temp, count", "def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence", "def _get_names(self):\n if len(self.firstnames):\n return self.firstnames, self.lastnames\n\n if os.path.exists(\"/code/api/app/utils/names.txt\"):\n with open(\"/code/api/app/utils/names.txt\") as file_with_names:\n names = file_with_names.readlines()\n else:\n # why yes, these are names of African Hollywood actors (according to Wikipedia)\n names = [\"Mehcad Brooks\", \"Malcolm Barrett\", \"Nick Cannon\", \"Lamorne Morris\", \"Neil Brown Jr.\",\n \"William Jackson Harper\", \"Marques Houston\", \"Jennifer Hudson\", \"Alicia Keys\", \"Meghan Markle\",\n \"Beyonce Knowles\", \"Jesse Williams\", \"Lance Gross\", \"Hosea Chanchez\", \"Daveed Diggs\",\n \"Damon Wayans Jr.\", \"Columbus Short\", \"Terrence Jenkins\", \"Ron Funches\", \"Jussie Smollett\",\n \"Donald Glover\", \"Brian Tyree Henry\", \"Gabourey Sidibe\", \"Trai Byers\", \"Robert Ri'chard\",\n \"Arjay Smith\", \"Tessa Thompson\", \"J.Lee\", \"Lauren London\", \"DeVaughn Nixon\", \"Rob Brown\", ]\n for _name in names:\n split_name = _name.strip().split(\" \")\n self.firstnames.append(split_name[0])\n lastname = \" \".join(split_name[1:]) if len(split_name) > 1 else \"\"\n self.lastnames.append(lastname)\n return self.firstnames, self.lastnames", "def extract_names(pages: Iterable[tuple[int, list[str]]]) -> DataT:\n found_first = False\n current_name: dict[str, Any] | None = None\n current_label: str | None = None\n current_lines: list[str] = []\n in_headings = True\n\n def start_label(label: str, line: str) -> None:\n nonlocal current_label, current_lines\n assert current_name is not None\n assert current_label is not None\n if label in current_name:\n if label in (\"Syntype\", \"Type Locality\"):\n label = f\"Syntype {line}\"\n assert (\n label not in current_name\n ), f\"duplicate label {label} in {current_name}\"\n current_name[current_label] = current_lines\n current_label = label\n current_lines = [line]\n\n for page, lines in pages:\n if current_name is not None:\n 
current_name[\"pages\"].append(page)\n for line in lines:\n if not found_first:\n if line.strip() in (\"TYPE SPECIMENS\", \"SPECIMENS\"):\n found_first = True\n continue\n # ignore family/genus headers\n if re.match(\n (\n r\"^\\s*(Genus|Family|Subfamily|Suborder|Order) [A-Z][a-zA-Z]+\"\n r\" [a-zA-Z\\.’, \\-]+(, \\d{4})?$\"\n ),\n line,\n ):\n in_headings = True\n continue\n # ignore blank lines\n if not line:\n continue\n if in_headings:\n if line.startswith(\" \"):\n continue\n else:\n in_headings = False\n if line.startswith(\" \"):\n current_lines.append(line)\n elif re.match(r\"^[A-Z][A-Z a-z-]+: \", line):\n start_label(line.split(\":\")[0], line)\n elif line.startswith(\"Lectotype as designated\"):\n start_label(\"Lectotype\", line)\n elif line.startswith(\"Neotype as designated\"):\n start_label(\"Neotype\", line)\n elif line.startswith(\n (\n \"This specimen\",\n \"Type \",\n \"No type\",\n \"There are\",\n \"No additional\",\n \"All \",\n \"Subspecies of \",\n \"Neotype designated \",\n \"Padre Island\",\n )\n ):\n start_label(\"comments\", line)\n elif line.startswith(\n (\"Secondary junior\", \"Primary junior\", \"Junior primary\")\n ):\n start_label(\"homonymy\", line)\n elif re.match(r\"^[\\d/]+\\. \", line):\n start_label(line.split(\".\")[0], line)\n elif line.startswith(\"USNM\"):\n start_label(line.split(\".\")[0], line)\n elif (\n current_label not in (\"name\", \"verbatim_citation\", \"homonymy\")\n and \":\" not in line\n ):\n # new name\n if current_name is not None:\n assert current_label is not None\n current_name[current_label] = current_lines\n assert any(\n field in current_name\n for field in (\n \"Holotype\",\n \"Type Locality\",\n \"Lectotype\",\n \"Syntype\",\n \"Syntypes\",\n \"No name-bearing status\",\n \"Neotype\",\n )\n ), current_name\n yield current_name\n current_name = {\"pages\": [page]}\n current_label = \"name\"\n current_lines = [line]\n elif current_label == \"name\":\n if re.search(\n r\"\\d|\\b[A-Z][a-z]+\\.|\\baus\\b|\\bDas\\b|\\bPreliminary\\b|\\., \", line\n ):\n start_label(\"verbatim_citation\", line)\n else:\n # probably continuation of the author\n current_lines.append(line)\n elif (\n current_label == \"verbatim_citation\"\n or current_label == \"homonymy\"\n or line.startswith(\"= \")\n ):\n start_label(\"synonymy\", line)\n else:\n assert False, f\"{line!r} with label {current_label}\"\n assert current_label is not None\n assert current_name is not None\n current_name[current_label] = current_lines\n yield current_name", "def get_last_name(first_name):\n \"\"\"Order last names so that names closest to first name are first.\n For example first name \"Kikke\" -> last names should be \"Kilari\",\n \"Kolari\", [all other names by random]\"\"\"\n def name_comparator(last_name):\n \"\"\"Return a number describing how close the two names are.\"\"\"\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return 
score\n\n last_names_random = sample(last_names, len(last_names))\n last_names_sorted = sorted(last_names_random, key=name_comparator)\n\n \"\"\"Walk through names and check on each name if you should stop. Since\n the matching names are first they are more likely to be selected.\"\"\"\n for i in range(0, 10):\n if random() >= 0.7:\n return last_names_sorted[i]\n\n return last_names_sorted[0]", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n names_splitted = [name.split() for name in names]\n names_splitted.sort(key=sort_length)\n names_sorted = [\" \".join(name) for name in names_splitted]\n return names_sorted[0]", "def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)", "def extract_names(filename):\n # +++your code here+++\n f = open(filename, 'r')\n fl = read_file(filename)\n\n l = []\n lFiltFinal = []\n\n year_match = re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', f.read())\n year = year_match.group(1)\n\n for line in fl:\n #if '<h3 align=\"center\">Popularity in' in line:\n #year = line[-10:-6]\n if '<tr align=\"right\"><td>' in line:\n rank = line[line.find('<td>')+len('<td>'):line.find('</td>')]\n boys = line[line.index('</td><td>')+len('</td><td>'):line.index('</td><td>',line.index('</td><td>')+1)]\n girls = line[line.index('</td><td>',line.index('</td><td>')+1)+len('</td><td>'):-6]\n l.append([boys,rank])\n l.append([girls,rank])\n\n lFilt = list(unique_by_first_n(1, l))\n\n lFiltFinal.append(year)\n for key in lFilt:\n lFiltFinal.append( key[0] + ' ' + key[1])\n\n lFiltFinal.sort()\n return lFiltFinal", "def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. 
How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name", "def position(file_, pattern):\n pattern = pattern[1:-1]\n pattern = pattern.replace('(', '\\(')\n pattern = pattern.replace(')', '\\)')\n file_obj = open(file_, 'rU')\n for line_number, line in enumerate(file_obj):\n m = re.search(pattern, line)\n if m is not None:\n return line_number, m.pos\n file_obj.close()\n return 0, 0", "def check_named_entity(check):\r\n\tglobal word_buffer\r\n\tglobal temp\r\n\t\r\n\t\r\n\tif check == \"All\": \r\n\t# @return - Return Named Entities identified from the begining of the sentence except for the Named Entity at the end\r\n \r\n\t if temp == 1: \r\n \r\n\t\tnamed_entity = join_named_entity(word_buffer)\r\n\r\n\t\tword_buffer = []\r\n\t\t\r\n\t\ttemp = 0\r\n\r\n\t\treturn named_entity\r\n\telse:\r\n\t# @ return - Return Named Entity present at the end of the sentence, if available\r\n\r\n\t if len(word_buffer)>1: \r\n\t \r\n named_entity = join_named_entity(word_buffer)\r\n \r\n\t\treturn named_entity", "def initialise_first_name(name):\n\t# Split name on the comma and space\n\ttokens = name.split(\", \")\n\tlast_name = tokens[0]\n\t# Split tokens[1] which is a \"Title First Name\" string\n\tfirst_tokens = tokens[1].split(\" \")\n\t# Replace the first name with just the first initial\n\tfirst_tokens[1] = first_tokens[1][0]\n\t# Concatenate everything back together and return\n\treturn last_name + ', ' + \" \".join(first_tokens)", "def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def test_find_first_author(self):\n inv_search = 'firstauthor:ellis'\n spi_search = 'find fa ellis'\n self._compare_searches(inv_search, spi_search)", "def extract_name(sentence):\n pos = sentence.find(' ')\n return sentence[:pos]", "def appear_only_at_sentence_beginning(word, title, sents):\n assert (word in title), \"The word should be a title word\"\n appear_at_sentence_beginning = False\n \n for sent in sents:\n sent_start = True\n for w in sent:\n if sent_start and w == word and word[0].isupper():\n appear_at_sentence_beginning = True\n elif w == word: # appeared cap in the middle of sentence\n return False\n sent_start = False\n \n if appear_at_sentence_beginning:\n return True\n else:\n return False", "def extract_names(filename):\n f = open(filename,'rU') \n name_data = f.read()\n year_data= 
re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', name_data)\n if not year_data :\n print ' no year found '\n sys.exit(1)\n name_year=year_data.group(1) \n #print 'year :'\n #print name_year\n tuples=re.findall(r'<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',name_data)\n #print 'tuples'\n #print tuples\n dict_name = {}\n for a,b,c in tuples :\n #print a + ' boy name: ' + b + ' , girl name : ' + c\n if b not in dict_name :\n dict_name[b] = a\n if c not in dict_name :\n dict_name[c] = a \n #print dict_name \n lst_names = sorted(dict_name.keys()) \n result_names_sorted = []\n result_names_sorted.append(name_year)\n for name in lst_names :\n #print name + \" : \" + dict_name[name]\n result_names_sorted.append(name + ' ' + dict_name[name])\n #print result_names_sorted \n\n return result_names_sorted", "def match_names(s):\n result = re.findall(r'^[A-Z][a-z]+','name: Bob, age: 14, name: Amanda, age: 17, name: Tim, age: 30')\n print result", "def starts_with(self, matchstr, **kwargs):\r\n \r\n valid_kwargs = ['num_results', 'case_sensitive']\r\n validator.validate(kwargs.keys(), valid_kwargs)\r\n\r\n final_list = []\r\n case_sensitive = False\r\n num_results = 0\r\n \r\n if 'num_results' in kwargs:\r\n num_results = int(kwargs['num_results'])\r\n \r\n if len(matchstr) == 0:\r\n if num_results:\r\n return self.__sorted_names[0:num_results]\r\n return self.__sorted_names[:]\r\n\r\n if 'case_sensitive' in kwargs:\r\n if kwargs['case_sensitive']:\r\n case_sensitive = True\r\n\r\n tag_names_that_start_with_char = []\r\n \r\n if case_sensitive:\r\n if matchstr[0] not in self.__name_index:\r\n return []\r\n else:\r\n if matchstr[0].lower() not in self.__name_index and matchstr[0].upper() not in self.__name_index:\r\n return []\r\n \r\n if case_sensitive:\r\n idxs = self.__name_index[matchstr[0]]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n else:\r\n if matchstr[0].lower() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].lower()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n\r\n if matchstr[0].upper() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].upper()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char += [self.__sorted_names[idxs['first']]]\r\n else:\r\n tag_names_that_start_with_char += self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n if len(matchstr) == 1:\r\n if num_results == 0:\r\n return tag_names_that_start_with_char[:]\r\n else:\r\n return tag_names_that_start_with_char[0:num_results]\r\n \r\n if case_sensitive:\r\n for t in tag_names_that_start_with_char:\r\n if (t.find(matchstr) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n else:\r\n for t in tag_names_that_start_with_char:\r\n if (t.lower().find(matchstr.lower()) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n\r\n return final_list", "def _get_pos_name(pos_code, names=\"parent\", english=True, pos_map=POS_MAP):\n if names not in (\"parent\", \"child\", \"all\", \"raw\"):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or \"\n \"'raw'; not 
'{0}'\".format(names)\n )\n logger.debug(\n \"Getting {0} POS name for '{1}' formatted as '{2}'.\".format(\n \"English\" if english else \"Chinese\", pos_code, names\n )\n )\n if names == \"raw\":\n return pos_code\n pos_code = pos_code.lower() # Issue #10\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".format(pos_code))\n return None # Issue #20\n pos = (pos_entry[1 if english else 0],)\n if names == \"parent\":\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to \"\n \"look for child name for '{1}'\".format(pos_entry[1], pos_code)\n )\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n\n if names == \"all\":\n # sub_pos can be None sometimes (e.g. for a word '甲')\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = (sub_pos,)\n\n name = pos if names == \"all\" else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name", "def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"", "def shortest_first_name(names):\n # input: ['Julian Sequeira', 'Arnold Schwarzenegger', 'Keanu Reeves', 'Julbob Pybites', 'Brad Pitt', 'Al Pacino', 'Matt Damon', 'Sandra Bullock', 'Bob Belderbos', 'Alec Baldwin']\n names = dedup_and_title_case_names(names)\n\n # fname_lname = [('Keanu', 'Reeves'), ('Julbob', 'Pybites'), ('Julian', 'Sequeira'), ('Matt', 'Damon'), ('Arnold', 'Schwarzenegger'), ('Brad', 'Pitt'), ('Alec', 'Baldwin'), ('Bob', 'Belderbos'), ('Sandra', 'Bullock'), ('Al', 'Pacino')]\n fname_lname = [tuple(_.split()) for _ in names]\n \n # iterate through [(fname_lname)] and assign 'first name' to [f_name]\n f_name = [x[0] for x in fname_lname]\n\n # grab min [f_name] using len()\n f_name = min(f_name, key=len)\n\n return f_name", "def extract_names(filename):\n raw_text = read_html(filename) \n \n #searching for the year\n year = re.search('(<h3 align=\"center\">Popularity in )(\\d\\d\\d\\d)',raw_text).group(2)\n \n #searching for the list of names\n list_of_names = re.findall('<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',raw_text)\n \n #pair each name with it's rank\n name_and_rank = [] \n for line in list_of_names:\n name_and_rank.append((line[1], line[0]))\n name_and_rank.append((line[2], line[0]))\n \n # sort the list alphabetically\n name_and_rank = sorted(name_and_rank, key = lambda x:x[0])\n name_and_rank = dict(name_and_rank)\n\n return year, name_and_rank[:20]", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def get_sample_idx(sample, header):\n\n for item in header:\n if 
sample in item:\n return header.index(item)\n\n print(sample + \" not found in header, check input files.\")\n sys.exit()", "def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict", "def search(self, name):\n\n name = name.lower().strip()\n exact_names = get_close_matches(name, self.possible_names, n=1)\n if not exact_names:\n return None\n else:\n exact_name = exact_names[0]\n id = self.df_possible_names[self.df_possible_names['name'] == exact_name].index[0] \n return self.df_possible_names.loc[id, 'id']", "def extract_names_from_metadata_sheet(self):\n\n # parse all of the names (not orgs) and add them to a counter\n names_counter = Counter()\n with open(METADATA_CSV, encoding='utf-8') as file:\n csv_file = csv.DictReader(file)\n\n for line in csv_file:\n for element in ['author', 'recipients', 'cced']:\n for person_or_org in [p.strip() for p in line[element].split(';')]:\n # if at least a comma -> most likely a person\n if len(person_or_org.split(',')) > 1:\n names_counter[person_or_org] += 1\n\n # for each element in the counter, add them to the people set.\n for name in names_counter:\n self.people.add(Person(name_raw=name, count=names_counter[name], aliases=[name]))\n self.merge_all_duplicates()", "def findLegHeaders(words, header, how='match'):\n locs = []\n for i, line in enumerate(words):\n match = header.match(line)\n if match is not None:\n locs.append(i)\n\n return locs", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')", "def match_pokemon(name):\n with open('pokemon.txt') as file:\n if name.title() + '\\n' in file.read():\n return name\n with open('pokemon.txt') as file:\n line = file.readline().strip('\\n')\n while line:\n if fuzz.ratio(name.title(), line) > 80:\n return line\n line = file.readline().strip('\\n')\n return None", "def test_named_entities(self) -> None:\n for named_entitity_rule in self.rules.named_entities:\n identity: str = named_entitity_rule[\"identity\"]\n type: Optional[str] = named_entitity_rule.get(\"type\")\n subtype: Optional[str] = named_entitity_rule.get(\"subtype\")\n invalid: Optional[str] = named_entitity_rule.get(\"invalid\")\n valid: Optional[str] = named_entitity_rule.get(\"valid\")\n\n for named_entity in self.report.get_named_entities(identity, type, subtype):\n text: str = \" \".join([w.text for w in named_entity.words])\n if valid and (not re.search(valid, text, re.I)):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )\n elif invalid and re.search(invalid, text, re.I):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def test_first_last_middle_name(self):\n\t\tformatted_name = 
get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def getlistofpossibletitles(fileitem,fname):\n title = []\n oddtitles = open(\"oddtitles.txt\", 'r')\n content = oddtitles.read()\n oddtitles.close()\n\n content = content.split(\"\\n\")\n for line in content:\n elements = line.split(',')\n if fileitem in elements[0]:\n #print(elements[1])\n title.append(elements[1].title())\n\n \n title.append(fileitem)\n title.append(fileitem.title())\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n with open(fname, \"r\") as dataf:\n for line in dataf:\n if lookfor.upper() in line.upper():\n line = line.replace(\"\\n\",\"\")\n title.append(line)\n title.append(line.title())\n return title", "def dls_header_search(self, buf, f_name):\r\n self.file_size = len(buf)\r\n self.my_dls = []\r\n\r\n raw_file = buf\r\n dls_count = 0\r\n start_offset = 0\r\n end_offset = 0\r\n\r\n while end_offset != self.file_size:\r\n try:\r\n start_offset = end_offset\r\n page_len = struct.unpack(\"<I\", raw_file[start_offset + 8:start_offset + 12])[0]\r\n end_offset = start_offset + page_len\r\n\r\n if raw_file[start_offset:start_offset + 4] == b'1SLD' or raw_file[start_offset:start_offset + 4] == b'2SLD':\r\n self.my_dls.append({'Start Offset': start_offset, 'End Offset': end_offset})\r\n dls_count += 1\r\n else:\r\n self.logfile.write(\"%s: Error in length of page when finding page headers.\" % (f_name))\r\n break\r\n except:\r\n self.logfile.write(\"%s: Error in length of page when finding page headers.\" % (f_name))\r\n break\r\n\r\n if dls_count == 0:\r\n # Return false to caller so that the next file will be searched\r\n return False\r\n else:\r\n # Return true so that the DLSs found can be parsed\r\n return True", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def find_named_entities(pos_tags):\n contains_proper_noun = False\n tokens = list()\n for tags in pos_tags:\n if tags['tag'] == '^':\n contains_proper_noun = True\n\n if contains_proper_noun:\n for tags in pos_tags:\n if len(tags['token']) == 1:\n tags['token'] = NLPUtils.character_to_unicode(tags['token'])\n tokens.append(tags['token'])\n try:\n text = ' '.join(tokens)\n headers = {\n 'Accept': 'application/json',\n }\n # print(text)\n data = [\n ('text', text),\n ('confidence', '0.25'),\n ('support', '20')\n ]\n\n r = requests.post('http://model.dbpedia-spotlight.org/en/annotate', headers=headers, data=data,\n timeout=10)\n # print(str(r.content.decode()))\n res = r.json()\n\n entities = list()\n if 'Resources' in res:\n for i in res['Resources']:\n # res_str = str(i).replace(',','\\n')\n # print(res_str)\n\n if i['@types'] is not None:\n original = i['@surfaceForm']\n entity_tmp = i['@URI']\n entity_tmp = re.sub('.*/', '', entity_tmp)\n entity_tmp = re.sub('\\(.*\\)', '', entity_tmp)\n entity = re.sub('_', ' ', entity_tmp).strip()\n\n if entity.lower() in text.lower() and ' ' in entity:\n entities.append((entity, int(i['@offset'])))\n # print(entities)\n new_pos_tags = list()\n curr_pos = 0\n tokens_to_omit = 0\n for tags in pos_tags:\n # if 
re.match(\"U\\+[a-zA-Z0-9]{1,5}\",tags['token']):\n # print(tags['token'])\n # tags['token'] = NLPUtils.unicode_to_character(tags['token'])\n # print(tags['token'])\n\n token = tags['token']\n for e in entities:\n curr_dict = dict()\n if curr_pos == e[1]:\n tokens_to_omit = len(re.split(' ', e[0]))\n curr_dict['token'] = e[0]\n curr_dict['tag'] = '^'\n new_pos_tags.append(curr_dict)\n # +1 for whitespace\n curr_pos += len(token) + 1\n if tokens_to_omit == 0:\n new_pos_tags.append(tags)\n else:\n tokens_to_omit -= 1\n\n # decode unicode sequence\n new_pos_tags = NLPUtils.unicode_to_character_pos_tagged(new_pos_tags)\n return new_pos_tags\n # decode uniocde character\n pos_tags = NLPUtils.unicode_to_character_pos_tagged(pos_tags)\n except Exception as e:\n print(e)\n return None\n\n return pos_tags", "def linear_search(cls, student_list, name):\n for i in range(len(student_list)):\n if name == student_list[i].name:\n return i\n return cls.NOT_FOUND", "def SentenceSplitsStops(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0]\n\t\t\t#Human Translation\n\t\t\tzin1=row[1]\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2]\n\t\t\tcounter+=1\n\t\t\t#FULL STOPS\n\t\t\t#print(abs((zin0.count('.') - zin1.count('.'))))\n\t\t\tprint(abs((zin0.count('.') - zin2.count('.'))))", "def find_possible(search_string):\n codes = []; names = []\n search_string = search_string.lower()\n for c,n in name_given_code.items():\n\n if (search_string in n):\n codes.append(c)\n names.append(n)\n\n return codes, names", "def sample_first_name(first_name_file, num_samples):\n\n df = pd.read_csv(first_name_file, header=None)\n df.columns = [\"name\", \"gender\", \"count\"]\n df = df[(df[\"count\"] > 10)]\n names = df[\"name\"].sample(n=num_samples, random_state=2021, replace=True).apply(str.title)\n\n return list(names.values)", "def _extract_first_from(name, sources):\n for i, source in enumerate(sources):\n if not source:\n continue\n if name in source:\n return (i, source[name])\n raise KeyError(name)", "def get_surnames(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n surname = line.split('\\t')[1]\n result.append(surname)\n return result", "def main(files: List[Path]):\n show_filenames = len(files) > 1\n for file in files:\n with file.open() as f:\n for m in find_camel(f):\n print(pretty_match(m, filename=file if show_filenames else None))", "def compare_to_first_sequence(headers, sequences, substitution_mat):\n iheaders = iter(headers)\n isequences = iter(sequences)\n ref_name = next(iheaders)\n ref_seq = next(isequences)\n for target_header, target_seq in zip(iheaders, isequences):\n header = \"%s vs %s\" % (ref_name, target_header)\n score_lst = compute_score_by_position(substitution_mat, ref_seq, target_seq)\n yield header, score_lst", "def _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n pos_code = pos_code.lower() # Issue #10\n if names not in ('parent', 'child', 'all'):\n raise ValueError(\"names must be one of 'parent', 'child', or \"\n \"'all'; not '%s'\" % names)\n logger.debug(\"Getting %s POS name for '%s' formatted as '%s'.\" %\n ('English' if english else 'Chinese', pos_code, names))\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n if pos_code not in INVALID_POS:\n logger.warning(\"part of speech not recognized: '%s'\"\n % pos_code)\n return None # 
Issue #20\n pos = (pos_entry[1 if english else 0], )\n if names == 'parent':\n logger.debug(\"Part of speech name found: '%s'\" % pos[0])\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\"Found parent part of speech name '%s'. Descending to \"\n \"look for child name for '%s'\" % (pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n\n if names == 'all':\n # sub_pos can be None sometimes (e.g. for a word '甲')\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = (sub_pos, )\n\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '%s'\" % repr(name)\n if isinstance(name, tuple) else name)\n return name", "def first_name(seed):\n if consistent_hash(seed, 1):\n first = femaleNames\n else:\n first = maleNames\n return first[consistent_hash(seed, len(first))]", "def parse_infile_names(self):\n\n rv, slist = UTIL.list_minus_pref_suf(self.infiles,'out.ss_review.','.txt')\n if rv < 0: return\n if rv > 0:\n if self.verb > 1: print('++ trying to get SID from glob form')\n slist = UTIL.list_minus_glob_form(self.infiles, strip='dir')\n else:\n if self.verb > 1: print(\"++ have SIDs from 'out.ss_reiview' form\")\n\n if len(slist) == 0:\n if self.verb > 1: print(\"-- empty SID list\")\n return\n\n # make sure names are unique and not empty\n if not UTIL.vals_are_unique(slist):\n if self.verb > 1: print('-- SIDs not detected: not unique')\n return\n minlen = min([len(ss) for ss in slist])\n if minlen < 1:\n if self.verb > 1: print('-- SIDs not detected: some would be empty')\n return\n\n # we have a subject list\n self.snames = slist\n\n # now go for GID, start by replacing SIDs in infiles\n newfiles = [fname.replace(slist[ind], 'SUBJ') for ind, fname in\n enumerate(self.infiles)]\n\n if UTIL.vals_are_constant(newfiles):\n print('-- no groups detected from filenames')\n return\n\n # okay, try to make a group list\n glist = UTIL.list_minus_glob_form(newfiles)\n\n # cannot have dirs in result\n for gid in glist:\n if gid.find('/') >= 0:\n if self.verb>1: print('-- no GIDs, dirs vary in multiple places')\n return\n\n minlen = min([len(ss) for ss in glist])\n if minlen < 1:\n if self.verb > 1: print('-- GIDs not detected: some would be empty')\n return\n\n if self.verb > 1: print(\"++ have GIDs from infiles\")\n self.gnames = glist", "def first_word_of_each_line(filepath):\n with open(filepath, 'r') as my_file:\n for line in my_file:\n line = line.strip()\n words = line.split()\n word = words[0]\n yield word", "def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('janos', 'jk')\n\t\tself.assertEqual(formatted_name, 'Janos Jk')", "def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score", "def search_start_end_index_in_sentence(sent, np):\n\n nps = [x for x in np.split() if x]\n if len(nps) 
== 0:\n return (-1, -1)\n elif len(nps) == 1:\n indices = search_one_token_reducing_suffix(sent, np)\n if len(indices) > 0:\n return (indices[0], search_next_whitespace(sent, indices[0]))\n else:\n return (-1, -1)\n else:\n # search start:\n start = search_correct_position(sent, nps)\n end = search_correct_position(sent, nps, True)\n if end != -1:\n end = search_next_whitespace(sent, end)\n return (start,end)", "def get_N_char_positions(run_path, sid):\n full, no_call, filename = '', [], 'consensus.fa'\n with open(os.path.join(os.path.join(run_path, sid), filename)) as fin:\n for idx, line in enumerate(fin):\n if not line.startswith('>'):\n full = full+line.strip()\n full = list(full)\n for idx, e in enumerate(full):\n if e == 'N':\n no_call.append(idx)\n return no_call", "def find_file(lines, MF=1):\n v = [l[slices['MF']] for l in lines]\n n = len(v)\n cmpstr = '%2s' % MF # search string\n i0 = v.index(cmpstr) # first occurrence\n i1 = n - v[::-1].index(cmpstr) # last occurrence\n return lines[i0: i1]", "def FindHeaderLength():\n\n lookup = 'Lateral um'\n \n with open(filename) as myFile:\n for FoundPosition, line in enumerate(myFile, 1):\n if lookup in line:\n print 'Scan Data found at line:', FoundPosition\n break\n \n return FoundPosition+4", "def get_headline_position(self, headline: Headline) -> Tuple[int, int]:\n return self.get_regex_position(headline.name)", "def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)", "def index(xy):\n if len(xy) != 2:\n return 'Not a valid string'\n\n with open('ch.txt', encoding=\"utf-8\") as file:\n chars = file.read()\n print(chars)\n for i in range(len(chars)):\n if chars.contains(xy[0]): \n return 'Yeah'", "def count_positions(fname):\r\n with open(fname) as f:\r\n for i, l in enumerate(f):\r\n pass\r\n return i + 1", "def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos", "def get_head_pos( head, ngram ):\n try:\n tokens = ngram.split( ' ' )\n return str([ i for i, t in enumerate( tokens ) if t.startswith( head + \"/\" )][0] + 1 )\n except ValueError:\n return None", "def toprefix(self):\n file = self.read1()\n count = 0\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i.startswith(\"To\"):\n count = count + 1\n self.print(count)\n logging.debug(\"Starting with to\")\n return count", "def __parse_names(content: str) -> (list[str], int):\n offset = 0\n\n names = list()\n\n while offset < len(content) and content[offset] not in \"[=:]>\":\n if content[offset].isspace():\n offset += 1\n continue\n\n m = __ARG_REGEX.match(content[offset::])\n\n if m is None:\n break\n\n names.append(m.string[m.start(): m.end()])\n offset += m.end()\n\n return names, offset", "def SEARCH(find_text, within_text, start_num=1):\n # .lower() isn't always correct for unicode. 
See http://stackoverflow.com/a/29247821/328565\n return within_text.lower().index(find_text.lower(), start_num - 1) + 1", "def name_first(twitter_data, a, b):\r\n \r\n a_name = twitter_data[a][\"name\"]\r\n b_name = twitter_data[b][\"name\"]\r\n if a_name < b_name:\r\n return -1\r\n if a_name > b_name:\r\n return 1\r\n return username_first(twitter_data, a, b)", "def test_first_author_full_initial(self):\n inv_search = 'firstauthor:\"klebanov, ig* r*\" or exactfirstauthor:\"klebanov, i r\"'\n spi_search = \"find fa klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def get_mentions(fname):\n capture = re.compile('NPRI in ?.* ?(news|print)', re.I)\n with open(fname) as fp:\n for line in fp:\n line = line.strip()\n if capture.search(line):\n yield get_info(line)", "def find_by_name(command, name): # fine\r\n if command == 'FindByFName':\r\n for student in StudentRoster:\r\n if name == student.first:\r\n print(student_format(student))\r\n elif command == 'FindByLName':\r\n for student in StudentRoster:\r\n if name == student.last:\r\n print(student_format(student))", "def Load_Pos_Names(self):\n dir_paths = os.walk(self.pos_path) # Return Generator\n if self.text==True:\n pattern = r\"^wsj+_\\d+\\.pos\\.text$\"\n regex = re.compile(pattern)\n\n # Enter subfolders. ex) 01, 02, 03....\n File_List = []\n for dir_path in dir_paths:\n root_dir = dir_path[0]\n file_path_list = dir_path[2]\n\n # Read only text files\n for file_name in file_path_list:\n if regex.match(file_name) is not None:\n File_List.append(file_name)\n\n return File_List\n else:\n pattern = r\"^wsj+_\\d+\\.pos\\.text.+$\"\n regex = re.compile(pattern)\n\n # Enter subfolders. ex) 01, 02, 03....\n File_List = []\n for dir_path in dir_paths:\n root_dir = dir_path[0]\n file_path_list = dir_path[2]\n\n # Read only text files\n for file_name in file_path_list:\n if regex.match(file_name) is not None:\n File_List.append(file_name)\n\n return File_List" ]
[ "0.5892083", "0.58443296", "0.5784747", "0.57840866", "0.5768939", "0.5741232", "0.5701107", "0.5652323", "0.54787356", "0.5471315", "0.53691196", "0.5327336", "0.53244394", "0.5305583", "0.5301656", "0.5268954", "0.5267728", "0.5263014", "0.5252052", "0.5240446", "0.5232655", "0.5205766", "0.5199973", "0.5193679", "0.51889163", "0.517899", "0.51580447", "0.5155355", "0.515433", "0.512415", "0.51238954", "0.51171", "0.51158786", "0.5095813", "0.5082063", "0.50753945", "0.5071154", "0.50683624", "0.5052731", "0.5048795", "0.5035639", "0.5029095", "0.502643", "0.5020494", "0.5016712", "0.50084007", "0.50044227", "0.49976674", "0.4996572", "0.4984341", "0.49830896", "0.49821845", "0.49499124", "0.49366188", "0.49297714", "0.49266934", "0.4926241", "0.49227914", "0.49153027", "0.4914329", "0.49113443", "0.4899527", "0.48962992", "0.4894811", "0.4894061", "0.48919478", "0.48919478", "0.48904818", "0.4878571", "0.48744377", "0.48690292", "0.4867439", "0.4865565", "0.48623037", "0.48534992", "0.4852612", "0.48468146", "0.48427373", "0.48337632", "0.48220474", "0.48185074", "0.48175958", "0.48156393", "0.48111445", "0.48090345", "0.4806105", "0.48057285", "0.48031077", "0.48018128", "0.47955567", "0.47915375", "0.4791335", "0.47845516", "0.4783375", "0.47740376", "0.477299", "0.47718123", "0.4771003", "0.47697186", "0.47694483" ]
0.70489156
0
Find names in a sentence based on a FIRST_NAMES file and replace them
def replace_names(sentence=None, to_replace="X", fixed_size=0, last_names_enabled=True, no_names_enabled=False):
    if not sentence:
        raise Exception(ParameterMissing, "This method requires sentence as input")

    if not isinstance(sentence, str):
        raise Exception(TypeError, "This method requires string as input")

    positions_found = find_names_position(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)

    words_to_do = set()
    for position in positions_found:
        begin, end = position
        word = sentence[begin: end]
        words_to_do.add(word)

    for word in words_to_do:
        if fixed_size > 0:
            size = fixed_size
        else:
            size = end - begin
        replace = to_replace * size
        sentence = sentence.replace(word, replace)

    return sentence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_names(text):\n new_text = text\n for p, r in names_mapping:\n new_text = p.sub(r, new_text)\n return new_text", "def process_names():\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')", "def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n first_names = get_first_names_pack()\n if not first_names:\n raise Exception(VariableNotSet, \"Variable FIRST_NAMES is not set in settings.py\")\n\n if last_names_enabled:\n last_names = get_last_names_pack()\n if not last_names:\n raise Exception(VariableNotSet, \"Variable LAST_NAMES is not set in settings.py\")\n first_names = list(set(first_names).union(set(last_names)))\n\n if no_names_enabled:\n no_names = get_no_names_pack()\n if not no_names:\n raise Exception(VariableNotSet, \"Variable NO_NAMES is not set in settings.py\")\n first_names = list(set(first_names).difference(set(no_names)))\n\n punctuation = '!@#$%^&*()_+<>?:.,;'\n\n for c in sentence:\n if c in punctuation:\n sentence = sentence.replace(c, \" \")\n\n words = sentence.lower().split()\n res = set(words).intersection(first_names)\n\n to_return = [w.title() for w in res]\n\n return to_return", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...", "def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name", "def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the 
format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname", "def populate_proper_names():\n in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'proper_names')\n in_file = os.path.join(in_dir, 'all.txt')\n names = []\n counter = 0\n with open(in_file) as filehandle:\n for line in filehandle:\n data = line.strip().split('\\t')\n if len(data) == 3:\n counter += 1\n sortable, name, common = data\n if common.lower() == 'true':\n common = True\n else:\n common = False\n\n names.append(ProperName(lemma=name,\n sort=sortable,\n common=common))\n if counter % 1000 == 0:\n ProperName.objects.bulk_create(names)\n names = []\n\n ProperName.objects.bulk_create(names)", "def test_replacements_applied_before_force_name():\n\n conf = r\"\"\"\n {\"always_rename\": true,\n \"select_first\": true,\n\n \"force_name\": \"Scrubs\",\n\n \"input_filename_replacements\": [\n {\"is_regex\": true,\n \"match\": \"S01E02 - \",\n \"replacement\": \"\"}\n ]\n }\n \"\"\"\n\n out_data = run_tvnamer(\n with_files = ['S01E02 - Some File.avi'],\n with_config = conf)\n\n expected_files = ['S01E02 - Some File.avi']\n\n verify_out_data(out_data, expected_files, expected_returncode = 2)", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 
'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def initialise_first_name(name):\n\t# Split name on the comma and space\n\ttokens = name.split(\", \")\n\tlast_name = tokens[0]\n\t# Split tokens[1] which is a \"Title First Name\" string\n\tfirst_tokens = tokens[1].split(\" \")\n\t# Replace the first name with just the first initial\n\tfirst_tokens[1] = first_tokens[1][0]\n\t# Concatenate everything back together and return\n\treturn last_name + ', ' + \" \".join(first_tokens)", "def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. 
How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name", "def convert_from_text(self, file_name):\n with open(file_name, 'r') as reader:\n words_list = []\n for line in reader:\n words_list.extend(line.split())\n\n for word in set(words_list):\n if word.isalpha():\n self.insert_word(word.lower())\n else:\n self.insert_word(''.join([c for c in word if c.isalpha()]).lower())", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def update_professor_first_names(first_name_updates: List[NameUpdate]):\n for prof_id, new_first_name in first_name_updates:\n prof = Professor.objects.get(id=prof_id)\n prof.first_name = new_first_name\n prof.save()", "def replace_words_fun(self):\n\n cleaned_doc = []\n for word in str(self.doc).split():\n if word.lower() in self.replacement_list.keys():\n cleaned_doc.append(self.replacement_list[word.lower()])\n else:\n cleaned_doc.append(word)\n self.doc = ' '.join(cleaned_doc)", "def updateNameDatabase(name):\n \n with open(\"knownPeople.txt\") as f:\n knownPeople = f.readlines()\n f.close()\n\n knownPeople = [x.strip() for x in knownPeople] \n if name not in knownPeople:\n knownPeople.append(name)\n\n with open('knownPeople.txt', 'w') as f:\n for item in knownPeople:\n f.write(\"%s\\n\" % item)\n f.close()", "def load_first_names(data):\n first_name_objects = [FirstName(data=first_name) for first_name in data]\n FirstName.objects.bulk_create(first_name_objects)", "def simplify_fore_name(name, lower=False):\n if pandas.isna(name):\n return None\n assert isinstance(name, str)\n name_ = name.replace(\".\", \" \")\n words = name_.split()\n for word in words:\n word = word.strip(string.punctuation)\n if len(word) <= 1:\n continue\n if word.upper() == word and len(word) <= 3:\n continue\n if lower:\n word = word.lower()\n return word", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names", "def update_name(name, mapping): \n words = name.split()\n for w in range(len(words)):\n if words[w] in mapping:\n #print words[w]\n words[w] = mapping[words[w]]\n name = \" \".join(words)\n return name", "def search_and_replace(s_word: str, r_word: str, file):\n text = read_file(file)\n for j in range(len(text)):\n words = text[j].split(' 
')\n for i in range(len(words)):\n # if word is last in line and it is not empty line\n if words[i][-1:] == '\\n' and len(words[i]) > 2:\n if words[i][-2] in string.punctuation:\n sym = words[i][-2]\n if words[i][:-2] == s_word:\n words[i] = r_word + sym + '\\n'\n # if word's last symbol is punctuation\n elif words[i][-1] in string.punctuation:\n sym = words[i][-1]\n if words[i][:-1] == s_word:\n words[i] = r_word + sym\n # if last 2 symbols is 's (e.g. John's; cat's; land's)\n elif words[i][-2:] == '\\'s':\n if words[i][:-2] == s_word:\n words[i] = r_word + '\\'s'\n elif words[i] == s_word:\n words[i] = r_word\n text[j] = ' '.join(words)\n write_file(file, text)", "def fix_name_table(font):\n modified = False\n name_records = font_data.get_name_records(font)\n\n copyright_data = name_records[0]\n years = re.findall('20[0-9][0-9]', copyright_data)\n year = min(years)\n copyright_data = u'Copyright %s Google Inc. All Rights Reserved.' % year\n\n if copyright_data != name_records[0]:\n print('Updated copyright message to \"%s\"' % copyright_data)\n font_data.set_name_record(font, 0, copyright_data)\n modified = True\n\n for name_id in [1, 3, 4, 6]:\n record = name_records[name_id]\n for source in NAME_CORRECTIONS:\n if source in record:\n oldrecord = record\n record = record.replace(source, NAME_CORRECTIONS[source])\n break\n if record != name_records[name_id]:\n font_data.set_name_record(font, name_id, record)\n print('Updated name table record #%d from \"%s\" to \"%s\"' % (\n name_id, oldrecord, record))\n modified = True\n\n trademark_names = ['Noto', 'Arimo', 'Tinos', 'Cousine']\n trademark_name = None\n font_family = name_records[1]\n for name in trademark_names:\n if font_family.find(name) != -1:\n trademark_name = name\n break\n if not trademark_name:\n print('no trademarked name in \\'%s\\'' % font_family)\n else:\n trademark_line = TRADEMARK_TEMPLATE % trademark_name\n if name_records[7] != trademark_line:\n old_line = name_records[7]\n font_data.set_name_record(font, 7, trademark_line)\n modified = True\n print('Updated name table record 7 from \"%s\" to \"%s\"' % (old_line, trademark_line))\n\n if name_records[11] != NOTO_URL:\n font_data.set_name_record(font, 11, NOTO_URL)\n modified = True\n print('Updated name table record 11 to \"%s\"' % NOTO_URL)\n\n if name_records[_LICENSE_ID] != _SIL_LICENSE:\n font_data.set_name_record(font, _LICENSE_ID, _SIL_LICENSE)\n modified = True\n print('Updated license id')\n\n if name_records[_LICENSE_URL_ID] != _SIL_LICENSE_URL:\n font_data.set_name_record(font, _LICENSE_URL_ID, _SIL_LICENSE_URL)\n modified = True\n print('Updated license url')\n\n # TODO: check preferred family/subfamily(16&17)\n\n return modified", "def fix_names(users):\n for user in users:\n id = user['id']\n first_name = user['first_name'].strip()\n last_name = user['last_name'].strip()\n if not first_name and not last_name:\n # Empty name: skip\n print (f'Skipping empty name in record {id}')\n continue\n elif first_name == last_name:\n full_name = first_name\n elif first_name.endswith(last_name):\n full_name = first_name\n elif not last_name:\n full_name = first_name\n elif not first_name:\n full_name = last_name\n else:\n # In this case, the user has most likely entered the name\n # correctly split, so skip\n full_name = first_name + last_name\n print (f'Skipping already split name: {first_name} / {last_name} ({id})')\n continue\n \n print (f'Working on \"{full_name}\" ({id})')\n\n # Handle email addresses\n if '@' in full_name:\n print (f' - fixing email address')\n # 
Remove domain part\n e_name = full_name[:full_name.find('@')]\n if '+' in e_name:\n # Remove alias\n e_name = e_name[:e_name.find('+')]\n # Try to split name parts\n e_name = e_name.replace('.', ' ')\n e_name = e_name.replace('_', ' ')\n e_name = e_name.strip()\n if len(e_name) < 4:\n # Probably just initials: leave email as is\n pass\n else:\n full_name = e_name\n \n # Parse name\n name = nameparser.HumanName(full_name)\n name.capitalize()\n first_name = name.first\n last_name = name.last\n print (f' - splitting name into: {first_name} / {last_name} ({id})')\n yield (first_name, last_name, id)", "def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'): \n name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections\n 'Alisson':'Alisson Ramses Becker',\n 'Allan':'Allan Marques Loureiro',\n 'André Gomes':'André Filipe Tavares Gomes',\n 'Angelino':'José Ángel Esmorís Tasende',\n 'Bernard':'Bernard Anício Caldeira Duarte', # Everton\n 'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City\n 'Bernardo':'Bernardo Fernandes da Silva Junior', # \n 'Borja Bastón':'Borja González Tomás',\n 'Chicharito':'Javier Hernández Balcázar',\n 'David Luiz':'David Luiz Moreira Marinho', \n 'Ederson':'Ederson Santana de Moraes',\n 'Emerson':'Emerson Palmieri dos Santos',\n 'Fabinho':'Fabio Henrique Tavares',\n 'Felipe Anderson':'Felipe Anderson Pereira Gomes',\n 'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United\n 'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds\n 'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea\n 'Jonny':'Jonathan Castro Otto', # Wolves\n 'Jorginho':'Jorge Luiz Frello Filho', # Chelsea\n 'Jota':'José Ignacio Peleteiro Romallo',\n 'Kepa':'Kepa Arrizabalaga',\n 'Kiko Femenía':'Francisco Femenía Far',\n 'Lucas Moura':'Lucas Rodrigues Moura da Silva',\n 'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea\n 'Raphinha':'Raphael Dias Belloli',\n 'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',\n 'Rodri':'Rodrigo Hernandez',\n 'Rúben Dias':'Rúben Santos Gato Alves Dias',\n 'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',\n 'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',\n 'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa\n 'Wesley':'Wesley Moraes',\n 'Willian':'Willian Borges Da Silva',\n }\n understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)\n manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],\n right_on=['player_name', 'date'], how=join) # Merge using player name and date of game\n return manual_merge", "def replace_includes(self, file_name):\n\n indexBegin = 0\n indexEnd = 0\n text = self.dir_helper.read_file(file_name)\n while indexBegin != -1:\n indexBegin = text.find('\\input{', indexBegin+1)\n indexEnd = text.find('}', indexBegin+1)\n text_to_replace = text[indexBegin:indexEnd+1]\n if indexBegin != -1:\n # print 'text_to_replace : ' + text_to_replace\n new_path = self.construct_path(text_to_replace)\n new_text = self.replace_includes(file_name = new_path)\n text = text.replace(text_to_replace, new_text)\n\n return text", "def persons_from_names(self, name_table, known_persons=None, output_file=None, output_file_format=None, status_messages=True):\r\n\r\n\t\t# Save start time:\r\n\t\tzeit=int(time.time())\r\n\r\n\t\t####\r\n\t\t## Prepare input table\r\n\t\t####\r\n\r\n\t\t# Recognize input format\r\n\t\tif \"pandas\" in str(type(name_table)):\r\n\t\t\tinput_format = 
\"pandas\"\r\n\t\telif \"list\" in str(type(name_table)):\r\n\t\t\tinput_format = \"records\"\r\n\t\telif \"str\" in str(type(name_table)):\r\n\t\t\tif \".csv\" in name_table:\r\n\t\t\t\tinput_format = \"csv\"\r\n\t\t\telif \"xls\" in name_table:\r\n\t\t\t\tinput_format = \"xls\"\r\n\r\n\t\t# Convert table to internal data format\r\n\t\tif input_format != \"records\":\r\n\t\t\tname_table = self._convert_table_to_records(name_table, input_format)\r\n\r\n\t\t# Identify forename col\r\n\t\tname_table_format = self._identify_cols(name_table, \"default table\")\r\n\r\n\t\t# Add id column if missing\r\n\t\tif name_table_format[\"columns\"][\"id_column\"] is None:\r\n\t\t\tself._add_id_col(name_table)\r\n\t\t\tname_table_format[\"columns\"][\"id_column\"]=\"name_id\"\r\n\r\n\t\t# Same as above for known persons table\r\n\t\tif known_persons is not None:\r\n\t\t\t# Convert table to internal data format\r\n\t\t\tif input_format != \"records\":\r\n\t\t\t\tknown_persons = self._convert_table_to_records(known_persons, input_format)\r\n\r\n\t\t\t# Identify forename col\r\n\t\t\tknown_persons_format = self._identify_cols(known_persons, self._table_with_unique_names)\r\n\r\n\t\t\tif known_persons_format[\"columns\"][\"id_column\"] is None:\r\n\t\t\t\tself._add_id_col(known_persons)\r\n\t\t\t\tknown_persons_format[\"columns\"][\"id_column\"]=\"name_id\"\r\n\r\n\t\t\tif known_persons_format[\"columns\"][\"year_column\"] is None:\r\n\t\t\t\tself._add_empty_col(known_persons, \"year\")\r\n\t\t\t\tknown_persons_format[\"columns\"][\"year_column\"]=\"year\"\r\n\r\n\t\t####\r\n\t\t## Sort input data into a tree structure according to surname\r\n\t\t####\r\n\r\n\t\t# Internal data structure by surname\r\n\t\tself._flat_tree=collections.OrderedDict()\r\n\r\n\t\tif status_messages:\r\n\t\t\tprint(\"Tree creation in progress...\")\r\n\t\tself._make_flat_tree(name_table, self._flat_tree, name_table_format)\r\n\t\tif known_persons is not None:\r\n\t\t\t# Identify forename col\r\n\t\t\tself._make_flat_tree(known_persons, self._flat_tree, known_persons_format)\r\n\r\n\t\t####\r\n\t\t## Person identification from forename\r\n\t\t####\r\n\r\n\t\tif status_messages:\r\n\t\t\tprint(\"Clustering in progress...\")\r\n\t\tcluster_list={}\r\n\t\tself._cluster_number = 0\r\n\r\n\t\t# to record in which clusters the original records and their virtual ones are assigned \r\n\t\tcluster_number_list = {}\r\n\t\tself._cluster(self._flat_tree, cluster_list, cluster_number_list)\r\n\r\n\t\tif self._split_by_time_gap and name_table_format[\"columns\"][\"year_column\"] is not None:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(format(\"Splitting entries with more than {} years between chronologically succeeding entries...\", self._maximum_time_gap))\r\n\t\t\tself._time_gap(cluster_list, self._maximum_time_gap, cluster_number_list, action=\"split\")\r\n\r\n\t\tif self._detect_marriages:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Detecting marriages and combining entries with marriage-related surname change...\")\r\n\t\t\tself._rework_for_marriages(cluster_list, cluster_number_list)\r\n\r\n\t\tif self._empty_clusters_remove:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Tidying up...\")\r\n\t\t\tself._remove_empty_cluster(cluster_list)\r\n\r\n\t\t####\r\n\t\t## Processing results\r\n\t\t####\r\n\r\n\t\t# Save authors to file \r\n\t\tif output_file is not None:\r\n\t\t\tif status_messages:\r\n\t\t\t\tprint(\"Saving the results\")\r\n\t\t\tself._save_to_file(cluster_list, output_file_format, output_file, 
name_table_format)\r\n\r\n\t\t# if status_messages:\r\n\t\t# \tprint( \"Name matching completed in {} seconds. Identified {} persons.\".format( str( int(time.time()) - zeit ) , str(len(cluster_list)) ) )\r\n\r\n\t\tif input_format==\"pandas\":\r\n\t\t\treturn self._convert_records_to_pandas(self._make_flat_result(cluster_list, name_table_format))\r\n\t\telif input_format==\"records\" and \"dict\" in str(type(name_table[0])):\r\n\t\t\treturn [ dict(record) for record in self._make_flat_result(cluster_list, name_table_format) ]\r\n\t\telse:\r\n\t\t\treturn self._make_flat_result(cluster_list, name_table_format)", "def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence", "def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names", "def find_pseudonyms(original_name, gender, topk):\n firstnames = load_firstnames(gender)\n model = load_model()\n whitelist = LetterBag(slugify.slugify(\n WORD_SPLIT_PATTERN.sub(\"\", original_name)))\n for firstname in firstnames:\n if not whitelist.includes(firstname):\n continue\n for lastname, proba in generate_word(model, whitelist.sub(firstname), topk):\n yield firstname.surface, lastname, proba", "def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)\n\n to_return = []\n for name in names_found:\n begin_positions = [m.start() for m in re.finditer(name, sentence)]\n for begin in begin_positions:\n to_return.append((begin, begin + len(name)))\n # begin = sentence.lower().index(name.lower())\n # end = begin + len(name)\n # to_return.append((begin, end))\n\n return to_return", "def candidate_first_name(self, candidate_first_name):\n\n self._candidate_first_name = candidate_first_name", "def candidate_first_name(self, candidate_first_name):\n\n self._candidate_first_name = candidate_first_name", "def read_names(male_names_file_path, female_names_file_path):\n\n names = set()\n\n with open(male_names_file_path, \"r\") as f1:\n for name in f1:\n names.add(name.strip().lower())\n\n with 
open(female_names_file_path, \"r\") as f2:\n for name in f2:\n names.add(name.strip().lower())\n\n return names", "def match_specific_name(name: str, specific_names: list) -> str:\n c = clean_specific_name(name)\n if c == \"\":\n return c\n else:\n y = \"\"\n for x in specific_names:\n matchlist = x.variations.split(\";\")\n if c in matchlist:\n y = x.name\n return y", "def replace(self, pat, repl):\n re_pat = re.compile(pat)\n for infilename in self.file_names:\n infile = open(infilename, 'r')\n for line in infile:\n line = line.rstrip()\n line1 = re_pat.sub(repl, line)\n if line1 != line:\n print 'Repl: %s' % (line1, )", "def set_name(song: str) -> str:\n # Discard unwanted lines\n junk = ['', '[Chorus]', '[Bridge]']\n lines = [line for line in song.split('\\n') if line not in junk and len(\n line.split(' ')) != 1]\n\n # Choose random line, start and stop indicies\n line = choice(lines).split(' ')\n start = randint(0, len(line)-2)\n stop = randint(start+1, len(line)-1)\n line = line[start:stop+1]\n\n # Add words within range to string and capitalise the first word\n song_name = []\n punc = set([',', '.', '\"'])\n for idx, word in enumerate(line):\n # Check for trailing punctuation and remove unless ellipsis\n if idx == len(line)-1 and word[-1] in punc and word[-3:] != \"...\":\n word = word[:-1]\n song_name.append(capwords(word))\n return ' '.join(song_name)", "def parse_infile_names(self):\n\n rv, slist = UTIL.list_minus_pref_suf(self.infiles,'out.ss_review.','.txt')\n if rv < 0: return\n if rv > 0:\n if self.verb > 1: print('++ trying to get SID from glob form')\n slist = UTIL.list_minus_glob_form(self.infiles, strip='dir')\n else:\n if self.verb > 1: print(\"++ have SIDs from 'out.ss_reiview' form\")\n\n if len(slist) == 0:\n if self.verb > 1: print(\"-- empty SID list\")\n return\n\n # make sure names are unique and not empty\n if not UTIL.vals_are_unique(slist):\n if self.verb > 1: print('-- SIDs not detected: not unique')\n return\n minlen = min([len(ss) for ss in slist])\n if minlen < 1:\n if self.verb > 1: print('-- SIDs not detected: some would be empty')\n return\n\n # we have a subject list\n self.snames = slist\n\n # now go for GID, start by replacing SIDs in infiles\n newfiles = [fname.replace(slist[ind], 'SUBJ') for ind, fname in\n enumerate(self.infiles)]\n\n if UTIL.vals_are_constant(newfiles):\n print('-- no groups detected from filenames')\n return\n\n # okay, try to make a group list\n glist = UTIL.list_minus_glob_form(newfiles)\n\n # cannot have dirs in result\n for gid in glist:\n if gid.find('/') >= 0:\n if self.verb>1: print('-- no GIDs, dirs vary in multiple places')\n return\n\n minlen = min([len(ss) for ss in glist])\n if minlen < 1:\n if self.verb > 1: print('-- GIDs not detected: some would be empty')\n return\n\n if self.verb > 1: print(\"++ have GIDs from infiles\")\n self.gnames = glist", "def task_1_fix_names_start_letter(data: DT) -> DT:\n for dic in data:\n if dic.get('name'):\n dic['name'] = dic['name'].capitalize()\n return data", "def test_first_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_first_name(input_val)\n self.assertEqual(output_val, self.line.first_name)", "def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = 
self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"", "def fix_names(title):\n name_dict = {\n 'NAME Dra dSph':'DraI',\n 'NAME Leo I dSph':'LeoI',\n 'NAME Leo B':'LeoII',\n 'NAME UMi Galaxy':'UMiI',\n 'NAME Sculptor Dwarf Galaxy':'SclI',\n 'NAME Carina dSph':'CarI',\n 'NAME Dwarf Elliptical Galaxy in Sex':'SxtI',\n 'NAME Fornax Dwarf Spheroidal':'FnxI',\n 'NAME Bootes Dwarf Spheroidal Galaxy':'BooI',\n 'NAME Cetus II':'CetII',\n 'NAME Col I':'ColI',\n 'NAME Gru II':'GruII',\n 'NAME Coma Dwarf Galaxy':'CBerI',\n 'NAME Segue 1':'Seg1'}\n\n try:\n return name_dict[title]\n except KeyError:\n return title", "def _install_partner_firstname(self):\n # Find records with empty firstname and lastname\n records = self.search([(\"firstname\", \"=\", False),\n (\"lastname\", \"=\", False)])\n\n # Force calculations there\n records._inverse_name()", "def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid", "def TransformNames(self) -> _n_2_t_0[str]:", "def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict", "def find_names(s):\n \"*** YOUR CODE HERE ***\"", "def updateCountryNames(self):\n try:\n with open('countryNameMapping.json', 'r') as file:\n name_mapping = json.loads(file.read())\n except:\n sys.exit('countryNameMapping.json file is unavailable in current directory.')\n \n for key, value in name_mapping.items():\n self.covid_df.replace(key, value, inplace=True)\n \n try:\n with open('countryNameISO2.json', 'r') as file:\n self.name_iso2_mapping = json.loads(file.read())\n except:\n print('countryNameISO2.json file is unavailable in current directory, creating file...')\n self.writeCountryCodeFile()\n print('Re-importing required JSONs...')\n self.updateCountryNames()", "def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 
0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()", "def restore_names(input_file, output_file):\n\n if not dataModel.loadModel(input_file):\n print(\"Couldn't open input file\")\n return 1\n\n model = dataModel.getModel()\n\n restore_names_in(model.getCompartments())\n restore_names_in(model.getMetabolitesX())\n restore_names_in(model.getModelValues())\n restore_names_in(model.getReactions())\n restore_names_in(model.getEvents())\n\n dataModel.saveModel(output_file, True)\n\n return 0", "def _first_name_sql(self, first_name, tolerance=1):\n nicknames = self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions", "def loadFirsts(self, names):\n\n if os.path.exists(names):\n self.firsts, self.w_firsts = self.load(names)\n else:\n self.firsts = [names]\n self.w_firsts = None\n\n return", "def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)", "def analyze_input_files(name_file,title_file,known_for_file):\n\n tconst_set = set()\n\n remove_first_line(title_file)\n title_in = codecs.open(title_file,'r','utf-8')\n title_table = title_in.read().splitlines(True)\n title_in.close()\n\n #Prepare set of tconst values from title table\n for t in title_table:\n r = t.rstrip().split(\"\\t\")\n tconst_set.add(r[0])\n\n remove_first_line(name_file)\n f_in = codecs.open(name_file, 'r','utf-8')\n f_out_name = codecs.open('name_temp.tsv', 'w','utf-8')\n f_out_relation = codecs.open(known_for_file, 
'w','utf-8')\n table = f_in.read().splitlines(True)\n\n for i in table:\n k = i.rstrip().split(\"\\t\")\n line_for_name = \"\\t\".join(k[:-1]) + \"\\n\"\n f_out_name.write(line_for_name)\n\n actor_relations = k[-1].split(\",\")\n for relation in actor_relations:\n if relation in tconst_set:\n line_for_relation = k[0] + \"\\t\" + relation + \"\\n\"\n f_out_relation.write(line_for_relation)\n\n f_in.close()\n f_out_name.close()\n f_out_relation.close()\n\n # Delete original file and rename temp file to original name\n os.remove(name_file)\n os.rename('name_temp.tsv',name_file)", "def Augment_English_File(self, Sentences_File):\r\n # Explain Of The Function #\r\n\r\n # Integer #\r\n Index = 0\r\n\r\n # Stop Words #\r\n English_Stop_Words = set(stopwords.words('english'))\r\n\r\n try:\r\n # Remove Unnecessary Chars #\r\n while Index < len(Sentences_File):\r\n # 1 #\r\n Sentences_File[Index] = re.sub('[\\t]', '', Sentences_File[Index])\r\n if '\\t' in Sentences_File[Index]:\r\n Sentences_File[Index].replace('\\t', '')\r\n # 2 #\r\n Sentences_File[Index] = re.sub('[,]', ' ', Sentences_File[Index])\r\n if ',' in Sentences_File[Index]:\r\n Sentences_File[Index].replace(',', ' ')\r\n # 3 #\r\n Sentences_File[Index] = re.sub('[:]', '', Sentences_File[Index])\r\n if ':' in Sentences_File[Index]:\r\n Sentences_File[Index].replace(':', ' ')\r\n # 4 #\r\n Sentences_File[Index] = re.sub('[.]', '', Sentences_File[Index])\r\n if '.' in Sentences_File[Index]:\r\n Sentences_File[Index].replace('.', ' ')\r\n # 5 #\r\n Sentences_File[Index] = re.sub('[\\']', '', Sentences_File[Index])\r\n if '\\'' in Sentences_File[Index]:\r\n Sentences_File[Index].replace('\\'', ' ')\r\n # 6 #\r\n Sentences_File[Index] = re.sub('[`]', '', Sentences_File[Index])\r\n if '`' in Sentences_File[Index]:\r\n Sentences_File[Index].replace('`', ' ')\r\n\r\n # 7 #\r\n Sentences_File[Index] = re.sub('[?]', '', Sentences_File[Index])\r\n if '?' 
in Sentences_File[Index]:\r\n Sentences_File[Index].replace('?', ' ')\r\n\r\n # 8 #\r\n Sentences_File[Index] = re.sub('[;]', '', Sentences_File[Index])\r\n if ';' in Sentences_File[Index]:\r\n Sentences_File[Index].replace(';', ' ')\r\n\r\n # Split The Sentence To Words #\r\n Sentences_File_Split = Sentences_File[Index].split()\r\n\r\n # Integer #\r\n Local_Index = 0\r\n\r\n # Iterate On List.Split() , And Remove Stop Words #\r\n while Local_Index < len(Sentences_File_Split):\r\n if Sentences_File_Split[Local_Index].lower() in English_Stop_Words:\r\n Sentences_File_Split.pop(Local_Index)\r\n pass\r\n else:\r\n Local_Index += 1\r\n pass\r\n\r\n # Clear Sentence After All The Change's #\r\n Sentences_File[Index] = None\r\n Sentences_File[Index] = ' '.join(Specific_Word for Specific_Word in Sentences_File_Split)\r\n\r\n # Update The Index For Next Sentence #\r\n Index += 1\r\n pass\r\n\r\n # Make Data Augmentation #\r\n Index = 0\r\n while Index < len(Sentences_File):\r\n New_Sentence_After_Augmentation = self.Make_Data_Augmentation_To_Sentence(Sentences_File[Index])\r\n Sentences_File[Index] = New_Sentence_After_Augmentation\r\n Index += 1\r\n pass\r\n\r\n return Sentences_File\r\n\r\n except Exception as Object_Exception:\r\n # Write Exception To Console #\r\n print()\r\n print(\"===========================================================================\")\r\n print(\"\\tThere Have Exception In Function - \" + \"Augment_English_File !\")\r\n print(\"\\tBecause - \" + str(Object_Exception))\r\n print(\"===========================================================================\")\r\n print()\r\n\r\n return None\r\n pass", "def _get_names(self):\n if len(self.firstnames):\n return self.firstnames, self.lastnames\n\n if os.path.exists(\"/code/api/app/utils/names.txt\"):\n with open(\"/code/api/app/utils/names.txt\") as file_with_names:\n names = file_with_names.readlines()\n else:\n # why yes, these are names of African Hollywood actors (according to Wikipedia)\n names = [\"Mehcad Brooks\", \"Malcolm Barrett\", \"Nick Cannon\", \"Lamorne Morris\", \"Neil Brown Jr.\",\n \"William Jackson Harper\", \"Marques Houston\", \"Jennifer Hudson\", \"Alicia Keys\", \"Meghan Markle\",\n \"Beyonce Knowles\", \"Jesse Williams\", \"Lance Gross\", \"Hosea Chanchez\", \"Daveed Diggs\",\n \"Damon Wayans Jr.\", \"Columbus Short\", \"Terrence Jenkins\", \"Ron Funches\", \"Jussie Smollett\",\n \"Donald Glover\", \"Brian Tyree Henry\", \"Gabourey Sidibe\", \"Trai Byers\", \"Robert Ri'chard\",\n \"Arjay Smith\", \"Tessa Thompson\", \"J.Lee\", \"Lauren London\", \"DeVaughn Nixon\", \"Rob Brown\", ]\n for _name in names:\n split_name = _name.strip().split(\" \")\n self.firstnames.append(split_name[0])\n lastname = \" \".join(split_name[1:]) if len(split_name) > 1 else \"\"\n self.lastnames.append(lastname)\n return self.firstnames, self.lastnames", "def extract_names(pages: Iterable[tuple[int, list[str]]]) -> DataT:\n found_first = False\n current_name: dict[str, Any] | None = None\n current_label: str | None = None\n current_lines: list[str] = []\n in_headings = True\n\n def start_label(label: str, line: str) -> None:\n nonlocal current_label, current_lines\n assert current_name is not None\n assert current_label is not None\n if label in current_name:\n if label in (\"Syntype\", \"Type Locality\"):\n label = f\"Syntype {line}\"\n assert (\n label not in current_name\n ), f\"duplicate label {label} in {current_name}\"\n current_name[current_label] = current_lines\n current_label = label\n current_lines = [line]\n\n 
for page, lines in pages:\n if current_name is not None:\n current_name[\"pages\"].append(page)\n for line in lines:\n if not found_first:\n if line.strip() in (\"TYPE SPECIMENS\", \"SPECIMENS\"):\n found_first = True\n continue\n # ignore family/genus headers\n if re.match(\n (\n r\"^\\s*(Genus|Family|Subfamily|Suborder|Order) [A-Z][a-zA-Z]+\"\n r\" [a-zA-Z\\.’, \\-]+(, \\d{4})?$\"\n ),\n line,\n ):\n in_headings = True\n continue\n # ignore blank lines\n if not line:\n continue\n if in_headings:\n if line.startswith(\" \"):\n continue\n else:\n in_headings = False\n if line.startswith(\" \"):\n current_lines.append(line)\n elif re.match(r\"^[A-Z][A-Z a-z-]+: \", line):\n start_label(line.split(\":\")[0], line)\n elif line.startswith(\"Lectotype as designated\"):\n start_label(\"Lectotype\", line)\n elif line.startswith(\"Neotype as designated\"):\n start_label(\"Neotype\", line)\n elif line.startswith(\n (\n \"This specimen\",\n \"Type \",\n \"No type\",\n \"There are\",\n \"No additional\",\n \"All \",\n \"Subspecies of \",\n \"Neotype designated \",\n \"Padre Island\",\n )\n ):\n start_label(\"comments\", line)\n elif line.startswith(\n (\"Secondary junior\", \"Primary junior\", \"Junior primary\")\n ):\n start_label(\"homonymy\", line)\n elif re.match(r\"^[\\d/]+\\. \", line):\n start_label(line.split(\".\")[0], line)\n elif line.startswith(\"USNM\"):\n start_label(line.split(\".\")[0], line)\n elif (\n current_label not in (\"name\", \"verbatim_citation\", \"homonymy\")\n and \":\" not in line\n ):\n # new name\n if current_name is not None:\n assert current_label is not None\n current_name[current_label] = current_lines\n assert any(\n field in current_name\n for field in (\n \"Holotype\",\n \"Type Locality\",\n \"Lectotype\",\n \"Syntype\",\n \"Syntypes\",\n \"No name-bearing status\",\n \"Neotype\",\n )\n ), current_name\n yield current_name\n current_name = {\"pages\": [page]}\n current_label = \"name\"\n current_lines = [line]\n elif current_label == \"name\":\n if re.search(\n r\"\\d|\\b[A-Z][a-z]+\\.|\\baus\\b|\\bDas\\b|\\bPreliminary\\b|\\., \", line\n ):\n start_label(\"verbatim_citation\", line)\n else:\n # probably continuation of the author\n current_lines.append(line)\n elif (\n current_label == \"verbatim_citation\"\n or current_label == \"homonymy\"\n or line.startswith(\"= \")\n ):\n start_label(\"synonymy\", line)\n else:\n assert False, f\"{line!r} with label {current_label}\"\n assert current_label is not None\n assert current_name is not None\n current_name[current_label] = current_lines\n yield current_name", "def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx", "def sample_first_name(first_name_file, num_samples):\n\n df = pd.read_csv(first_name_file, header=None)\n df.columns = [\"name\", \"gender\", \"count\"]\n df = df[(df[\"count\"] > 10)]\n names = 
df[\"name\"].sample(n=num_samples, random_state=2021, replace=True).apply(str.title)\n\n return list(names.values)", "def _remove_title_from_name(titles: tuple, text: str) -> str:\n for title in titles:\n if f'{title}.' in text:\n return text.replace(f'{title}.', empty_string).replace(' ', space).strip()\n elif title in text:\n return text.replace(title, empty_string).replace(' ', space).strip()\n return text", "def rename_proteins(names_csv):\n\n names_frame = pd.read_csv(names_csv)\n\n for _, row in names_frame.iterrows():\n mol_target = row['name']\n alternate_name = row['alternate_name']\n # Remove the replacement of '_0' - this was inconsistently applied as some folders are '_1'\n # The Protein code will be modified to be of format 'xtal_directory:alternate_name'\n new_name = str(mol_target).strip() + ':' + str(alternate_name).strip()\n\n prots = Protein.objects.filter(code=mol_target)\n for prot in prots:\n logger.debug(\"Changing prot.code to '%s'\", new_name)\n prot.code = new_name\n prot.save()", "def test_find_first_author_initial(self):\n inv_search = 'firstauthor:\"ellis, j*\"'\n spi_search = 'find fa j ellis'\n self._compare_searches(inv_search, spi_search)", "def test_name_arg_skips_replacements():\n\n conf = r\"\"\"\n {\"always_rename\": true,\n \"select_first\": true,\n\n \"force_name\": \"Scrubs\",\n\n \"input_filename_replacements\": [\n {\"is_regex\": true,\n \"match\": \"Scrubs\",\n \"replacement\": \"Blahblahblah\"}\n ]\n }\n \"\"\"\n\n out_data = run_tvnamer(\n with_files = ['S01E02 - Some File.avi'],\n with_config = conf)\n\n expected_files = ['Scrubs - [01x02] - My Mentor.avi']\n\n verify_out_data(out_data, expected_files)", "def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0", "def replace(lines):\n for index, line in enumerate(lines):\n if not line == '\\n':\n token_line = tokenizer.tokenize_line(line)\n for ind, tok in enumerate(token_line):\n if token_line[ind][1] in replacement_dic.keys() and token_line[ind][1] not in ignore_variable:\n if ind > 1 and token_line[ind-2][1] in import_list:\n continue\n if token_line[ind][0] == token.NAME and token_line[ind+1][1] == '(':\n continue\n token_line[ind][1] = replacement_dic.get(token_line[ind][1])\n\n lines[index] = tokenizer.untokenize_line(token_line)\n return lines", "def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)", "def assignWordList(filename, thisDataEntry):\n oldArr = []\n newArr = []\n try:\n with open(filename, encoding=\"latin-1\") as file:\n lines = [line.rstrip() for line in file]\n idx = 0\n while(lines[idx] != \"***\"):\n oldArr.append(lines[idx].lower())\n idx += 1\n idx += 1 #Skip the delimitter\n for x in range(idx, len(lines)):\n newArr.append(lines[x].lower())\n file.close()\n except IOError:\n print(\"Error opening: \" + str(filename))\n for x in oldArr:\n thisDataEntry.old[x] = 0\n for y in newArr:\n thisDataEntry.new[y] = 0", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def get_names(lines): \n next = False \n names = []\n for line in 
lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names", "def remove_words(headlines,removal_words):\r\n for i in range(len(headlines)):\r\n for word in removal_words:\r\n headline = headlines[i].lower()\r\n pos = headline.find(word) #Find start position of word we want to remove\r\n if pos != -1:\r\n end_pos = pos + len(word) #End position of word we want to remove\r\n try:\r\n if headline[end_pos+1] == \"s\":\r\n new_headline = headline[:pos]+headline[(end_pos + 1):] #Remove entire word, even if plural.\r\n headlines[i] = new_headline # Replace old headline with new headline.\r\n else:\r\n new_headline = headline[:pos] + headline[end_pos:]\r\n headlines[i] = new_headline\r\n except IndexError: #Final word of headline might be singular.\r\n new_headline = headline[:pos] + headline[end_pos:]\r\n headlines[i] = new_headline\r\n return headlines", "def main(files: List[Path]):\n show_filenames = len(files) > 1\n for file in files:\n with file.open() as f:\n for m in find_camel(f):\n print(pretty_match(m, filename=file if show_filenames else None))", "def update_network_name(info_file, new_example_file, default_name, model_name):\n # load file\n with info_file.open() as fr:\n lines = fr.read()\n\n if default_name != model_name:\n old_name_list = [default_name, default_name.upper()]\n new_name_list = [model_name, model_name.upper()]\n\n # replace file\n for i in range(len(old_name_list)):\n lines = re.sub(old_name_list[i], new_name_list[i], lines)\n\n # save new example file\n with new_example_file.open(\"w\") as fw:\n fw.write(lines)\n\n return new_example_file", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...", "def set_first_name(self, first_name):\n self.first_name = first_name", "def applies_capitalization(self, token_list, original_sentence):\n new_token_list = []\n original_token_list = original_sentence.split(\" \")\n for idx, token in enumerate(token_list):\n if idx < len(original_token_list):\n if token.lower() == original_token_list[idx].lower():\n new_token_list.append(original_token_list[idx])\n else:\n new_token_list.append(token)\n else:\n new_token_list.append(token)\n\n return new_token_list", "def replace_parts(file, file_out, replacements):\n # Read in original file\n with open(file, \"r\") as f:\n lines = f.readlines()\n\n # Replace lines in file\n for i, line in enumerate(lines[:]):\n # Replace file name and tag\n for key, val in replacements.items():\n if key in line:\n lines[i] = line.replace(str(key), str(val))\n\n with open(file_out, \"w\") as f:\n f.writelines(lines)", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a 
alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def gibber(self): \n for x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())", "def titlecase(original: str, delimiter: str = \" \", small_words: list = None) -> str:\n _small_words = [\"of\", \"in\", \"at\", \"to\", \"the\", \"on\", \"an\", \"a\"]\n if small_words:\n _small_words = list(set(_small_words + small_words))\n\n original_splitted = original.split(delimiter)\n result = []\n\n for word in original_splitted:\n word = word.lower()\n if word in _small_words:\n result.append(word)\n else:\n result.append(word.capitalize())\n\n return delimiter.join(result)", "def convert_name(self, human_name):\n\n human_name = HumanName(human_name)\n if human_name.suffix:\n self.metadata[\"gutenberg_name_suffix\"] = human_name.suffix\n human_name.suffix = \"\"\n if human_name.nickname:\n # LOGGER.debug(\"%s nickname: %s\", str(human_name), human_name.nickname)\n no_nickname = copy.copy(human_name)\n no_nickname.nickname = \"\"\n first_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.first, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.first,\n # re.UNICODE\n # ),\n # human_name.nickname\n # )\n if first_name_match and len(first_name_match.group(0)) >= len(human_name.first):\n human_name.first = first_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.first):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"] = set([str(no_nickname)])\n middle_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.middle, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.middle, re.UNICODE\n # ),\n # human_name.nickname\n # )\n if middle_name_match and len(middle_name_match.group(0)) >= len(human_name.middle):\n human_name.middle = middle_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.middle):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"].add(str(no_nickname))\n return human_name", "def countByName(lastName, firstName, filename):\r\n\r\n nameCounter = 1 #This variable serves as a counter and it ranges from 0 to 5, which accounts to the line numbers.\r\n isCorrectName = False #This variable evaluates whether the names compare to the names on the text.\r\n gmedals = 0 #Counts the amount of gold medals\r\n smedals = 0 #Counts the amount of silver medals\r\n bmedals = 0 #Counts the amount of bronze medals\r\n\r\n with open(filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n line = line.strip().upper()\r\n if nameCounter == 1:\r\n if line == lastName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 2 and isCorrectName is True:\r\n if line == firstName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 4:\r\n if isCorrectName is True and line == '1':\r\n gmedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '2':\r\n smedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '3':\r\n bmedals += 1\r\n\r\n if nameCounter == 5:\r\n nameCounter = 0\r\n isCorrectName = False\r\n\r\n nameCounter += 1\r\n\r\n return 
gmedals, smedals, bmedals", "def process_file(self, filename, order=2):\n fp = open(filename)\n self.skip_gutenberg_header(fp)\n\n for line in fp:\n for word in line.rstrip().split():\n self.process_word(word, order)\n\n #print(\">>>DEBUG the suffix map\")\n #i = 0\n #for k,v in self.suffix_map.items():\n # print(\"key is {}, value is {}\".format(k, v))\n # i += 1\n # if i > 10:\n # break", "def testMapTitle(self) -> None:\n def testNewTitle(name:str, solution:list[float]):\n self._nameClassifierBuilder._initializeNameMapping()\n title = self._nameClassifierBuilder._getTitle(name)\n self._nameClassifierBuilder._mapTitle(title)\n self.assertEquals(solution, self._nameClassifierBuilder._currentNameMapping)\n\n solution = [1.0,0.0,0.0,0.0,0.0,0.0]\n testNewTitle(\"jslghaldfaCollgja lgn awfggad\", solution)\n \n solution = [0.0,0.0,1.0,0.0,0.0,0.0]\n testNewTitle(\"fsdj Mrs. afjdlgaj\", solution)\n\n solution = [0.0,0.0,0.0,0.0,0.0,1.0]\n testNewTitle(\"jslghaldfagja lgn awfggad\", solution)", "def name_lookup(first_name):\n if first_name == \"Joe\": \n last_name = \"Warren\"\n elif first_name == \"Scott\": \n last_name = \"Rixner\"\n elif first_name == \"John\": \n last_name = \"Greiner\"\n elif first_name == \"Stephen\":\n last_name = \"Wong\"\n else: \n last_name = \"Error: Not an instructor\"\n return last_name", "def find_replace(text, list):\r\n\tfor pair in list:\r\n\t\tif pair[0] in text:\r\n\t\t\ttext = text.replace(pair[0], pair[1])\r\n\treturn text", "def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]", "def add_title(df, search_for):\n # Copy the dataset.\n new_df = df.copy()\n # Create new column with the title in the name\n new_df['Title'] = df.Name.str.extract(r', (\\w*)\\.')\n # Create regex to find title\n regex = []\n for title in search_for:\n regex.append(r', ' + title + r'\\.')\n # Find the which names contain any of the titles to search for.\n title_in_list = df.Name.str.contains('|'.join(regex))\n # Replace the titles that are not in list for 'Other'.\n new_df.loc[~title_in_list, 'Title'] = 'Other'\n\n return new_df", "def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names", "def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords", "def get_initials(the_fullname):\n my_initials = ''\n for name in the_fullname.split():\n my_initials += name[0].upper()\n return my_initials", "def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n 
self.update_callback()\n self.backend_callback()\n self.show_page()", "def unify_profile_name(first_name: str, last_name: str):\n concat_title = first_name + \" \" + last_name\n # Strip leading and trailing spaces and then replace double white space two times\n # (3 -> 2 -> 1)\n concat_title = concat_title.strip().replace(\" \", \" \"). replace(\" \", \" \")\n\n # The unified title is again the lowercase version without spaces\n unified_title = concat_title.replace(\" \", \"\").lower()\n unified_title = re.sub('[-_.,:;\\|/\\{\\}\\(\\)\\[\\]\\'\\\"\\+]','', unified_title)\n trimmed_unified_title = unified_title[:150]\n return trimmed_unified_title, concat_title" ]
[ "0.59615916", "0.58315635", "0.57794553", "0.568856", "0.56879234", "0.5680582", "0.5637345", "0.56369686", "0.5613315", "0.5589724", "0.5437056", "0.5363194", "0.5351406", "0.53407454", "0.5291481", "0.52826524", "0.5278052", "0.52475464", "0.52253675", "0.5195565", "0.51936525", "0.5191152", "0.51725", "0.51689225", "0.51536816", "0.51389253", "0.51385516", "0.5115972", "0.5113018", "0.51078", "0.50831366", "0.50831366", "0.5083049", "0.50828385", "0.507844", "0.5078332", "0.5075912", "0.50685054", "0.50650406", "0.5063811", "0.5063164", "0.5050842", "0.50113416", "0.50097364", "0.5002032", "0.4994434", "0.49925727", "0.49860612", "0.49822518", "0.49746558", "0.49682933", "0.49681902", "0.49514645", "0.4947481", "0.49376932", "0.4928543", "0.4922846", "0.49183255", "0.49071", "0.49041072", "0.48941427", "0.48847547", "0.48753184", "0.48687994", "0.48634705", "0.48512042", "0.48498157", "0.48383927", "0.48354462", "0.4834718", "0.48333743", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48310298", "0.48228204", "0.48208812", "0.48163208", "0.4814487", "0.48134997", "0.4811089", "0.48064408", "0.4800316", "0.47953308", "0.47926927", "0.47915757", "0.47906184", "0.4786107", "0.47850358", "0.47842374", "0.47830087", "0.4782626", "0.4781431", "0.47805583", "0.4777263" ]
0.5814242
2
Display messages based on the window
def displayMessages(window,messages=['']):
    # update messages text
    message_in_line = ''
    for msg in messages:
        message_in_line += '\n'+msg
    window['messages'].update(f'{message_in_line}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showMessage(self):", "def display_message(self, message):\n if self.web_crawler_window is None and self.webpage_classifier_window is None:\n self.machine_learner_window.display_message(message)\n elif self.web_crawler_window is None and self.machine_learner_window is None:\n self.webpage_classifier_window.display_message(message)\n elif self.webpage_classifier_window is None and self.machine_learner_window is None:\n self.web_crawler_window.display_message(message)", "def display_messages(self, layout):", "def display_message(window, msg):\n v = create_output_panel(window, '')\n _append(v, msg)", "def display_message():", "def show_messages(self):\n console.alert(\n \"Info\",\n \"If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \\n'--no-cfgfile'.\",\n \"Ok\",\n hide_cancel_button=True,\n )\n while True:\n self.wait_modal()\n if not self.subview_open:\n break\n console.alert(\n \"Info\",\n \"Some changes may only be visible after restarting StaSh and/or Pythonista.\",\n \"Ok\",\n hide_cancel_button=True,\n )", "def show(self, window):\r\n\r\n return", "def show_messages(self):\n self.masterlog.revealme()", "def show_messages(self):\n for msg in self.messages:\n print msg['text']", "def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)", "def __window_alert(self, text):\n print str(text)\n config.VERBOSE(config.VERBOSE_DEBUG, '[DEBUG] alertmsg: ' + str(text))", "def print2message():\n return OverrideManager(\"Output Window\")", "def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )", "def show(self):\n self.present(orientations=ORIENTATIONS)\n # launch a background thread\n # we can not use ui.in_background here\n # because some dialogs would not open anymoe\n thr = threading.Thread(target=self.show_messages)\n thr.daemon = True\n thr.start()", "def doMessageWindow(msg):\n _loadMsgSettings()\n if settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n settings[msg] = True\n _saveMsgSettings()", "def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()", "def modeMsgBox(self, messageText):\n self.createMessage(messageText)", "def msg_show(self,msg):\r\n self.frame.Show()\r\n self.frame.Raise()", "def display_abort_msg(self):\r\n \r\n labelfont = ('times', 20, 'bold') \r\n msg_window = Toplevel(self.root) # Child of root window \r\n msg_window.geometry(\"650x180+300+300\") # Size of window, plus x and y placement offsets \r\n msg_window.title(\"Error Message\")\r\n msg_window.config(bg='red')\r\n msg_window.config(borderwidth=5)\r\n msg_window.config(relief=\"sunken\")\r\n self.msgStr = StringVar()\r\n self.msgStr.set(\" Session was ABORTED \\r due to an unrecoverable input or output error \")\r\n\r\n label1 = 
ttk.Label(msg_window,textvariable = self.msgStr, background=\"White\",foreground=\"Red\")\r\n #option must be -column, -columnspan, -in, -ipadx, -ipady, -padx, -pady, -row, -rowspan, or -sticky\r\n label1.config(font=labelfont) \r\n label1.grid(row=1,column=1, padx = 20, pady = 20, sticky='nesw')\r\n\r\n button1 = ttk.Button(msg_window, text=' OK ',command = msg_window.destroy)\r\n button1.grid(row=2,column=1, padx=20, pady=10)", "def MessageWindow(screen, title, text, width=40, help=None, timer_ms=None, \n run_type=RT_EXECUTEANDPOP):\n \n g = GridFormHelp(screen, title, help, 1, 3)\n\n t = TextboxReflowed(width, text)\n g.add(t, 0, 0)\n\n if timer_ms:\n g.form.w.settimer(timer_ms)\n\n (button, is_esc) = ActivateWindow(g, run_type)\n\n return {'is_esc': is_esc, \n 'grid': g,\n }", "def message_box(self):\n root = tk.Toplevel(self.top)\n root.attributes('-topmost', True)\n root.geometry(\"+650+100\")\n root.withdraw()\n messagebox.showinfo('Oh oh', 'Wrong message. Try again!')\n try:\n root.destroy()\n except:\n pass", "def msg_about(self):\n self.window.withdraw()\n msg.showinfo(\"About Text Reader\",\n \"A Python GUI created to convert text from files to speech and describe the text in 5 most \"\n \"popular words.\")\n self.window.deiconify()", "def message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n root.withdraw()\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass", "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def game_win(self):\n self.win = True\n self.msg.set_text(u'YOU WIN <Press Space>')\n self.msg.show(True)", "def msg_window(text):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(text)\n msg.setWindowTitle(\"Info\")\n msg.exec_()", "def showMessage(self, msg):\r\n super(SplashScreen, self).showMessage(\r\n msg, self.labelAlignment, QColor(_QtCore.Qt.white))\r\n QApplication.processEvents()", "def showMessage(self, message):\r\n print message", "def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif 
event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in ('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()", "def status_display(self, message, level=0, field=0):\n #print(message)\n self.statusbar_txt.set(message)", "def statusbar_msg(self, msg):\n self.statusbar.clearMessage()\n self.statusbar.showMessage(msg)", "def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()", "def __window_print(self):\n pass", "def show(self,window):\n self.showFunctions(window)", "def new_window_messages(self, button_see_all_msgs):\r\n # changing the button command to closing the window\r\n button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))\r\n\r\n # creating the chat Tk object\r\n self.messages_window = Tk()\r\n 
self.messages_window.resizable(False, False)\r\n self.messages_window.config(bg=self.bg_color)\r\n self.messages_window.protocol(\"WM_DELETE_WINDOW\",\r\n lambda: self.close_window(button_see_all_msgs))\r\n\r\n chat_label = Label(self.messages_window, text=\"Hello \" + self.username +\r\n \"\\nHere are your messages\",\r\n bg=self.bg_color, font=self.title_font)\r\n chat_label.pack(padx=20, pady=10)\r\n chat_frame = Frame(self.messages_window)\r\n chat_frame.pack(padx=15, pady=15)\r\n scrollbar_chat = Scrollbar(chat_frame)\r\n scrollbar_chat.pack(side=RIGHT, fill=Y)\r\n text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_chat.set)\r\n text_chat.pack()\r\n scrollbar_chat.config(command=text_chat.yview)\r\n for msg, encryption_data, sender_user in self.msg_list:\r\n text_chat.insert(END, \"from: \" + sender_user + \"\\n\")\r\n text_chat.insert(END, msg + \"\\n\\n\")\r\n text_chat.config(state=DISABLED)", "def message(self, message):\n messagebox.showinfo(\n GT_('Menu'),\n message\n )", "def showMessage(self, message, surface=None, bg_color=None, rect=None):\r\n if surface is None:\r\n surface = self.infoPanel \r\n if bg_color is None:\r\n bg_color = gu.INFO_PANEL_COLOR\r\n if rect is None:\r\n rect = gu.INFO_RECT\r\n \r\n surface.fill(bg_color) # clear from previous messages\r\n \r\n lines = message.split(\"\\n\")\r\n font = pygame.font.Font(None, 25)\r\n dy = 20\r\n for i, line in enumerate(lines):\r\n txt_surf = font.render(line, False, gu.WHITE)\r\n new_rect = txt_surf.get_rect().move(0, i*dy)\r\n surface.blit(txt_surf, new_rect)\r\n \r\n self.screen.blit(surface, rect)\r\n self.wait()\r\n pygame.display.update()", "def showInfoWindow():\n\treturn 0", "def windowsMessageBox(message, title=\"Natlink configure program\"):\n MessageBox(message, title)", "def showMessage(self, msg): \n QtGui.QMessageBox.information(None, \"Info\", msg)", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def about(self):\n self.main_window.message(\n width=200, aspect=100, justify=tkinter.CENTER,\n text=\"Jeu de Ping\\n\\n\"\n \"(C) Maximin Duvillard, August 2022.\\nLicence = GPL\")", "def window_info_toggle():\n window_info.hide() if window_info.showing else window_info.show()", "def show( self ):\n if self.visible == 1 and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2 and self.msgVar.get()!='':\n self.deiconify()", "def show_data(self, msg):\n\n message = msg\n # self.ECGWin.append(message)\n self.getter.get(message)\n # self.ECGWin.append(msg2)\n # self.ECGWin.append(msg3)", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "def _cb(self, hwnd, extra):\n if hwnd in self.windows:\n pass\n\n window = Window(\n hwnd=hwnd,\n text=win32gui.GetWindowText(hwnd),\n rectangle=win32gui.GetWindowRect(hwnd))\n\n self.windows[hwnd] = window", "def displayHelpMessage(self):\n if self.dialogBox == None:\n if len(self.help) > 0:\n message = self.help.pop()\n if 'SCANNING RESEARCH' in message:\n color = ['cyan']\n elif 'SCANNING INDUSTRY' in message:\n color = ['orange']\n elif 'SCANNING MILITARY' in message:\n color = ['red']\n 
self.createDialogBox(x=-0.1,y=0.7,texts=[message],textColors=color)", "def updateDisplay(self, msg):\n t = msg.data\n self.displayLbl.SetLabel(\"%s\" % t)\n self.SetTitle(\"%s\" % t)", "def show_message(self, message):\n self.sense.show_message(\n message,\n scroll_speed=self.SCROLL_SPEED,\n text_colour=self.TEXT_COLOUR\n )", "def display_message(self, message, level=\"information\"):\n box = QMessageBox(self.win)\n box.setText(message)\n box.setWindowTitle(QApplication.applicationName())\n if level == \"critical\":\n box.setIcon(QMessageBox.Critical)\n elif level == \"warning\":\n box.setIcon(QMessageBox.Warning)\n else:\n box.setIcon(QMessageBox.Information)\n box.exec_()", "def winScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"Congratulations! You win!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def logicDelayDisplay(self,message,msec=1000):\n print(message)\n self.info = qt.QDialog()\n self.infoLayout = qt.QVBoxLayout()\n self.info.setLayout(self.infoLayout)\n self.label = qt.QLabel(message,self.info)\n self.infoLayout.addWidget(self.label)\n qt.QTimer.singleShot(msec, self.info.close)\n self.info.exec_()", "def __window_prompt(self, text):\n return True", "def display_message(self, message):\n context_id = self.status_bar.get_context_id(\"\")\n self.status_bar.show()\n self.status_bar.push(context_id, message)", "def __window_dump(self, text):\n self.alert(text)", "def change_text(self):\n \n currentWidgetName = self.dW.stackedWidget.currentWidget().objectName()\n \n text = self.frameText.value(currentWidgetName, '')\n \n self.showMessage(text)", "def __showErrorMessage(self):\r\n # show messages if not in the test mode and there is an errorMessage\r\n if self.testingMode == False and self.errorMessage != None: \r\n self.setStyleSheet(\"QMessageBox{background: self.primaryColor; }\"); # change the color theme in case of an error\r\n self.msgBox.warning(self, \"Error\", self.errorMessage, QMessageBox.Ok, QMessageBox.Ok)\r\n self.setStyleSheet(\"background-color:\" + MAIN_WINDOW_SECONDARY_COLOR + \";\"); # return the color theme to its original\r\n self.errorMessage = None", "def showWindow(*args, **kwargs)->None:\n pass", "def __show_app_message(cls, message, level):\n\n ST_MIN_VERSION = 3070\n\n if (int(sublime.version()) >= ST_MIN_VERSION):\n cls.show_popup(message, level)\n else:\n sublime.status_message(message)", "def display_messages(self):\n\n\t\twhile self.joined:\n\t\t\tif len(self.messages) != 0:\n\t\t\t\tfor msg in self.messages:\n\t\t\t\t\t#: If the message is empty, ignore it.\n\t\t\t\t\tif msg == \"\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t#: If the message is close\", then the server has told the client\n\t\t\t\t\t#: to shut down, so it will. 
This is not an issue, as users\n\t\t\t\t\t#: messages will always have an identifier and : before their\n\t\t\t\t\t#: message, thus,the only messages that don't include an\n\t\t\t\t\t#: identifier will be from the server itself.\n\t\t\t\t\telif msg[:5] == \"close\":\n\n\t\t\t\t\t\treason = msg[6:]\n\n\t\t\t\t\t\tprint(\"This client was closed due to {}.\".format(reason))\n\t\t\t\t\t\tself.quit(True)\n\n\t\t\t\t\t#: Otherwise, print the message to the commandline.\n\t\t\t\t\telif not self.silent:\n\t\t\t\t\t\tprint('\\r' + msg, end='')\n\n\t\t\t\t\t\tprint(\"\\nYou: \", end='')\n\t\t\t\t\t\tself.displayed_you = True\n\n\t\t\t\t\t#: Remove the processed message\n\t\t\t\t\tself.messages.remove(msg)", "def showMessage(self, text, location, font, fontSize, colour=(255,255,255),\n input=False, secs=None):\n self.fill()\n self.text(text, location, font, fontSize, colour=colour)\n pygame.display.update()\n if input:\n currentEvent = self.input.input()\n while not self.input.checkInput(currentEvent):\n currentEvent = self.input.input()\n if not secs:\n self.timer.wait(secs)", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def ev_windowshown(self, event: WindowEvent) -> None:", "def show_popup(cls, content, level):\n\n current_view = sublime.active_window().active_view()\n message = cls.get_message_template(content, level)\n\n current_view.show_popup(content=message, max_width=400)", "def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n 
text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)", "def draw_message_box(self):\r\n length = len(self.__lines)\r\n\r\n # Build the Dialogue Box\r\n background = pygame.transform.scale(BACKGROUND_PNG, (WINDOW_WIDTH, WINDOW_HEIGHT // 3))\r\n rect = background.get_rect()\r\n rect.center = (WINDOW_WIDTH // 2, 2 * WINDOW_HEIGHT // 3 + 100)\r\n SCREEN.blit(background, rect)\r\n\r\n for offset in range(length):\r\n draw_text_abs(self.__lines[offset], 24, WINDOW_WIDTH // 2, 2 * WINDOW_HEIGHT // 3 + (offset * 45 + 50))\r\n\r\n pygame.display.update()", "def main_window_text(self) -> None:\n tk.Label(text='Название книги:').grid(row=0, column=0, padx=10, pady=10)\n tk.Label(text='Автор:').grid(row=1, column=0, padx=10)\n tk.Label(text='Жанр:').grid(row=2, column=0, padx=10, pady=10)\n entry_title = tk.Entry(width=45)\n entry_title.grid(row=0, column=1, sticky=tk.W)\n entry_author = tk.Entry(width=45)\n entry_author.grid(row=1, column=1, sticky=tk.W)\n entry_genre = tk.Entry(width=45)\n entry_genre.grid(row=2, column=1, sticky=tk.W)", "def message_display(text, loc, size, color=None):\n # gameDisplay = pygame.display.set_mode((width, height))\n largeText = pygame.font.Font('freesansbold.ttf', size)\n TextSurf, TextRect = text_objects(text, largeText, color)\n TextRect.center = (loc[0], loc[1])\n gameDisplay.blit(TextSurf, TextRect)\n\n pygame.display.update()", "def menu_screen(win):\n\tpass", "def showmessage(self):\n return self.message", "def showmessage(self):\n return self.message", "def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")", "def show_window(self):\n self.show()", "def _info(self, message):\r\n dlg = wx.MessageDialog(self, message,\r\n 'xmi2magik',\r\n wx.OK | wx.ICON_INFORMATION\r\n )\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def showme(message):\n print(message)", "def display(self,message):\r\n \r\n print(message)", "def showMessage(self, message):\r\n util.raiseNotDefined()", "def chat_window(window, chat_lines, write_box):\n for i in xrange(25):\n chat_lines[i] = Entry(Point(130,245-(i*9)),80)\n chat_lines[i].draw(window)\n chat_lines[i].setFill(\"white\")\n write_box.draw(window) # draw it to the window\n help(chat_lines)", "def set_display_message(self, title=\"\", speaker=\"\"):\r\n if self.recording:\r\n self.talkInfoString.setText(\"RECORDING\\n\\nTime remaining:\")\r\n else:\r\n self.talkInfoString.setText(\"NEXT TALK\\nTitle: %s\\nSpeaker: %s\\n\\nTime until recording:\" % (title, speaker))", "def show_about():\r\n\tmsg = messagebox\r\n\tmsg.showinfo(\"\", '''Creator: Ellis, Kevin\r\nOrganization: n/a\r\nDescription: Retrieve the network information from a database\r\nDate: 2020208\r\nVersion: 1.4''')", "def show_message(messages):\n for message in messages:\n printed_message = f\"{message}\"\n print(printed_message)", "def _showMessage(self, msg: str) -> None:\n\n raise NotImplementedError()", "def display_message_on_ableton(self, message):\n self._show_message(message)", "def display_message(self, message):\n text = self.font.render(message, True,\n self.display_states[self.display_names[self.current_display_state]]['text'])\n temp_width = text.get_rect().width\n self.gameDisplay.blit(text, ((self.SCREEN_WIDTH / 2) - (temp_width/2), 100))", "def alert(self, msg):\r\n messagedialog = Gtk.MessageDialog(self, 
type=1, buttons=1, message_format=msg)\r\n messagedialog.run()\r\n messagedialog.destroy()", "def display_messenger_status(self):\n caller = self.caller\n unread = caller.messages.pending_messengers\n read = caller.messages.messenger_history\n if not (read or unread):\n caller.msg(\n \"You have no messengers waiting for you, and have never received any messengers.\"\n + \" {wEver{n. At all. Not {rone{n.\"\n )\n if read:\n caller.msg(\"You have {w%s{n old messages you can re-read.\" % len(read))\n if unread:\n caller.msg(\n \"{mYou have {w%s{m new messengers waiting to be received.\" % len(unread)\n )", "def ShowMessage(msg, title=None, kind='info'):\n kind = kind.lower()\n if (kind.startswith('info')):\n if (title is None): title = 'Information'\n opts = wx.OK|wx.ICON_INFORMATION\n elif (kind.startswith('error')):\n if (title is None): title = 'Error'\n opts = wx.OK|wx.ICON_ERROR\n elif (kind.startswith('warn')):\n if (title is None): title = 'Warning'\n opts = wx.OK|wx.ICON_WARNING\n else:\n opts = wx.OK\n if (title is None):\n title = \"\"\n dial = wx.MessageDialog(None, msg, title, opts)\n dial.ShowModal()", "def updateStatus(self, message):\r\n self.statusBar().showMessage(message, 5000)\r\n if self.kinfilename is not None:\r\n self.setWindowTitle(\"Visualization Tool - %s\" % \\\r\n os.path.basename(unicode(self.kinfilename)))", "def messageScrolled(self,message):\n from dialogs import speDialog\n if sys.platform!='win32':message='<font size=-2>%s</font>'%message\n speDialog.create(self, message, self.path)", "def delayDisplay(self,message,msec=1000):\n print(message)\n self.info = qt.QDialog()\n self.infoLayout = qt.QVBoxLayout()\n self.info.setLayout(self.infoLayout)\n self.label = qt.QLabel(message,self.info)\n self.infoLayout.addWidget(self.label)\n qt.QTimer.singleShot(msec, self.info.close)\n self.info.exec_()", "def delayDisplay(self,message,msec=1000):\n print(message)\n self.info = qt.QDialog()\n self.infoLayout = qt.QVBoxLayout()\n self.info.setLayout(self.infoLayout)\n self.label = qt.QLabel(message,self.info)\n self.infoLayout.addWidget(self.label)\n qt.QTimer.singleShot(msec, self.info.close)\n self.info.exec_()", "def show_help(self):\n\n message = QMessageBox()\n message.setWindowTitle(\"Help\")\n message.setMinimumHeight(1000)\n message.setMinimumWidth(1000)\n\n message.setText(\"1) How to annotate?\\n\"\n \"Move the mouse up and down inside the doted rectangle.\\n\\n\\n\"\n\n \"2) Why is 'wide mode' inactivated?\\n\"\n \"Wide mode and record mode are not allowed to work together.\\n\"\n \"Make sure to exit record mode to access wide mode. 
\\n\\n\\n\"\n\n \"3) Mouse shortcuts (outside the diagram widget):\\n\\n\"\n \"\\t Right click\\tPlay/pause\\n\"\n \"\\t Scroll\\t\\tFast forward/ backward\\n\"\n \"\\t Dubble click\\tSave\\n\"\n \"\\t Wheel click\\tToggle record mode\\n\\n\\n\"\n\n \"4) Keyboard shortcuts:\\n\\n\"\n \"\\t CTRL+S\\t\\tSave\\n\"\n \"\\t CTRL+O\\t\\tOpen video\\n\"\n \"\\t CTRL+I\\t\\tOpen annotation\\n\"\n \"\\t CTRL+N\\t\\tNew file\\n\"\n \"\\t CTRL+C\\t\\tClear annotation\\n\"\n \"\\t CTRL+Q\\t\\tQuit\\n\"\n \"\\t CTRL+H\\t\\tHelp\\n\\n\"\n \"\\t S\\t\\tPlay/ stop\\n\"\n \"\\t Z\\t\\tFast bakward 50 ms\\n\"\n \"\\t C\\t\\tFast forward 50 ms\\n\"\n \"\\t A\\t\\tFast bakward 200 ms\\n\"\n \"\\t D\\t\\tFast forward 200 ms\\n\"\n \"\\t Q\\t\\tFast bakward 5 s\\n\"\n \"\\t E\\t\\tFast forward 5 s\\n\"\n \"\\t R\\t\\tToggle record mode\\n\\n\"\n \"\\t 1\\t\\tPlayback rate: 0.5\\n\"\n \"\\t 2\\t\\tPlayback rate: 0.75\\n\"\n \"\\t 3\\t\\tPlayback rate: 1\\n\"\n \"\\t 4\\t\\tPlayback rate: 1.25\\n\"\n \"\\t 5\\t\\tPlayback rate: 1.5\\n\"\n \"\\t 6\\t\\tPlayback rate: 1.75\\n\")\n\n x = message.exec_() # this will show our messagebox", "def _conf_message(self):\n pygame.font.init()\n sw, sh = self._screen.get_size()\n \n # Creating messages to be displayed on screen\n # and rendering them\n header_str = \"Recording complete!\"\n sub_str = \"[SCROLL DIAL to SAVE or RECORD OVER]\"\n \n heading = self._render_text( header_str , self._medium_font )\n l1w, l1h = heading.get_size()\n\n myfont = pygame.font.SysFont(\"freesansbold.ttf\", 30)\n subheading = myfont.render( sub_str, True, (250, 227, 137))\n l3w, l3h = subheading.get_size()\n \n # Clear screen\n self._screen.fill(self._bgcolor)\n \n # Creating the objects to be displayed on the screen\n self._screen.blit(heading, (sw/2-l1w/2, sh * 0.2))\n self._screen.blit(subheading, (sw/2-l3w/2, sh * 0.8))\n \n # Creating Save and Record Again option with boxes around them\n i = 0\n for item in self._options:\n x = 0.1 * (sw * 0.5) + (sw * 0.5 * i)\n pygame.gfxdraw.rectangle(self._screen, (x, sh *0.5, 0.8 *sw * 0.5, 0.2*sh), (250, 227, 137))\n subheader_string = self._options[i]\n subheader = self._render_text(subheader_string, self._medium_font)\n lw, lh = subheader.get_size()\n self._screen.blit(subheader, (x + (0.4 *(sw*0.5))-lw/2, 0.6*sh - lh/2))\n if i != self._current_option:\n pygame.gfxdraw.rectangle(self._screen, (x, sh *0.5, 0.4*sw, 0.2*sh), self._fgcolor)\n i = i + 1\n pygame.display.update()", "def inputalarm():\r\n window = tk.Toplevel()\r\n window.title(\"Warning\")\r\n\r\n central = tk.Label(window,\r\n text=\"Please enter content in the text boxes. The more information you provide to your \"\r\n \"comments the better later indexing will be.\",\r\n wraplength=200)\r\n central.pack()", "def showMessage(self, msg):\n msgBox = QMessageBox()\n msgBox.setText(msg)\n #msgBox.setInformativeText(\"Do you want to save your changes?\")\n #msgBox.setStandardButtons(QMessageBox::Save | QMessageBox::Discard | QMessageBox::Cancel);\n #msgBox.setDefaultButton(QMessageBox::Save);\n ret = msgBox.exec();", "def showMenu():\n print( \"1. Create New User\" )\n print( \"2. Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. 
Exit\" )", "def ShowMessage(self, title=u\"\", message=u\"\", msgType=INFOBAR_INFO):\n self.Title.SetLabel(title)\n self.Message.SetLabel(message)\n self.MessageType = msgType\n self.Show(True)", "def info(text, window=None):\n message(text, u'Informação', M_INFO, B_OK, window)", "def delayDisplay(self,message,msec=200):\n print(message)\n self.info = qt.QDialog()\n self.infoLayout = qt.QVBoxLayout()\n self.info.setLayout(self.infoLayout)\n self.label = qt.QLabel(message,self.info)\n self.infoLayout.addWidget(self.label)\n qt.QTimer.singleShot(msec, self.info.close)\n self.info.exec_()" ]
[ "0.74672174", "0.740839", "0.7318725", "0.7254243", "0.72349834", "0.71429914", "0.6881725", "0.6847033", "0.6815938", "0.6629776", "0.6621973", "0.66084623", "0.6538907", "0.652988", "0.6500956", "0.6434533", "0.64031684", "0.6329774", "0.63157547", "0.630548", "0.62997496", "0.6296191", "0.6293993", "0.62727886", "0.6271266", "0.62703264", "0.62658626", "0.62625295", "0.6250218", "0.62329245", "0.6220311", "0.6216499", "0.62125856", "0.6195084", "0.6190273", "0.61831456", "0.617778", "0.6170455", "0.61563224", "0.6154009", "0.615317", "0.61479175", "0.61266696", "0.61205953", "0.61205137", "0.6114695", "0.6092941", "0.6088264", "0.6085197", "0.60850495", "0.6081693", "0.6059746", "0.60571796", "0.60568047", "0.60542864", "0.6052148", "0.6040651", "0.60374653", "0.60348356", "0.60288763", "0.60175955", "0.6015552", "0.6012755", "0.60118604", "0.59977454", "0.59917635", "0.5988504", "0.59649235", "0.595826", "0.59481585", "0.5930267", "0.5930267", "0.59254915", "0.5922902", "0.5921125", "0.5915934", "0.5912442", "0.5898732", "0.58987254", "0.58909905", "0.58882314", "0.58834666", "0.5881122", "0.58807224", "0.5876168", "0.5874529", "0.5874192", "0.5870364", "0.5869803", "0.58664465", "0.5858122", "0.5858122", "0.58548313", "0.58513784", "0.5846632", "0.5841294", "0.5841043", "0.5839051", "0.5836192", "0.5826512" ]
0.7462986
1
Ensure we don't include the same file twice.
def test_unique_keplerids():
    total_count = query_one("SELECT COUNT(*) FROM tpf;")
    filename_count = query_one("SELECT COUNT(DISTINCT filename) FROM tpf;")
    assert total_count == filename_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"bar\")\n time.sleep(0.1)\n self.write_file(dir1, \"foo\", \"baz\")\n self.sync_all()\n # File with later mtime wins\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def _verify_includes(self):\n files_seen = {}\n for node in self.ast_list:\n # Ignore #include <> files. Only handle #include \"\".\n # Assume that <> are used for only basic C/C++ headers.\n if isinstance(node, ast.Include) and not node.system:\n module = self._get_module(node)\n filename = module.normalized_filename\n\n normalized_filename = module.normalized_filename\n\n if is_cpp_file(filename):\n self._add_warning(\n \"should not #include C++ source file '{}'\".format(\n node.filename),\n node)\n\n if normalized_filename == self.normalized_filename:\n self._add_warning(\n \"'{}' #includes itself\".format(node.filename),\n node)\n\n if normalized_filename in files_seen:\n include_node = files_seen[normalized_filename]\n line_num = get_line_number(self.metrics, include_node)\n self._add_warning(\n \"'{}' already #included on line {}\".format(\n node.filename,\n line_num),\n node)\n\n files_seen[normalized_filename] = node", "def skip_require():\n global ignore_once\n ignore_once = True", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # Now let's do some db sanity checks.\r\n self._delicious_xml_data_test()", "def testDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 foo/../file1_1.cc\nFILE 2 bar/../file1_1.cc\nFILE 3 baz/../file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 1\n1008 4 46 1\n100c 4 44 1\n\"\"\"\n self.assertParsed(INPUT, [], EXPECTED_OUTPUT)", "def check_duplicates(self, file_path):\n\t\tif not file_path:\n\t\t\treturn file_path\n\t\tif not self.settings.get('deduplicate_files', True):\n\t\t\t# Deduplication disabled.\n\t\t\treturn file_path\n\t\twas_new, existing_path = hashjar.add_hash(file_path) # Check if the file exists already.\n\t\tif not was_new:\n\t\t\tprint(\"\\tFile already exists! Resolving...\")\n\t\t\t# Quick and dirty comparison, assumes larger filesize means better quality.\n\t\t\tif os.path.isfile(file_path) and os.path.isfile(existing_path):\n\t\t\t\tif os.path.getsize(file_path) > os.path.getsize(existing_path):\n\t\t\t\t\tprint('\\t\\tNew file was better quality. 
Removing old file.')\n\t\t\t\t\tos.remove(existing_path)\n\t\t\t\t\tfor ele in self.loader.get_elements_for_file(existing_path):\n\t\t\t\t\t\tele.remap_file(existing_path, file_path)\n\t\t\t\t\treturn file_path\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\tOld file was better quality, removing newer file.\")\n\t\t\t\t\tos.remove(file_path)\n\t\t\t\t\treturn existing_path\n\t\treturn file_path", "def _verify_include_files_used(self, file_uses, included_files):\n for include_file, use in file_uses.items():\n if not use & USES_DECLARATION:\n node, module = included_files[include_file]\n if module.ast_list is not None:\n msg = \"'{}' does not need to be #included\".format(\n node.filename)\n if use & USES_REFERENCE:\n msg += '; use a forward declaration instead'\n self._add_warning(msg, node)", "def include_file(self, filename):\n # Only include Python files for now.\n if filename[-3:] == '.py':\n return True\n return False", "def test_file_dir_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.write_file(dir1, \"foo/bar\", \"baz\")\n self.sync_all()\n # Directory wins. File is deleted in dir0\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")", "def should_be_included(self):\n return True", "def test_add_quote_but_file_contains_quote_already(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n quote = api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"])\n api.add_quote(path, quote)\n\n with self.assertRaisesRegexp(Exception, re.escape(\n 'the quote \"This is an added quote.\" is already in the quote file {0}.'.format(path))):\n api.add_quote(path, quote)", "def test_conf_contain_only_include_file(self):\n\n # dummy configuration for include file 1\n conf = {\n 'runners': {\n 'inline': {\n 'local_tmp_dir': \"include_file1_local_tmp_dir\"\n }\n }\n }\n\n include_file_1 = self.save_conf('include_file_1', conf)\n\n # dummy configuration for include file 2\n conf = {\n 'runners': {\n 'inline': {\n 'local_tmp_dir': \"include_file2_local_tmp_dir\"\n }\n }\n }\n\n include_file_2 = self.save_conf('include_file_2', conf)\n\n # test configuration\n conf = {\n 'include': [include_file_1, include_file_2]\n }\n path = self.save_conf('twoincludefiles', conf)\n\n stderr = StringIO()\n with no_handlers_for_logger():\n log_to_stream('mrjob.conf', stderr)\n InlineMRJobRunner(conf_paths=[path])\n self.assertEqual(\n \"\",\n stderr.getvalue())", "def same_file(wavecar1, wavecar2, wavecar3):\n same = False\n if (filecmp.cmp(wavecar1, wavecar2, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar2))\n same = True\n if (filecmp.cmp(wavecar1, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar3))\n same = True\n if (filecmp.cmp(wavecar2, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar2, wavecar3))\n same = True\n\n if same:\n print(\"It seems that you are using same files to do finite difference, exit\")\n print(\"\\tComment the 'same_file' checker if you know what you are doing\")\n raise SystemExit", "def copy_file_check(self):\n pass", "def test_duplicate_with_url(self):\r\n # Load up base course and verify it is available\r\n call_command('import', self.content_dir, self.good_dir)\r\n store = modulestore()\r\n self.assertIsNotNone(store.get_course(self.BASE_COURSE_KEY))\r\n\r\n # Now load up duped course and verify it doesn't 
load\r\n call_command('import', self.content_dir, self.dupe_dir)\r\n self.assertIsNone(store.get_course(self.DIFF_KEY))", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def is_already_ignored(file_rel, ignore_filename):\n with open(ignore_filename) as ignore_file:\n for line in ignore_file:\n l = line.strip()\n if l == file_rel:\n return True\n return False", "def testSingleFile(self):\n env = self.env\n\n # If only one concat-able source file is present, passes through\n cs = env.ConcatSource('foo1.cc', ['a.cc'])\n self.assertEqual(map(str, cs), ['a.cc'])", "def pragma_once(ctx: PresubmitContext) -> None:\n\n for path in ctx.paths:\n with open(path) as file:\n for line in file:\n if line.startswith('#pragma once'):\n break\n else:\n raise PresubmitFailure('#pragma once is missing!', path=path)", "def checkExisting(self, dst):\n if dst.exists():\n msg = 'Refusing to clobber existing file \"%s\"' % (\n dst.path,)\n logging.msg(msg)\n raise errors.NoClobber(msg)", "def add_included_file(self, includedfile):\n assert includedfile.get_file().get_module() == self._othermodule\n if not includedfile.get_including_file().is_test_file():\n self._is_test_only_dependency = False\n self._includedfiles.append(includedfile)", "def test_oldincludedir(self):\n self.chck_triple('oldincludedir')", "def test_filter_file_exceptions_early_dupes():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'early_exceptions.yaml'))\n\n package = Package('test', os.path.dirname(__file__))\n files = [os.path.join(os.path.dirname(__file__),\n 'unlikelystring'),\n os.path.join(os.path.dirname(__file__),\n 'unlikelystring')]\n\n filtered_files = exceptions.filter_file_exceptions_early(package, files)\n\n assert not filtered_files", "def _generate_header_files(self):\n return True", "def test_multiple_calls_no_duplicates(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in /tmp/romaine_tests/features\n core.locate_features('/tmp/romaine_tests/features')\n # And I locate features in /tmp/romaine_tests/features\n core.locate_features('/tmp/romaine_tests/features')\n\n # Then the core's feature_paths_list variable contains no duplicates\n feature_file_paths = list(core.feature_file_paths)\n for item in feature_file_paths:\n self.assertEqual(\n feature_file_paths.count(item),\n 1,\n )", "def test_merge(self):\r\n filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())\r\n generate.merge(CONFIGURATION.source_locale, target=filename)\r\n self.assertTrue(os.path.exists(filename))\r\n os.remove(filename)", "def test_css_bottom_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in bottom:\n self.assertIn(os.path.basename(fle), list_css_bottom_files())", "def test_import_order():\n file_paths = glob.iglob('*/*.py')\n for file_path in file_paths:\n with open(file_path, 'r') as file_obj:\n file_contents = file_obj.read()\n new_file_contents = isort.code(file_contents)\n fail_msg = '{} imports are not compliant'.format(\n file_path)\n yield case.assertEqual, 
new_file_contents, file_contents, fail_msg", "def validate_include(self, included):\n if \"file\" not in included:\n raise AssertionError(\"Missing file in include statement\")\n else:\n path = Path(included[\"file\"])\n if path.exists():\n with path.open() as file:\n data = json.load(file)\n if \"include\" in data:\n data = self.validate_include(data)\n if \"remove\" in included: # removed\n for key, value in included[\"remove\"].items():\n self.remove(data, key, value)\n if \"add\" in included:\n for key, value in included[\n \"add\"\n ].items(): # needed to remove cloned data\n self.remove(data, key, value)\n data = merge(data, included[\"add\"])\n\n return data\n else:\n raise FileNotFoundError(f\"File {path.absolute()} doesn't exists.\")", "def test_file_update_delete_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"baz\")\n self.delete_file(dir1, \"foo\")\n self.sync_all()\n self.assertFileAbsent(dir0, \"foo\")\n self.assertFileAbsent(dir1, \"foo\")", "def test_includedir(self):\n self.chck_triple('includedir')", "def remove_dupes(infile):\n filename = infile.replace('.csv', '-unique.csv')\n s = set()\n with open(filename, 'w') as outfile:\n for line in open(infile):\n if line not in s:\n outfile.write(line)\n s.add(line)", "def __call__( self, fn, *args, **kw ):\n\n # some basic sanity check first \n if isinstance(fn, str) and len(fn) == 0:\n raise IncludeError(\"can not 'include' empty filenames\")\n \n # don't include file if not allowed (has to be exact match in py code)\n if fn in self._once:\n self.msg.debug( 'file \"%s\" is blocked; not included', fn )\n return\n\n # locate the file\n name = FindFile( os.path.expanduser( os.path.expandvars( fn ) ), optionsPath, os.R_OK )\n if not name:\n name = FindFile( os.path.basename( fn ), optionsPath, os.R_OK )\n if name:\n self.msg.warning( 'using %s instead of %s', name, fn )\n else:\n raise IncludeError( 'include file %s can not be found' % fn )\n\n self.msg.debug( 'located %s as %s', fn, name )\n\n # print if 'show' is set to non-null\n show = self._show\n if 'show' in kw:\n show = kw[ 'show' ]\n\n # notify of start of file inclusion\n if show:\n self.msg.info( 'including file \"%s\" with ID %d', fn, self.fid )\n else:\n self.msg.info( 'including file \"%s\"', fn )\n self._fcurrent = name\n\n # actual inclusion\n if show and self._doTrace( name ):\n # traced\n _filecache[ name ] = open( name, 'r' ).readlines()\n _linecache[ name ] = 0, self.fid\n self.fid += 1\n\n from past.builtins import execfile\n sys.settrace( self._trace_include )\n execfile( name, self._workspace, self._workspace )\n sys.settrace( sys._getframe(0).f_trace )\n\n # finish file printout\n ncur, fid = _linecache[ name ]\n buf = _filecache[ name ]\n for i in range( ncur, len(buf) ):\n self._oneline( fid, i, silentMarker, buf )\n\n del _filecache[ name ]\n del _linecache[ name ]\n\n self.msg.info( 'end of \"%s\"', fn )\n\n else:\n # non-traced\n #execfile( name, self._workspace, self._workspace )\n exec(compile(open(name).read(), name, 'exec'), self._workspace, self._workspace)\n \n\n if hasattr( self, '_collect' ):\n if not self._collect % 10:\n import gc\n gc.collect()\n else:\n self._collect += 1", "def test_component_resolution_same_file_err():\n\n with pytest.raises(InterpStackTrace) as exc_info:\n snippet_eval(ComponentSnippet(modulea.ComponentResolutionSameFileErr()))\n assert 'DefinitelyNotExistingComponent' in str(exc_info.value)", "def 
fix_include_statement(line, header_map):\n name_to_include = find_include_statement(line)\n if name_to_include:\n header_folders = header_map[name_to_include]\n if len(header_folders) == 1: # require that file exist only in single folder\n str = '#include <{0}/{1}>'.format(header_folders[0], name_to_include)\n print(line)\n print(str)\n print(\"---\")\n return str\n return line", "def includeme(config):", "def duplicate_file():\n file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample01\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n new_file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample02\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n try:\n copyfile(file, new_file)\n yield\n finally:\n new_file.unlink()", "def Ignore(self, relative_file):\n return False", "def testIgnoredPrefixesDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 /src/build/foo/../file1_1.cc\nFILE 2 /src/build/bar/../file1_2.cc\nFILE 3 D:/src/build2/baz/../file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFILE 2 file1_2.cc\nFILE 3 file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n IGNORED_PREFIXES = ['\\\\src\\\\build\\\\', 'D:\\\\src\\\\build2\\\\']\n self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT)", "def test_differ_times_one_file(generate_differ_times_one_file):\n fname = generate_differ_times_one_file\n with pytest.raises(Exception):\n process_files([fname])", "def samefile(self, other):\n other = os.fspath(other)\n if not isabs(other):\n other = abspath(other)\n if self == other:\n return True\n if not hasattr(os.path, \"samefile\"):\n return False\n return error.checked_call(os.path.samefile, self.strpath, other)", "def place_files_in_duplicates_directory(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n dst = os.path.join(self.get_directory(), \"duplicates\", f)\n os.replace(src, dst)", "def test_strain_not_in_two_files(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])", "def test_check_header_dups(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_dups(header, errors)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should give errors with dups\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_dups(header, errors)\r\n\r\n expected_errors = [\r\n 'run_prefix found in header 2 times. Header fields must be unique.\\t0,3',\r\n 'run_prefix found in header 2 times. 
Header fields must be unique.\\t0,4']\r\n\r\n self.assertEqual(errors, expected_errors)", "def validate_no_duplicate_paths(self, resources):\r\n paths = set()\r\n for item in resources:\r\n file_name = item.get('path')\r\n if file_name in paths:\r\n raise ValueError(\r\n '%s path was specified more than once in the metadata' %\r\n file_name)\r\n paths.add(file_name)", "def test__remove_excl_file_2(self):\n rsync = RsyncMethod(self.settings, self.meta, self.log, self.comms, False)\n self.assertEqual(rsync.exclude_file, os.path.join(os.environ['HOME'],\"test_myocp\",\"myocp_excl\"))\n rsync.exclude_file = os.path.join(os.environ['HOME'],\"temp/myocp_excl\")\n with open(rsync.exclude_file, 'w') as fp:\n fp.write('{}')\n rsync._remove_exclude_file()\n self.assertFalse(os.path.exists(rsync.exclude_file))\n self.assertEqual(self.log.getVal('info').split('|')[0], 'Settings file loaded.')\n self.assertEqual(self.log.getVal('info').split('|')[1], 'Settings file verified.')\n #self.assertEqual(self.log.getVal('info').split('|')[2], 'rsync exclusions file removed.')", "def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)", "def test_file_empty_dir_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.write_dir(dir1, \"foo\")\n self.sync_all()\n # Directory wins. File is deleted in dir0\n self.assertDirPresent(dir0, \"foo\")\n self.assertDirPresent(dir1, \"foo\")", "def check_one(filename):\n\n # The file may have been removed from the filesystem.\n # ===================================================\n\n if not isfile(filename):\n if filename in mtimes:\n sys.exit(1) # trigger restart\n else:\n # We haven't seen the file before. 
It has probably been loaded \n # from a zip (egg) archive.\n return\n\n\n # Or not, in which case, check the mod time.\n # ==========================================\n\n mtime = os.stat(filename).st_mtime\n if filename not in mtimes: # first time we've seen it\n mtimes[filename] = mtime\n if mtime > mtimes[filename]:\n sys.exit(1) # trigger restart", "def samefile(path1, path2):\n try:\n return os.path.samefile(path1, path2)\n except OSError as err:\n if err.errno == 2: # ENOENT\n return False\n else:\n raise", "def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error", "def checkConflicts(self):\n\t\treturn", "def check_cython_includes(filename, includes):\n from os.path import exists, isfile, join\n for directory in includes:\n path = join(directory, filename) + \".pxd\"\n if exists(path) and isfile(path):\n return path\n path = join(directory, *filename.split('.')) + \".pxd\"\n if exists(path) and isfile(path):\n return path", "def test_sharedstatedir(self):\n self.chck_triple('sharedstatedir')", "def test_differ_times_two_files(generate_differ_times_two_files):\n fname = generate_differ_times_two_files\n with pytest.raises(Exception):\n process_files([fname[0], fname[1]])", "def _check_version_conflict(self, namespace, filename):\n try:\n collection = models.Collection.objects.get(\n namespace=namespace, name=filename.name)\n collection.versions.get(version=filename.version)\n except dj_exc.ObjectDoesNotExist:\n pass\n else:\n raise CollectionExistsError(\n f'Collection \"{filename.namespace}-{filename.name}'\n f'-{filename.version}\" already exists.')", "def try_include(filename):\n print_debug(f\"including {filename} {RELEASE_ENV}\")\n try:\n with open(filename) as f:\n exec(compile(f.read(), filename, \"exec\"), globals())\n\n print_debug(f\"loaded additional settings file '{filename}'\")\n\n except FileNotFoundError:\n print_debug(f\"additional settings file '{filename}' was not found, skipping\")", "def test_load_missing_file(self):\n # Technically there's a race condition here, but... 
I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n track = Track.from_filename(filename)", "def test_file_integrity_remove_file_in_case_of_fail():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert not os.path.isfile(test_file_path)", "def test_component_resolution_same_file():\n\n assert snippet_eval(ComponentSnippet(modulea.ComponentResolutionSameFile())) == \"hi\\n\"", "def standard_include(infiles):\n if infiles:\n definitions = { }\n for infile in infiles:\n if path_exists(infile):\n with open(infile, 'r') as infile_file:\n infile_json = json_load(infile_file)\n definitions = merge_dictionaries(infile_json, definitions)\n else:\n LOG.error('Missing file: %s', infile)\n return JsonAsset(definitions=definitions)\n else:\n return JsonAsset()\n return None", "def test_partial_twice_dependent_object_import(self):\n pass", "def updateIncludeFiles(self):\n for filename, filetype in self._get_include_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filetype is 'PyRex':\n lines, write_out = self._update_pyrex_file(lines, filename)\n elif filetype is 'Header':\n lines, write_out = self._update_header_file(lines, filename)\n else:\n raise TypeError, \"Unknown include file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def _applyIncludes(self, origfile, _file = file):\n opt = \"include_config\"\n try:\n try:\n includes = self._config.get(\"general\", opt, raw = True).strip()\n except ConfigParser.NoOptionError:\n opt = \"include-config\"\n includes = self._config.get(\"general\", opt, raw = True).strip()\n except ConfigParser.NoOptionError:\n # don't even ignore\n pass\n else:\n self._config.remove_option(\"general\", opt)\n if not len(includes):\n return\n\n origpath = os.path.dirname(os.path.abspath(origfile))\n includes = [\n util.filename.toLocale(\n config_file, self._charset_, self.runtime.path_encoding\n )\n for config_file in util.splitCommand(includes) if config_file\n ]\n\n for config_file in includes:\n try:\n config_fp = _file(os.path.join(origpath, config_file))\n self._config.readfp(config_fp, config_fp.name)\n config_fp.close()\n except IOError, exc:\n raise ConfigNotFoundError(\"%s: %s\" % (\n config_file, str(exc)\n ))", "def test_commonfs_truecase():\n f1 = tempfile.mkstemp()\n f2 = tempfile.mkstemp()\n rc1 = r.RemovalCandidate(f1[1])\n rc2 = r.RemovalCandidate(f2[1])\n assert r.commonfs([rc1,rc2])", "def _need_generate(paths):\r\n if not os.path.exists(paths.generated_dir):\r\n return True\r\n\r\n if not os.path.exists(paths.index_file):\r\n return True\r\n\r\n # Use the index file to determine if regeneration is necessary\r\n with open(paths.index_file, 'r',newline='\\n') as index_file:\r\n indexed = [item for item in\r\n index_file.read().split('\\n') if len(item) != 0 and\r\n not item.startswith(\"#\")]\r\n return indexed != paths.resource_files", "def test_css_top_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in top:\n self.assertIn(os.path.basename(fle), 
list_css_top_files())", "def shouldhave(self, thisfile):\n if not os.path.isfile(thisfile):\n self.logtxt(\"ERROR: expected file (%s/%s) does not exist!\" %\n (os.getcwd(), thisfile), 'error')", "def test_insert_file_tag(self): \n content = \"Here is an included file: <toplevelcontent> {% insert_file public_html/fakeinclude.html %}</toplevelcontent>\" \n insertfiletagpage = create_page_in_admin(self.testproject,\"testincludefiletagpage\",content)\n \n response = self._test_page_can_be_viewed(self.signedup_user,insertfiletagpage)\n \n \n # Extract rendered content from included file, see if it has been rendered\n # In the correct way\n somecss = find_text_between('<somecss>','</somecss>',response.content)\n nonexistant = find_text_between('<nonexistant>','</nonexistant>',response.content)\n scary = find_text_between('<scary>','</scary>',response.content)\n \n self.assertTrue(somecss != \"\",\"Nothing was rendered when including an existing file. Some css should be here\")\n self.assertTrue(nonexistant != \"\",\"Nothing was rendered when including an existing file. Some css should be here\")\n self.assertTrue(scary != \"\",\"Nothing was rendered when trying to go up the directory tree with ../ At least some error should be printed\")\n \n self.assertTrue(\"body {width:300px;}\" in somecss,\"Did not find expected\"\n \" content 'body {width:300px;}' when including a test\"\n \" css file. Instead found '%s'\" % somecss)\n self.assertTrue(\"Error including file\" in nonexistant,\"Expected a\"\n \" message 'Error including file' when including \"\n \"non-existant file. Instead found '%s'\" % nonexistant)\n self.assertTrue(\"Error including file\" in scary ,\n \"Expected a message 'Error including file' when trying to include filepath with ../\"\n \" in it. Instead found '%s'\" %scary)", "def test_twice_dependent_object_import(self):\n pass", "def _include_file(self, root_parts, f):\n if len(root_parts) and root_parts[0] == \"lwc\":\n # only include expected file extensions within lwc components\n return f.lower().endswith((\".js\", \".js-meta.xml\", \".html\", \".css\", \".svg\"))\n return True", "def test_do_not_ignore_empty_files(self):\n\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = ''\n with self.assertAddsMessages(pylint.testutils.Message(\n msg_id='invalid-file-header',\n line=1,\n args=self.EXPECTED_HEADER)):\n self.checker.process_module(node_mock)", "def removeDuplicateUrl(inputfile, outputfile):\n\t\n\tlines_seen = set()\n\toutfile = open(outputfile, \"w\")\n\tfor line in open(inputfile, \"r\"):\n \t\tif line not in lines_seen:\n\t\t\toutfileput.write(line)\n\t\t\tlines_seen.add(line)\n\n\toutputfile.close()", "def test_identical(self):\n write this test!", "def test_unique(self):\n leading_digits = re.compile(r'^\\d+')\n seen_numbers = set()\n path = self._migrations_path()\n for filename in listdir(path):\n match = leading_digits.match(filename)\n if match:\n number = match.group()\n if number in seen_numbers:\n self.fail('There is more than one migration #%s in %s.' 
%\n (number, path))\n seen_numbers.add(number)", "def test_stress_not_in_two_files(generate_no_stress_one_file):\n fname = generate_no_stress_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])", "def duplicated_line(filename):\n duplicate=0\n with open(filename,encoding=\"utf-8\",errors='ignore') as f:\n scripts=f.readlines()\n #Removes whitespace and blank line \n scripty = filter(lambda x: not re.match(r'^\\s*$', x), scripts)\n #Removes Comments\n script = filter(lambda x: not re.match(r'(?m)^ *#.*\\n?', x), scripty)\n script=list(script)\n with open(filename,encoding=\"utf8\",errors='ignore') as f:\n files=f.readlines()\n #Removes whitespace and blank line \n filey = filter(lambda x: not re.match(r'^\\s*$', x), files)\n #Removes Comments\n file = filter(lambda x: not re.match(r'(?m)^ *#.*\\n?', x), filey)\n file=list(file)\n for cnt, line in enumerate(file):\n if cnt <= len(file)-4:\n for i,item in enumerate(script):\n #Dont compare with that same line and the next 3 line, and don't compare with the last 3 lines\n if cnt != i and i!=cnt+1 and i!=cnt+2 and i!=cnt+3 and i<= len(script)-4 :\n if line == item and file[cnt+1]==script[i+1] and file[cnt+2]==script[i+2] and file[cnt+3]==script[i+3]:\n duplicate+=4\n #delete the duplicates in file and script\n del file[i:i+4]\n del script[i:i+4]\n\n return duplicate", "def _compare_file(path1, path2):\n\n try:\n return _open_file(path1) == _open_file(path2)\n except OSError:\n return False", "def compare_files(fp1, fp2):\n\n line1 = fp1.readline()\n line2 = fp2.readline()\n\n while line1 and line2:\n if line1.startswith('#') and line2.startswith('#'):\n pass\n elif not line1 == line2:\n return False\n \n line1 = fp1.readline()\n line2 = fp2.readline()\n\n if line1 or line2:\n return False\n\n return True", "def no_overwrite_example():", "def test_provider_system_hook_file_shred(change_dir, clean_files):\n files = ['stuff', 'thing', 'foo']\n for f in files:\n file = open(f, \"w\")\n file.write(f)\n file.close()\n\n tackle('.', no_input=True, context_file='shred.yaml')\n\n for f in files:\n assert not os.path.isfile(f)", "def test_cleanup_html_lookup_file():\n # there are 4 duplicate entries, all having to do with stroke\n expected_lines = 63\n\n #original location\n test_filename = 'test/fixture/html_lookup_file.txt'\n #create a copy to maintain integrity of file\n test_filename_working = 'test/fixture/html_lookup_file_copy.txt'\n copyfile(test_filename, test_filename_working)\n\n dedup_medfind.cleanup_html_lookup_file(test_filename_working)\n # no return value, we must open and read the file to assert\n with open(test_filename_working, 'r', encoding='utf-8') as fs:\n lines = fs.readlines()\n # delete our working copy before performing asserts\n os.remove(test_filename_working)\n # finally, we do our asserts based upon the test file we create\n for line in lines:\n print(line.strip())\n assert expected_lines == len(lines)", "def _abort_on_conflicting_untracked_paths(self) -> None:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return\n\n changed_paths = set(\n self._status.added\n + self._status.modified\n + self._status.removed\n + self._status.unmerged\n )\n untracked_paths = {\n self._fname_to_path(repo, str(path))\n for path in (self._dirty_paths_by_status.get(StatusCode.Untracked, []))\n }\n overlapping_paths = untracked_paths & changed_paths\n\n if overlapping_paths:\n raise ActionFailure(\n \"Some paths that changed since the baseline commit now show up as untracked files. 
\"\n f\"Please commit or stash your untracked changes in these paths: {overlapping_paths}.\"\n )", "def remove_duplicates(file):\n file_tmp = 'tmp'\n with open(file) as f, open(file_tmp, 'w') as o:\n for line in unique_everseen(f):\n o.write(line)\n # rename file_tmp to file\n os.remove(file)\n os.rename(file_tmp, file)", "def check_duplicated_data(self, path, target):\n files_in_path = [file for file in self.get_csv_in_path(path)]\n print(\"check duplicated for file {} in path {} , files\".format(target, path))\n if target in files_in_path:\n print('The {} is already exist'.format(target))\n return True\n return False", "def insignificant(path):\n\n # This part is simply an implementation detail for the code base that the\n # script was developed against. Ideally this would be moved out to a config\n # file.\n return path.endswith('Dll.H') or path.endswith('Forward.H') or \\\n path.endswith('templates.H')", "def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False", "def check_one_file(filein, observations, hash, update, conf, errors):\n try:\n import boto.s3.key\n except ImportError:\n pass\n\n # set up nap here so we don't have to pass conf further down,\n # makes it eaiser to unit test\n nap = NapContext(conf.data['sleepiness'])\n\n # test what type got passed in to :filein:\n filename = \"\"\n # we don't know if boto will be installed, must be better way\n # to detect if `filein` a string (ahem, unicode thingy) or an\n # AWS key than the try/except here\n s3 = False\n # do I have a local filesystem path or s3 bucket key?\n if isinstance(filein, six.string_types):\n filename = os.path.abspath(filein)\n try:\n if type(filein) is boto.s3.key.Key:\n s3 = True\n filename = 's3://{0}/{1}'.format(filein.bucket.name,\n filein.name)\n except NameError:\n pass\n\n if conf.app.ignore_re and re.match(conf.app.ignore_re, filename):\n logging.debug('skipped {0}'.format(filename))\n return\n\n # normalize filename, take hash for key\n filename_key = hashlib.sha224(filename.encode('utf-8')).hexdigest()\n logging.info('{0}'.format(filename))\n logging.debug('sha224 of path {0}'.format(filename_key))\n\n # dispatch, these are ripe for refactoring to take\n # a file object\n if s3:\n seen_now = analyze_s3_key(filein, hash, nap)\n else:\n seen_now = analyze_file(filename, hash, nap)\n\n logging.debug('seen_now {0}'.format(seen_now))\n\n # make sure things match\n if filename_key in observations and not update:\n news = {}\n looks_the_same = compare_sightings(\n seen_now, observations[filename_key], news\n )\n if not looks_the_same:\n track_error(filename, \"%r has changed\" % filename, errors)\n elif any(news):\n update = observations[filename_key]\n update.update(news)\n observations[filename_key] = update\n observations.sync()\n logging.debug('new memory {0}'.format(news))\n # update observations\n else:\n observations[filename_key] = seen_now\n observations.sync()\n logging.info('update observations')", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # 
additional files\n check_one(filepath)", "def _file_not_found(self, config_file):\n self._files_not_found.append(config_file)", "def test_warn_duplicate_label(self, caplog: pytest.LogCaptureFixture) -> None:\n with tempfile.NamedTemporaryFile(\"w\") as file:\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as existing:\n file.writelines(existing.readlines())\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as existing:\n file.writelines(existing.readlines())\n file.flush()\n _ = YAMLParser().parse(file.name)\n assert (\n \"cobib.parsers.yaml\",\n 30,\n \"An entry with label 'Cao_2019' was already encountered earlier on in the YAML file! \"\n \"Please check the file manually as this cannot be resolved automatically by coBib.\",\n ) in caplog.record_tuples", "def test__create_excl_file_1(self):\n rsync = RsyncMethod(self.settings, self.meta, self.log, self.comms, False)\n rsync.exclude_file = os.path.join(os.environ['HOME'],\"temp/myocp_excl\")\n rsync._create_exclude_file()\n self.assertTrue(os.path.exists(rsync.exclude_file))\n self.assertEqual(self.log.getVal('info').split('|')[0], 'Settings file loaded.')\n self.assertEqual(self.log.getVal('info').split('|')[1], 'Settings file verified.')\n #self.assertEqual(self.log.getVal('info').split('|')[2], 'rsync exclusions file created at %s/temp/myocp_excl' % os.environ['HOME'])\n with open(rsync.exclude_file, 'r') as fp:\n self.assertEqual(fp.read(), \".a\\n.b\\nc\\nd\")\n os.unlink(rsync.exclude_file)", "def test_self_write(self):\n self.assertFalse(os.path.exists(self.f1))\n self.assertFalse(os.path.exists(self.f2))\n self.sync.pickle_write()\n self.assertTrue(os.path.exists(self.f1))\n self.assertTrue(os.path.exists(self.f2))", "def fix_mangled_includes(ln):\n m = proginput_re.search(ln)\n if m:\n fn = m.group(1)\n dName = os.path.dirname(fn)\n fName = os.path.basename(fn)\n for fn2 in os.listdir(dName):\n if fn2.replace(\"_\",\"\") == fName:\n ln = \"\\\\programinput{\" + os.path.join(dName,fn2) + \"}\\n\"\n break\n return ln", "def validate_files(dir, files_to_merge):\r\n for path in files_to_merge:\r\n pathname = dir.joinpath(path)\r\n if not pathname.exists():\r\n raise Exception(\"I18N: Cannot generate because file not found: {0}\".format(pathname))", "def CheckIncludeLine(fn, filename, clean_lines, linenum, include_state, error):\n fn(filename, clean_lines, linenum, include_state,\n makeErrorFn(error, ['build/include_order', 'build/include_alpha', 'readability/streams'], []))", "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def _check_import(isamAppliance, id, filepath):\n tmpdir = get_random_temp_dir()\n tmp_original_file = os.path.join(tmpdir, os.path.basename(\"tempfile.txt\"))\n\n export_file(isamAppliance, instance_id=id, filepath=tmp_original_file, check_mode=False, force=True)\n\n if files_same(tmp_original_file, filepath):\n logger.debug(\"files are the same, so we don't want to do anything\")\n shutil.rmtree(tmpdir)\n return False\n else:\n logger.debug(\"files are different, so we return True to indicate the new file should be imported\")\n shutil.rmtree(tmpdir)\n return True", "def fix_header_guard(path, header_guard):\n ifndef = re.compile('^#ifndef [^\\s]+_H_$')\n 
define = re.compile('^#define [^\\s]+_H_$')\n endif = re.compile('^#endif +// *[^\\s]+_H_$')\n fixed_ifndef = False\n fixed_define = False\n fixed_endif = False\n fixed_pragma_once = False\n\n for line in fileinput.input(path, inplace=1):\n (new_line, changes) = re.subn(ifndef, '#ifndef %s' % header_guard, line)\n if changes:\n fixed_ifndef = True\n sys.stdout.write(new_line)\n continue\n (new_line, changes) = re.subn(define, '#define %s' % header_guard, line)\n if changes:\n fixed_define = True\n sys.stdout.write(new_line)\n continue\n (new_line,\n changes) = re.subn(endif, '#endif // %s' % header_guard, line)\n if changes:\n fixed_endif = True\n sys.stdout.write(new_line)\n continue\n if pragma_once.match(line):\n fixed_pragma_once = True\n sys.stdout.write('#ifndef %s\\n' % header_guard)\n sys.stdout.write('#define %s\\n' % header_guard)\n continue\n sys.stdout.write(line)\n\n if fixed_pragma_once:\n with open(path, 'a') as file:\n file.write('\\n')\n file.write('#endif // %s\\n' % header_guard)\n\n if (fixed_ifndef and fixed_define and fixed_endif) or fixed_pragma_once:\n print('Fixed!')\n return True\n\n print('Not fixed...')\n return False", "def _is_valid_unique_fname(self, fname):\n return (fname.startswith(self._lockfilename)\n and len(fname) > len(self._lockfilename))" ]
[ "0.6422824", "0.6178515", "0.61354905", "0.6065656", "0.6057481", "0.6002684", "0.5994762", "0.5943255", "0.5886908", "0.5771777", "0.56517184", "0.5636331", "0.561987", "0.5594352", "0.5577625", "0.55584747", "0.5556574", "0.5552825", "0.5539634", "0.5462027", "0.5457787", "0.54530734", "0.54296017", "0.54276377", "0.54190814", "0.5406736", "0.5369177", "0.5338809", "0.5332449", "0.5328992", "0.5328907", "0.53221565", "0.53105056", "0.52953", "0.5293744", "0.52880436", "0.5276653", "0.5274434", "0.5273014", "0.52720964", "0.5265794", "0.52606", "0.5236658", "0.5235795", "0.52224654", "0.52203596", "0.52179205", "0.5216582", "0.5213779", "0.5212927", "0.52111894", "0.52015525", "0.5175038", "0.5157154", "0.51565397", "0.5151576", "0.5142984", "0.51306206", "0.51162314", "0.51144207", "0.5114338", "0.5112698", "0.5112259", "0.5110801", "0.51083195", "0.51057726", "0.51047665", "0.51009434", "0.509411", "0.50902224", "0.5078901", "0.50679207", "0.50665146", "0.5056457", "0.50558645", "0.5054547", "0.5050613", "0.5044247", "0.50380474", "0.5029905", "0.50290966", "0.5028559", "0.5025626", "0.5021532", "0.502005", "0.5019735", "0.50109327", "0.5005937", "0.5003939", "0.5001832", "0.49912128", "0.49890256", "0.49890155", "0.49888587", "0.49879515", "0.49833816", "0.49798962", "0.49748853", "0.49720314", "0.49708048", "0.49680096" ]
0.0
-1
Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one.
def normalize(array, norm="l2"): scaler = Normalizer(copy=True, norm=norm) return scaler.fit_transform(array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def normalize(self):\n self._data /= self.norm()", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def l2_normalize(data, axis=-1, eps=1e-6):\n ret = data / (np.linalg.norm(data, axis=axis, keepdims=True) + eps)\n return ret", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def testNormalize(self):\n v1 = Vector.ones(4)\n n = v1.norm()\n assert n == 2\n assert v1.normalize() == [ 0.5, 0.5, 0.5, 0.5 ]", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def norm(data, max_list, min_list):\n max_list, min_list = np.array(max_list), np.array(min_list)\n diff = max_list - min_list\n for i in np.arange(data.shape[1]):\n data[:, i] = (data[:, i]-min_list[i])/diff[i]\n\n data[data > 1] = 0.99\n data[data < 0] = 0.00\n return data", "def _normalise(self):\n if not 
self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def samele_wise_normalization(data):\n if np.max(data) == np.min(data):\n return np.ones_like(data, dtype=np.float32) * 1e-6\n else:\n return 1.0 * (data - np.min(data)) / (np.max(data) - np.min(data))", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize_features(block, norm=1):\n for k in block:\n for b in block[k]:\n nrm = np.sqrt((block[k][b].reshape((block[k][b].shape[0],-1))**2).sum(axis=1).mean(axis=0))\n if nrm > 0.0:\n block[k][b] *= norm/nrm", "def normalize(self):\n self.vector /= np.linalg.norm(self.vector)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize_l2norm(data,tol=0):\n data_sqrt=np.sqrt(np.square(data).sum(axis=1))\n data_sqrt.shape=(data_sqrt.shape[0],1)\n #tol=0#1e-8\n data=data/(data_sqrt+tol)\n return data", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize(vectors):\n if len(np.asarray(vectors).shape) == 1:\n return vectors / np.linalg.norm(vectors)\n norm = np.linalg.norm(vectors, axis=1)\n return vectors / norm[:, np.newaxis]", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def normalizerows(x):\n # Compute x_norm as the norm 2 of x. 
Use np.linalg.norm(..., ord=2, axis= ..., keepdims=True)\n x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)\n\n #Divide x by norm\n x = x / x_norm\n\n return x", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def normalize_l2(x):\n return x / (npla.norm(x))", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def normalize(X, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def _norm_data(data):\n if data is None:\n return data\n data_min = np.min(data)\n c_norm = np.max(data) - data_min\n return (data - data_min) / c_norm if (c_norm != 0) else (data - data_min)", "def normalizeColumns(W):\n for i in range(W.shape[1]):\n W[:, i] /= np.linalg.norm(W[:, i]) + 0.001\n\n return W", "def norm1(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return abs(X).sum()\r\n # return LA.norm(X, 1)\r", "def __call__(self, features):\n norm = []\n for data in features:\n if all(x == 0 for x in data):\n norm.append(data)\n else:\n scale = sum(x*x for x in data) ** 0.5\n normalized_data = [x / scale for x in data]\n norm.append(normalized_data)\n \n return norm", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = 
self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def normalize(self):\n\t\tnorm = self.norm()\n\t\tif norm == 0:\n\t\t\traise ValueError(\"Can't normalize zero vector\")\n\t\treturn self / norm", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def csr_l2normalize(mat, copy=False, **kargs):\n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n # normalize\n for i in range(nrows):\n rsum = 0.0 \n for j in range(ptr[i], ptr[i+1]):\n rsum += val[j]**2\n if rsum == 0.0:\n continue # do not normalize empty rows\n rsum = 1.0/np.sqrt(rsum)\n for j in range(ptr[i], ptr[i+1]):\n val[j] *= rsum\n \n if copy is True:\n return mat", "def csr_l2normalize(mat, copy=False, **kargs):\n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n # normalize\n for i in range(nrows):\n rsum = 0.0 \n for j in range(ptr[i], ptr[i+1]):\n rsum += val[j]**2\n if rsum == 0.0:\n continue # do not normalize empty rows\n rsum = 1.0/np.sqrt(rsum)\n for j in range(ptr[i], ptr[i+1]):\n val[j] *= rsum\n \n if copy is True:\n return mat", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')", "def csr_l2normalize(mat, copy=False, **kargs):\n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n # normalize\n for i in range(nrows):\n rsum = 0.0\n for j in range(ptr[i], ptr[i+1]):\n rsum += val[j]**2\n if rsum == 0.0:\n continue # do not normalize empty rows\n rsum = 1.0/np.sqrt(rsum)\n for j in range(ptr[i], ptr[i+1]):\n val[j] *= rsum\n\n if copy is True:\n return mat", "def 
csr_l2normalize(mat, copy=False, **kargs):\n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n # normalize\n for i in range(nrows):\n rsum = 0.0\n for j in range(ptr[i], ptr[i + 1]):\n rsum += val[j] ** 2\n if rsum == 0.0:\n continue # do not normalize empty rows\n rsum = 1.0 / np.sqrt(rsum)\n for j in range(ptr[i], ptr[i + 1]):\n val[j] *= rsum\n\n if copy is True:\n return mat", "def calc_norm(self, corpus):\n logger.info(\"Performing %s normalization...\" % (self.norm))\n norms = []\n numnnz = 0\n docno = 0\n for bow in corpus:\n docno += 1\n numnnz += len(bow)\n norms.append(matutils.unitvec(bow, self.norm))\n self.num_docs = docno\n self.num_nnz = numnnz\n self.norms = norms", "def normalize(normal_map, norm_thres=0.5):\n norm = np.linalg.norm(normal_map, axis=-1)\n valid = norm > norm_thres\n normal_map[valid] = normalize_vec(normal_map[valid], axis=1)\n return normal_map", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def test_scale_features_L1_norm(self):\n # Get some data\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # Correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.029552, 0.25114], [-0.969976, 0.500407], [-0.000471575, 0.248453]])\n\n # Perform L1 normalization and check answer\n cdata.scale_features('L1 norm')\n self.assertTrue(allclose(cdata.data, answer))", "def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)", "def normalise(x, dim=1):\n norm = torch.sqrt( torch.pow(x,2.).sum(dim) )\n if dim>0:\n x /= norm.unsqueeze(dim)\n return x", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def norm_data(data):\n return (data-np.min(data))/(np.max(data)-np.min(data))", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def normalize(x):\n\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n print(x_norm)\n x = x / x_norm\n ### END\n\n return x", "def normalised(cls, mat, axis=-1, order=2):\n norm = np.linalg.norm(\n mat, axis=axis, ord=order, keepdims=True)\n norm[norm == 0] = 1\n return mat / norm", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def test_normalization_scalar(features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n sum_squares = 0\n for i in feature:\n sum_squares += i * i\n sum_squares_root = np.sqrt(sum_squares)\n if sum_squares == 0:\n normalized_features.append(feature)\n else:\n 
normalized_features.append([x / sum_squares_root for x in feature])\n return normalized_features", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))", "def norm(self):\n self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))\n self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))", "def __call__(self, features: List[List[float]]) -> List[List[float]]:\n # raise NotImplementedError\n features_normalized = []\n for feature in features:\n norm = np.linalg.norm(feature)\n if norm != 0:\n feature_normalized = feature/norm\n features_normalized.append(feature_normalized.tolist())\n else:\n features_normalized.append([0] * len(feature))\n return features_normalized", "def norm(self):\n return np.sqrt(np.dot(self._data, self._data))", "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def normalize(self):\n n = 1.0 / self.norm()\n self.mV = [ x * n for x in self.mV ]\n return self", "def normalized(self):\n return self.from_points(\n vector.normalized() for vector in self._vectors)", "def norm(self):\n raise NotImplementedError", "def nuclearnorm(X):\r\n if X.size == 0:\r\n return 0\r\n return LA.norm(X) if is_vector(X) else LA.norm(X, 'nuc')\r\n\r\n\r\n pass", "def _compute_input_normalization(*amps):\n if len(amps) < 2:\n raise ValueError('At least 2 amplitudes must be provided.')\n n_bosons = len(amps)\n left_range = range(n_bosons)\n right_ranges = list(itertools.permutations(left_range))\n total = 0.\n for right_range in right_ranges:\n i_prod = 1.\n for idx1, idx2 in zip(left_range, right_range):\n # if `idx1` and `idx2` are equal the contribution is given\n # by the inner product of an amplitude with itself. 
Given\n # that we are assuming the amplitudes to be normalized,\n # the result is always 1 and we can just skip it\n if idx1 == idx2:\n pass\n # otherwise we update the partial product computing the\n # inner product of the two relevant amplitudes (states)\n i_prod *= np.vdot(amps[idx1], amps[idx2])\n total += i_prod\n return np.sqrt(total)", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def normalized(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2==0] = 1\n return a / np.expand_dims(l2, axis)", "def normalize(sequence):\n return [_norm(s) for s in sequence]", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )", "def normalizeData(pre_signal):\n\n if sp.any(sp.isnan(pre_signal)):\n print('there are NaNs in the data matrix, making them zero')\n\n pre_signal[sp.isnan(pre_signal)] = 0\n mean_vector = sp.mean(pre_signal, axis=0, keepdims=True)\n normed_signal = pre_signal - mean_vector\n norm_vector = sp.linalg.norm(normed_signal, axis=0, keepdims=True)\n norm_vector[norm_vector == 0] = 1e-116\n normed_signal = normed_signal / norm_vector\n\n return normed_signal, mean_vector, norm_vector", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out", "def normalize(self, lam):\n return (lam.T / np.sum(lam, axis=1)).T", "def _normalize(M):\r\n\r\n minVal = np.min(M)\r\n maxVal = np.max(M)\r\n\r\n Mn = M - minVal;\r\n\r\n if maxVal == minVal:\r\n return np.zeros(M.shape);\r\n else:\r\n return Mn / (maxVal-minVal)", "def norm(self):", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def normalize_temp(temp):\n\n\tnorme = np.empty(temp.shape[2])\n\tfor i in range(temp.shape[2]):\n\t\tnorme[i] = np.linalg.norm(temp[:, :, i])\n\t\ttemp[:, :, i] = temp[:, :, i] / norme[i]\n\treturn (norme)", "def normalize(self, row, col):\n if self.symmetric:\n return self.mat[row, col] / sqrt(self.train_normalization[row] * self.train_normalization[col])\n\n else:\n return self.mat[row, col] / sqrt(self.test_normalization[row] * self.train_normalization[col])" ]
[ "0.7402892", "0.7139929", "0.7094509", "0.70395654", "0.7027736", "0.6996117", "0.6938368", "0.6919151", "0.6916701", "0.6915404", "0.68970287", "0.6822748", "0.6807383", "0.6790507", "0.6774343", "0.676279", "0.6744965", "0.6737617", "0.67026675", "0.66677684", "0.6663399", "0.66509825", "0.66509825", "0.66310364", "0.66310227", "0.6628871", "0.6627201", "0.6622744", "0.66190743", "0.661529", "0.6577678", "0.6564522", "0.6563594", "0.6553337", "0.6548368", "0.6543033", "0.652395", "0.65098614", "0.64971197", "0.64679927", "0.64594465", "0.6457543", "0.64509946", "0.6450055", "0.64429533", "0.6437216", "0.642653", "0.64245516", "0.6422898", "0.6420926", "0.6420926", "0.6405018", "0.6405018", "0.64041394", "0.63926756", "0.6390307", "0.63863593", "0.6385389", "0.63814795", "0.6376248", "0.63613313", "0.6356612", "0.6337666", "0.6337291", "0.6330567", "0.6327809", "0.63110125", "0.6303918", "0.63027763", "0.6301696", "0.6287562", "0.6254844", "0.6250773", "0.6244819", "0.62432504", "0.6242265", "0.62402594", "0.62400097", "0.6239081", "0.62334377", "0.62293684", "0.6227585", "0.62016964", "0.6197977", "0.61949974", "0.6179344", "0.61710715", "0.6168316", "0.616577", "0.6163693", "0.6161524", "0.615931", "0.6155958", "0.6154644", "0.6149986", "0.61485344", "0.61436343", "0.6140239", "0.61378914", "0.6133381", "0.61312985" ]
0.0
-1
Normalise an array between a given range.
def normalize_range(array, floor=0, ceil=1):
    scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)
    return scaler.fit_transform(array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def normalize_data(X, range_d = None):\n n,d = X.shape\n\n if range_d is None:\n range_d = np.zeros([2,d])\n range_d[0,:] = np.min(X, axis = 0)\n range_d[1,:] = np.max(X, axis = 0)\n\n X = (X - range_d[0,:]) / (range_d[1,:] - range_d[0,:])\n\n return X", "def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high-low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)", "def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high-low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def normalize(data, vmin=0, vmax=1):\n data = np.array(data, dtype=np.float64)\n return (vmin + (data - data.min()) * (vmax - vmin) / (data.max() - data.min())).tolist()", "def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled", "def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def normalize(array):\n\treturn array/np.max(array)", "def normalize_array(arr, method=\"min_max\"):\r\n \r\n ret = torch.tensor(arr)\r\n if method == \"min_max\":\r\n ret -= torch.min(ret)\r\n ret /= torch.max(ret)\r\n elif method == \"mean_std\":\r\n ret -= torch.mean(ret)\r\n ret /= torch.std(ret)\r\n else:\r\n raise Exception(\"Invalid normalization method\")\r\n\r\n return 1 + ret", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def rescale_array(array, old_range, new_range, dtype):\n if 
not HAS_NUMPY:\n LOGGER.error(\"The Python library numpy is required for this operation\")\n return\n\n old_min, old_max = old_range\n if array.min() < old_min or array.max() > old_max:\n ## truncate:\n array = numpy.clip(array, old_min, old_max)\n new_min, new_max = new_range\n old_delta = float(old_max - old_min)\n new_delta = float(new_max - new_min)\n if old_delta == 0:\n return ((array - old_min) + (new_min + new_max) / 2).astype(dtype)\n else:\n return (new_min + (array - old_min) * new_delta / old_delta).astype(dtype)", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize_array(cube, new_max, new_min):\n minimum, maximum = np.min(cube), np.max(cube)\n if maximum - minimum != 0:\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n cube = m * cube + b\n return cube", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalise_between_2_values(arraylike, min_value, max_value, invert=False):\n # normalise array between min and max values\n normalised = (arraylike - min_value) / (max_value - min_value)\n # replace anything above 1 with 1\n normalised[normalised > 1] = 1\n # replace anything below 0 with 0\n normalised[normalised < 0] = 0\n # if desired, invert the normalised values\n if invert:\n normalised = abs(normalised - 1)\n return normalised", "def normalize(data):\n\n\t#return [float(x) / pow(2, 15) for x in data]\n\n\tl = [float(x) / pow(2, 15) for x in data]\n\treturn np.asarray(l)", "def normalize_minmax(data):\n _min = np.float(np.min(data))\n _max = np.float(np.max(data))\n if (_max-_min)!=0:\n img = (data - _min) / (_max-_min)\n else:\n img = np.zeros_like(data) \n return img", "def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation", "def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x", "def normalized(array):\n ptp = np.ptp(array)\n if ptp == 0:\n ptp = 1\n return (array - np.min(array)) / ptp", "def hist_normalize_linear(data, new_min, new_max):\n data_min = np.ma.min(data)\n data_max = np.ma.max(data)\n scaled = (data - data_min) * ((new_max - new_min) / (data_max - data_min))\n scaled.mask = data.mask\n return scaled", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def normalize(array):\n\n # calculate the mean of array\n array_mean = numpy.mean(array)\n if _DEBUG:\n print \"Mean of gr is:\"\n print array_mean\n\n # divide all elements by the mean\n norm_list = 
[]\n for item in array:\n norm_list.append(item/array_mean - 1)\n\n # return the result\n return norm_list", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def rescale_to_range(\n array: vtk.vtkDoubleArray,\n to_range: typing.Tuple[float, float],\n rel_tol: float = sys.float_info.epsilon,\n abs_tol: float = sys.float_info.epsilon,\n) -> vtk.vtkDoubleArray:\n to_span = to_range[1] - to_range[0]\n assert to_span >= 0\n\n # The values need to span a positive range to be able to scale to `to_range`.\n # We use at least a small span derived from the tolerances.\n array_range = array.GetValueRange()\n array_span = array_range[1] - array_range[0]\n array_center = array_range[0] + array_span / 2\n from_range = (\n array_range\n if not math.isclose(array_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n else (\n array_center - max(rel_tol * abs(array_center), abs_tol),\n array_center + max(rel_tol * abs(array_center), abs_tol),\n )\n )\n from_span = from_range[1] - from_range[0]\n\n assert not math.isclose(from_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n factor = to_span / from_span\n\n result = vtk.vtkDoubleArray()\n result.SetNumberOfValues(array.GetNumberOfValues())\n for id in range(array.GetNumberOfValues()):\n result.InsertValue(\n id, to_range[0] + (array.GetValue(id) - from_range[0]) * factor\n )\n\n return result", "def normalise(self):\n\n # Find extrema\n xmin = self.segments[0].lower_bound\n xmax = self.segments[0].upper_bound\n for seg in self.segments[1:]:\n xmin = min(xmin, seg.lower_bound)\n xmax = max(xmax, seg.upper_bound)\n\n range = xmax-xmin \n\n # Normalise\n for seg in self.segments:\n seg.lower_bound = (seg.lower_bound-xmin)/range\n seg.upper_bound = (seg.upper_bound-xmin)/range\n\n # Return\n return xmin, xmax", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(my_array: np.ndarray) -> np.ndarray:\n\n return np.abs(my_array)/np.max(np.abs(my_array))", "def normalize(x, x_max, x_min):\n return (x - x_min) / (x_max - x_min)", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)", "def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def normalize(A, normRange, normAxis):\n\n shape = A.shape\n # make a slice to take the mean in the right dimension\n # slice(None) effectively means ':', or all the elements\n normSlice = [slice(normRange[0], normRange[1]) if a is normAxis else slice(None) for a in range(len(shape))]\n norm = np.mean(A[normSlice], axis=normAxis)\n\n # make a slice to pad the numbers to make the broadcasting work\n # again, slice(None) means ':' and None 
means an empty dimension (note difference!)\n\n try:\n return A/norm\n except:\n subSlice=[slice(None) if axis == norm.shape[0] else None for axis in shape]\n return A / norm[subSlice]\n finally:\n pass", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def normalize_array(a, norm_max=255):\n c = a - np.min(a.flatten())\n c = c / np.max(c)\n centered = c * norm_max\n return centered", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def _normalize(M):\r\n\r\n minVal = np.min(M)\r\n maxVal = np.max(M)\r\n\r\n Mn = M - minVal;\r\n\r\n if maxVal == minVal:\r\n return np.zeros(M.shape);\r\n else:\r\n return Mn / (maxVal-minVal)", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalise_modular_range(value, min, max):\n return numpy.mod(value-min, max-min)+min", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def __normalize__(self, numpy_array: np.ndarray) -> np.ndarray:\n\n numpy_array = numpy_array + np.abs(self.min_value)\n numpy_array = numpy_array / np.abs(self.min_value)\n\n return numpy_array", "def __normalize(self, value, lower_bound, upper_bound):\n\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound", "def normalize(x, min_x, max_x):\n\treturn (x - min_x) / (max_x - min_x)", "def normalize(av, vmin=0., vmax=1.):\n if vmin == vmax:\n return np.ones_like(av)*vmin\n elif vmax < vmin:\n warnings.warn(\"swapping vmin and vmax, because vmax < vmin.\")\n vmin, vmax = vmax, vmin\n\n norm_one = (av - np.min(av))/(np.max(av)-np.min(av))\n return norm_one * (vmax-vmin) + vmin", "def rescale_arr(arr, amin, amax):\r\n\r\n # old bounds\r\n m = arr.min()\r\n M = arr.max()\r\n # scale/offset\r\n s = float(amax - amin) / (M - m)\r\n d = amin - s * m\r\n\r\n # Apply clip before returning to cut off possible overflows outside the\r\n # intended range due to roundoff error, so that we can absolutely guarantee\r\n # that on output, there are no values > amax or < amin.\r\n return np.clip(s * arr + d, amin, amax)", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def normalize(self, type_range = 0):\n \n for i, chromosome in enumerate(self.population):\n self.population[i] = chromosome / np.sum(np.abs(chromosome)) * self.individual_type.gene_count\n\n #for i, chromosome in enumerate(self.population):\n # if type_range == 0:\n # self.population[i] = 2 * (chromosome - np.min(chromosome))/np.ptp(chromosome) - 1\n # elif type_range == 1:\n # self.population[i] = (chromosome - np.min(chromosome))/np.ptp(chromosome)", "def norm(data, max_list, min_list):\n max_list, min_list = np.array(max_list), np.array(min_list)\n diff = max_list - min_list\n for i in np.arange(data.shape[1]):\n data[:, i] = (data[:, i]-min_list[i])/diff[i]\n\n data[data > 1] = 0.99\n data[data < 0] = 0.00\n return data", "def normalize(x, lower=-1, upper=1):\n x_norm = (upper - lower)*((x - np.min(x)) / (np.max(x) - np.min(x))) + lower\n return x_norm", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = 
array.std()\n\n return array_normalized, mu, sigma", "def adjustRange(a, vmin=0, vmax=255):\n new_a = (\n (\n # Represent array as floats ranging between 0 and 1.\n a.astype(dtype=float) / np.nanmax(a)\n\n # Fill given range.\n * (vmax - vmin) + vmin\n )\n # Convert back to regular array.\n .astype(dtype=np.uint8)\n )\n\n return new_a", "def _normalize_range():\n clipped = tf.clip_by_value(inputs, self.minimum, self.maximum)\n return -1 + 2 * (clipped - self.minimum) / length", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))", "def normalize(arr):\n\n total = sum(arr)\n\n return list(map(lambda x: x / total, arr))", "def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def normalize(array):\n quat = np.array(array)\n return quat / np.sqrt(np.dot(quat, quat))", "def normalise(array,tot=1.0):\r\n tot1 = np.sum(np.abs(array)**2)\r\n if tot1 == 0.0 :\r\n print 'bg.normalise : warning sum array = 0'\r\n arrayout = np.copy(array)\r\n else :\r\n arrayout = array * np.sqrt(tot / tot1)\r\n return arrayout", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def normalize(x, minimum=None, maximum=None):\n if minimum is None:\n minimum = np.nanmin(x)\n if maximum is None:\n maximum = np.nanmax(x)\n return (x - minimum) / (maximum - minimum)", "def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out", "def denormalize(images, min_, max_):\n return [((i + 1) / 2 * (max_ - min_)) + min_ for i in images]", "def normaliseInt(array,tot=1.0):\r\n tot1 = np.sum(array)\r\n arrayout = array * tot / tot1\r\n return arrayout", "def normalize(array: np.ndarray, value: float | None = None) -> np.ndarray:\n if value is None:\n val = array.max()\n else:\n val = value\n array = array / val\n return array", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalizeToRange(v, a, b):\n return (v - a) / (b - a)", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def zero_norm(arr):\n arr = 2 * (arr - min(arr)) / (max(arr) - min(arr)) - 1\n return arr - np.sum(arr) / len(arr)", "def normalise_slice(s):\n\n s = s - s.min()\n s = s / s.max()\n return 
s", "def percentile_normalization(data: np.ndarray, percentile: int = 1) -> np.ndarray:\n\n min_percentile = np.percentile(data, percentile)\n max_percentile = np.percentile(data, 100 - percentile)\n\n # limit maximum intensity of data by max_percentile\n data[data >= max_percentile] = max_percentile\n\n # limit minimum intensity of data by min_percentile\n data[data <= min_percentile] = min_percentile\n\n return data", "def normalize(arr, stats=False):\n arr = np.array(arr)\n mean = arr.mean()\n std = arr.std()\n normed = (arr - mean) / std\n if not stats:\n return normed\n return normed, mean, std", "def normalize_array(image_array):\n\n array = image_array.astype(np.float)\n array /= 255.0\n return array", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def auto_norm(data_set):\n min_val = data_set.min(0)\n max_val = data_set.max(0)\n ranges = max_val - min_val\n\n # norm_set = zeros(shape(data_set))\n row_m = data_set.shape[0]\n norm_set = data_set - tile(min_val, (row_m, 1))\n norm_set = norm_set / tile(ranges, (row_m, 1))\n\n return norm_set, ranges, min_val", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def normalise(values):\n max_value = max(values)\n min_value = min(values)\n factor = 32767.0 / max(max_value, abs(min_value))\n return (int(v * factor) for v in values)", "def normalize_values(values: ArrayLike, norm: str | float | bool = True) -> np.ndarray:\n values = np.asarray(values)\n assert norm\n\n if isinstance(norm, str):\n if norm == \"first\":\n divisor = values[0]\n elif norm == \"max\":\n divisor = max(values)\n else:\n raise ValueError(f\"Invalid normalization, got {norm=}\")\n else:\n divisor = float(norm)\n\n return values / divisor", "def minmax_normalize(X):\n # X -= X.min()\n # X /= X.max()\n # X -= 0.5\n X = (X-X.min()) / (X.max() - X.min())\n return X", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def norm_data(data):\n return (data-np.min(data))/(np.max(data)-np.min(data))", "def normalise_slice(slice, max_val=255):\n slice = slice - slice.min()\n slice = slice / np.float(slice.max())\n slice = slice * max_val\n return(slice)", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def normalizer(sampler, vmin, vmax, scaling='linear',\n bias=0.5, contrast=1):\n def result(x, y):\n raw = sampler(x, y)\n r = normalize(raw, 
vmin, vmax, bias, contrast, scaling)\n return r\n return result" ]
[ "0.79416615", "0.78045434", "0.75153875", "0.74919647", "0.74441695", "0.74441695", "0.74232936", "0.7257156", "0.71199733", "0.7106798", "0.7072435", "0.6989358", "0.6980073", "0.6953394", "0.69305813", "0.691604", "0.69146186", "0.68793344", "0.68729293", "0.6854594", "0.68457603", "0.6818627", "0.6813774", "0.67609537", "0.6752896", "0.67371076", "0.6729181", "0.6721583", "0.67179036", "0.67095417", "0.67011994", "0.66982853", "0.6677721", "0.6645898", "0.66240686", "0.66196704", "0.65884596", "0.6587064", "0.65760976", "0.6575951", "0.65606296", "0.65567106", "0.65503234", "0.65464014", "0.6546287", "0.65016955", "0.65001327", "0.64949954", "0.64915323", "0.64768845", "0.64665496", "0.6464156", "0.64468634", "0.6437474", "0.64163923", "0.6414372", "0.6407691", "0.63991255", "0.6390046", "0.6386366", "0.6358779", "0.6355277", "0.6354997", "0.63527054", "0.6352057", "0.6349129", "0.6322077", "0.6322077", "0.631599", "0.6310477", "0.630788", "0.6305421", "0.6303408", "0.6301776", "0.6290899", "0.62876785", "0.6284864", "0.62833774", "0.62801945", "0.6275051", "0.6273132", "0.6268967", "0.6267082", "0.62570745", "0.62449884", "0.6242126", "0.62339437", "0.62286025", "0.62256837", "0.62160915", "0.6211449", "0.6203594", "0.62006116", "0.61870915", "0.61829394", "0.61828953", "0.61625916", "0.61551124", "0.61545193", "0.61528003" ]
0.7987929
0
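The negative snippets in the record above are mostly variants of linear min-max rescaling. A minimal sketch of that core operation, assuming NumPy is available; the function name and default bounds here are illustrative and not part of the dataset:

import numpy as np

def min_max_scale(arr, new_min=0.0, new_max=1.0):
    # Linearly rescale arr so its minimum maps to new_min and its maximum to new_max.
    lo, hi = float(arr.min()), float(arr.max())
    if hi == lo:
        # Constant input: avoid division by zero and return the lower bound everywhere.
        return np.full(arr.shape, new_min, dtype=float)
    return new_min + (arr - lo) * (new_max - new_min) / (hi - lo)

# Example: min_max_scale(np.array([2.0, 4.0, 6.0])) -> array([0. , 0.5, 1. ])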
Normalise an array by its maximum absolute value. Scales and translates each feature individually such that the maximal absolute value of each feature in the array will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity.
def normalize_max_absolute(array):
    scaler = MaxAbsScaler(copy=True)
    return scaler.fit_transform(array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_normalization(array):\n return 1/np.max(array) * array.squeeze(axis=1)", "def normalize(array):\n\treturn array/np.max(array)", "def normalize(my_array: np.ndarray) -> np.ndarray:\n\n return np.abs(my_array)/np.max(np.abs(my_array))", "def maxabs_scale(X, *, axis=..., copy=...):\n ...", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)", "def normalize_array(a, norm_max=255):\n c = a - np.min(a.flatten())\n c = c / np.max(c)\n centered = c * norm_max\n return centered", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def normalise_max_abs(vector):\n\n # Check vector shape\n assert len(vector.shape) == 2\n assert vector.shape[0] < vector.shape[1]\n\n # Normalise\n for i in range(vector.shape[0]):\n maxabs = np.nanmax(np.abs(vector[i]))\n vector[i] = safe_divide(vector[i], maxabs)\n\n return vector", "def minmax_normalize(X):\n # X -= X.min()\n # X /= X.max()\n # X -= 0.5\n X = (X-X.min()) / (X.max() - X.min())\n return X", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def normalized(array):\n ptp = np.ptp(array)\n if ptp == 0:\n ptp = 1\n return (array - np.min(array)) / ptp", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a", "def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)", "def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def mms_scale(values):\r\n mms = MinMaxScaler()\r\n return 
mms.fit_transform(values)", "def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out", "def normalize(array: np.ndarray, value: float | None = None) -> np.ndarray:\n if value is None:\n val = array.max()\n else:\n val = value\n array = array / val\n return array", "def npmaxabs(arr: np.ndarray) -> float:\n return np.max(np.abs(arr))", "def scale_dataset(ds):\n for i in range(0,ds.dims):\n fmax = ds.data[0][i]\n for j in range(1,len(ds)):\n curr = ds.data[j][i]\n if curr > fmax:\n fmax = curr \n if fmax > 0:\n for j in range(0,len(ds)):\n ds.data[j][i] /= fmax", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def test_scale_features_min_max_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed with Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[1, 0.0106619], [0, 1], [0.969962, 0]])\n\n # perform min-max norm scaling on features and check answer\n cdata.scale_features('min-max norm')\n self.assertTrue(allclose(cdata.data, answer))", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize_minmax(data):\n _min = np.float(np.min(data))\n _max = np.float(np.max(data))\n if (_max-_min)!=0:\n img = (data - _min) / (_max-_min)\n else:\n img = np.zeros_like(data) \n return img", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def scale_matrix_by_max(A):\n # Section 1: Find the max value of the matrix\n max = 0\n for row in A:\n for col in row:\n if abs(col) > 
max:\n max = col\n\n # Section 2: Create a copy of the matrix A\n new = copy_matrix(A)\n rows = len(new)\n cols = len(new[0])\n\n # Section 3: Reduce each value of copied matrix by max value\n for i in range(rows):\n for j in range(cols):\n new[i][j] = new[i][j] / max\n\n return new", "def normalize(sample, maxval):\n sample = (2 * (sample.astype(np.float32) / maxval) - 1.) * 1024\n #sample = sample / np.std(sample)\n return sample", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def __normalize__(self, numpy_array: np.ndarray) -> np.ndarray:\n\n numpy_array = numpy_array + np.abs(self.min_value)\n numpy_array = numpy_array / np.abs(self.min_value)\n\n return numpy_array", "def maxabs(a, axis=None):\n maxa = a.max(axis=axis)\n mina = a.min(axis=axis)\n p = abs(maxa) > abs(mina) # bool, or indices where +ve values win\n n = abs(mina) > abs(maxa) # bool, or indices where -ve values win\n if axis == None:\n if p: return maxa\n else: return mina\n shape = list(a.shape)\n shape.pop(axis)\n out = np.zeros(shape, dtype=a.dtype)\n out[p] = maxa[p]\n out[n] = mina[n]\n return out", "def normalize_col_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):\n if clip:\n data[data<clip_min]=clip_min\n data[data>clip_max]=clip_max\n if data_max is None:\n data_max=np.max(data,axis=0)\n data_max.reshape((1,data_max.shape[0]))\n if data_min is None:\n data_min=np.min(data,axis=0)\n data_min.reshape((1,data_min.shape[0]))\n #tol=0#1e-8\n return (data-data_min)/(data_max-data_min+tol),data_min,data_max", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def rescale_arr(arr, amin, amax):\r\n\r\n # old bounds\r\n m = arr.min()\r\n M = arr.max()\r\n # scale/offset\r\n s = float(amax - amin) / (M - m)\r\n d = amin - s * m\r\n\r\n # Apply clip before returning to cut off possible overflows outside the\r\n # intended range due to roundoff error, so that we can absolutely guarantee\r\n # that on output, there are no values > amax or < amin.\r\n return np.clip(s * arr + d, amin, amax)", "def normalisation_l_inf(x):\n res = np.zeros(x.shape)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n res[i,j] = x[i,j]/(np.max(x[i,j])+1e-5)\n return(res)", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize_array(arr, method=\"min_max\"):\r\n \r\n ret = torch.tensor(arr)\r\n if method == \"min_max\":\r\n ret -= torch.min(ret)\r\n ret /= torch.max(ret)\r\n elif method == \"mean_std\":\r\n ret -= torch.mean(ret)\r\n ret /= torch.std(ret)\r\n else:\r\n raise Exception(\"Invalid normalization method\")\r\n\r\n return 1 + ret", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def norm_img(img):\n img_arr = np.array(img).astype(float)\n max_val = np.amax(img_arr)\n if max_val > 0:\n img_arr /= max_val\n return img_arr", "def normalize_range(array, floor=0, ceil=1):\n scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)\n return scaler.fit_transform(array)", "def _scale_features(self, features):\n assert isinstance(features, np.ndarray), \"Input is not a numpy array!\"\n\n return self.scaler.transform(features.reshape(1, -1))", "def normalize(arr: np.ndarray) -> np.ndarray:\n if 
max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(values):\n\n\tmax_value = float(max(map(abs, values)) or 1)\n\treturn [val / max_value for val in values]", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled", "def max_change(arr):\n return np.max(arr) - np.min(arr)", "def normalize_vector_array (vector_array ):\r\n norms = np.linalg.norm (vector_array, axis=1 )\r\n norms = np.where (norms == 0, 1, norms ) # these filtered values belong to arrays that already are normalized\r\n\r\n return vector_array / norms.reshape (-1, 1 )", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize_wrt_max(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n y_max = max(self.y)\n\n x_range = x_max - x_min\n y_range = y_max - y_min\n max_range = max(x_range, y_range)\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(max_range)\n y = y / float(max_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def _normalize(M):\r\n\r\n minVal = np.min(M)\r\n maxVal = np.max(M)\r\n\r\n Mn = M - minVal;\r\n\r\n if maxVal == minVal:\r\n return np.zeros(M.shape);\r\n else:\r\n return Mn / (maxVal-minVal)", "def normalise(array,tot=1.0):\r\n tot1 = np.sum(np.abs(array)**2)\r\n if tot1 == 0.0 :\r\n print 'bg.normalise : warning sum array = 0'\r\n arrayout = np.copy(array)\r\n else :\r\n arrayout = array * np.sqrt(tot / tot1)\r\n return arrayout", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))", "def invert(array: np.ndarray) -> np.ndarray:\n return -array + array.max() + array.min()", "def max_normalize(val, old_max):\n new_max = 100.0\n normalized = (val / old_max) * new_max\n # Cap at 
new_max\n if normalized > new_max:\n return new_max\n return normalized", "def absmax(self):\n raise NotImplementedError", "def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def max_abs(self):\n\n return np.nanmax(self.abs_data)", "def softmax(Z):\n # Change the dynamic range before normalization, to avoid precision errors\n Z -= Z.max(1)[:, np.newaxis]\n expZ = np.exp(Z)\n return expZ / expZ.sum(1)[:, np.newaxis]", "def zzX_max_norm(f):\n if poly_univariate_p(f):\n return zzx_max_norm(f)\n else:\n return max([ zzX_max_norm(coeff) for coeff in f ])", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalise_between_2_values(arraylike, min_value, max_value, invert=False):\n # normalise array between min and max values\n normalised = (arraylike - min_value) / (max_value - min_value)\n # replace anything above 1 with 1\n normalised[normalised > 1] = 1\n # replace anything below 0 with 0\n normalised[normalised < 0] = 0\n # if desired, invert the normalised values\n if invert:\n normalised = abs(normalised - 1)\n return normalised", "def maxnorm(self, priors=None):\n def maxnorm_f(x): return x.max(axis=0)\n return self.utility(maxnorm_f, priors)", "def normalize_row_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):\n if clip:\n data[data<clip_min]=clip_min\n data[data>clip_max]=clip_max\n\n if data_max is None:\n data_max=np.max(data,axis=1)\n data_max.shape=(data_max.shape[0],1)\n #if clip:\n # data_max[data_max>clip_max]=clip_max\n if data_min is None:\n data_min=np.min(data,axis=1)\n data_min.shape=(data_min.shape[0],1)\n #if clip:\n # data_min[data_min<clip_min]=clip_min\n #tol=1e-6#1e-8\n return (data-data_min)/(data_max-data_min+tol),data_min,data_max", "def normalize(list, max_value=1):\n maxi = max(list)\n mini = min(list)\n\n if maxi == mini or len(list) == 1:\n return list\n \n norm = []\n\n for item in list:\n new = max_value * ((item - mini) / (maxi - mini))\n norm.append(new)\n\n return norm", "def normalize_array(image_array):\n\n array = image_array.astype(np.float)\n array /= 255.0\n return array", "def normalise(values):\n max_value = max(values)\n min_value = min(values)\n factor = 32767.0 / max(max_value, abs(min_value))\n return (int(v * factor) for v in values)", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def z_score_normalization(data):\n # import data\n\n features = data[:, 0:-1]\n target = data[:, -1]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features))\n print('\\n')\n print('Targets:\\n\\n' + str(target))\n\n # Data standarization\n standardized_data = 
preprocessing.scale(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n print('\\n\\n')\n\n new_data = np.append(standardized_data, target.reshape(target.shape[0], -1), axis=1)\n print('\\nNew array\\n')\n print(new_data)\n\n return new_data", "def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image", "def inverse_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n X -= self.min_\n X /= self.scale_\n return X", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)", "def inverse_transform(self, X):\n # No warning for y, since there's no y variable.\n # This correpsonds to function signature in scikit-learn's code base\n X = X.copy() # type: pd.DataFrame\n X.loc[:, self._feature_mask_] *= self.scale_\n X.loc[:, self._feature_mask_] += self.min_\n return X", "def normalize_array(cube, new_max, new_min):\n minimum, maximum = np.min(cube), np.max(cube)\n if maximum - minimum != 0:\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n cube = m * cube + b\n return cube", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def standardize(X, axis=0, ddof=0):\n\n # Modified from scikit-learn.preprocessing.scale()!\n\n #X = np.asarray(X)\n X = np.asarray(X, dtype=np.float) # XXX: what about dtype? convert to float64? for higher precision? let client decide?\n Xr = np.rollaxis(X, axis) # view on X to enable broadcasting on the axis we are interested in\n \n mean_ = Xr.mean(axis=0)\n std_ = Xr.std(axis=0, ddof=ddof)\n std_[std_ == 0.0] = 1.0 # avoid NaNs due to div/zero\n\n # center mean on zero\n Xr -= mean_\n\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n mean_1 = Xr.mean(axis=0)\n if not np.allclose(mean_1, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n mean_ += mean_1\n\n # scale to unit variance\n Xr /= std_\n\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again.\n mean_2 = Xr.mean(axis=0)\n if not np.allclose(mean_2, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0.\")\n Xr -= mean_2\n mean_ += mean_2\n\n # Additional check if variances are 'close to one'\n std_1 = Xr.std(axis=0, ddof=ddof)\n if not np.allclose(std_1, 1.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. 
Standard deviation \"\n \"not close to one after scaling.\")\n\n return X, mean_, std_", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def normalize(array):\n\n # calculate the mean of array\n array_mean = numpy.mean(array)\n if _DEBUG:\n print \"Mean of gr is:\"\n print array_mean\n\n # divide all elements by the mean\n norm_list = []\n for item in array:\n norm_list.append(item/array_mean - 1)\n\n # return the result\n return norm_list", "def standardize_atlas(atlas_ni):\n atlas_data = atlas_ni.get_data()\n max_features = atlas_ni.get_data().reshape(-1, atlas_ni.shape[-1] ).max(axis=0)\n std_data = (np.abs(atlas_data) / max_features).reshape(atlas_ni.shape)\n return image.new_img_like(atlas_ni,std_data )", "def normalise(raw_data, normalise_by_column=False):\n data = raw_data\n if normalise_by_column:\n #normaliza valores usando o maximo de cada coluna\n col_maxes = raw_data.max(axis=0)\n #divide cada valor pelo maximo correspondente de cada coluna\n data = raw_data / col_maxes[np.newaxis,:] \n else:\n #divide todos os valores pelo maximo do dataset (tudo na mesma escala)\n data = raw_data / raw_data.max()\n\n return data" ]
[ "0.7536234", "0.71070904", "0.70744723", "0.6854876", "0.6705524", "0.66903067", "0.6577902", "0.6577724", "0.65433544", "0.6540987", "0.63960224", "0.63942844", "0.638665", "0.63782936", "0.6344834", "0.6317285", "0.62940407", "0.62483877", "0.6247812", "0.6247812", "0.6189317", "0.61798406", "0.61779034", "0.61678255", "0.61672604", "0.6161706", "0.6159368", "0.61536014", "0.6150235", "0.61247945", "0.6061173", "0.60529536", "0.6043856", "0.59981847", "0.59702986", "0.5962681", "0.59605", "0.59582824", "0.59564924", "0.59557164", "0.5947423", "0.59288305", "0.590164", "0.5901191", "0.59003717", "0.58981276", "0.5887554", "0.5886954", "0.5876078", "0.58703625", "0.5860592", "0.5852369", "0.5852335", "0.5843414", "0.5841525", "0.5836329", "0.58297163", "0.5828692", "0.58240134", "0.5818835", "0.5806193", "0.57917994", "0.5788697", "0.5776443", "0.57539105", "0.57294023", "0.5719658", "0.5714758", "0.57145834", "0.5709965", "0.57087106", "0.57086307", "0.570416", "0.57030624", "0.5698606", "0.56985646", "0.56770927", "0.56760246", "0.5669645", "0.56664485", "0.56509966", "0.5647775", "0.56336576", "0.56329924", "0.563294", "0.5632626", "0.56285703", "0.56276715", "0.5627261", "0.5621974", "0.5621624", "0.5612724", "0.5608696", "0.56042993", "0.55999905", "0.5598806", "0.55980045", "0.55961007", "0.55947644", "0.5591687" ]
0.8425474
0
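A minimal usage sketch for the normalize_max_absolute record above, assuming NumPy and scikit-learn's MaxAbsScaler are importable; the sample array and the printed values are illustrative, not part of the dataset:

import numpy as np
from sklearn.preprocessing import MaxAbsScaler

def normalize_max_absolute(array):
    scaler = MaxAbsScaler(copy=True)
    return scaler.fit_transform(array)

X = np.array([[ 1.0, -2.0],
              [ 2.0,  4.0],
              [-4.0,  1.0]])
# Each column (feature) is divided by its maximum absolute value (4.0 for both columns),
# so zero entries stay zero and sparsity is preserved:
# [[ 0.25 -0.5 ]
#  [ 0.5   1.  ]
#  [-1.    0.25]]
print(normalize_max_absolute(X))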
Return a diagonal mask computed from an array. Useful when the data is unchanged if you transpose the array (i.e. it is symmetric), e.g. in a heatmap.
def get_diagonal_mask(data):
    mask = np.zeros_like(data, dtype=np.bool)
    mask[np.triu_indices_from(mask)] = True
    return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diagonal(self, array):\n diags = [array[::-1, :].diagonal(i) for i in range(-array.shape[0] + 1, array.shape[1])]\n\n # Now back to the original array to get the upper-left-to-lower-right diagonals,\n # starting from the right, so the range needed for shape (x,y) was y-1 to -x+1 descending.\n diags.extend(array.diagonal(i) for i in range(array.shape[1] - 1, -array.shape[0], -1))\n return diags", "def diagonal(a, offset=0, axis1=0, axis2=1):\n # TODO(okuta): check type\n return a.diagonal(offset, axis1, axis2)", "def makeMaskFromArray(array):\n if array is None: return None\n cls = globals()[\"Mask%s\" % suffixes[str(array.dtype.type)]]\n return cls(array)", "def dilate(array):\n # kernel = [[1] * 7] * 7 # blocky 3-pixel dilation\n y, x = np.ogrid[-3:4, -3:4]\n kernel = ((x * x) + (y * y) <= 3.5**2) # disk-like 3-pixel radial dilation\n return scipy.ndimage.binary_dilation(array, structure=kernel)", "def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))", "def solution(array):\n rows = array.shape[0]\n cols = array.shape[1]\n result = np.ones((rows,cols))\n result[1:rows-1,1:cols-1] = 0\n return result", "def flatten_array(array, mask=None):\n if isinstance(array, (list, tuple)):\n if mask is None:\n return array\n array = np.asarray(array)\n if isinstance(array, np.ndarray):\n if mask is not None:\n if not isinstance(array, np.ndarray):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n elif torch.is_tensor(array):\n if mask is not None:\n if not torch.is_tensor(mask):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n else:\n raise Exception(f\"Unrecognized array type {repr(type(array))} during array flattening (mask type is {repr(type(mask))}')\")", "def diag(cls, elements, domain):\n return DDM.diag(elements, domain).to_dfm()", "def diag_indices_from(arr):\r\n if not arr.ndim >= 2:\r\n raise ValueError(\"input array must be at least 2-d\")\r\n # For more than d=2, the strided formula is only valid for arrays with\r\n # all dimensions equal, so we check first.\r\n if not np.alltrue(np.diff(arr.shape) == 0):\r\n raise ValueError(\"All dimensions of input must be of equal length\")\r\n\r\n return diag_indices(arr.shape[0], arr.ndim)", "def create_diagonal(m: NumpyRealArray) -> NumpyRealArray:\n indices = (..., *np.diag_indices(m.shape[-1]))\n retval = np.zeros((*m.shape, m.shape[-1]), dtype=m.dtype)\n retval[indices] = m\n return retval", "def writeLaserMask(self, array):\n offset = self.activeOffset\n shape = self.activeShape\n stride = self.activeStride\n \n target = pg.subArray(array, offset, shape, stride)\n target[:] = 1", "def _maskedCollapse(array_in, method): \n import numpy.ma as ma\n \n # Perform an numpy.ma array collapse along the z-axis\n if method == 'sum':\n print('(3d_collapse): Masked sum collapse of extracted slices ...')\n collapsed_array = ma.sum(array_in, axis=0)\n \n elif method == 'mean':\n print('(3d_collapse): Masked mean of extracted slices:')\n collapsed_array = ma.mean(array_in, axis=0)\n \n elif method == 'median':\n print('(3d_collapse): Masked median of extracted slices:')\n collapsed_array = ma.extras.median(array_in, axis=0)\n \n # Returns an array of type numpy.array \n 
return collapsed_array.data", "def mask(mask_key, data):\r\n _m = array.array(\"B\", mask_key)\r\n _d = array.array(\"B\", data)\r\n for i in xrange(len(_d)):\r\n _d[i] ^= _m[i % 4]\r\n return _d.tostring()", "def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))", "def apply_diagonal_inplace(self, array: 'Nparray') -> None:\n beta_ptr = 0\n\n if array.size == 2 * self.norb():\n beta_ptr = self.norb()\n\n elif array.size != self.norb():\n raise ValueError('Non-diagonal array passed'\n ' into apply_diagonal_inplace')\n\n if not array.flags['C_CONTIGUOUS']:\n array = numpy.copy(array)\n\n if fqe.settings.use_accelerated_code:\n aarray = array[:self.norb()]\n barray = array[beta_ptr:]\n _apply_diagonal_inplace(self.coeff, aarray, barray,\n self._core.string_alpha_all(),\n self._core.string_beta_all())\n else:\n alpha = numpy.zeros((self._core.lena(),), dtype=numpy.complex128)\n beta = numpy.zeros((self._core.lenb(),), dtype=numpy.complex128)\n\n for alp_cnf in range(self._core.lena()):\n occupation = self._core.string_alpha(alp_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[ind]\n alpha[alp_cnf] = diag_ele\n for bet_cnf in range(self._core.lenb()):\n occupation = self._core.string_beta(bet_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[beta_ptr + ind]\n beta[bet_cnf] = diag_ele\n\n for alp_cnf in range(self._core.lena()):\n for bet_cnf in range(self._core.lenb()):\n self.coeff[alp_cnf,\n bet_cnf] *= alpha[alp_cnf] + beta[bet_cnf]", "def subsequent_mask(mask_size):\n mask_shape = (1, mask_size, mask_size)\n # Create a lower-triangle matrix at the primary diagonal (0th)\n # such that all the elements above the diagonal are 0.\n mask = np.tril(np.ones(mask_shape), k=0).astype('uint8')\n mask = torch.from_numpy(mask)\n return mask", "def apply_mask(self, array):\n # assert that the array and Mask.data are of the same size\n assert array.shape == self.shape, \"array and mask should be of the same shape\"\n\n array_copy = array.copy()\n\n # Applying mask\n # apply func_true where Mask.data is True\n array_copy[self.data] = map(self.func_true, array_copy[self.data])\n\n # apply func_false where Mask.data is False\n array_copy[np.invert(self.data)] = map(self.func_false, array_copy[np.invert(self.data)])\n\n return array_copy", "def diag(self):\n in_diag = (self.rows == self.cols)\n diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.\n diag[self.rows[in_diag]] = self.vals[in_diag]\n return diag", "def build_mask(dqarr, bitvalue):\n bitvalue = interpret_bit_flags(bitvalue, mnemonic_map=pixel)\n\n if bitvalue is None:\n return (np.ones(dqarr.shape, dtype=np.uint8))\n return np.logical_not(np.bitwise_and(dqarr, 
~bitvalue)).astype(np.uint8)", "def diagonal(self):\n M = self.rep\n m, n = self.shape\n return [M[i, i] for i in range(min(m, n))]", "def padded_mask(array, first_pad, second_pad):\n # Split the array into two boolean arrays\n # Make a NaN mask, 1 where NaN\n nan_mask = np.isnan(array)\n # Make a mask where array 1s are 1 and everything else (0 and NaN) is 0\n first_round_mask = (array == 1)\n # Dilate <first_pad> times\n for i in range(first_pad):\n first_round_mask = dilate(first_round_mask, nan_mask)\n # Save this state of the mask\n second_round_mask = np.copy(first_round_mask)\n # Dilate <second_pad> times\n for i in range(second_pad):\n second_round_mask = dilate(second_round_mask, nan_mask)\n # Compare the first and second round dilations\n result = (second_round_mask & ~first_round_mask).astype(float)\n # Conserve NaNs\n result[nan_mask] = np.nan\n return result", "def row_col_diag(arr):\n three_sets = np.zeros((8,3), dtype=int)\n for i in range(arr.shape[0]):\n three_sets[i] = arr[i]\n for i in range(arr.shape[1]):\n three_sets[i+3] = arr[:,i]\n three_sets[6] = np.diag(arr)\n three_sets[7] = np.diag(np.flipud(arr))\n return three_sets", "def fill_diagonal(a, val):\r\n return fill_diagonal_(a, val)", "def diagonal(matrix):\n if sp.sparse.issparse(matrix):\n diag = np.array(matrix.diagonal())\n else:\n diag = np.diagonal(matrix).copy()\n return diag", "def evolve_diagonal(self, array: 'Nparray',\n inplace: bool = False) -> 'Nparray':\n beta_ptr = 0\n\n if array.size == 2 * self.norb():\n beta_ptr = self.norb()\n\n elif array.size != self.norb():\n raise ValueError('Non-diagonal array passed into evolve_diagonal')\n\n if inplace:\n data = self.coeff\n else:\n data = numpy.copy(self.coeff).astype(numpy.complex128)\n\n if not array.flags['C_CONTIGUOUS']:\n array = numpy.copy(array)\n\n if fqe.settings.use_accelerated_code:\n aarray = array[:self.norb()]\n barray = array[beta_ptr:]\n _evolve_diagonal_inplace(data, aarray, barray,\n self._core.string_alpha_all(),\n self._core.string_beta_all())\n else:\n for alp_cnf in range(self._core.lena()):\n occupation = self._core.string_alpha(alp_cnf)\n diag_ele = 0.0\n for ind in integer_index(self._core.string_alpha(alp_cnf)):\n diag_ele += array[ind]\n\n if diag_ele != 0.0:\n data[alp_cnf, :] *= numpy.exp(diag_ele)\n\n for bet_cnf in range(self._core.lenb()):\n occupation = self._core.string_beta(bet_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[beta_ptr + ind]\n\n if diag_ele:\n data[:, bet_cnf] *= numpy.exp(diag_ele)\n\n return data", "def writeScanMask(self, array):\n offset = self.scanOffset\n shape = self.scanShape\n stride = self.scanStride\n \n target = pg.subArray(array, offset, shape, stride)\n target[:] = 1", "def DiagonalGate():\n\n def f(x): # pylint: disable=invalid-name\n # x : [batch, 1, length, depth]\n x = jnp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)],\n mode='constant', constant_values=0.0)\n depth = x.shape[-1] // 3\n assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,\n x.shape)\n xs = [\n x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],\n x[:, :, 2:, 2 * depth:3 * depth]\n ]\n return jnp.concatenate(xs, axis=3)\n return tl.Fn('DiagonalGate', f)", "def fold_diag(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n for r in range(len(pixels)):\n for c in range(r):\n copy[r][c] = [255, 255, 255]\n return copy", "def create_diagonal_mask(low_to_high_map, 
target_value=1):\n low_to_high_map.drop_duplicates()\n grouped = low_to_high_map.groupby(low_to_high_map.columns[1])\n ordered_low_level_names = list()\n group_matrices = []\n for name, group in grouped:\n group_size = group.shape[0]\n # build up row/col names, order doesn't matter within a group = they are all equal\n ordered_low_level_names = ordered_low_level_names + group.iloc[:, 0].tolist()\n # set the diagonal matrix to be the target value\n single_group_matrix = np.full(shape=(group_size, group_size), fill_value=target_value)\n group_matrices.append(single_group_matrix)\n # add the individual matrices along the diagonal\n relationship_matrix = scipy.linalg.block_diag(*group_matrices)\n # convert to pandas dataframe and set names\n relationship_df = pd.DataFrame(relationship_matrix, columns=ordered_low_level_names, index=ordered_low_level_names)\n\n return relationship_df", "def get_diagonal_words(self, array, reverse=False):\n if reverse:\n array = numpy.fliplr(array)\n diagonal_array = self.get_diagonal(array)\n words = []\n for row in diagonal_array:\n word = ''.join(row)\n if len(word) > 1:\n words.append(word)\n return words", "def from_masked(cls, arr: np.masked.masked_array) -> JaggedArray:\n return cls._from_arr_and_mask(arr.compressed(), arr.mask)", "def crop_to_nonzero(arrayin, mask=None):\r\n\r\n if type(arrayin) == np.ndarray :\r\n array = arrayin\r\n elif type(arrayin) == list :\r\n array = arrayin[0]\r\n\r\n if mask==None :\r\n mask = array\r\n #most left point \r\n for i in range(mask.shape[1]):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n left = i\r\n #most right point \r\n for i in range(mask.shape[1]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n right = i\r\n #most up point \r\n for i in range(mask.shape[0]):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n top = i\r\n #most down point\r\n for i in range(mask.shape[0]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n bottom = i\r\n if type(arrayin) == np.ndarray :\r\n arrayout = array[top:bottom+1,left:right+1]\r\n elif type(arrayin) == list :\r\n arrayout = []\r\n for i in arrayin :\r\n arrayout.append(i[top:bottom+1,left:right+1])\r\n return arrayout", "def cut_transformed_array_borders(array): \n for col in range(array.shape[1]): \n col_=array[:, col]\n \n where=np.where(col_>0)\n \n if len(where[0])>0:\n \n col_[[np.min(where[0]),np.min(where[0])+1, np.max(where[0]), np.max(where[0])-1 ]]=0\n \n array[:,col]=col_\n \n for row in range(array.shape[0]): \n row_=array[row,:]\n \n where=np.where(row_>0)\n if len(where[0])>0:\n\n row_[[np.min(where[0]),np.min(where[0])+1, np.max(where[0]), np.max(where[0])-1 ]]=0\n \n array[row,:]=row_\n \n return array", "def diag(self, X):\n return self.kernel.diag(X)", "def diag(cls, diagonal, domain, shape=None):\n if shape is None:\n N = len(diagonal)\n shape = (N, N)\n return cls.from_rep(SDM.diag(diagonal, domain, shape))", "def get_forward_diagonal(self):\n start = 2\n end = 7\n step = 2\n\n return self.grid[start:end:step] # array[start:end:step]", "def diagonal(self):\n return self.rep.diagonal()", "def diag(x):\r\n xx = as_tensor_variable(x)\r\n if xx.type.ndim == 1:\r\n return alloc_diag(xx)\r\n elif xx.type.ndim == 2:\r\n return extract_diag(xx)\r\n else:\r\n raise TypeError('diag requires vector or matrix argument', x)", "def fill_diagonal(a, val):\r\n if a.ndim < 2:\r\n raise ValueError(\"array must be at least 2-d\")\r\n if a.ndim == 2:\r\n # Explicit, fast 
formula for the common case. For 2-d arrays, we\r\n # accept rectangular ones.\r\n step = a.shape[1] + 1\r\n else:\r\n # For more than d=2, the strided formula is only valid for arrays with\r\n # all dimensions equal, so we check first.\r\n if not np.alltrue(np.diff(a.shape) == 0):\r\n raise ValueError(\"All dimensions of input must be of equal length\")\r\n step = np.cumprod((1,) + a.shape[:-1]).sum()\r\n\r\n # Write the value out into the diagonal.\r\n a.flat[::step] = val", "def diag_inv(A):\n return diag(1. / diag(A))", "def get_reverse_diagonal(self):\n start = 0\n end = 9\n step = 4\n\n return self.grid[start:end:step] # array[start:end:step]", "def invert(array):\n\n f = [1, 1, 1]\n\n result = np.array(array)\n\n for row in range(result.shape[0]):\n for pixel in range(result.shape[1]):\n result[row][pixel] = f - result[row][pixel]\n\n return result", "def create_mask(shape):\n return np.zeros(shape).astype(bool)", "def getMask(self):\r\n mask = np.array(self.array, dtype=np.float32)\r\n mask[mask == 0] = np.nan\r\n return mask", "def fim_mask(model, dataset, samples, threshold):\n fisher_diagonal = fisher_matrix(model, dataset, samples)\n mask = [tensor < threshold for tensor in fisher_diagonal]\n return mask", "def DiagExpand(A):\n \n G = np.zeros(A.shape + A.shape[-1:])\n Gd = np.diagonal(G, axis1=-2, axis2=-1)\n Gd.setflags(write=True)\n Gd[:] = A\n \n return G", "def unmask_nD(X, mask):\n\n # Much faster than nisl unmask, and uses three times less memory !\n if mask.dtype != np.bool:\n raise ValueError(\"mask must be a boolean array\")\n if X.ndim != 2:\n raise ValueError(\"X must be a 2-dimensional array\")\n\n data = np.zeros(mask.shape + (X.shape[0],), dtype=X.dtype)\n data[mask, :] = X.T\n return data", "def diagonalize(width, height):\n a = create_board(width, height)\n\n for row in range(1, height - 1):\n for col in range(1, width - 1):\n if row == col:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def celluloid(array):\n shades = np.linspace(1, 0, num=4)\n\n result = np.zeros(array.shape)\n\n for row in range(array.shape[0]):\n for pixel in range(array.shape[1]):\n for color in range(array.shape[2]):\n for shade in shades:\n if array[row][pixel][color] >= shade:\n result[row][pixel][color] = shade\n break\n\n return result", "def get_off_diagonal(matrix):\n\toff_diag = scipy.array(matrix, dtype=matrix.dtype)\n\toff_diag[scipy.diag_indices_from(matrix)] = 0\n\treturn off_diag", "def diagonalize(width,height):\r\n A = createBoard(height, width) \r\n \r\n for row in range(height):\r\n for col in range(width):\r\n if row == col:\r\n A[row][col] = 1\r\n else:\r\n A[row][col] = 0 \r\n\r\n return A", "def mask_(matrices, maskval=0.0, mask_diagonal=True):\n\n b, h, w = matrices.size()\n\n indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)\n matrices[:, indices[0], indices[1]] = maskval", "def mask(self):\n return np.ones((self.size, self.size))", "def diag(self):\n assert len(self.shape) == 1 or len(self.shape) == 2\n if len(self.shape) == 1:\n dim = self.shape[0]\n qim = self.qhape[0]\n shape = [dim, dim]\n qhape = [qim, qim]\n d = self.dirs[0]\n dirs = [d, -d]\n sects = {}\n for k, v in self.sects.items():\n new_k = (k[0], k[0])\n sects[new_k] = np.diag(v)\n res = type(self)(\n shape,\n qhape=qhape,\n qodulus=self.qodulus,\n sects=sects,\n dirs=dirs,\n dtype=self.dtype,\n )\n return res\n else:\n assert self.invar\n assert self.compatible_indices(self, 0, 1)\n d = self.dirs[0]\n if self.dirs[1] + d != 0:\n warnings.warn(\n \"Automatically 
flipping dir 1 in diag.\", stacklevel=2\n )\n self = self.flip_dir(1)\n dim = self.shape[0]\n qim = self.qhape[0]\n shape = [dim]\n qhape = [qim]\n dirs = [d]\n sects = {}\n for qnum in qim:\n try:\n diag_block = self[(qnum, qnum)]\n sects[(qnum,)] = np.diag(diag_block)\n except KeyError:\n # The diagonal block was not found, so we move on.\n pass\n res = type(self)(\n shape,\n qhape=qhape,\n qodulus=self.qodulus,\n sects=sects,\n dtype=self.dtype,\n dirs=dirs,\n invar=False,\n )\n return res", "def _dilate_mask(mask, dilation_radius=5):\n disk = morphology.disk(dilation_radius, dtype=np.bool)\n dilated_mask = morphology.binary_dilation(\n np.squeeze(mask, axis=2), selem=disk)[..., np.newaxis]\n return dilated_mask", "def darken(array, amount):\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height, width)))\n m = np.max(array) * amount\n for row in range(height):\n for col in range(width):\n new_array[row, col] = ((array[row,col] - m) if (array[row,col] >= m) else (0))\n return new_array", "def get_diagonal(matrix):\n\tdegree_vector = tf.reduce_sum(matrix, 1)\n\tdiagonal = tf.diag(degree_vector, name = 'diagonal')\n\treturn diagonal", "def contour_array(self, a, masked_values=None, head=None, **kwargs):\n return self.__cls.contour_array(a=a, masked_values=masked_values,\n head=head, **kwargs)", "def get_diagonal(self, position):\n\n upper_left = self.get_upper_left_diagonal(position)\n lower_right = self.get_lower_right_diagonal(position)\n upper_right = self.get_upper_right_diagonal(position)\n lower_left = self.get_lower_left_diagonal(position)\n return upper_left | lower_right | upper_right | lower_left", "def generate_mask(image, threshold):\n # TODO: Test this and optimize to only include pixels inward of the\n # horizon\n x_pix, y_pix = image.shape\n image_median = np.median(image)\n image_mean = np.mean(image)\n image_std = np.std(image)\n image_max = image.max()\n\n # generate mask\n mask = np.where(threshold < image, False, True)\n return mask", "def create_dark_mask():\n black_dx = 60\n black_dy = 20\n dark_mask = np.zeros((black_dx, black_dy))\n for k in range(black_dy):\n dark_mask[:, k] = (np.abs(k - black_dy // 2) / (black_dy / 2.)) ** 2\n return dark_mask", "def cut_array_border(array): \n array[:, [0, array.shape[1]-1]]=0\n array[[0, array.shape[0]-1], :]=0\n \n \n return array", "def is_diagonal(row, col):\n return 1 if row == col else 0", "def block_diag_full(W_):\n assert(W_.ndim == 3)\n bsize = W_.shape[0]\n full = np.concatenate([\n np.concatenate([ np.diag(W_[:,i,j]) for j in range(W_.shape[2]) ], axis=1)\n for i in range(W_.shape[1]) ], axis=0)\n return full", "def diagonalize(width, height):\n A = createBoard(width, height)\n\n for row in range(1, height - 1):\n for col in range(1, width - 1):\n if row == col:\n A[row][col] = 1\n else:\n A[row][col] = 0\n\n return A", "def filter_isolated_pixels(array):\n filtered_array = np.copy(array)\n id_regions, num_ids = ndimage.label(filtered_array,\n structure=np.ones((3, 3)))\n id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids+1)))\n area_mask = (id_sizes == 1)\n filtered_array[area_mask[id_regions]] = 0\n return filtered_array", "def manual_mask(\n cls, array, mask, roe_corner=(1, 0), scans=None, exposure_info=None\n ):\n\n array = abstract_array.convert_array(array=array)\n\n array = frame_util.rotate_array_from_roe_corner(\n array=array, roe_corner=roe_corner\n )\n mask = frame_util.rotate_array_from_roe_corner(\n array=mask, roe_corner=roe_corner\n )\n\n array[mask == True] 
= 0.0\n\n scans = abstract_frame.Scans.rotated_from_roe_corner(\n roe_corner=roe_corner, shape_2d=array.shape, scans=scans\n )\n\n return Frame(\n array=array,\n mask=mask,\n original_roe_corner=roe_corner,\n scans=scans,\n exposure_info=exposure_info,\n )", "def _get_first_unmasked_data(array, axis):\n mask = da.ma.getmaskarray(array)\n numerical_mask = da.where(mask, -1.0, 1.0)\n indices_first_positive = da.argmax(numerical_mask, axis=axis)\n indices = da.meshgrid(\n *[da.arange(array.shape[i]) for i in range(array.ndim) if i != axis],\n indexing='ij')\n indices.insert(axis, indices_first_positive)\n first_unmasked_data = np.array(array)[tuple(indices)]\n return first_unmasked_data", "def diagonal(\n self, kernel: Kernel, inputs: Float[Array, \"N D\"]\n ) -> DiagonalLinearOperator:\n diag = vmap(lambda x: kernel(x, x))(inputs)\n\n return DiagonalLinearOperator(diag=diag)", "def mask_extended(cat_table):\n return np.invert(select_extended(cat_table))", "def dilate(mask, forbidden):\n new_mask = np.copy(mask)\n # Shift right\n new_mask[:, 1:] |= mask[:, :-1]\n # Shift left\n new_mask[:, :-1] |= mask[:, 1:]\n # Shift up\n new_mask[1:, :] |= mask[:-1, :]\n # Shift down\n new_mask[:-1, :] |= mask[1:, :]\n\n # Shift up-right\n new_mask[1:, 1:] |= mask[:-1, :-1]\n # Shift down-right\n new_mask[:-1, 1:] |= mask[1:, :-1]\n # Shift down-left\n new_mask[:-1, :-1] |= mask[1:, 1:]\n # Shift up-left\n new_mask[1:, :-1] |= mask[:-1, 1:]\n\n new_mask[forbidden] = False\n\n return new_mask", "def get_dominant_cycle(tas):\n nt,nlat,nlon = tas.shape\n to_mask = MV.zeros((nlat,nlon))\n for i in range(nlat):\n for j in range(nlon):\n to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])\n to_mask.setAxisList(tas.getAxisList()[1:])\n return to_mask", "def diagonal(d, axis=0):\n assert d.ndim == 1\n n = d.shape[0]\n times = lambda x: d * x\n trans = lambda x: _hermitian(d) * x\n times, trans = apply_along_axis(times, trans, axis)\n return Operator(times=times, trans=trans, shape=(n,n))", "def get_diagonals() -> Callable[[np.ndarray], List[np.ndarray]]:\n \n # create list to store diagonals\n diags = []\n \n # The diagonals function is recursive. 
How it works is best shown by example.\n # 1d: arr = [0, 1] then the diagonal is also [0, 1].\n \n # 2d: arr = [[0, 1],\n # [2, 3]]\n # The numpy diagonal method gives the main diagonal = [0, 3], a 1d array\n # which is recursively passed to the diagonals function.\n # To get the opposite diagonal we first use the numpy flip function to\n # reverse the order of the elements along the given dimension, 0 in this case.\n # This gives [[2, 3],\n # 0, 1]]\n # The numpy diagonal method gives the main diagonal = [2, 1], a 2d array\n # which is recursively passed to the diagonals function.\n\n # 3d: arr = [[[0, 1],\n # [2, 3]],\n # [[4, 5],\n # [6, 7]]]\n # The numpy diagonal method gives the main diagonals in the 3rd dimension\n # as rows.\n # [[0, 6],\n # [1, 7]]\n # Note that the diagonals of this array are [0, 7] and [6, 1] which are\n # retrieved by a recurive call to the diagonals function.\n # We now have 2 of the 4 diagonals of the orginal 3d arr.\n # To get the opposite diagonals we first use the numpy flip function which\n # gives\n # [[[4, 5],\n # [6, 7]],\n # [[0, 1],\n # [2, 3]]]\n # and a call to the numpy diagonal method gives\n # [[4, 2],\n # [5, 3]]\n # The diagonals of this array are [4, 3] and [2, 5]\n # We now have all 4 diagonals of the original 3d arr.\n\n def diagonals(arr: np.ndarray) -> List[np.ndarray]:\n if arr.ndim == 1:\n diags.append(arr)\n else:\n diagonals(arr.diagonal())\n diagonals(np.flip(arr, 0).diagonal())\n return diags\n\n return diagonals", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def nonans(array):\n return array[~np.isnan(array)]", "def _mask_array(mask, *args):\n invalid = ~mask # True if invalid\n args_masked = []\n for arg in args:\n if arg.size > 1 and arg.shape != invalid.shape:\n raise ValueError('Shape mismatch between mask and array.')\n arg_masked = arg.astype(np.float64)\n if arg.size == 1:\n pass\n elif invalid.size == 1:\n arg_masked = np.nan if invalid.item() else arg_masked\n elif arg.size > 1:\n arg_masked[invalid] = np.nan\n args_masked.append(arg_masked)\n return args_masked[0] if len(args_masked) == 1 else args_masked", "def diagonal_conv(n):\n\n\t# Building bi-dimensional array\n\tlim = int(math.sqrt(n))\n\tconv = np.zeros((lim,lim))\n\n\t# If element on diagonal set to 1/lim\n\tfor i in range(0,lim ):\n\t\tfor j in range(0,lim ):\n\t\t\tif j == i:\n\t\t\t\tconv[i,j]= 1/lim\n\n\t# Reshape to uni-dimensional array\n\tconv = np.reshape(conv,n)\n\n\treturn conv", "def diagonalise(self, input, batch):\n if len(input.size()) == 1:\n return torch.diag(input)\n if len(input.size()) == 2:\n if not batch:\n return torch.diag(vec(input))\n else:\n bdiag = torch.Tensor().to(self.device)\n for i in range(input.size()[0]):\n bdiag = torch.cat((bdiag, torch.diag(input[i]).unsqueeze(0)), axis=0)\n return bdiag\n\n if len(input.size()) == 3 and batch:\n bdiag = torch.Tensor()\n for i in range(input.size()[0]):\n bdiag = torch.cat((bdiag, torch.diag(vec(input[i])).unsqueeze(0)), axis=0)\n\n return bdiag\n else:\n print('Dimension of inpout tensor should only be 1,2,3.')", "def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n 
mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod", "def flatten_cols(self, arr: np.array) -> torch.Tensor:\n snake_ = []\n k = 1\n for i in range(arr.shape[1]):\n snake_ += list(arr[:, i][::k])\n k *= -1\n return torch.tensor(snake_).unsqueeze(-1)", "def row_mask(self, row, state):\n\n mask = self.all_rows_mask & ~(1 << row)\n factor = None\n dim = 0\n\n for col, func in Panel.Leds[row].items():\n value = func(state)\n\n # non-dimmable LED\n if value is False:\n continue\n if value is True:\n mask |= 1 << col\n continue\n\n # dimmable LED\n if value >= 0.01:\n mask |= 1 << col\n\n if value <= 0.99:\n dim = 1 << col\n factor = value * value\n\n return (mask, dim, factor)", "def diagonal(self) -> Float[Array, \" N\"]:\n return self.value * jnp.ones(self.size)", "def diagonal(cube_edge: int=128,\n radius: int=10,\n foreground: int=1,\n dtype=np.uint8):\n if 2 * radius > cube_edge:\n raise ValueError(\"Given radius '{}' is larger than than cube edge length {}\"\n .format(radius, cube_edge))\n stack = np.zeros((cube_edge, cube_edge, cube_edge), dtype=bool)\n cylinder = [\n ((0, 0, 0), (cube_edge - 1, cube_edge - 1, cube_edge - 1), radius)\n ]\n stack = add_cylinder_px(stack, *cylinder[0])\n return volume_bool_to_dtype(stack, fg=foreground, dtype=dtype)", "def getdiag(self):\n out = []\n for x in xrange(0, self.lendiag()):\n out.append(self.retrieve(x))\n return out", "def invert(array: np.ndarray) -> np.ndarray:\n return -array + array.max() + array.min()", "def onlydiag(self):\n for y in xrange(0, len(self.a)):\n if not (isinstance(self.a[y], fakelist) and (len(self.a[y].a) == 0 or (len(self.a[y].a) == 1 and y in self.a[y].a))):\n return False\n return True", "def get_diagonal_subtensor_view(x, i0, i1):\r\n # We have to cast i0 and i0 to int because python 2.4 (and maybe later)\r\n # do not support indexing with 0-dim, 'int*' ndarrays.\r\n i0 = int(i0)\r\n i1 = int(i1)\r\n if x.shape[i0] < x.shape[i1]:\r\n raise NotImplementedError('is this allowed?')\r\n idx = [slice(None)] * x.ndim\r\n idx[i0] = slice(x.shape[i1] - 1, None, None)\r\n xview = x.__getitem__(tuple(idx))\r\n strides = list(xview.strides)\r\n strides[i1] -= strides[i0]\r\n xview.strides = strides\r\n return xview", "def bit_invert(array: np.ndarray) -> np.ndarray:\n try:\n return np.invert(array)\n except TypeError:\n raise ValueError(\n f\"The datatype {array.dtype} could not be safely inverted. This usually means the array is a float-like datatype. 
Cast to an integer-like datatype first.\"\n )", "def get_palace_diagonal_red(self):\n\n return self._palace_diagonal_red", "def diag(diag_elements):\n return tf.diag(tf.reshape(diag_elements, [-1]))", "def generate_inpaint_mask(n_samples, n_colors, spatial_width):\n mask = np.zeros((n_samples, n_colors, spatial_width, spatial_width), dtype=bool)\n # simple mask -- just mask out half the image\n mask[:,:,:,spatial_width/2:] = True\n return mask.ravel()", "def _invert_nonzero(arr):\n arr_inv = arr.copy()\n nz = np.nonzero(arr)\n arr_inv[nz] = 1 / arr[nz]\n return arr_inv", "def inflate_mask(mask):\n kernel = np.ones((12, 12), np.uint8)\n return cv2.dilate(mask, kernel, 1)", "def compute_mask(D, H, W, window_size, shift_size, device):\n img_mask = torch.zeros((1, D, H, W, 1), device=device)\n cnt = 0\n for d in (slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None)):\n for h in (slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None)):\n for w in (slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None)):\n img_mask[:, d, h, w, :] = cnt\n cnt += 1\n mask_windows = window_partition(img_mask, window_size)\n mask_windows = mask_windows.squeeze(-1)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n return attn_mask", "def mask(self, mask, logger=logger):\n if self.nodata is not None:\n da_masked = self._obj.where(mask != 0, self.nodata)\n else:\n logger.warning(\"Nodata value missing, skipping mask\")\n da_masked = self._obj\n return da_masked", "def get_mask(data):\n # saturated CCD count\n saturation_adu = 63000\n\n mask_sat = (data[:, 20:-20] >= saturation_adu)\n\n mask_bad = np.zeros_like(data[:, 20:-20], dtype=np.int16)\n # currently no bad pixels in FOCES CCD\n\n mask = np.int16(mask_sat)*4 + np.int16(mask_bad)*2\n\n return mask", "def set_diag(x, new_diag):\n arr_shape = x.shape\n off_diag = (1 - _torch.eye(arr_shape[-1])) * x\n diag = _torch.einsum(\"ij,...i->...ij\", _torch.eye(new_diag.shape[-1]), new_diag)\n return diag + off_diag" ]
[ "0.72953194", "0.61421484", "0.6136961", "0.58667374", "0.5818971", "0.558432", "0.5531007", "0.55216306", "0.5488027", "0.5458054", "0.5457682", "0.5436873", "0.54321617", "0.5413237", "0.54027534", "0.5357985", "0.5352605", "0.53214806", "0.53060347", "0.5296002", "0.5281449", "0.52566785", "0.52535075", "0.52481425", "0.5234038", "0.5229512", "0.5224252", "0.52169454", "0.52145994", "0.52086914", "0.51973146", "0.519227", "0.51844645", "0.5173754", "0.5160202", "0.51462257", "0.51326394", "0.5129325", "0.5127363", "0.51034296", "0.50998974", "0.50866735", "0.5077205", "0.5025731", "0.5024435", "0.50228333", "0.5007683", "0.49943757", "0.49891445", "0.49863753", "0.4986353", "0.4964397", "0.4952363", "0.49441957", "0.49340355", "0.4926007", "0.48962528", "0.48934662", "0.48799598", "0.4877861", "0.4871452", "0.48665395", "0.48639128", "0.48638198", "0.48592806", "0.48582056", "0.4851837", "0.48507643", "0.4842818", "0.48370528", "0.48263824", "0.48183182", "0.48128846", "0.48030007", "0.4801827", "0.4800016", "0.4792564", "0.47864765", "0.47819287", "0.47799015", "0.47662315", "0.47658223", "0.47595796", "0.47486454", "0.47421873", "0.47415078", "0.47395015", "0.4731617", "0.4720171", "0.47199196", "0.47122133", "0.47094703", "0.47089878", "0.47040045", "0.46964994", "0.4696326", "0.46799123", "0.46786475", "0.4669641", "0.46626097" ]
0.706273
1
Generate a generator that takes a group of examples in numpy.array form and their labels, and yields the examples and labels according to the given batch size.
def minibatches(inputs=None, targets=None, batch_size=None, allow_dynamic_batch_size=False, shuffle=True):\n    if len(inputs) != len(targets):\n        raise AssertionError(\n            "The length of inputs and targets should be equal")\n    if shuffle:\n        indices = np.arange(len(inputs))\n        np.random.shuffle(indices)\n    # for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):\n    # chulei: handling the case where the number of samples is not a multiple\n    # of batch_size, avoiding wasting samples\n    for start_idx in range(0, len(inputs), batch_size):\n        end_idx = start_idx + batch_size\n        if end_idx > len(inputs):\n            if allow_dynamic_batch_size:\n                end_idx = len(inputs)\n            else:\n                break\n        if shuffle:\n            excerpt = indices[start_idx:end_idx]\n        else:\n            excerpt = slice(start_idx, end_idx)\n        if (isinstance(inputs, list) or isinstance(targets, list)) and shuffle:\n            # zsdonghao: for list indexing when shuffle==True\n            yield [inputs[i] for i in excerpt], [targets[i] for i in excerpt]\n        else:\n            yield inputs[excerpt], targets[excerpt]
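A minimal usage sketch for the generator above, assuming `inputs` and `targets` are NumPy arrays of equal length; the array shapes and the batch size of 4 are assumptions for illustration:

import numpy as np

# Illustrative data: 10 examples with 3 features each, plus integer labels.
X = np.arange(30, dtype=np.float32).reshape(10, 3)
y = np.arange(10)

# Iterate over shuffled minibatches of 4 examples; the final smaller batch
# (2 examples) is still yielded because allow_dynamic_batch_size=True.
for x_batch, y_batch in minibatches(X, y, batch_size=4,
                                    allow_dynamic_batch_size=True,
                                    shuffle=True):
    print(x_batch.shape, y_batch.shape)  # (4, 3) (4,), (4, 3) (4,), then (2, 3) (2,)

With allow_dynamic_batch_size=False the trailing partial batch would be dropped instead of yielded.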
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def gen_batches(data, batch_size=8, randomize=False):\n indices = list(range(len(data)))\n targets = [randint(0, N_CLASSES - 1) for _ in indices] # random labels\n if randomize:\n shuffle(indices)\n\n for start in range(0, len(data), batch_size):\n labels = np.array(targets[start:start + batch_size])\n yield (pad_sequences(data[indices[start:start + batch_size]]),\n labels, labels)", "def get_batches_fn(batch_size):\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def test_batch_generator(self, dir_name):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n labels = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(dir_name + \"all.txt\")\n sample_gen = self.dev_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count, label in sample_gen:\n seq_lengths[i], labels[i], unique_counts[i] = seq_length, label, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts, labels\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i], labels[:i]\n\n fi.close()", "def generator(numbers, number_labels, batch_size=32):\n while True: # Loop forever so the generator never terminates\n\n images = []\n labels = []\n\n for batch_sample in range(batch_size):\n img, label = create_numbers(numbers, number_labels, return_label=True)\n\n # Here we will convert the label to a format that Keras API can process:\n n_label = np.zeros((5, 11), dtype='int')\n for i, digit in enumerate(label):\n if digit == \".\":\n n_digit = 10\n else:\n n_digit = int(digit)\n\n n_label[i][n_digit] = 1\n\n images.append(img)\n # labels.append(label)\n labels.append(n_label)\n\n X_train = np.array(images)\n if len(X_train.shape) == 
3:\n X_train = np.expand_dims(X_train, -1)\n\n y_temp = np.array(labels)\n\n y1 = y_temp[:, 0, :]\n y2 = y_temp[:, 1, :]\n y3 = y_temp[:, 2, :]\n y4 = y_temp[:, 3, :]\n y5 = y_temp[:, 4, :]\n\n yield X_train, [y1, y2, y3, y4, y5]", "def gen_batch(img_dir, id_label_dict, batch_size, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n num_images = len(img_file_path)\n while True:\n for i in range(0, num_images-batch_size, batch_size):\n X, y = gen_data_file(img_file_path[i:i+batch_size], id_label_dict, num_class)\n yield X, y", "def batch_generator(batch_size, file, dataset, indices, labels=\"labels\"):\n sample_size = len(indices)\n n_batches = int(sample_size/batch_size)\n h5f = h5py.File(file,'r')\n instarget = Target('AAAAAA')\n aa_to_int = instarget.predefining_dict()\n while True: \n for i in range(n_batches):\n if i == n_batches:\n batch_samples = h5f[dataset][i*batch_size:sample_size]\n seqs_onehot = instarget.int_to_onehot(list(batch_samples), len(aa_to_int))\n batch_y = h5f[labels][i*batch_size:sample_size]\n else:\n batch_samples = h5f[dataset][i*batch_size:i*batch_size+batch_size]\n seqs_onehot = instarget.int_to_onehot(list(batch_samples), len(aa_to_int))\n batch_y = h5f[labels][i*batch_size:i*batch_size+batch_size]\n yield (seqs_onehot, batch_y)", "def batch_generator(batch_size, data, labels=None):\n n_batches = int(np.ceil(len(data) / float(batch_size)))\n idx = np.random.permutation(len(data))\n data_shuffled = data[idx]\n if labels is not None:\n labels_shuffled = labels[idx]\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n if labels is not None:\n yield data_shuffled[start:end, :], labels_shuffled[start:end]\n else:\n yield data_shuffled[start:end, :]", "def data_generator(batch_size, preprocessor, x, y):\n num_examples = len(x)\n examples = zip(x, y)\n examples = sorted(examples, key = lambda x: x[0].shape[0])\n end = num_examples - batch_size + 1\n batches = [examples[i:i+batch_size]\n for i in range(0, end, batch_size)]\n random.shuffle(batches)\n while True:\n for batch in batches:\n x, y = zip(*batch)\n yield preprocessor.process(x, y)", "def multi_batch_generator(batch_size, *data_arrays):\n assert(data_arrays)\n num_examples = len(data_arrays[0])\n for i in range(1, len(data_arrays)):\n assert(len(data_arrays[i]) == num_examples)\n\n for i in range(0, num_examples, batch_size):\n # Yield matching slices from each data array.\n yield tuple(data[i:i+batch_size] for data in data_arrays)", "def batch_generate(self, inputs, labels, batch_size=64):\n inputs_image, inputs, labels = check_inputs_labels(inputs, labels)\n arr_x = inputs\n arr_y = labels\n len_x = inputs_image.shape[0]\n batch_size = check_int_positive('batch_size', batch_size)\n batches = int(len_x / batch_size)\n rest = len_x - batches*batch_size\n res = []\n for i in range(batches):\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[i*batch_size: (i + 1)*batch_size] for sub_items in arr_x])\n else:\n x_batch = arr_x[i*batch_size: (i + 1)*batch_size]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[i*batch_size: (i + 1)*batch_size] for sub_labels in arr_y])\n else:\n y_batch = arr_y[i*batch_size: (i + 1)*batch_size]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n if rest != 0:\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[batches*batch_size:] for sub_items in arr_x])\n else:\n x_batch = 
arr_x[batches*batch_size:]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[batches*batch_size:] for sub_labels in arr_y])\n else:\n y_batch = arr_y[batches*batch_size:]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n adv_x = np.concatenate(res, axis=0)\n return adv_x", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def multiple_generator(X: np.ndarray or List[np.ndarray],\n y: np.ndarray,\n batch_size: int) -> \"Generator\":\n\n list_of_generators = []\n for x in X:\n generator = create_generator()\n generator.fit(x, augment=True)\n generator = generator.flow(x, y, batch_size=batch_size)\n list_of_generators.append(generator)\n\n while True:\n\n generator_data = []\n generator_class = None\n\n for generator in list_of_generators:\n x, y = generator.next()\n\n generator_data.append(x)\n if generator_class is None:\n generator_class = y\n\n yield generator_data, generator_class # Yield both images and their mutual label", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n 
# Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj", "def multi_input_generator(df, batch_size, source_dir,shuffle=True):\n\n idx = 0\n\n while True:\n if shuffle:\n batch = df.sample(n=batch_size, replace=False)\n else:\n batch = df.loc[idx:(idx*batch_size), :] #attention:works only with batch_size=1\n\n batch_input1 = []\n batch_input2 = []\n batch_output = []\n\n # Read in each input, perform preprocessing and get labels\n for i in batch.index:\n\n full_path = source_dir + str(batch.loc[i].dx) + \"/\" + str(batch.loc[i].aug_id)\n input1 = get_input(full_path)\n input2 = [batch.loc[i].age, batch.loc[i].sex]\n output = batch.loc[i].dx\n\n input_pre = preprocess_input(input1)\n batch_input1 += [ input_pre ]\n batch_input2 += [ input2 ]\n batch_output += [ output ]\n\n # flatten the image list so that it looks like the tensorflow iterator\n batch_input1 = [val for sublist in batch_input1 for val in sublist]\n\n # Return a tuple of ([input,input],output) to feed the network\n batch_x1 = np.array(batch_input1)\n batch_x2 = np.array(batch_input2, dtype=\"float32\")\n batch_y = lb.transform(np.array(batch_output)).astype(\"float32\")\n\n yield[batch_x1, batch_x2], batch_y\n idx += 1\n\n if idx >= len(df):\n break", "def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"batch size is larger than all examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_labels = self._labels[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data,batch_labels", "def generate(\n self,\n dataset: Tensor,\n labels: Tensor,\n chunk_size: int) -> Tuple[\n int, Iterator[Tuple[Tensor, Tensor]]]:", "def make_batch(filenames, batch_size):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(single_example_parser, num_parallel_calls=1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size, drop_remainder=True)\n iterator = dataset.make_one_shot_iterator()\n\n image_batch, label_batch = iterator.get_next()\n return image_batch, label_batch", "def generate_batch(self, batch_size):\n n_words = len(self.center_words)\n while self.data_index <= n_words:\n self.data_index += 
batch_size\n yield self.center_words[self.data_index-batch_size:self.data_index], self.context_words[self.data_index-batch_size:self.data_index], self.neg_samples[self.data_index-batch_size:self.data_index, :]", "def test_generate_batch_from_several_1d_arrays_with_dividable_batch_size(\n arrays,\n batch_size,\n expected):\n gen = BatchGenerator(*arrays, batch_size=batch_size)\n\n first, second = next(gen.flow())\n\n assert first == expected[0]\n assert second == expected[1]", "def next_batch(self, batch_size):\n # Get batch\n assert(batch_size == 1)\n em, mask_list, seed_list = self.next_example(self.K)\n\n # Reshape for batch size 1\n em_batch = np.expand_dims(em, 0)\n mask_list = [np.expand_dims(m,0) for m in mask_list]\n \n return em_batch, mask_list", "def build_train_generator(X: numpy.array, y: numpy.array,\n batch_size: int = 500) -> Iterable[Tuple[numpy.array]]:\n assert X.shape[0] == y.shape[0], \"Number of samples mismatch in X and y.\"\n\n def xy_generator():\n while True:\n n_batches = X.shape[0] // batch_size\n if n_batches * batch_size < X.shape[0]:\n n_batches += 1 # to yield last samples\n for i in range(n_batches):\n start = i * batch_size\n end = min((i + 1) * batch_size, X.shape[0])\n yield X[start:end], y[start:end]\n return xy_generator()", "def generate(self, labels, list_IDs, n_classes):\n # Infinite loop\n while 1:\n # Generate order of exploration of dataset\n indexes = self.__get_exploration_order(list_IDs)\n\n # Generate batches\n imax = int(len(indexes)/self.batch_size)\n for i in range(imax):\n # Find list of IDs\n list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]\n print(\"Producing\")\n #print(list_IDs_temp)\n # Generate data\n X, y = self.__data_generation(labels, list_IDs_temp, n_classes)\n # print(X.shape)\n # print(y.shape)\n #print(\"Target Label\")\n #print(y)\n gc.collect()\n yield X, y", "def generate(self, batch_size, s=\"train\"):\n while True:\n pairs, targets = self.get_batch(batch_size,s)\n yield (pairs, targets)", "def pklbatcher(inputs, targets, batch_size, shuffle=False, augment=False,\n img_shape=(321, 481, 3)):\n assert len(inputs) == len(targets)\n indices = inputs.keys()\n if shuffle:\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batch_size]\n else:\n excerpt = indices[start_idx:start_idx + batch_size]\n # Data augmentation\n im = []\n targ = []\n for i in range(len(excerpt)):\n img = inputs[excerpt[i]]['x']\n tg = targets[excerpt[i]]['y'] > 2\n if augment:\n # We use shuffle as a proxy for training\n if shuffle:\n img, tg = bsd_preprocess(img, tg)\n im.append(img)\n targ.append(tg)\n im = np.stack(im, axis=0)\n targ = np.stack(targ, axis=0)\n yield im, targ, excerpt", "def generate_batch(self, batch_size=8, shuffle=True):\n if self._contour_dicom_folder:\n contour_files = glob(os.path.join(self._contour_dicom_folder, \"*.h5\"))\n if shuffle:\n contour_files = np.random.permutation(contour_files)\n contours_generator = self._contour_folder_gen(contour_files)\n else:\n contours_generator = self._contour_dicom_generator\n\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 0\n for idx, (dataset, sources) in enumerate(contours_generator):\n if batch_idx > 0 and batch_idx % batch_size == 0:\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 
0\n try:\n x_data = self._parse_channels(dataset, self.x_channels)\n y_data = self._parse_channels(dataset, self.y_channels)\n x_batch.append(x_data)\n y_batch.append(y_data)\n sources_batch.append(sources)\n batch_idx += 1\n except ValueError:\n # Log Error\n err_msg = \"Missing all channels in {}\".format(sources[\"filename\"])\n self._log_error(err_msg)\n\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. 
Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def generate(self, batch_size, s=\"train\"):\n\t\twhile True:\n\t\t\tpairs = self.getBatch(batch_size)\n\t\t\tfeaturesX = self.getFeatures(pairs)\n\t\t\twordEmb = self.getWordEmbeddings(pairs)\n\t\t\tposEmb = self.getPosEmbeddings(pairs)\n\t\t\ttarget = to_categorical(pairs['target'].values, num_classes=2)\n\t\t\tyield ([wordEmb, posEmb, featuresX], target)", "def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2", "def generate(self, batch_size, s=\"train\"):\n\t\twhile True:\n\t\t\tpairs = self.getBatch(batch_size)\n\t\t\tfeaturesX = self.getFeatures(pairs)\n\t\t\twordEmb = self.getWordEmbeddings(pairs)\n\t\t\tposEmb = self.getPosEmbeddings(pairs)\n\t\t\t#target = to_categorical(pairs['target'].values, num_classes=2)\n\t\t\tyield ([wordEmb, posEmb, featuresX])", "def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n #fake_image = [1] * 784\r\n fake_image = [1]*6400\r\n if self.one_hot:\r\n #fake_label = [1] + [0] * 9\r\n fake_label = [1]+[0]*(people-1)\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, 
labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def next_batch_set(images, labels, batch_size=128):\n indices = np.random.choice(len(images), batch_size)\n batch_images = images[indices]\n batch_labels = labels[indices]\n return batch_images, batch_labels", "def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n self._indicator = 0\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"too lager batch size than examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_label = self._label[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data, batch_label", "def star_detect_data_generator(sequences, labels, batch_size, shuffle=False):\n b = 0\n sequence_index = -1\n sequence_id = 0\n sequence_ids = np.array([i for i in range(len(sequences))])\n batch_review_sequences, batch_review_labels = None, None\n while True:\n try:\n sequence_index = (sequence_index + 1) % len(sequences)\n if shuffle and sequence_index == 0:\n np.random.shuffle(sequences)\n sequence_id = sequence_ids[sequence_index]\n sequence_input = sequences[sequence_id]\n if labels[sequence_id] == 1:\n label_output = [1, 0, 0, 0, 0]\n elif labels[sequence_id] == 2:\n label_output = [0, 1, 0, 0, 0]\n elif labels[sequence_id] == 3:\n label_output = [0, 0, 1, 0, 0]\n elif labels[sequence_id] == 4:\n label_output = [0, 0, 0, 1, 0]\n else:\n label_output = [0, 0, 0, 0, 1]\n label_output = np.array(label_output)\n if b == 0:\n batch_review_sequences = np.zeros((batch_size,) + sequence_input.shape, dtype=sequence_input.dtype)\n batch_review_labels = np.zeros((batch_size,) + label_output.shape, dtype=label_output.dtype)\n batch_review_sequences[b] = sequence_input\n batch_review_labels[b] = label_output\n b += 1\n if b >= batch_size:\n yield 
batch_review_sequences, batch_review_labels\n b = 0\n except:\n raise Exception('An error occurred while processing sequence ' + str(sequence_id))", "def generate_batch(self, batch_size, rand=None, *args, **kwargs):\n return [\n self.generate_datasets(rand, *args, **kwargs) for _ in range(batch_size)\n ]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n #fake_image = [1.0 for _ in xrange(784)]\n fake_image = [1.0 for _ in range(784)]\n fake_label = 0\n #return [fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in xrange(batch_size)]\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next(self, batch_size=np.inf):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n # shuffle the data each pass over it.\n rng_state = np.random.get_state()\n np.random.shuffle(self.data)\n np.random.set_state(rng_state)\n np.random.shuffle(self.labels)\n \n end_idx = min(self.batch_id + batch_size, len(self.data))\n batch_data = (self.data[self.batch_id:end_idx])\n batch_labels = self.labels[self.batch_id:end_idx]\n batch_seqlen = (self.seqlen[self.batch_id:end_idx])\n self.batch_id = end_idx\n return batch_data, batch_labels, batch_seqlen", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n end = self._index_in_epoch\n\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n\r\n if self._index_in_epoch > self._num_examples:\r\n # After each epoch we update 
this\r\n self._epochs_done += 1\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n #print(\"numexamples \",self._num_examples)\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n\r\n return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def get_images_batch(self, batch_size):\n images = []\n labels = []\n num_classes = len(self.samples_per_class.keys())\n if batch_size < num_classes:\n raise Exception(\"Batch smaller than the number of classes!\")\n rest = batch_size % num_classes\n idxs = []\n if rest == 0:\n num_samples_per_class = batch_size // num_classes\n for key in self.samples_per_class.keys():\n idxs = np.hstack((\n idxs,\n np.random.choice(self.samples_per_class[key], num_samples_per_class)\n ))\n else:\n num_samples_per_class = np.hstack((\n np.full(rest, 1 + (batch_size // num_classes)),\n np.full(num_classes - rest, batch_size // num_classes)\n ))\n for ikey, key in enumerate(self.samples_per_class):\n idxs = np.hstack((\n idxs,\n np.random.choice(self.samples_per_class[key], [num_samples_per_class[ikey]])\n ))\n for idx in idxs:\n imgFilename = os.path.join(os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n idx)\n images.append(self.image_preprocessor(imageio.imread(imgFilename)))\n labels.append(\n tuple(self.dataframe_labeled_samples.loc[idx][self.class_names].values.astype('float')))\n\n images = np.asarray(images)\n labels = np.asarray(labels, 'int')\n return images, labels", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def generate_batches(image, label, batch_size, shuffle):\n\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n if shuffle:\n images, labels = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n capacity=100,\n min_after_dequeue=50,\n allow_smaller_final_batch=True)\n else:\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n allow_smaller_final_batch=True)\n\n # Display the training images in Tensorboard\n tf.summary.image('images', images)\n\n return images, labels", "def sent_detect_data_generator(sequences, labels, batch_size, shuffle=False):\n b = 0\n sequence_index = -1\n sequence_id = 0\n sequence_ids = np.array([i for i in range(len(sequences))])\n batch_review_sequences, batch_review_labels = None, None\n while True:\n try:\n sequence_index = (sequence_index + 
1) % len(sequences)\n if shuffle and sequence_index == 0:\n np.random.shuffle(sequences)\n sequence_id = sequence_ids[sequence_index]\n sequence_input = sequences[sequence_id]\n label_output = [1, 0] if labels[sequence_id] > 3 else [0, 1]\n label_output = np.array(label_output)\n if b == 0:\n batch_review_sequences = np.zeros((batch_size,) + sequence_input.shape, dtype=sequence_input.dtype)\n batch_review_labels = np.zeros((batch_size,) + label_output.shape, dtype=label_output.dtype)\n batch_review_sequences[b] = sequence_input\n batch_review_labels[b] = label_output\n b += 1\n if b >= batch_size:\n yield batch_review_sequences, batch_review_labels\n b = 0\n except:\n raise Exception('An error occurred while processing sequence ' + str(sequence_id))", "def generate(bat_size, s=\"train\"):\n while True:\n pairs, targets = get_batch(bat_size, s)\n yield (pairs, targets)", "def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n fake_image = [1] * 784\r\n if self.one_hot:\r\n fake_label = [1] + [0] * 9\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_generator(data, batch_size, config, target=False):\n listsize = min(len(data.pilist), 10)\n while 1:\n nrd = np.arange(len(data.pilist))\n np.random.shuffle(nrd)\n for ri in range(len(data.pilist) // listsize):\n cpilist = [data.pilist[nrd[rid]] for rid in range(listsize * ri, listsize * (ri + 1))]\n data.loadpatch(config, cpilist)\n if target:\n data_l = [data.xarray, np.zeros(shape=(len(data.xarray), 1))]\n else:\n data_l = [data.xarray, data.yarray - 1]\n\n for di in range(len(data_l[0])):\n data_l[0][di][:, :, :, 0] = data_l[0][di][:, :, :, 0] / np.max(data_l[0][di][:, :, :, 0])\n\n for repi in range(3):\n data_l = shuffle_aligned_list(data_l)\n\n batch_count = 0\n while True:\n if batch_count * batch_size + batch_size >= len(data_l[0]):\n if len(data.pilist) // listsize != 1:\n print('list end', ri * listsize)\n 
break\n else:\n batch_count = 0\n\n start = batch_count * batch_size\n end = start + batch_size\n batch_count += 1\n yield [d[start:end] for d in data_l]", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def batch_generator(Dataset, batch_size, shuffle=True, repeat = 1, ignore_class = 255):\n\n \"\"\"\n Args : \n Dataset (class) : dataset class defined in cityscapes.py. \n batch_size (int) : batch size \n shuffle (bool) : shuffle dataset order \n ignore_class (int) : class number to be ignored \n\n Return : \n images (np.array) : images \n labels (np.array) : labels array in 2d \n \n \"\"\"\n \n idx_dataset = list(range(len(Dataset)))\n idx_dataset = idx_dataset*repeat\n \n\n if shuffle :\n from random import shuffle\n shuffle(idx_dataset)\n\n for idx in range(len(idx_dataset)//batch_size):\n \n imgs_to_stack = []\n labels_to_stack = []\n\n for _data_idx in range(idx*batch_size, (idx+1)*batch_size):\n data_idx = idx_dataset[_data_idx]\n image, label = load_image_train(Dataset[data_idx])\n imgs_to_stack.append(image)\n labels_to_stack.append(label)\n \n images = tf.stack(imgs_to_stack)\n labels = tf.stack(labels_to_stack)\n\n if ignore_class : \n idx_to_ignore = labels!=ignore_class\n labels = tf.where(idx_to_ignore, labels, 0)\n\n yield (images, labels)", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle the data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n if start + batch_size > self.num_examples:\n self._epochs_completed += 1\n rest_num_examples = self.num_examples - start\n images_rest_part = self._images[start:self.num_examples]\n labels_rest_part = self._labels[start:self.num_examples]\n self.permute()\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n\n result_images = np.concatenate(\n (images_rest_part, images_new_part), axis=0\n )\n result_labels = np.concatenate(\n (labels_rest_part, labels_new_part), axis=0\n )\n return result_images, result_labels\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, batch_size=1):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. 
Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_class_ids = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, gt_class_ids] = 1\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_class_ids]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "def NumpyDatasetGenerator(dataset,\n batch_size,\n shuffle=True,\n num_batches=-1):\n # top.Optimizer is expecting for tuples\n if isinstance(dataset, tuple):\n dataset = tuple(dataset)\n\n if shuffle==True:\n perm = np.random.permutation(dataset[0].shape[0])\n dataset = [d[perm] for d in dataset]\n if num_batches == -1:\n num_batches = dataset[0].shape[0]/batch_size\n for i in range(num_batches):\n start = i*batch_size\n finish = (i+1)*batch_size\n batch = [d[start:finish] for d in dataset]\n yield tuple(batch)", "def batch_generator(batch_size, sequence_length,\n x_train_scaled, y_train_scaled, num_x_signals, num_y_signals, num_train):\n # Infinite loop.\n while True:\n # Allocate a new array for the batch of input-signals.\n x_shape = (batch_size, sequence_length, num_x_signals)\n x_batch = np.zeros(shape=x_shape, dtype=np.float16)\n\n # Allocate a new array for the batch of output-signals.\n y_shape = (batch_size, sequence_length, num_y_signals)\n y_batch = np.zeros(shape=y_shape, dtype=np.float16)\n\n # Fill the batch with random sequences of data.\n for i in range(batch_size):\n # Get a random start-index.\n # This points somewhere into the training-data.\n idx = np.random.randint(num_train - sequence_length)\n\n # Copy the sequences of data starting at this index.\n x_batch[i] = x_train_scaled[idx:idx + sequence_length]\n y_batch[i] = y_train_scaled[idx:idx + sequence_length]\n yield x_batch, y_batch\n # return x_batch, y_batch", "def batch_iter(data: Union[np.ndarray, List[Any]], labels: Union[np.ndarray, List[Any]],\n batch_size: int, num_epochs: int) -> Tuple[Iterable[Any], Iterable[Any]]:\n assert len(data) == len(labels)\n\n for _ in range(num_epochs):\n start_index = 0\n while start_index < len(data) - 1:\n end_index = min(len(data) - 1, start_index + batch_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n\n yield xdata, ydata\n\n start_index += batch_size", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end 
= min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def batch(data: np.ndarray, y: np.ndarray, size: int,\n shuffle: bool = False) -> tuple:\n n = data.shape[0]\n indices = None\n if shuffle:\n indices = np.arange(n)\n np.random.shuffle(indices)\n for start_idx in range(0, n, size):\n end_idx = start_idx + size\n if shuffle:\n yield (data[indices[start_idx: end_idx]],\n y[indices[start_idx: end_idx]])\n else:\n yield data[start_idx: end_idx], y[start_idx: end_idx]", "def next_batch(self, batch_size, fake_data=False):\r\n if fake_data:\r\n fake_image = [1.0 for _ in range(784)]\r\n fake_label = 0\r\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n #print (0)\r\n #print(self._index_in_epoch,self._num_examples)\r\n #若当前训练读取的index>总体的images数时,则读取读取开始的batch_size大小的数据\r\n if self._index_in_epoch > self._num_examples:\r\n #print (0)\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Shuffle the data\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self._images[perm]\r\n self._labels = self._labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n #print (\"start is:%d,end is:%d\"%(start,end))\r\n return self._images[start:end], self._labels[start:end]", "def get_batch(batch_size,s=\"train\"):\n\n if s == 'train':\n X = Xtrain # X training input\n categories = train_classes # y categories\n else:\n X = Xval # X validation input\n categories = val_classes # y categories\n\n n_classes, n_examples, w, h = X.shape[0], X.shape[1], X.shape[2], X.shape[3]\n\n # randomly sample several classes to use in the batch of size n\n categories = rng.choice(n_classes,size=(batch_size,),replace=False)\n \n # initialize 2 empty arrays for the input image batch\n pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]\n \n # initialize vector for the targets\n targets=np.zeros((batch_size,))\n \n # one half of is full of '1's and 2nd half of batch has same class\n\n targets[batch_size//2:] = 1\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n \n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category \n else: \n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1,n_classes)) % n_classes\n \n pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,1)\n\n \n return pairs, targets", "def get_batch(batch_size, s=\"train\"):\n if s == \"train\":\n X = Xtrain\n categories = train_classes\n else:\n X = Xtest\n categories = test_classes\n\n n_classes, n_examples, w, h = X.shape\n\n # randomly sample several classes to use in the batch\n categories = rng.choice(n_classes, size=(batch_size), 
replace=False)\n\n # Initial 2 empty arrays for the input image_batch\n pairs = [np.zeros((batch_size, h, w, 1)) for i in range(2)]\n # initialize vector fo the targets\n targets = np.zeros((batch_size, 1))\n\n # make one half of it \"1\"s so 2nd half of batch has same class\n targets[batch_size // 2:] = 1\n\n\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i, :, :, :] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n\n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category\n else:\n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1, n_classes)) % n_classes\n\n pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(w, h, 1)\n\n return pairs, targets", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def next_batch(num, data, labels):\n idx = np.arange(0 , len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n \n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n end = min(start + batch_size, self._num_examples)\n batch_data = self._data[start:end]\n if self._label_used:\n batch_labels = self._labels[start:end]\n\n if end == self._num_examples:\n self._epochs_completed += 1\n self._index_in_epoch = 0\n if self._shuffled:\n perm = np.arange(self._num_examples)\n random.shuffle(perm)\n self._data = self._data[perm]\n if self._label_used:\n self._labels = self._labels[perm]\n else:\n self._index_in_epoch = end\n\n if self._label_used:\n return batch_data,batch_labels\n else:\n return batch_data", "def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = 
scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def gen(length):\n return itertools.product(LABELS,repeat=length)", "def next_batch(self, batch_size, fake_data=False):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._texts = self._texts[perm]\n self._topologys = self._topologys[perm]\n self._urls = self._urls[perm]\n self._demos = self._demos[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._texts[start:end], self._topologys[start:end], self._urls[start:end], self._demos[start:end], self._labels[start:end]", "def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]", "def data_generation(imgs, labs, batch, validataion):\n\n # Initialization\n batch_images = np.empty((batch, imgs[0].shape[0], imgs[0].shape[1], imgs[0].shape[2]))\n batch_labels = np.empty((batch, 1))\n # Generate data\n while True: # loop forever\n for x in range(batch):\n rand = random.randint(0, len(labs)-1)\n if validataion:\n # Store un-altered image and measurement\n batch_images[x] = imgs[rand]\n batch_labels[x] = labs[rand]\n else:\n # Store new image and adjusted measurement\n batch_images[x], batch_labels[x] = transform_image(imgs[rand], labs[rand])\n yield batch_images, batch_labels", "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n 
continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def create_batches(data, label, max_seq_length, batch_size, rand_idx, mode='train'):\n num_samples = len(data)\n num_batches = num_samples // batch_size\n for i in xrange(num_batches):\n batch_start_pos = i * batch_size\n batch_end_pos = min((i + 1) * batch_size, num_samples)\n batch_idx = rand_idx[batch_start_pos:batch_end_pos]\n label_in_batch = to_sparse_representation(label, batch_idx)\n data_in_batch = np.zeros((max_seq_length, batch_size, num_features))\n seq_lengths = np.zeros(batch_size)\n for j, idx in enumerate(batch_idx):\n x = data[idx]\n data_in_batch[0:x.shape[1], j, :] = np.reshape(x, (x.shape[1], num_features))\n seq_lengths[j] = x.shape[1]\n yield ((data_in_batch, seq_lengths), label_in_batch)", "def batch_data(source, target, batch_size):\n for batch_i in range(0, len(source)//batch_size):\n start_i = batch_i * batch_size\n source_batch = source[start_i:start_i + batch_size]\n target_batch = target[start_i:start_i + batch_size]\n yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))", "def get_batches(dirname,\n gen=keras.preprocessing.image.ImageDataGenerator(),\n shuffle=True,\n batch_size=1,\n target_size=(224, 224),\n class_mode=\"categorical\"):\n return gen.flow_from_directory(dirname,\n shuffle=shuffle,\n batch_size=batch_size,\n target_size=target_size,\n class_mode=class_mode)", "def generate_labels(cfg, split_files):\n for file_name in split_files:\n file_name = join(cfg.data_dir, file_name)\n\n for example in generate_examples(file_name):\n yield from example['labels']", "def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels", "def generate_batch(model, batch_size, test_data=False):\n if model == 'cnn':\n as_image = True\n else:\n as_image = False\n\n image = _read_images(test_data=test_data, as_image=as_image)\n label = _read_labels(test_data=test_data)\n\n images_batch, labels_batch = tf.train.batch([image, label],\n batch_size = batch_size,\n num_threads = 1,\n capacity = batch_size * 8)\n\n return images_batch, tf.reshape(labels_batch, [batch_size])", "def generator(self, annotations_csv_path, num_classes, augmentation=False, batch_size=4, size=224):\n\n # Get image paths and labels\n image_paths, labels = self.__read_image_paths_labels(annotations_csv_path)\n steps = len(image_paths)//batch_size\n \n step = 0\n itr = 0\n \n while True:\n #for itr in range(0, len(image_paths), batch_size):\n\n # Storing batches of paths and labels in lists\n temp_path = [image_paths[i] for i in range(itr, itr + batch_size)]\n temp_label = [labels[i] for i in range(itr, itr + batch_size)]\n\n # Create empty tensors for images and labels\n image_batch = np.zeros((batch_size, size, size, 3), dtype=np.float32)\n label_batch = np.zeros((batch_size, num_classes), dtype=np.float32)\n\n # Keep track of batch size\n count = 0\n\n for n, path in enumerate(temp_path):\n\n temp_org_image = self.__read_image(path)\n temp_image = self.__preprocesses_image(temp_org_image, size)\n\n 
image_batch[count] = temp_image\n label_batch[count] = temp_label[n]\n\n # At least two more empty arrays must be available in the \n # image_batch tensor\n if not temp_label[n][-1] and count < batch_size-2 and augmentation: \n\n aug_image_1 = self.sequence.augment_image(temp_org_image)\n aug_image_2 = self.sequence.augment_image(temp_org_image)\n\n aug_image_1 = self.__preprocesses_image(aug_image_1, size)\n aug_image_2 = self.__preprocesses_image(aug_image_2, size)\n\n image_batch[count+1] = aug_image_1\n label_batch[count+1] = temp_label[n]\n\n image_batch[count+2] = aug_image_2\n label_batch[count+2] = temp_label[n]\n\n count += 3\n\n else: \n count += 1\n\n\n if count == batch_size:\n break\n \n step += 1\n itr += batch_size\n\n yield image_batch, label_batch\n \n if step >= steps:\n step = 0\n itr = 0", "def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []", "def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def enhancer_iterator(self, data, labels, batch_size, num_steps):\n def seq_to_ints(seq):\n return [self.vocab.word_to_index[c] for c in seq]\n\n # Map raw data to array of ints. 
if all sequences are the same length L, \n # raw_data will be N-by-L\n mdata = np.array([seq_to_ints(i) for i in data], dtype=np.int32)\n num_batches = len(mdata) // batch_size\n \n # data will have batch_len elements, each of size batch_size\n # ASSUME FIXED SEQUENCE LENGTHS OFF 1000 FOR NOW (5/20/16)\n # Just grab middle self.config.num_steps nucleotides\n a = int(len(mdata[0,:])/2-self.config.num_steps/2)\n b = int(len(mdata[0,:])/2+self.config.num_steps/2)\n for i in range(num_batches):\n x = mdata[batch_size*i:batch_size*(i+1),a:b]\n if labels is not None:\n y = labels[batch_size*i:batch_size*(i+1)]\n else:\n y = None\n yield(x,y)", "def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n self._epochs_completed += 1\n start = 0\n self._index_in_epoch = batch_size\n end = self._index_in_epoch\n return self._samples[start:end], self._labels[start:end]", "def _generate_batch_para(doc_ids, word_ids, batch_size, num_skips, window_size):\n data_index = 0\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * window_size\n labels = np.ndarray(shape=(batch_size), dtype=np.int32)\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n span = 2 * window_size + 1\n buffer = collections.deque(maxlen=span)\n buffer_para = collections.deque(maxlen=span)\n\n i = 0\n while data_index < len(word_ids):\n if len(buffer) == span and len(set(buffer_para)) == 1:\n target = window_size\n targets_to_avoid = [window_size]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n labels[i + j] = buffer[target]\n batch[i + j] = buffer[window_size]\n i += num_skips\n buffer.append(word_ids[data_index])\n buffer_para.append(doc_ids[data_index])\n data_index = (data_index + 1) % len(word_ids)\n if i == batch_size:\n yield batch, labels[:, None]\n i = 0\n labels = np.ndarray(shape=(batch_size), dtype=np.int32)\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)", "def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size", "def 
gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn", "def batch_generator(batch_size):\n # Randomly shuffle the order of the files in directory\n files = glob.glob(os.path.join(data_dir, pattern))\n np.random.shuffle(files)\n n_files = len(files)\n\n for batch_num in range(0, n_files, batch_size):\n batch = []\n\n for img_file in files[batch_num:batch_num+batch_size]:\n # Load image from file\n img = scipy.misc.imread(img_file)\n\n # -----------\n # BOOKMARK: File preprocessing steps here\n # -----------\n img = scipy.misc.imresize(img, img_shape)\n # -----------\n\n # Append to the batch\n batch.append(img)\n\n # Yield the current batch\n yield np.array(images)", "def batcher(X_, y_=None, w_=None, batch_size=-1):\n n_samples = X_.shape[0]\n\n if batch_size == -1:\n batch_size = n_samples\n if batch_size < 1:\n raise ValueError('Parameter batch_size={} is unsupported'.format(batch_size))\n\n for i in range(0, n_samples, batch_size):\n upper_bound = min(i + batch_size, n_samples)\n ret_x = X_[i:upper_bound]\n ret_y = None\n ret_w = None\n if y_ is not None:\n ret_y = y_[i:i + batch_size]\n if w_ is not None:\n ret_w = w_[i:i + batch_size]\n yield (ret_x, ret_y, ret_w)", "def sample_batch(self, batch_type, batch_size):\n if batch_type == \"train\":\n folders = self.metatrain_character_folders\n elif batch_type == \"val\":\n folders = self.metaval_character_folders\n else:\n folders = self.metatest_character_folders\n\n #############################\n #### YOUR CODE GOES HERE ####\n all_image_batches = []\n all_label_batches = []\n for _ in range(batch_size):\n # create batches of K lists\n images = [list() for _ in range(self.num_samples_per_class)]\n labels = [list() for _ in range(self.num_samples_per_class)]\n next_idx = [0] * self.num_classes\n\n # sample the classes and images\n classes = np.random.choice(folders, size=(self.num_classes,))\n labels_and_paths = get_images(classes, range(self.num_classes),\n nb_samples=self.num_samples_per_class)\n\n # load images and one-hot encode labels\n for label, path in labels_and_paths:\n # only add one class instance per sample list\n idx = next_idx[label]\n\n\n image = image_file_to_array(path, self.img_size, 
flatten=self.flatten)\n one_hot_label = np.zeros((self.num_classes,))\n one_hot_label[label] = 1.\n\n images[idx].append(image)\n labels[idx].append(one_hot_label)\n\n next_idx[label] += 1\n\n all_image_batches.append(images)\n all_label_batches.append(labels)\n\n # convert to numpy arrays\n all_image_batches = np.array(all_image_batches)\n all_label_batches = np.array(all_label_batches)\n #############################\n\n return all_image_batches, all_label_batches" ]
[ "0.7450385", "0.70173806", "0.6942358", "0.6913882", "0.68870944", "0.6886947", "0.68810886", "0.68509597", "0.67483735", "0.6738463", "0.67325974", "0.6717352", "0.6699821", "0.66986203", "0.66865635", "0.66477764", "0.6606987", "0.65960824", "0.6584885", "0.65848434", "0.65712637", "0.65303326", "0.6515678", "0.6511672", "0.6511431", "0.6508646", "0.6504626", "0.65017194", "0.6498612", "0.6486128", "0.6481376", "0.6472589", "0.64492923", "0.6448107", "0.644206", "0.64249676", "0.64222866", "0.6418607", "0.639855", "0.6398052", "0.63973117", "0.6396294", "0.6390964", "0.6382827", "0.6367127", "0.6366139", "0.63614845", "0.6360113", "0.6351938", "0.6344472", "0.63379395", "0.63373345", "0.63352937", "0.6331183", "0.63258165", "0.6310576", "0.6310081", "0.6309876", "0.6307173", "0.6301245", "0.62936825", "0.6288971", "0.62870103", "0.62864363", "0.62864363", "0.62864363", "0.6282522", "0.6280485", "0.627651", "0.62743473", "0.62652934", "0.6263656", "0.62633455", "0.6255318", "0.62550145", "0.62532306", "0.62525856", "0.6242172", "0.6239878", "0.623987", "0.6239677", "0.62395084", "0.62392557", "0.62346107", "0.622745", "0.62203133", "0.6219036", "0.6217193", "0.6203973", "0.6202928", "0.62015057", "0.61999154", "0.6198595", "0.619845", "0.6194266", "0.6188118", "0.618179", "0.61795384", "0.61761504", "0.61733204", "0.61717695" ]
0.0
-1
|coro| Refetches the inventory.
async def update(self) -> None:
    data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)
    self._update(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def getitem(self):\n self.inventory += 1", "def inventory(self, time: int) -> Inventory:\n self.refreshDroneStatus(time)\n return self.__inventory", "async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)", "def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break", "def get_inventory():\n return INVENTORY", "def get_inventory(self, context):\n with LoggingSessionContext(context) as logger, LogCommand(\n logger, \"get_inventory\"\n ):\n api = CloudShellSessionContext(context).get_api()\n\n resource_config = FirewallResourceConfig.from_context(\n self.SHELL_NAME, context, api, self.SUPPORTED_OS\n )\n\n cli_configurator = CheckpointCliConfigurator(\n self._cli, resource_config, logger\n )\n enable_disable_snmp_flow = CheckpointEnableDisableSnmpFlow(\n cli_configurator, logger\n )\n snmp_configurator = EnableDisableSnmpConfigurator(\n enable_disable_snmp_flow, resource_config, logger\n )\n\n resource_model = FirewallResourceModel.from_resource_config(resource_config)\n\n autoload_operations = CheckpointSnmpAutoloadFlow(logger, snmp_configurator)\n logger.info(\"Autoload started\")\n response = autoload_operations.discover(self.SUPPORTED_OS, resource_model)\n logger.info(\"Autoload completed\")\n return response", "async def inv(self, ctx):\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, close.close * s.quantity])\r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Value'])\r\n aggregated = tabulate(inv_df.groupby(['Symbol']).sum().reset_index(), headers=['Symbol', 'Quantity', 'Value'])\r\n await ctx.send(f'```{aggregated}```')", "def 
inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data", "def inventory(self):\n return self._inventory", "async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})", "async def stocks(self, ctx):\n\t\tpass", "def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure and attributes\n # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static\n # run 'shellfoundry generate' in order to create classes that represent your data model\n\n '''\n resource = LanforgeResource.create_from_context(context)\n resource.vendor = 'specify the shell vendor'\n resource.model = 'specify the shell model'\n\n port1 = ResourcePort('Port 1')\n port1.ipv4_address = '192.168.10.7'\n resource.add_sub_resource('1', port1)\n\n return resource.create_autoload_details()\n '''\n return AutoLoadDetails([], [])", "def get_with_inventory(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(context, id_)\n res_properties = self.db_api.get_properties(context, id_)\n\n # for non resource managers return get\n if (db_resource_data['type'] !=\n eon_const.EON_RESOURCE_TYPE_ESX_CLUSTER):\n return _make_response(db_resource_data)\n\n res_mgr_obj = (\n self.db_api.get_resource_managers_by_resource_id(context,\n id_))\n driver_obj = driver.load_resource_driver(db_resource_data['type'])\n _inventory = driver_obj.get_res_inventory(res_mgr_obj,\n res_properties)\n _resource_data = _make_response(db_resource_data,\n inventory=_inventory)\n # (NOTE) Here setting the details of resource manager for the\n # resource\n _res_mgr_data = _make_response(res_mgr_obj, meta_data=False)\n _resource_data[eon_const.RSRC_MGR_INFO] = _res_mgr_data\n\n except exception.NotFound as e:\n LOG.exception(e)\n raise e\n except Exception as e:\n msg = _(\"Error retrieving the 'eon_resource':%s. 
Reason: %s\") % (\n id_, e)\n log_msg = (\"Error retrieving the 'eon_resource':%s.\"\n \" Reason: %s\") % (id_, e)\n LOG.exception(log_msg)\n raise exception.RetrieveException(msg)\n\n LOG.info(\"The Resource data %s \"\n % logging.mask_password(_resource_data))\n return _resource_data", "def get_inventory(self, node):", "def fetch(self, vault_client):\n result = self.read(vault_client)\n if result:\n if isinstance(result, dict) and 'data' in result:\n self.existing = result['data']\n else:\n self.existing = result\n else:\n self.existing = None", "def get_with_inventory(self, context, id_):\n try:\n db_resource_mgr_data = self.db_api.get_resource_manager(\n context, id_)\n db_props_data = self.db_api.get_resource_mgr_properties(context,\n id_, key=eon_const.RESOURCE_MGR_STATE_KEY)\n\n driver_obj = driver.load_resource_mgr_driver(\n db_resource_mgr_data['type'])\n inventory = driver_obj.get_inventory(db_resource_mgr_data)\n resource_mgr_data = _make_response(db_resource_mgr_data,\n property_list=db_props_data,\n inventory=inventory)\n LOG.debug(\"[%s] Resource data %s\"\n % (id_, logging.mask_password(resource_mgr_data)))\n return resource_mgr_data\n\n except exception.NotFound as e:\n LOG.error(e)\n raise e\n except Exception as e:\n msg = \"Error retrieving the 'resource':%s. Reason: %s\" % (\n id_, e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)", "def get_inventory_from_cache(self):\n cache = open(self.cache_path_cache, 'r')\n json_inventory = cache.read()\n return json_inventory", "def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', 
self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)", "def get_inventory(self):\n raise NotImplementedError(\"Subclasses define what returning the inventory entails\")", "def refresh_inventory(self):\n if self.skill_tree_displaying:\n return\n self.inventory_tiles, _ = player_panel_renderer.draw_inventory(self.player_dict['inventory'], refresh=True)", "async def get(self):\n identifier = self.data[\"id\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n return self.json(data=list(item.actions.keys()))", "def test_get_dealer_active_inventory(self):\n pass", "def get_inventory(self, resources):\n uri = '/api/services/inventory'\n body = {'resources': resources}\n result = self.session.post(uri, body=body)\n return result", "def display_inventory(self):\n header = \"Carrying:\\n\"\n nothing_func = lambda *args: None\n action_list = [(item, nothing_func) for item in self.inventory]\n if len(action_list) == 0:\n header += \"Nothing at all\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n 
pass", "def inventory(env):\n envs = environments()\n check_env(env, envs)\n\n headers = [] # a list of fact descriptions to go\n # in the table header\n fact_names = [] # a list of inventory fact names\n fact_data = {} # a multidimensional dict for node and\n # fact data\n\n # load the list of items/facts we want in our inventory\n try:\n inv_facts = app.config['INVENTORY_FACTS']\n except KeyError:\n inv_facts = [('Hostname', 'fqdn'),\n ('IP Address', 'ipaddress'),\n ('OS', 'lsbdistdescription'),\n ('Architecture', 'hardwaremodel'),\n ('Kernel Version', 'kernelrelease')]\n\n # generate a list of descriptions and a list of fact names\n # from the list of tuples inv_facts.\n for desc, name in inv_facts:\n headers.append(desc)\n fact_names.append(name)\n\n query = AndOperator()\n fact_query = OrOperator()\n fact_query.add([EqualsOperator(\"name\", name) for name in fact_names])\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n query.add(fact_query)\n\n # get all the facts from PuppetDB\n facts = puppetdb.facts(query=query)\n\n for fact in facts:\n if fact.node not in fact_data:\n fact_data[fact.node] = {}\n\n fact_data[fact.node][fact.name] = fact.value\n\n return Response(stream_with_context(\n stream_template(\n 'inventory.html',\n headers=headers,\n fact_names=fact_names,\n fact_data=fact_data,\n envs=envs,\n current_env=env\n )))", "def inventory(env):\n envs = environments()\n check_env(env, envs)\n headers, fact_names = inventory_facts()\n\n return render_template(\n 'inventory.html',\n envs=envs,\n current_env=env,\n fact_headers=headers)", "def collect(item):\n inventory.append(item)\n print(f'You now have: {inventory}')", "def do_inventory(self, arg):\r\n\r\n if len(inventory) == 0:\r\n print('Inventory:\\n (nothing)')\r\n return\r\n\r\n # first get a count of each distinct item in the inventory\r\n itemCount = {}\r\n for item in inventory:\r\n if item in itemCount.keys():\r\n itemCount[item] += 1\r\n else:\r\n itemCount[item] = 1\r\n\r\n # get a list of inventory items with duplicates removed:\r\n print('Inventory:')\r\n for item in set(inventory):\r\n if itemCount[item] > 1:\r\n print(' %s (%s)' % (item, itemCount[item]))\r\n else:\r\n print(' ' + item)", "def load_inventory(file_name, lst_Inventory):\r\n \r\n try:\r\n objFile = open(file_name, 'r')\r\n lst_Inventory.clear()\r\n for line in objFile:\r\n data = line.strip().split(',')\r\n inventory = CD(data[0],data[1],data[2])\r\n lst_Inventory.append(inventory)\r\n objFile.close()\r\n except FileNotFoundError:\r\n pass\r\n return lst_Inventory", "def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')", "def refresh(self) -> None:\n self._itempage.get()", "def get_inventory(self):\n from noc.inv.models.object import Object\n\n return list(Object.objects.filter(data__management__managed_object=self.id))", "def display_inventory(self) -> None:\n\n print(\"Your current inventory includes:\\n\" + \" | \".join(self.player.inventory))", "async def items(self, ctx, search=''):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n inventory = ch.print_inventory(ctx.user_object, search.lower())\n await self.paginate(ctx, inventory)", "def remaining_inventory(self):\n for bike in self.sold:\n if bike in self.inventory:\n self.inventory.remove(bike)\n print \"{}'s remaining inventory is: {}\".format(\n self.bikeshop_name, self.inventory)", "def get(self, command):\n\n for item in self.location.inventory:\n if item.name == command[1]:\n self.inventory.append(item)\n 
self.location.inventory.remove(item)\n print(\"You picked up a\", item.name)\n return\n print(command[1] + \" is not here!\")", "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def while_waiting(self):\n if self.action is None:\n self.action = Action()\n # don't include core tools in this inventory\n response = self.action.inventory(choices=['repos', 'core', 'tools', 'images', 'built', 'running', 'enabled'])\n if response[0]:\n inventory = response[1]\n value = \"Tools for each plugin found:\\n\"\n for repo in inventory['repos']:\n if repo != \"https://github.com/cyberreboot/vent\":\n value += \"\\n Plugin: \"+repo+\"\\n\"\n repo_name = repo.rsplit(\"/\", 2)[1:]\n for tool in inventory['tools']:\n is_core = False\n for core in inventory['core']:\n if core[0] == tool[0]:\n is_core = True\n if not is_core:\n r_name = tool[0].split(\":\")\n if repo_name[0] == r_name[0] and repo_name[1] == r_name[1]:\n value += \" \"+tool[1]+\"\\n\"\n for built in inventory['built']:\n if built[0] == tool[0]:\n value += \" Built: \"+built[2]+\"\\n\"\n for enabled in inventory['enabled']:\n if enabled[0] == tool[0]:\n value += \" Enabled: \"+enabled[2]+\"\\n\"\n for image in inventory['images']:\n if image[0] == tool[0]:\n value += \" Image name: \"+image[2]+\"\\n\"\n for running in inventory['running']:\n if running[0] == tool[0]:\n value += \" Status: \"+running[2]+\"\\n\"\n else:\n value = \"There was an issue with inventory retrieval:\\n\"+str(response[1])+\"\\nPlease see vent.log for more details.\"\n self.inventory_mle.values=value.split(\"\\n\")\n self.inventory_mle.display()\n return", "def test_update_inventory(self):\n pass", "def display_inventory(self):\n logging.info(yaml.dump(self.inventory_dict, sort_keys=False, default_flow_style=False))\n input('Press Enter to continue ')", "def take(self):\n print(\"You fill the kettle with water.\")\n inventory.remove('kettle')\n collect('filled kettle')", "async def get_item_data(self, ref, db):\n # If items_url is empty, treat ref as URL\n url = self.items_url.format(ref) or ref\n response = await self.asession.get(url)\n\n if response.status_code == 404:\n return self.log.debug(f\"Item {ref} doesn't exist\")\n\n try:\n # Raise for other response failures\n response.raise_for_status()\n\n # Add item to the db\n self.process_item_data(db, ref, response)\n\n self.log.debug(f'Got item {ref}')\n except Exception:\n e = traceback.format_exc()\n self.log.error(f'{e} (item {ref}, status {response.status_code})')", "def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those 
inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)", "def retrieve_inventory(self, jobid):\n if jobid is None:\n return self.vault.retrieve_inventory(sns_topic=None, description=\"cloudbackup inventory job\")\n else:\n return self.vault.get_job(jobid)", "async def get_items_data(self, db):\n await asyncio.gather(\n *tuple(\n asyncio.ensure_future(self.get_item_data(ref, db))\n for ref in self.refs\n ),\n return_exceptions=True\n )", "def test_get_dealer_historical_inventory(self):\n pass", "def ansible_inventory(self):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n res = runner.get_inventory()\n resp = []\n for k,v in res.items():\n resp.append({u'group':k, u'hosts':u', '.join(v)})\n self.logger.debug(u'Ansible inventory nodes: %s' % res)\n self.result(resp, headers=[u'group', u'hosts'])", "def run(self):\n try:\n query = VectorQuery(self.username, self.password, self.api_key)\n query.log_in()\n new_items = query.query_items(self.items_params, True)\n self.json_item_object.task_complete.emit(self.items_params, new_items)\n except Exception, e:\n self.json_item_object.task_cancel.emit(e)", "def run(self):\n query = VectorQuery(self.username, self.password, self.api_key)\n query.log_in()\n new_items = query.query_items(self.item_params)\n self.item_object.task_complete.emit(self.item_params, new_items)", "def getInventory(self):\n return str(self.inventory)", "def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()", "def grab(self):\n if len(self.location.contents) == 0:\n print('Hate to break it to you, but there\\'s nothing to grab.')\n elif random() >= .75:\n item = self.location.contents[\n randrange(len(self.location.contents))]\n self.inventory.append(item)\n self.location.remove(item)\n print('Nice one, you actually managed to grab the {}! 
'\n 'I\\'m not even angry, I\\'m impressed.'.format(item))\n else:\n print('Well, at least you flailed in an impressive fashion.')", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def test_get_inventory_list(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def Initialize(self):\r\n # Get the inventory out of our data, and put into our list\r\n self.inventory = []\r\n \r\n # For each selling group\r\n for group_key in self.data:\r\n group_name = self.data[group_key].get('name', 'Unknown')\r\n \r\n # For all our items\r\n for item_key in self.data[group_key]['items']:\r\n #TODO(g): Deal with stock levels later...\r\n \r\n item_data = copy.deepcopy(self.data[group_key]['items'][item_key]['item'])\r\n item_data['group_name'] = group_name\r\n \r\n # Create our item\r\n item = rpg_item.Item(self.game, None, item_data)\r\n \r\n # Add it to our list\r\n self.inventory.append(item)\r\n \r\n # Reset options\r\n self.options = []\r\n \r\n # Create options from inventory\r\n for item in self.inventory:\r\n text = '%s - %s - %s gold' % (item.data['name'], item.data['group_name'],\r\n item.data['cost'])\r\n option = {'text':text, 'item':item, 'operation':'buy'}\r\n \r\n self.options.append(option)\r\n \r\n # Add the Quit option\r\n option = {'text':'Done Bartering', 'operation':'done'}\r\n self.options.append(option)\r\n \r\n # Constrain maximum offset position\r\n if self.selected_option >= len(self.options):\r\n self.selected_option = len(self.options) - 1", "def update(self):\n try:\n data = self.api.get_inventory(self.site_id)\n inventory = data[\"Inventory\"]\n except KeyError:\n _LOGGER.error(\"Missing inventory data, skipping update\")\n return\n except (ConnectTimeout, HTTPError):\n _LOGGER.error(\"Could not retrieve data, skipping update\")\n return\n\n self.data = {}\n self.attributes = {}\n\n for key, value in inventory.items():\n self.data[key] = len(value)\n self.attributes[key] = {key: value}\n\n _LOGGER.debug(\"Updated SolarEdge inventory: %s, %s\", self.data, self.attributes)", "def inventory(self, inventory):\n\n self._inventory = inventory", "def refresh(self):\n self.lease = self.blazar.lease.get(self.id)", "def get_items():\n return requester.perform_request(Uri.items)", "def __getitem__(self, item):\n return self.get_data(stock=item)", "def get_items(self, caller, data, verbose):\n\n if self.inventory:\n out = f\"\\n{caller} inventory:\"\n else:\n out = f\"There's nothing here.\"\n\n for name, item in data[\"inventory\"].items():\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(item)\n return out", "def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict", "def test_vault_get_vault_item(self):\n pass", "def test_autoload(driver: IxiaChassisShell2GDriver, autoload_context: AutoLoadCommandContext) -> None:\n inventory = driver.get_inventory(autoload_context)\n print_inventory(inventory)", "def equip_item(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItem/\"))", "def inventory_ajax(env):\n draw = int(request.args.get('draw', 0))\n\n envs = environments()\n check_env(env, envs)\n 
headers, fact_names = inventory_facts()\n fact_templates = app.config['INVENTORY_FACT_TEMPLATES']\n\n query = AndOperator()\n fact_query = OrOperator()\n fact_query.add([EqualsOperator(\"name\", name) for name in fact_names])\n query.add(fact_query)\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n facts = puppetdb.facts(query=query)\n\n fact_data = {}\n for fact in facts:\n if fact.node not in fact_data:\n fact_data[fact.node] = {}\n\n fact_value = fact.value\n\n if fact.name in fact_templates:\n fact_template = fact_templates[fact.name]\n fact_value = render_template_string(\n fact_template,\n current_env=env,\n value=fact_value,\n )\n\n fact_data[fact.node][fact.name] = fact_value\n\n total = len(fact_data)\n\n return render_template(\n 'inventory.json.tpl',\n draw=draw,\n total=total,\n total_filtered=total,\n fact_data=fact_data,\n columns=fact_names)", "def use(self):\n print_items()\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"little key\":\n print(\"You open the cabinet door.\")\n print(\"In it, there is a golden key.\")\n gk = GoldenKey('golden key')\n gk.take()\n break\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def restore_inventory(self):\n if config.get(\"aws\", \"s3_bucket\"):\n loaded_archives = self.load_archives_from_s3()\n\n with glacier_shelve() as d:\n archives = {}\n for a in loaded_archives:\n print a\n archives[a[\"filename\"]] = a[\"archive_id\"]\n d[\"archives\"] = archives\n else:\n raise Exception(\"You must set s3_bucket in order to backup/restore inventory to/from S3.\")", "def get_items_from(self, inventory=False):\n # if no outer inventory is provided, assume own inventory is needed\n if not inventory:\n inventory = self.inventory\n # get items normally\n items_ = MetaBeing.get_items_from(self, inventory)\n # return items in question\n return items_", "async def sell(self):\n if len(self.factory.foobar) > 0:\n how_many = min(len(self.factory.foobar), random.randint(1, 5))\n foobars = [self.factory.get_foobar() for x in range(how_many)]\n await self.wait(10)\n for foobar in foobars:\n self.say(f\"Selling {foobar} for 1€\")\n self.factory.money += 1\n else:\n self.say(\"Nothing to sell\")", "def refresh(self):\n # exists state\n self.shoprefobj = self.sc.get_shopref_obj({'Alias': self.Alias})\n self.exists = self.sc.exists(self.shoprefobj)\n\n if not self.exists:\n raise ShopDisappearedError(\"Could not find the shop anymore!\")\n\n # data from the server\n self.infoshopobj = self.sc.get_infoshop_obj({'Alias': self.Alias})\n self.shopinfo = self.sc.get_info(self.infoshopobj)\n\n self._from_dict(self.shopinfo)", "async def loadpokemon(self, ctx):\n await self.bot.di.new_items(ctx.guild, (ServerItem(**item) for item in self.bot.pokemonitems.values()))\n await ctx.send(await _(ctx, \"Successfully added all Pokemon items!\"))", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = 
Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def get_inventory(self, context):\n logger = get_logger_with_thread_id(context)\n logger.info(\"Autoload\")\n\n with ErrorHandlingContext(logger):\n cs_api = get_api(context)\n\n vblade_resource = TeraVMTrafficGeneratorVBladeResource.from_context(context)\n\n # get VM uuid of the Deployed App\n deployed_vm_resource = cs_api.GetResourceDetails(vblade_resource.fullname)\n vmuid = deployed_vm_resource.VmDetails.UID\n logger.info(\"Deployed TVM Module App uuid: {}\".format(vmuid))\n\n # get vCenter name\n app_request_data = json.loads(context.resource.app_context.app_request_json)\n vcenter_name = app_request_data[\"deploymentService\"][\"cloudProviderName\"]\n logger.info(\"vCenter shell resource name: {}\".format(vcenter_name))\n\n vsphere = pyVmomiService(SmartConnect, Disconnect, task_waiter=None)\n\n # get vCenter credentials\n vcenter_resource = cs_api.GetResourceDetails(resourceFullPath=vcenter_name)\n user = self._get_resource_attribute_value(resource=vcenter_resource,\n attribute_name=VCENTER_RESOURCE_USER_ATTR)\n\n encrypted_password = self._get_resource_attribute_value(resource=vcenter_resource,\n attribute_name=VCENTER_RESOURCE_PASSWORD_ATTR)\n\n password = cs_api.DecryptPassword(encrypted_password).Value\n\n logger.info(\"Connecting to the vCenter: {}\".format(vcenter_name))\n si = vsphere.connect(address=vcenter_resource.Address, user=user, password=password)\n\n # find Deployed App VM on the vCenter\n vm = vsphere.get_vm_by_uuid(si, vmuid)\n\n phys_interfaces = []\n comms_mac_addr = None\n\n for device in vm.config.hardware.device:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n if device.deviceInfo.summary.lower() == vblade_resource.tvm_comms_network.lower():\n comms_mac_addr = device.macAddress\n else:\n phys_interfaces.append(device)\n\n if comms_mac_addr is None:\n raise Exception(\"Unable to find TVM Comms network with name '{}' on the device\"\n .format(vblade_resource.tvm_comms_network))\n\n logger.info(\"Found interfaces on the device: {}\".format(phys_interfaces))\n module_res = models.TeraVMModule(shell_name=\"\",\n name=\"Module {}\".format(comms_mac_addr.replace(\":\", \"-\")),\n unique_id=hash(comms_mac_addr))\n\n logger.info(\"Updating resource address for the module to {}\".format(comms_mac_addr))\n cs_api.UpdateResourceAddress(context.resource.fullname, comms_mac_addr)\n\n for port_number, phys_interface in enumerate(phys_interfaces, start=1):\n network_adapter_number = phys_interface.deviceInfo.label.lower().strip(\"network adapter \")\n unique_id = hash(phys_interface.macAddress)\n port_res = models.TeraVMPort(shell_name=\"\",\n name=\"Port {}\".format(port_number),\n unique_id=unique_id)\n\n port_res.mac_address = phys_interface.macAddress\n port_res.requested_vnic_name = network_adapter_number\n module_res.add_sub_resource(unique_id, port_res)\n\n return AutoloadDetailsBuilder(module_res).autoload_details()", "def Refresh(self):\n outer_scope = {'total': -1}\n offset = 0\n limit = 200\n items = []\n\n def xml_parser(body_xml_etree):\n outer_scope['total'] = int(\n body_xml_etree.find('TotalProducts').text)\n\n for product in body_xml_etree.find('Products').findall('Product'):\n sku = product.find('Skus').find('Sku')\n model = sku.find('SellerSku').text\n 
quantity = int(sku.find('quantity').text)\n reserved = quantity - \\\n int(sku.find('Available').text or quantity)\n\n item = LazadaProduct(\n model=model, quantity=quantity, reserved=reserved)\n\n items.append(item)\n\n while True:\n result = self._Request(\n _LIST_PRODUCTS_ACTION, offset=offset, limit=limit,\n body_xml_parser=xml_parser)\n if result.error_code:\n raise CommunicationError(\n 'Error communicating: %s' % result.error_description)\n\n logging.info(\n 'Loaded items: %d out of %d' % (len(items), outer_scope['total'],))\n\n offset += limit\n if offset >= outer_scope['total']:\n break\n\n logging.info('Total items: %d' % len(items))\n\n self._products = items\n\n return self", "def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r", "def get_items_for_catalog(catalog_id):\n pass", "def print_inventory(self):\n\t\tfor item, amount in self.inventoryDictionary.items():\n\t\t\tprint (\"Item: \" + item.name + \" Quantity: \" + str(amount))\n\t\t\tprint (item.description + \"\\n\")\n\n\t\tprint(\"Currently equipped: \")\n\t\tprint(\"Main Hand: \" + self.equippedMainHand.name)\n\t\tprint(\"Armor: \" + self.equippedArmor.name)", "def show_inventory(lst_Inventory):\r\n \r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in lst_Inventory:\r\n print('{}\\t{} (by:{})'.format(cd_instance.cd_id, cd_instance.cd_title, cd_instance.cd_artist))\r\n print('======================================')", "def FillInventoryServicePropertiesDuringEscrow(self, entity, request):\n return", "def refresh(self, context=None):\n current = self.get_by_uuid(self._context, uuid=self.uuid)\n self.obj_refresh(current)", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. 
\r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def test_get_inventory_with_empty_result(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=test-username&Password=test-password&Table=Inventory&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/inventory_empty.tsv\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"test-username\", password=\"test-password\")\n data = cellartracker.get_inventory()\n self.assertEqual([], data)", "async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})", "async def get_catalog(self, board_id):\n\n route = f'{board_id}/catalog'\n\n data = await self.interact(route)\n\n value = Asset(data)\n\n return value", "def get_item_variants(self, item_id, item_name, start):\n\n item_url = f\"https://www.supremenewyork.com/shop/{item_id}.json\"\n\n item_variants = rq.get(item_url, headers=self.headers, proxies=self.proxy).json()\n\n for stylename in item_variants[\"styles\"]:\n for itemsize in stylename[\"sizes\"]:\n item = [item_name, stylename[\"name\"], itemsize['name'], item_variants[\"description\"], 'https:' + stylename[\"image_url\"], item_url.split('.json')[0]]\n if itemsize[\"stock_level\"] != 0:\n # Checks if it already exists in our instock\n if self.checker(item):\n pass\n else:\n # Add to instock dict\n self.instock.append(item)\n \n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n self.discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if self.checker(item):\n self.instock.remove(item)", "def test_vault_get_all_vault_items(self):\n pass", "def _odl_inventory(self):\n return {\n \"id\": self._id,\n \"hard-timeout\": self._hard_timeout,\n \"idle-timeout\": self._idle_timeout,\n \"table_id\": self._table_id,\n \"priority\": self._priority,\n \"instructions\": {\n \"instruction\": [self._instructions[i].odl_inventory(i) for i in range(len(self._instructions))]\n },\n \"match\": self._match.odl_inventory()\n }", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def vault(self):", "def do_fetch(self):\n pass", "def 
inventory():\n try:\n check50.run(run_command).stdin(\"INVENTORY\").stdout(\"Your inventory is empty.\")\n except check50.Failure as error:\n raise check50.Failure(f\"Let the player know they have no items.\\n {error}\")\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\", \"INVENTORY\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS: a set of keys\")", "def inventory(game):\n\n # Offset for displaying list on-screen\n x, y = 6, 2\n # Currently selected item\n selection = 0\n # Max number of items shown at once\n max_items = 10\n # Number of items scrolled through so far\n scrolled = 0\n # Offset for messages\n x_msg, y_msg = 2, max_items + 4\n\n game.window.clear()\n while True:\n # Draw selection cursor\n game.window.addstr(y + selection - scrolled, x - 4, CURSOR)\n\n # Get items between current scroll amount and max_items\n items = list(enumerate(game.player.items))[scrolled:scrolled+max_items]\n\n # Print each item in inventory\n for i, item in items:\n # If more than 1, put the quantity\n if item.quantity > 1:\n formatted = '{}: {} x {}\\n'.format(i, item.quantity, item.name)\n else:\n formatted = '{}: {}\\n'.format(i, item.name)\n\n game.window.addstr(i + y - scrolled, x, formatted)\n\n # If equipped, put a little star next to the item\n if item in game.player.equipment.values():\n game.window.addstr(i + y - scrolled, x - 2, '*')\n\n key = game.window.getkey()\n\n if key == 'k' or key == 'KEY_UP':\n if selection > 0:\n selection -= 1\n\n # If the user tries to go above the screen, scroll up by one\n if selection < scrolled:\n scrolled -= 1\n\n game.window.clear()\n\n if key == 'j' or key == 'KEY_DOWN':\n if selection < len(game.player.items) - 1:\n selection += 1\n\n # If the user tries to go below the screen, scroll down by one\n if selection > scrolled + max_items - 1:\n scrolled += 1\n\n game.window.clear()\n\n if key == 'e':\n # Equip the selected item\n if game.player.items[selection].equippable:\n game.player.equip(game.player.items[selection])\n game.window.clear()\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot equip non-equippable item\")\n\n if key == 'c':\n # Eat the selected item\n if game.player.items[selection].kind == 'food':\n heal = game.player.items[selection].stats['hp']\n game.player.eat(game.player.items[selection])\n\n # Put selection cursor back to an item\n selection -= 1\n game.window.clear()\n\n game.window.addstr(y_msg, x_msg, \"Healed for {} hp\".format(heal))\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot eat non-food item\")\n\n if key == 'l':\n # Print the item name and description\n item = game.player.items[int(selection)]\n game.window.addstr(y_msg, x_msg, '{}\\n\\n{}'.format(item.name, item.desc))\n\n if key == 'q':\n break\n\n if key == '?':\n help_inventory(game)\n continue", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.slug)\n self.__init__(**data)", "def replenish(self, amount: int):\n self._inventory += amount" ]
[ "0.6672313", "0.6481613", "0.64376086", "0.6390931", "0.6384816", "0.63713384", "0.6365049", "0.60815936", "0.6057353", "0.6050127", "0.60095286", "0.59535676", "0.5945936", "0.5925097", "0.5903476", "0.5844133", "0.5825453", "0.58238333", "0.580484", "0.57929665", "0.5770575", "0.5766562", "0.5720572", "0.5709943", "0.56841534", "0.5672511", "0.5639652", "0.5637677", "0.56189495", "0.5607275", "0.56017786", "0.55636716", "0.5563094", "0.554834", "0.55401564", "0.55336004", "0.55224127", "0.5515722", "0.5483069", "0.54706806", "0.54171544", "0.5415899", "0.5400501", "0.5397573", "0.53912014", "0.53837895", "0.53761303", "0.5372011", "0.53712296", "0.5365314", "0.5335415", "0.53292924", "0.5321004", "0.53099585", "0.5305417", "0.5302899", "0.5291723", "0.52915555", "0.5286055", "0.5280044", "0.527761", "0.5269713", "0.525714", "0.5234915", "0.5227667", "0.5220233", "0.52173007", "0.51989686", "0.5198034", "0.51812243", "0.51697284", "0.51638913", "0.51629484", "0.5158226", "0.5142891", "0.51385266", "0.51372266", "0.5125845", "0.51232314", "0.51203287", "0.5118307", "0.511218", "0.51117474", "0.5110671", "0.5108585", "0.510431", "0.5084087", "0.50815076", "0.508113", "0.50637835", "0.5060681", "0.50412834", "0.5035742", "0.50346863", "0.50337535", "0.502085", "0.50190777", "0.50184584", "0.5018147", "0.50155413" ]
0.65848404
1
A helper function that filters and removes items by name from the inventory.
def filter_items(self, *names: str, **kwargs) -> List[Item]:
    limit: int = kwargs.get("limit")
    if limit and names:
        raise ValueError("Cannot pass a limit with multiple items")
    items = [item for item in self if item.name in names]
    items = items if limit is None else items[:limit]
    for item in items:
        self.items.remove(item)
    return items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_item_by_name(name):\n item_found = False\n for item in INVENTORY.copy():\n if name == item['name']:\n item_found = True\n print(f'[INFO] Removing item {item}')\n remove_item(item)\n\n if not item_found:\n print(f'Sorry, we did not find {name} in inventory.')", "def remove_item(player, name):\n for index, gear in enumerate(player[\"inventory\"]):\n if gear[\"type\"] == name:\n if 'quantity' in gear.keys():\n gear['quantity'] -= 1\n if gear['quantity'] < 1:\n del player['inventory'][index]\n else:\n del player['inventory'][index]\n\n return True\n\n return False", "def __delitem__(self, name):\n name = name.lower()\n del self._items[name]\n self._names.remove(name)", "async def removeitem(self, ctx, *, name: str):\n try:\n await self.bot.di.remove_item(ctx.guild, name)\n await ctx.send((await _(ctx, \"Successfully removed {}\")).format(name))\n except KeyError:\n await ctx.send(await _(ctx, \"That item doesn't exist\"))", "def remove_item(self, item_name):\n if item_name in self.item_list:\n self.item_list.remove(item_name)", "async def unload(self, ctx, name: str):\n if name == \"dnd\":\n items = self.bot.dnditems\n elif name == \"dndmagic\":\n items = self.bot.dndmagic\n elif name == \"pokemon\":\n items = self.bot.pokemonitems\n else:\n await ctx.send(await _(ctx, \"That is not a valid input, look at `rp!help unload`\"))\n return\n\n await self.bot.di.remove_items(ctx.guild, *items)\n await ctx.send((await _(ctx, \"Successfully removed all {} items!\")).format(name))", "def drop(self, item_name):\n # Delete item from the player's inventory\n item = self.player.remove(item_name)\n\n # Add item to the current room's inventory\n if item is not None:\n self.current_room.inventory.add(item)\n print(f\"{item_name} dropped.\")\n else:\n print(\"No such item.\")", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def take(self, item_name):\n # Delete item from the current room's inventory\n item = self.current_room.inventory.remove(item_name)\n\n # Add item to player's inventory\n if item is not None:\n self.player.add(item)\n print(f\"{item_name} taken.\")\n else:\n print(\"No such item.\")", "def drop(self, item):\n for obj in self.handler.player.inv:\n if obj.name == item.name:\n dropped = self.remove_from_inv(obj)\n\n # If we popped the item from player's inventory, we \n # can drop it directly. 
Else we need to copy it and\n # drop it\n if dropped:\n self.handler.world.add_item_tile(self.x, self.y, obj)\n else:\n self.handler.world.add_item_tile(self.x, self.y, copy.copy(obj))\n \n return obj.name\n\n return False", "def Filter(self, name, items):\n self.changed = True\n if name in self.ticker_lists:\n self.ticker_lists[name] = [\n t for t in self.ticker_lists[name] if t not in items]", "def drop(self, command):\n \n for item in self.inventory:\n if item.name == command[1]:\n self.location.inventory.append(item)\n self.inventory.remove(item)\n print(\"You dropped a\", item.name)\n return \n print(command[1] + \" is not here!\")", "def remove_item(self,itm):\n inv = self.get_inventory()\n s = str(itm)\n if s in inv:\n if inv[s] > 1:\n inv[s] -= 1\n else:\n del inv[s]\n self.put_inventory(inv)", "def remove_by_name(self, values, name):\n for d in values:\n if d[\"name\"] == name:\n values.remove(d)\n return", "def drop_inventory(self):\n header = \"Choose item to drop:\\n\"\n def drop(get_gameworld_cell, x, y, item):\n item_entity = ItemPickup([item], x, y, get_gameworld_cell)\n events.trigger_event(\"world_add_entity\", item_entity)\n self.inventory.remove(item)\n action_list = [(item, functools.partial(drop, get_gameworld_cell=self.get_gameworld_cell, x=self.x, y=self.y, item=item)) for item in self.inventory]\n if len(action_list) == 0:\n header += \"You hold nothing!\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def remove_items(self, name, remove):\n items = self._get_itemmap(name, 'items')\n drop_item_names = [item for idx, item in enumerate(items, start=1)\n if idx in remove]\n keep_item_idxs = [idx for idx, item in enumerate(items, start=1)\n if idx not in remove]\n new_items = self._meta['masks'][name]['items']\n new_items = [item for idx, item in enumerate(new_items, start=1)\n if idx in keep_item_idxs]\n self._meta['masks'][name]['items'] = new_items\n for drop_item_name in drop_item_names:\n self._data.drop(drop_item_name, axis=1, inplace=True)\n del self._meta['columns'][drop_item_name]\n col_ref = 'columns@{}'.format(drop_item_name)\n if col_ref in self._meta['sets']['data file']['items']:\n self._meta['sets']['data file']['items'].remove(col_ref)\n self._meta['sets'][name]['items'].remove(col_ref)\n return None", "def removeItem(*args):", "def removeItem(*args):", "def use_some(self, item_name, amount_used):\n for item in self.foods:\n if item.name == item_name:\n item.amount -= amount_used", "def inventory_remove(self, item):\n if (item in self.ItemList):\n self.ItemList.remove(item)\n return 0\n # Item not found.\n return 1", "def remove(name):", "def get_item_inventory(self, item):\n return [item_data for item_data in self.inventory if item_data['item_name'] == item]", "def removeItem(self, item):\n if item.type not in self.__inventory__:\n return\n for i in range(0, len(self.__inventory__[item.type])):\n if self.__inventory__[item.type][i].id == item.id:\n self.__inventory__[item.type].pop(i)\n return", "def remove_from_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] -= 1\n\t\t\tif self.inventory[item] == 0:\n\t\t\t\tdel self.inventory[item]", "def remove_book(name):\n global BOOKS\n BOOKS = [book for book in BOOKS if book['name'] != name]", "def remove_from_inventory(self, item_name: str, quantity: Optional[int] = None) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n\n # Item was never in inventory, return true since the end goal is satisfied\n if item_name not 
in self._player_data.inventory:\n return\n\n # If quantity is None, del the item\n if quantity is None:\n del self._player_data.inventory[item_name]\n return\n\n # Update the quantity. if its negative after update, del the item\n self._player_data.inventory[item_name] = max(0, self._player_data.inventory[item_name] - quantity)\n if self._player_data.inventory[item_name] <= 0:\n del self._player_data.inventory[item_name]", "def remove_items(todofile, items):\n for item in filter(lambda x: x.itemid in items, todofile.fetch_items()):\n todofile.remove_todo(item)", "def get_item_from_inventory(allitems, imgname):\n\tfor item in allitems:\n\t\tif 'image_inventory' in allitems[item]:\n\t\t\tif os.path.split(allitems[item]['image_inventory'])[1] == imgname:\n\t\t\t\treturn allitems[item]\n\treturn None", "def get_items_by_name(request, name):\n try:\n items = Items.objects.filter(titulo__icontains=name)\n except Items.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ItemsSerializer(items, many=True)\n return Response(serializer.data)", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def items_contains_name(items, name):\n ret = 0\n # Loops all items and saves the searched one\n for x in range(len(items)):\n if items[x]['name'] == name:\n ret = x\n return ret", "def unequip(self, command):\n\n if len(command) > 1:\n for item in self.weapon:\n if command[1] == item.name:\n if command[1] == 'knife' or command[1] == 'stick' or command[1] == 'rock':\n self.weapon.remove(item)\n self.inventory.append(item)\n print(\"You unequipped a \" + item.name)\n return\n else:\n print(\"You don't have anything equipped\")\n else:\n print(\"Unequip what?\")", "def deleteInstrumentFromName(self, name):\n matching_instruments = list(filter(lambda x: x.name == name,\n self.instruments))\n assert len(matching_instruments) == 1\n del self.instruments[name]", "def __delitem__(self, name):\n name = name.lower()\n newheaders = []\n for k, v in self._headers:\n if k.lower() <> name:\n newheaders.append((k, v))\n self._headers = newheaders", "def remaining_inventory(self):\n for bike in self.sold:\n if bike in self.inventory:\n self.inventory.remove(bike)\n print \"{}'s remaining inventory is: {}\".format(\n self.bikeshop_name, self.inventory)", "async def remove(self, ctx, name: str, items: str):\n server = ctx.message.server\n items = items.split(\", \")\n itemis = dict()\n for item in items:\n item, value = item.split(\" \")\n item = item.replace(\"_\", \" \").lower()\n itemis[item] = value\n if server.id not in self.db:\n self.db[server.id] = {}\n if name not in self.db[server.id]:\n await self.bot.say(\"Box doesn't exist, please make sure the spelling is correct and\"\n \" that it's found in [p]box list\")\n return\n for item in itemis:\n value = itemis[item]\n print(item)\n if item in self.db[server.id][name][\"content\"]:\n del itemis[item]\n continue\n else:\n self.db[server.id][name][\"content\"][item] = value\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n await self.bot.say(\"{} box's has added the following items:\\n{}\".format(name, \"\\n\".join(list(itemis))))", "def destroy_item(game, *args):\n (item, action_description, already_done_description) = args[0]\n if game.is_in_inventory(item):\n game.inventory.pop(item.name)\n print_bold(action_description)\n elif item.name in game.curr_location.items:\n game.curr_location.remove_item(item)\n print_bold(action_description)\n else:\n 
print_bold(already_done_description)\n return False", "def filter_del(name):\n\n\tweechat.command(weechat.buffer_search_main(), \"/mute filter del %s\" % name)", "def leaveInMPR():\n cont=True\n if len(inventory)>0:\n item = raw_input(\"What do you want to leave in the MPR?\")\n while cont:\n inInventory = False\n for stuff in inventory:\n if stuff.lower() == item.lower():\n inInventory = True\n break\n if not inInventory:\n item = raw_input(\"You search yourself for it but you can't find it. Try another item.\")\n else:\n print(\"You leave\",item,\"in the MPR.\")\n inventory.remove(item)\n mprstorage.append(item)\n inventoryCall()\n mprStorageCall()\n cont=False\n else:\n print(\"You don't have any items to leave in the MPR.\")", "def itemFilter(*args, byBin: Union[AnyStr, List[AnyStr], bool]=\"\", byName: Union[AnyStr,\n bool]=\"\", byScript: Union[AnyStr, bool]=\"\", byType: Union[AnyStr, List[AnyStr],\n bool]=\"\", category: Union[AnyStr, List[AnyStr], bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", clearByBin: bool=True, clearByType: bool=True,\n difference: Union[List[AnyStr, AnyStr], bool]=None, exists: bool=True,\n intersect: Union[List[AnyStr, AnyStr], bool]=None, listBuiltInFilters: bool=True,\n listOtherFilters: bool=True, listUserFilters: bool=True, negate: bool=True,\n parent: Union[AnyStr, bool]=\"\", pythonModule: Union[AnyStr, bool]=\"\",\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, uniqueNodeNames: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def removeItems(c, items):\n\t\tcontainer.containersToSave[c['id_item_container']] = item.inventory.removeItems(\n\t\t\titem.inventory.fromStr(c['items']),\n\t\t\titems\n\t\t)", "def remove_field_from_every_item_in_response_copy(context, name):\n items = context.response_copy['items']\n for item in items:\n print(item)\n if item['owner']['user_type'] == 'does_not_exist':\n continue\n if name in item:\n del(item[name])\n logging.debug(\n 'Successfully removed field \"%s\" from item: %s', name,\n item['question_id']) \n else:\n logging.debug(\n 'Item %s does not contain field \"%s', name, item) \n logging.debug(\n 'Response copy after removing \"%s\" field:\\n%s', name,\n pformat(context.response_copy))", "def remove_item_from_inventory(life, item_id):\n\titem = get_inventory_item(life, item_id)\n\t\n\t_holding = is_holding(life, item_id)\n\tif _holding:\n\t\t_holding['holding'].remove(item_id)\n\t\t#logging.debug('%s stops holding a %s' % (life['name'][0],item['name']))\n\t\t\n\telif item_is_equipped(life, item_id):\n\t\tlogging.debug('%s takes off a %s' % (life['name'][0],item['name']))\n\t\n\t\tif 'attaches_to' in item:\n\t\t\tfor limb in item['attaches_to']:\n\t\t\t\tremove_item_from_limb(life,item['uid'],limb)\n\t\t\n\t\titem['pos'] = life['pos'][:]\n\t\n\telif item_is_stored(life, item_id):\n\t\titem['pos'] = life['pos'][:]\n\t\tremove_item_in_storage(life, item_id)\n\telif not item_is_stored(life, item_id):\n\t\tprint 'item is NOT stored'\n\t\n\tif 'max_capacity' in item:\n\t\tlogging.debug('Dropping container storing:')\n\t\t\n\t\tfor _item in item['storing']:\n\t\t\tlogging.debug('\\tdropping %s' % _item)\n\t\t\t\n\t\t\t#item['storing'].remove(_item)\n\t\t\t#item['storing'].append(get_inventory_item(life,_item)['uid'])\n\t\t\tif ITEMS[_item]['owner']:\n\t\t\t\tITEMS[_item]['owner'] = None\n\t\t\t\n\t\t\tITEMS[_item]['pos'] = 
life['pos'][:]\n\t\t\t\n\t\t\titems.add_to_chunk(ITEMS[_item])\n\t\t\t\n\t\t\tlife['inventory'].remove(_item)\n\t\n\tlife['speed_max'] = get_max_speed(life)\n\t\n\tif 'player' in life:\n\t\tmenus.remove_item_from_menus({'id': item['uid']})\n\t\n\t#logging.debug('%s removed item from inventory: %s (#%s)' % (' '.join(life['name']), item['name'], item['uid']))\n\t\n\tlife['inventory'].remove(item['uid'])\n\tdel item['parent_id']\n\titem['owner'] = None\n\t\n\tcreate_and_update_self_snapshot(life)\n\t\n\treturn item_id", "def handle_search(self, todos, item):\n todos_removed = todos.copy()\n todos_removed.remove(item)\n return todos_removed", "def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return filtered, ordered", "def unequip(self, item_name: str, quantity: Optional[int] = None) -> None:\n raise_if_false((quantity is not None) and (quantity <= 0), f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.equipped_items, f\"Can't unequip [{item_name}], not equipped\")\n\n if quantity is None:\n quantity_to_unequip = self._player_data.equipped_items[item_name]\n del self._player_data.equipped_items[item_name]\n\n else:\n quantity_to_unequip = min(quantity, self._player_data.equipped_items[item_name])\n self._player_data.equipped_items[item_name] -= quantity_to_unequip\n\n self.add_to_inventory(item_name, quantity_to_unequip)\n\n if self._player_data.equipped_items[item_name] == 0:\n del self._player_data.equipped_items[item_name]", "def remove_mix(self, name: str) -> None:\n self.remove(name)", "def remove_enemy_from_list(name):\n\n def write_to_file(data):\n with open(\"data.json\", \"w\") as file2:\n json.dump(data, file2, indent=4)\n\n with open(\"data.json\", \"r\") as file:\n data = json.load(file)\n for _name in data[\"enemy_data\"][\"names\"]:\n if name == _name:\n data[\"enemy_data\"][\"names\"].remove(name)\n 
write_to_file(data)\n return True\n \n return False", "def itemFilterAttr(*args, byName: Union[AnyStr, bool]=\"\", byNameString: Union[AnyStr,\n List[AnyStr], bool]=\"\", byScript: Union[AnyStr, bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", dynamic: bool=True, exists: bool=True, hasCurve:\n bool=True, hasDrivenKey: bool=True, hasExpression: bool=True, hidden:\n bool=True, intersect: Union[List[AnyStr, AnyStr], bool]=None, keyable:\n bool=True, listBuiltInFilters: bool=True, listOtherFilters: bool=True,\n listUserFilters: bool=True, negate: bool=True, parent: AnyStr=\"\", published:\n bool=True, readable: bool=True, scaleRotateTranslate: bool=True,\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, writable: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)", "def delete_item(self, list_name: str, item_name: str) -> None:\n todo_list = self.get_list(list_name)\n for index, item in enumerate(todo_list.items):\n if item.name == item_name:\n todo_list.delete_item(index)", "def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp", "def get(self, command):\n\n for item in self.location.inventory:\n if item.name == command[1]:\n self.inventory.append(item)\n self.location.inventory.remove(item)\n print(\"You picked up a\", item.name)\n return\n print(command[1] + \" is not here!\")", "def __delitem__(name):", "def discard(self, item):\n if item in self._inventory:\n self._erase(item)\n self.on_discard_item.trigger()\n self.echo(self.text(\"DISCARD\", item=item.name))\n else:\n self.echo(self.text(\"DISCARD\", item=item.name))", "def get_products_by_name(name: str = '') -> List[Product]:\n products = []\n for p in get_market().products.values():\n if not name or p.name == name:\n products.append(p)\n return products", "def clean(self):\n filtered_items = {}\n for name, ls in self.items.items():\n filtered_ls = []\n for i in ls:\n if i.alive():\n filtered_ls.append(i)\n else:\n self.del_item(i)\n filtered_items[name] = filtered_ls\n self.items = filtered_items", "def test_remove_item_test_remove_multiple_item():\n sc.menu = sc.default_menu\n sc.current.add_item('Fries', 3)\n sc.current.add_item('Steak', 1)\n sc.current.remove_item('Fries', 2)\n sc.current.receipt == {'subtotal': 3.28, 'Fries': 1, 'Steak': 1}", "def do_inventory(self, arg):\r\n\r\n if len(inventory) == 0:\r\n print('Inventory:\\n (nothing)')\r\n return\r\n\r\n # first get a count of each distinct item in the inventory\r\n itemCount = {}\r\n for item in inventory:\r\n if item in itemCount.keys():\r\n itemCount[item] += 1\r\n else:\r\n itemCount[item] = 1\r\n\r\n # get a list of inventory items with duplicates removed:\r\n print('Inventory:')\r\n for item in set(inventory):\r\n if itemCount[item] > 1:\r\n print(' %s (%s)' % (item, itemCount[item]))\r\n else:\r\n print(' ' + item)", "def Collection_remove_by_name(C: list, name: str) -> list:\r\n result = [ ]\r\n for r in C:\r\n if r.name != 
name:\r\n result.append(r)\r\n return result", "def remove(self, identifier: int):\n self.items = list(filter(lambda x: x.identifier != identifier, self.items))", "def remove_by_name(self, product_name, force=False):\n count = self.remove(\"product_name == @product_name\", {\"product_name\": product_name}, force)\n assert(count >= 0)\n\n if count == 0:\n raise Error(\"no products found with name '%s'\" % product_name)\n\n return count", "def get_el_by_name(items: List[Dict[str, Any]], name: str) -> Dict[str, Any]:\n for item in items:\n if item[\"name\"] == name:\n return item\n print(\"error, key name not found by value\", name, \"in list: \", items)\n sys.exit(1)", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def do_drop(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToDrop = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n\r\n # find out if the player doesn't have that item\r\n if itemToDrop not in invDescWords:\r\n print('You do not have \"%s\" in your inventory.' % (itemToDrop))\r\n return\r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToDrop, inventory)\r\n if item != None:\r\n print('You drop %s.' 
% (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][GROUND].append(item) # add to the ground\r", "def equip(self, item_name: str, quantity: int) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.inventory, f\"Can't equip [{item_name}], not in inventory\")\n\n quantity_to_equip = min(quantity, self._player_data.inventory[item_name])\n if item_name in self._player_data.equipped_items:\n self._player_data.equipped_items[item_name] += quantity_to_equip\n else:\n self._player_data.equipped_items[item_name] = quantity_to_equip\n\n self.remove_from_inventory(item_name, quantity_to_equip)", "def remove(self, *names):\n for name in names:\n self._storage.pop(name, None)", "def remove(self, name):\n for var in self.inputs:\n if var.name == name:\n self.inputs.remove(var)\n return\n for var in self.outputs:\n if var.name == name:\n self.outputs.remove(var)\n return", "def __delitem__(self, skillName):\r\n self.removeSkill(skillName)", "def remove(name, send_events=True, moving=False):", "def equip(self, command):\n\n if len(command) > 1:\n if not self.weapon:\n for item in self.inventory:\n if item.name == command[1]:\n if command[1] == 'knife' or command[1] == 'rock' or command[1] == 'stick' or command[1] == 'lamp':\n self.inventory.remove(item)\n self.weapon.append(item)\n print(\"You equipped a \" + item.name)\n return\n else:\n print(\"You can't equip that\")\n else:\n print('You cannot equip two items \\nYou must unequip the ' + self.weapon[0].name + ' first.')\n else:\n print(\"Equip what?\")", "def remove_quantity(shared):\n\n not_done = True\n while not_done:\n input_string = input('Enter name of quantity to remove '\n '[leave blank to cancel]: ').strip()\n if not input_string:\n return\n\n for i, fm in enumerate(shared.field_mappings):\n if fm.title == input_string:\n if fm.extra is not None:\n not_done = False\n slot = i\n break\n else:\n print (' >> Cannot remove datafile quantity!')\n return\n else:\n print(' >> Unknown quantity!')\n return\n\n shared.config.remove_option('extra', input_string)\n del shared.field_mappings[slot]", "def remove(self, items):\n if isinstance(items, Container):\n items = items.items\n for item in items:\n # return an error if the container does not have (enough of) an item.\n if item not in self.items:\n raise ValueError(f'Item: \"{item}\" not present in Container.')\n elif items[item] > self.items[item]:\n raise ValueError(f'More \"{item}\"s removed than in Container.')\n\n elif items[item] == self.items[item]:\n del self.items[item]\n else:\n self.items[item] -= items[item]\n self.__sub__(items)", "def eliminate(sv, nam):\r\n del sv.Object[nam] # from sv.Object dictionary\r\n sv.Object_list.remove(nam)", "def get(self, item_name):\n for item in self._inventory:\n if item.name == item_name or item_name in item.synonyms:\n return item\n\n return None", "def takeFromMPR():\n cont=True\n if len(mprstorage)>0:\n item = raw_input(\"What do you want to take frpm the MPR?\")\n while cont:\n inStorage = False\n for stuff in mprstorage:\n if stuff.lower() == item.lower():\n inStorage = True\n break\n if not inStorage:\n item = raw_input(\"You search for it but you can't find it. 
Try another item.\")\n else:\n print(\"You take\",item,\"from the MPR.\")\n inventory.append(item)\n mprstorage.remove(item)\n inventoryCall()\n mprStorageCall()\n cont=False\n else:\n print(\"There aren't any items to take from the MPR.\")", "def drop(self, pitem):\n\n #if the item is not inside the item list, can't drop it \n if pitem not in self.items:\n print('The player does not carry the item')\n\n #if not, remove the item \n else:\n self.items.remove(pitem)", "def removeJim(name_list):\n name_list.remove(\"jim\")\n return(name_list)", "def search_product_by_name(name, filters):\n return store_handler.search_product_by_name(name, filters)", "def get_item(self, name: str) -> list:\n self.sql_lock.acquire()\n items = []\n query: str = \"SELECT * FROM menu Where item_name LIKE \\\"{0}\\\"\" \n querys = [] \n query = query.split(\"--\")[0]\n\n if '\\\"' in name:\n potential_querys = name.split(\"\\\"\") \n querys.append(query.format(potential_querys[0]))\n potential_querys = potential_querys[1].split(\";\")\n for query_to_run in potential_querys:\n if \"SELECT\" in query_to_run: \n for item in self.cursor.execute(query_to_run):\n items.append(item)\n else:\n self.cursor.execute(query_to_run)\n else: \n \n for item in self.cursor.execute(query.format(name)):\n item_name, cost, path, id = item\n items.append({\"item_name\": item_name, \"cost\": cost, \"path\": path, \"Id\": id})\n self.sql_lock.release()\n \n return items", "def handle_items():\n check50.exists(\"inventory.py\")\n # Take keys check\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS taken.\")\n\n # Drop keys check then look for dropped keys check\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\", \"OUT\", \"DROP keys\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS dropped.\")", "def remove_object(self, name):\n name = name if isinstance(name, str) else name.name\n for obj in self._objects:\n if name == obj.name:\n logger.debug('Removing object with name \"{}\"'.format(name))\n self._objects.remove(obj)", "def remove(self, name):\n raise NotImplementedError", "async def drop_cards(q: Q, card_names: list):\n\n for card_name in card_names:\n del q.page[card_name]", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print (product + \" removed.\")\n else:\n print (product + \" is not in the cart.\")", "def eat(self, command):\n \n if len(command) > 1:\n if self.inventory:\n for item in self.inventory:\n if item.name == command[1] and item.name != 'stick' and item.name != 'rock' and item.name != 'lamp' and item.name != 'stick':\n self.health += item.food\n if item.name == 'body':\n print(\"That's just gross..\")\n elif item.name == 'thing':\n print('It tasted like bacon..')\n elif item.name == 'plunger':\n print(\"+5 health, for effort..\")\n else:\n print('You consumed a ' + item.name)\n self.inventory.remove(item) \n print('Your health is now ' + str(self.health))\n else:\n print(\"You have no consumables in your inventory\")\n else:\n print('Consume what?')", "def filter_0_items(inventory):\r\n\r\n\tnew_list = [] # create an empty dictionary\r\n\tfor key in inventory: # iterate through the list\r\n\t\tif inventory[key] == 0: # check for key = 0, if it is then\r\n\t\t\tnew_list.append(key) # add it to a new list\r\n\r\n\tfor keys in new_list:#iterting through new_list\r\n\t\tdel 
inventory[keys]\r\n\r\n\treturn inventory", "async def unequip(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n item = ' '.join(args)\n out = ch.unequip_item(ctx.user_object, item.lower())\n await ctx.send(out)", "def find_item(name, currentRoom):\n for i in currentRoom.contents:\n if i.name == name:\n return i\n\n return None", "def remove_asset(self, name):\n if name in self.assets:\n del self.assets[name]", "def test_removeitem(run, mocker):\n mocked_remove = mocker.patch('dork.cli.remove_item')\n with open('./dork/yaml/default.yml') as file:\n # Should not call load directly\n data = yaml.safe_load(file.read())\n\n # remove_item uses game as an arg\n game = types.Game(data)\n game.player.inventory = ['key']\n run(dork.cli.remove_item, game)\n assert mocked_remove.call_count == 1", "def getItemsForArtist(self,name):\n return [i for i in self.items if i.artist == name]", "def remove_product_from_store_inventory(user_name, product_id, store_name):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.REMOVE_PRODUCT_FROM_INVENTORY.value,\n store_name)\n store_handler.remove_product_from_store_inventory(user_name, product_id, store_name)", "def inventory(self) -> [str]:\r\n inventory_to_use = []\r\n items = [\"Apple\", \"Sword\", \"Shield\", \"Dagger\"]\r\n\r\n for item_in_items in range(2):\r\n if item_in_items <= 2:\r\n index = randint(0, len(items)) - 1\r\n inventory_to_use.append(items[index])\r\n del items[index]\r\n return inventory_to_use", "def filter_0_items(inventory):\n for k,v in inventory.copy().items():\n if inventory[k] == 0:\n inventory.pop(k)\n return inventory", "def exclude_other_class_items(_items, class_name):\n\n class_skills = class_skill_names(class_name)\n other_skill_names = list(set(all_class_skill_names()) - set(class_skills)) + class_attributes(Classes.DEMON_HUNTER)\n\n def match_invert_skills(item):\n \"\"\" filter items based on if they match a class skill \"\"\"\n text = item.text\n\n if any([skill in text for skill in other_skill_names]):\n if any([skill in text for skill in class_skills]): # double check\n print('found a wizard skill', [skill for skill in class_skills if skill in text])\n print(item)\n return True\n return False\n return True\n\n return list(filter(match_invert_skills, _items))\n\n # def match_invert_skills(_item):\n # \"\"\" filter items based on if they match a class skill \"\"\"\n # text = _item.text\n #\n # if any([skill in text for skill in other_skill_names]):\n #\n # if any([skill in text for skill in class_skills]): # double check\n # print('found aa wizard skill', [skill for skill in class_skills if skill in text])\n # print(_item)\n # return True\n # return False\n #\n # print('lolll')\n # return True\n #\n # print(other_skill_names)\n # to_return = []\n # for item in _items:\n # if match_invert_skills(item):\n # to_return.append(item)\n #\n #\n # return to_return", "def searchItems(name, allPages = False):\n return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)", "def DelVid(self):\n delvid=input(\"Enter title to remove \")\n \n #Avoid termination on key error if value not in dictionary\n try:\n self.videos.pop(delvid)\n except KeyError:\n print(\"Item not in the inventory\")", "def remove_from_inventory(item, location, quantity, user=None):\n\n quantity = -quantity\n\n return InventoryTransaction.add_to_inventory(item=item, location=location, quantity=quantity, user=user)", "def filter_by_name(pillar_key, nodename=None):\n if nodename 
is None:\n nodename = __grains__['id']\n\n dictionary = __pillar__.get(pillar_key, {})\n filtered_list = []\n\n for name, items in dictionary.items():\n if name == '*' or name == nodename:\n filtered_list.extend(items)\n\n return filtered_list" ]
[ "0.80733", "0.69468737", "0.66359633", "0.64539057", "0.6433509", "0.6354961", "0.63414466", "0.6290945", "0.6256641", "0.6235117", "0.6185239", "0.616043", "0.6142811", "0.61316514", "0.6110784", "0.6085078", "0.6080392", "0.6080392", "0.6016932", "0.5994718", "0.5987538", "0.59614265", "0.59522945", "0.58589184", "0.5856384", "0.5846583", "0.5840075", "0.5834727", "0.5807419", "0.5805432", "0.5785957", "0.5703334", "0.5687712", "0.5674354", "0.56699854", "0.5657876", "0.5643842", "0.5619208", "0.5609236", "0.5606954", "0.55608517", "0.5539314", "0.5511665", "0.5498252", "0.5497599", "0.54559046", "0.54438883", "0.54325396", "0.5429076", "0.54287016", "0.54233795", "0.5410514", "0.5408009", "0.53676", "0.536282", "0.5330597", "0.5321402", "0.53151524", "0.5312533", "0.5311379", "0.5306637", "0.52880275", "0.5286293", "0.5248411", "0.52466345", "0.5244196", "0.52405536", "0.5226458", "0.52259594", "0.52188665", "0.5179695", "0.5176344", "0.5171062", "0.5168772", "0.5155537", "0.51518357", "0.5149531", "0.51396716", "0.5133044", "0.51227105", "0.5118508", "0.51180923", "0.5111424", "0.5105508", "0.5101412", "0.5099018", "0.509043", "0.50874096", "0.5078699", "0.5076739", "0.5065608", "0.5051298", "0.5045939", "0.504411", "0.5043306", "0.50380385", "0.50250375", "0.50227684", "0.5010105", "0.5007765" ]
0.622253
10
A helper function that gets and removes an item by name from the inventory.
def get_item(self, name: str) -> Optional[Item]:
    item = self.filter_items(name, limit=1)
    return item[0] if item else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_item_by_name(name):\n item_found = False\n for item in INVENTORY.copy():\n if name == item['name']:\n item_found = True\n print(f'[INFO] Removing item {item}')\n remove_item(item)\n\n if not item_found:\n print(f'Sorry, we did not find {name} in inventory.')", "async def removeitem(self, ctx, *, name: str):\n try:\n await self.bot.di.remove_item(ctx.guild, name)\n await ctx.send((await _(ctx, \"Successfully removed {}\")).format(name))\n except KeyError:\n await ctx.send(await _(ctx, \"That item doesn't exist\"))", "def remove_item(player, name):\n for index, gear in enumerate(player[\"inventory\"]):\n if gear[\"type\"] == name:\n if 'quantity' in gear.keys():\n gear['quantity'] -= 1\n if gear['quantity'] < 1:\n del player['inventory'][index]\n else:\n del player['inventory'][index]\n\n return True\n\n return False", "def drop(self, item_name):\n # Delete item from the player's inventory\n item = self.player.remove(item_name)\n\n # Add item to the current room's inventory\n if item is not None:\n self.current_room.inventory.add(item)\n print(f\"{item_name} dropped.\")\n else:\n print(\"No such item.\")", "def take(self, item_name):\n # Delete item from the current room's inventory\n item = self.current_room.inventory.remove(item_name)\n\n # Add item to player's inventory\n if item is not None:\n self.player.add(item)\n print(f\"{item_name} taken.\")\n else:\n print(\"No such item.\")", "def remove_item(self,itm):\n inv = self.get_inventory()\n s = str(itm)\n if s in inv:\n if inv[s] > 1:\n inv[s] -= 1\n else:\n del inv[s]\n self.put_inventory(inv)", "def __delitem__(self, name):\n name = name.lower()\n del self._items[name]\n self._names.remove(name)", "def remove_item(self, item_name):\n if item_name in self.item_list:\n self.item_list.remove(item_name)", "def drop(self, item):\n for obj in self.handler.player.inv:\n if obj.name == item.name:\n dropped = self.remove_from_inv(obj)\n\n # If we popped the item from player's inventory, we \n # can drop it directly. 
Else we need to copy it and\n # drop it\n if dropped:\n self.handler.world.add_item_tile(self.x, self.y, obj)\n else:\n self.handler.world.add_item_tile(self.x, self.y, copy.copy(obj))\n \n return obj.name\n\n return False", "def inventory_remove(self, item):\n if (item in self.ItemList):\n self.ItemList.remove(item)\n return 0\n # Item not found.\n return 1", "def removeItem(self, item):\n if item.type not in self.__inventory__:\n return\n for i in range(0, len(self.__inventory__[item.type])):\n if self.__inventory__[item.type][i].id == item.id:\n self.__inventory__[item.type].pop(i)\n return", "def remove_from_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] -= 1\n\t\t\tif self.inventory[item] == 0:\n\t\t\t\tdel self.inventory[item]", "def drop(self, command):\n \n for item in self.inventory:\n if item.name == command[1]:\n self.location.inventory.append(item)\n self.inventory.remove(item)\n print(\"You dropped a\", item.name)\n return \n print(command[1] + \" is not here!\")", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def removeItem(*args):", "def removeItem(*args):", "def destroy_item(game, *args):\n (item, action_description, already_done_description) = args[0]\n if game.is_in_inventory(item):\n game.inventory.pop(item.name)\n print_bold(action_description)\n elif item.name in game.curr_location.items:\n game.curr_location.remove_item(item)\n print_bold(action_description)\n else:\n print_bold(already_done_description)\n return False", "def get_item_from_inventory(allitems, imgname):\n\tfor item in allitems:\n\t\tif 'image_inventory' in allitems[item]:\n\t\t\tif os.path.split(allitems[item]['image_inventory'])[1] == imgname:\n\t\t\t\treturn allitems[item]\n\treturn None", "def remove_from_inventory(self, item_name: str, quantity: Optional[int] = None) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n\n # Item was never in inventory, return true since the end goal is satisfied\n if item_name not in self._player_data.inventory:\n return\n\n # If quantity is None, del the item\n if quantity is None:\n del self._player_data.inventory[item_name]\n return\n\n # Update the quantity. 
if its negative after update, del the item\n self._player_data.inventory[item_name] = max(0, self._player_data.inventory[item_name] - quantity)\n if self._player_data.inventory[item_name] <= 0:\n del self._player_data.inventory[item_name]", "def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)", "async def unload(self, ctx, name: str):\n if name == \"dnd\":\n items = self.bot.dnditems\n elif name == \"dndmagic\":\n items = self.bot.dndmagic\n elif name == \"pokemon\":\n items = self.bot.pokemonitems\n else:\n await ctx.send(await _(ctx, \"That is not a valid input, look at `rp!help unload`\"))\n return\n\n await self.bot.di.remove_items(ctx.guild, *items)\n await ctx.send((await _(ctx, \"Successfully removed all {} items!\")).format(name))", "def get(self, command):\n\n for item in self.location.inventory:\n if item.name == command[1]:\n self.inventory.append(item)\n self.location.inventory.remove(item)\n print(\"You picked up a\", item.name)\n return\n print(command[1] + \" is not here!\")", "def drop_inventory(self):\n header = \"Choose item to drop:\\n\"\n def drop(get_gameworld_cell, x, y, item):\n item_entity = ItemPickup([item], x, y, get_gameworld_cell)\n events.trigger_event(\"world_add_entity\", item_entity)\n self.inventory.remove(item)\n action_list = [(item, functools.partial(drop, get_gameworld_cell=self.get_gameworld_cell, x=self.x, y=self.y, item=item)) for item in self.inventory]\n if len(action_list) == 0:\n header += \"You hold nothing!\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def unequip(self, command):\n\n if len(command) > 1:\n for item in self.weapon:\n if command[1] == item.name:\n if command[1] == 'knife' or command[1] == 'stick' or command[1] == 'rock':\n self.weapon.remove(item)\n self.inventory.append(item)\n print(\"You unequipped a \" + item.name)\n return\n else:\n print(\"You don't have anything equipped\")\n else:\n print(\"Unequip what?\")", "def remove(name):", "def unequip(self, item_name: str, quantity: Optional[int] = None) -> None:\n raise_if_false((quantity is not None) and (quantity <= 0), f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.equipped_items, f\"Can't unequip [{item_name}], not equipped\")\n\n if quantity is None:\n quantity_to_unequip = self._player_data.equipped_items[item_name]\n del self._player_data.equipped_items[item_name]\n\n else:\n quantity_to_unequip = min(quantity, self._player_data.equipped_items[item_name])\n self._player_data.equipped_items[item_name] -= quantity_to_unequip\n\n self.add_to_inventory(item_name, quantity_to_unequip)\n\n if self._player_data.equipped_items[item_name] == 0:\n del self._player_data.equipped_items[item_name]", "def item_remove(self, item):\n\t\treturn self._modify_object(item=item, new_item=\"\")", "def remove_item(self, item):\n node = self.find(item)\n if node:\n self.delete(node)\n return node.item\n else:\n return None", "def remove_item_from_inventory(life, item_id):\n\titem = get_inventory_item(life, item_id)\n\t\n\t_holding = is_holding(life, item_id)\n\tif _holding:\n\t\t_holding['holding'].remove(item_id)\n\t\t#logging.debug('%s stops holding a %s' % (life['name'][0],item['name']))\n\t\t\n\telif item_is_equipped(life, item_id):\n\t\tlogging.debug('%s takes off a %s' % (life['name'][0],item['name']))\n\t\n\t\tif 'attaches_to' in item:\n\t\t\tfor limb in 
item['attaches_to']:\n\t\t\t\tremove_item_from_limb(life,item['uid'],limb)\n\t\t\n\t\titem['pos'] = life['pos'][:]\n\t\n\telif item_is_stored(life, item_id):\n\t\titem['pos'] = life['pos'][:]\n\t\tremove_item_in_storage(life, item_id)\n\telif not item_is_stored(life, item_id):\n\t\tprint 'item is NOT stored'\n\t\n\tif 'max_capacity' in item:\n\t\tlogging.debug('Dropping container storing:')\n\t\t\n\t\tfor _item in item['storing']:\n\t\t\tlogging.debug('\\tdropping %s' % _item)\n\t\t\t\n\t\t\t#item['storing'].remove(_item)\n\t\t\t#item['storing'].append(get_inventory_item(life,_item)['uid'])\n\t\t\tif ITEMS[_item]['owner']:\n\t\t\t\tITEMS[_item]['owner'] = None\n\t\t\t\n\t\t\tITEMS[_item]['pos'] = life['pos'][:]\n\t\t\t\n\t\t\titems.add_to_chunk(ITEMS[_item])\n\t\t\t\n\t\t\tlife['inventory'].remove(_item)\n\t\n\tlife['speed_max'] = get_max_speed(life)\n\t\n\tif 'player' in life:\n\t\tmenus.remove_item_from_menus({'id': item['uid']})\n\t\n\t#logging.debug('%s removed item from inventory: %s (#%s)' % (' '.join(life['name']), item['name'], item['uid']))\n\t\n\tlife['inventory'].remove(item['uid'])\n\tdel item['parent_id']\n\titem['owner'] = None\n\t\n\tcreate_and_update_self_snapshot(life)\n\t\n\treturn item_id", "def delete_item(self, list_name: str, item_name: str) -> None:\n todo_list = self.get_list(list_name)\n for index, item in enumerate(todo_list.items):\n if item.name == item_name:\n todo_list.delete_item(index)", "def poplar(self, item_to_be_popped):\n if self.check_inventory(item_to_be_popped): # Basic check to see if it's in the list\n als_lament = item_to_be_popped# ;P\n for an_item in self.bag_of_holding: # here we are extracting an the index of the object in the list\n if an_item.name == item_to_be_popped:\n index = self.bag_of_holding.index(an_item)\n to_be_returned = self.bag_of_holding[index]\n # and here is where the majic happens and the item is removed from the list.\n self.bag_of_holding.remove(self.bag_of_holding[index])\n else:\n # for testing porpoises if the item is not in dah bag, remove later.\n print(\" {} was not found in bag of holding.\".format(item_to_be_popped))\n return None\n return to_be_returned", "def __delitem__(self, skillName):\r\n self.removeSkill(skillName)", "def delete_item(category, name):\r\n item_key = course_key.make_usage_key(category, name)\r\n resp = self.client.delete(get_url('xblock_handler', item_key))\r\n self.assertEqual(resp.status_code, 204)\r\n _test_no_locations(self, resp, status_code=204, html=False)", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print (product + \" removed.\")\n else:\n print (product + \" is not in the cart.\")", "def __delitem__(name):", "def drop_item(life, item_id):\n\titem = items.get_item_from_uid(remove_item_from_inventory(life, item_id))\n\titem['pos'] = life['pos'][:]\n\titems.add_to_chunk(item)\n\t\n\t#TODO: Don't do this here/should probably be a function anyway.\n\tfor hand in life['hands']:\n\t\t_hand = get_limb(life, hand)\n\t\t\n\t\tif item_id in _hand['holding']:\n\t\t\t_hand['holding'].remove(item_id)\n\t\n\treturn item['uid']", "def remove_from_bag(request, item_id):\n\n resort = get_object_or_404(Resort, pk=item_id)\n\n try:\n bag = request.session.get('bag', {})\n ticket_type = request.POST['type']\n friendly_type = ticket_type.replace('_quantity', '')\n\n del bag[item_id][f'{ticket_type}']\n if not bag[item_id]:\n bag.pop(item_id)\n messages.success(\n request, f'Removed {resort}, {friendly_type}\\\n passes from your bag')\n\n 
request.session['bag'] = bag\n return HttpResponse(status=200)\n except Exception as e:\n messages.error(request, f'Error removing item: {e}')\n return HttpResponse(status=500)", "def test_removeitem(run, mocker):\n mocked_remove = mocker.patch('dork.cli.remove_item')\n with open('./dork/yaml/default.yml') as file:\n # Should not call load directly\n data = yaml.safe_load(file.read())\n\n # remove_item uses game as an arg\n game = types.Game(data)\n game.player.inventory = ['key']\n run(dork.cli.remove_item, game)\n assert mocked_remove.call_count == 1", "def remove(self, item):\n del self._dict[item]", "def do_drop(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToDrop = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n\r\n # find out if the player doesn't have that item\r\n if itemToDrop not in invDescWords:\r\n print('You do not have \"%s\" in your inventory.' % (itemToDrop))\r\n return\r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToDrop, inventory)\r\n if item != None:\r\n print('You drop %s.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][GROUND].append(item) # add to the ground\r", "def drop(self, pitem):\n\n #if the item is not inside the item list, can't drop it \n if pitem not in self.items:\n print('The player does not carry the item')\n\n #if not, remove the item \n else:\n self.items.remove(pitem)", "def use_some(self, item_name, amount_used):\n for item in self.foods:\n if item.name == item_name:\n item.amount -= amount_used", "def deleteItem(list,item):\n print \"I deleted this item:\", item\n list.remove(item)", "def remove_from_inventory(item, location, quantity, user=None):\n\n quantity = -quantity\n\n return InventoryTransaction.add_to_inventory(item=item, location=location, quantity=quantity, user=user)", "def find_item(name, currentRoom):\n for i in currentRoom.contents:\n if i.name == name:\n return i\n\n return None", "def remove(self, item): \n item_id = str(item.id)\n if item_id in self.cart:\n # removal of item from cart\n del self.cart[item_id]\n # save method to update the cart in session\n self.save()", "def equip(self, item_name: str, quantity: int) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.inventory, f\"Can't equip [{item_name}], not in inventory\")\n\n quantity_to_equip = min(quantity, self._player_data.inventory[item_name])\n if item_name in self._player_data.equipped_items:\n self._player_data.equipped_items[item_name] += quantity_to_equip\n else:\n self._player_data.equipped_items[item_name] = quantity_to_equip\n\n self.remove_from_inventory(item_name, quantity_to_equip)", "def remove_item(self, product):\r\n if product in self.items_in_cart:\r\n del self.items_in_cart[product]\r\n print(product + \" removed.\")\r\n else:\r\n print(product + \" is not in the cart.\")", "def take(self, item): \n self.contents.remove(item)", "def execute_drop(item_id):\r\n if (item_id in inventory):\r\n current_room[\"items\"][item_id] = inventory[item_id]\r\n del inventory[item_id]\r\n wrap_print(\"You dropped \" + items[item_id][\"name\"] + \".\")\r\n global valid_move\r\n valid_move = True\r\n else:\r\n wrap_print(\"You cannot drop that.\")", "def test_remove_item_test_remove_single_item():\n sc.menu = sc.default_menu\n # 
sc.current.add_item('Coffee', 1)\n sc.current.remove_item('Coffee', 1)\n assert sc.current.receipt == {'Coffee': 4, 'Tea': 1, 'subtotal': 6.36}", "def remove (self, item):\n pass", "def drop(self, item: Item):\n self.items.remove(item)\n item.place(self.parent.x, self.parent.y, self.gamemap)\n\n self.engine.message_log.add_message(f'You yeeted the {item.name}.')", "def get(self, item_name):\n for item in self._inventory:\n if item.name == item_name or item_name in item.synonyms:\n return item\n\n return None", "def take(self, item):\n if not item.inventory:\n self.echo(self.text(\"TAKE_NOT_INVENTORY\", item=item.name))\n self.echo(self.text(\"TAKE_NOT_INVENTORY\", item=item.name))\n return\n\n if item.owner is not None and item.owner.locked:\n self.echo(self.text(\"TAKE_IN_LOCKED_CONTAINER\", item=item.name,\n container=item.owner.name))\n return\n\n if item in self._inventory:\n self.echo(self.text(\"ALREADY_TAKEN\", item=item.name))\n return\n\n # if the item is in the container, open the container and\n # take the item out of it\n if item.owner is not None:\n if not item.owner.opened:\n item.owner.open()\n\n item.owner.remove(item)\n\n # the item is \"roomless\" when it is in the inventory\n self._insert(item)\n self.echo(self.text(\"TAKE\", item=item.name))\n self.on_take_item.trigger()", "def remove_from_inv(self, item):\n assert item in self.inv\n\n if self.inv[item] > 1:\n self.inv[item] -= 1\n else:\n return self.inv.pop(item)", "def remove_item_in_storage(life, item_uid):\n\tif 'stored_in' in items.get_item_from_uid(item_uid):\n\t\titems.remove_item_from_any_storage(item_uid)\n\telse:\n\t\tprint 'incorrect: item not stored'\n\t\n\t#for _container in [items.get_item_from_uid(_container) for _container in life['inventory']]:\n\t#\tif not 'max_capacity' in _container:\n\t#\t\tcontinue\n\t#\n\t#\tif id in _container['storing']:\n\t#\t\t_container['storing'].remove(id)\n\t#\t\t_container['capacity'] -= get_inventory_item(life,id)['size']\n\t#\t\tlogging.debug('Removed item #%s from %s' % (id,_container['name']))\n\t#\t\t\n\t#\t\tupdate_container_capacity(_container['uid'])\n\t#\t\treturn _container\n\t\n\treturn False", "async def remove(self, ctx, name: str, items: str):\n server = ctx.message.server\n items = items.split(\", \")\n itemis = dict()\n for item in items:\n item, value = item.split(\" \")\n item = item.replace(\"_\", \" \").lower()\n itemis[item] = value\n if server.id not in self.db:\n self.db[server.id] = {}\n if name not in self.db[server.id]:\n await self.bot.say(\"Box doesn't exist, please make sure the spelling is correct and\"\n \" that it's found in [p]box list\")\n return\n for item in itemis:\n value = itemis[item]\n print(item)\n if item in self.db[server.id][name][\"content\"]:\n del itemis[item]\n continue\n else:\n self.db[server.id][name][\"content\"][item] = value\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n await self.bot.say(\"{} box's has added the following items:\\n{}\".format(name, \"\\n\".join(list(itemis))))", "def discard(self, item):\n if item in self._inventory:\n self._erase(item)\n self.on_discard_item.trigger()\n self.echo(self.text(\"DISCARD\", item=item.name))\n else:\n self.echo(self.text(\"DISCARD\", item=item.name))", "def remove_item(self, item_id):\n self.items.pop(item_id)", "def itemById(self, itemId):\n itemType = \"\".join([i for i in itemId if not i.isdigit()])\n if itemType not in self.__inventory__:\n return None\n for item in self.__inventory__[itemType]:\n if item.id == itemId:\n return item\n return None", 
"def hfp_firmware_pack_item_remove(handle, org_dn, hfp_name, hw_vendor,\r\n hw_model, type):\r\n\r\n hfp_dn = org_dn + \"/fw-host-pack-\" + hfp_name\r\n dn = hfp_dn + \"/pack-image-\" + hw_vendor + \"|\" + hw_model + \"|\" + type\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"FirmwarePackItem '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n return mo", "def getStockByName(self, item : str) -> bbInventory.bbInventory:\n if item == \"all\" or item not in bbConfig.validItemNames:\n raise ValueError(\"Invalid item type: \" + item)\n if item == \"ship\":\n return self.shipsStock\n if item == \"weapon\":\n return self.weaponsStock\n if item == \"module\":\n return self.modulesStock\n if item == \"turret\":\n return self.turretsStock\n else:\n raise NotImplementedError(\"Valid, but unrecognised item type: \" + item)", "def get(self, item_name):\n if isinstance(item_name, BaseItem):\n return item_name\n return self.all_items.get(item_name)", "def delete(self, item_name, item_id, flag_purge=False):\n item = {}\n try:\n item = self.glpi.delete(item_name, item_id, force_purge=flag_purge)\n except Exception as e:\n item = \"{ \\\"error_message\\\": \\\"%s\\\" }\" % e\n\n return item", "def delete_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to delete the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = DeleteItemForm()\n\n # If the form is submitted, delete the item from the database,\n # send a flash message, and redirect home\n if form.validate_on_submit():\n db.session.delete(item)\n db.session.commit()\n flash(f'\"{item.name}\" has been deleted.', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('delete_item.html', item=item, form=form)", "def __delitem__(self, userid):\r\n self.removePlayer(userid)", "def DelVid(self):\n delvid=input(\"Enter title to remove \")\n \n #Avoid termination on key error if value not in dictionary\n try:\n self.videos.pop(delvid)\n except KeyError:\n print(\"Item not in the inventory\")", "def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" \".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def delete_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n if item.owner != current_user:\n flash(\"Failed to delete item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = DeleteForm()\n if form.validate_on_submit():\n try:\n db.session.delete(item)\n db.session.commit()\n except:\n flash((\"Failed to delete item \\\"%s\\\".\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been deleted.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n return render_template('delete.html', form=form, name=item_name)", "def get_menu_item(menu_item_name):\n\n pass", "def remove_from_bag(request, item_id):\n\n try:\n product = get_object_or_404(Product, pk=item_id)\n bag = request.session.get('bag', {})\n bag.pop(item_id)\n messages.success(request, f'Removed {product.name} from your bag!')\n\n 
request.session['bag'] = bag\n return HttpResponse(status=200)\n\n except Exception as e:\n messages.error(request, f'Error removing item: {e}.')\n return HttpResponse(status=500)", "def remove(self, name):\n raise NotImplementedError", "def remove_menu_item(menu_item_name, parent_menu):\n\n pass", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print product + \" removed.\"\n else:\n print product + \" is not in the cart.\"", "def remove_item(self, uuid):\n super(ListTile, self).remove_item(uuid) # check permission\n data_mgr = ITileDataManager(self)\n old_data = data_mgr.get()\n uuids = data_mgr.get()[\"uuids\"]\n if uuid in uuids.keys():\n del uuids[uuid]\n old_data[\"uuids\"] = uuids\n data_mgr.set(old_data)", "def remove_item(self, uuid):\n super(AssignMultipleItemsMixin, self).remove_item(uuid)\n data_mgr = ITileDataManager(self)\n old_data = data_mgr.get()\n uuids = data_mgr.get()['uuids']\n if uuid in uuids.keys():\n del uuids[uuid]\n old_data['uuids'] = uuids\n data_mgr.set(old_data)", "def test_remove_item_test_proper_input():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 1)\n assert sc.current.receipt == {'Coffee': 5, 'Tea': 1, 'subtotal': 7.95}", "def remove(self, ref: str) -> None:\n try:\n self.items.get(ref=ref).delete()\n self._cached_items = None\n except BasketItem.DoesNotExist:\n pass", "def remove_mix(self, name: str) -> None:\n self.remove(name)", "def get_item_inventory(self, item):\n return [item_data for item_data in self.inventory if item_data['item_name'] == item]", "def get_el_by_name(items: List[Dict[str, Any]], name: str) -> Dict[str, Any]:\n for item in items:\n if item[\"name\"] == name:\n return item\n print(\"error, key name not found by value\", name, \"in list: \", items)\n sys.exit(1)", "def items_contains_name(items, name):\n ret = 0\n # Loops all items and saves the searched one\n for x in range(len(items)):\n if items[x]['name'] == name:\n ret = x\n return ret", "def remove_from_bag(request, item_id):\n\n try:\n product = get_object_or_404(Product, pk=item_id)\n\n # get products\n select = None\n if 'product_select' in request.POST:\n select = request.POST['product_select']\n\n current_bag = request.session.get('current_bag', {})\n\n del current_bag[item_id]['items_by_select'][select]\n if not current_bag[item_id]['items_by_select']:\n current_bag.pop(item_id)\n messages.success(request, f'{product.name} removed from bag')\n\n request.session['current_bag'] = current_bag\n return HttpResponse(status=200)\n\n except Exception as e:\n messages.error(request, f'Error removing item: {e}')\n return HttpResponse(status=500)", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "async def _e_remove(self, ctx, name):\n if self.database.get_guild_event(ctx.guild.id, name) is None:\n await ctx.send(\"That event doesn't exist, sorry.\")\n return\n event = sql.GuildEvent((ctx.guild.id, name, None, None, None, None))\n self.database.remove_item(event, True)\n await ctx.send(f\"Event {name} successfully removed\")", "def remove_quantity(shared):\n\n not_done = True\n while not_done:\n input_string = input('Enter name of quantity to remove '\n '[leave blank to cancel]: ').strip()\n if not input_string:\n return\n\n for i, fm in enumerate(shared.field_mappings):\n if fm.title == input_string:\n if fm.extra is not None:\n not_done = False\n slot = i\n break\n else:\n print (' >> Cannot remove 
datafile quantity!')\n return\n else:\n print(' >> Unknown quantity!')\n return\n\n shared.config.remove_option('extra', input_string)\n del shared.field_mappings[slot]", "def delete_item(item_name, catagory_name):\n try:\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n except NoResultFound:\n abort(404)\n item.delete()\n return redirect(url_for('home'))", "def __delitem__(self, key: Union[Any, int]) -> None:\n if isinstance(key, int):\n del self.contents[key]\n else:\n self.contents = [c for c in self.contents \n if denovo.unit.get_name(c) != key]\n return", "def dequeueItem(itemTypeStr):\n if itemTypeStr not in AgentInventory.__idQueue__:\n return\n if len(AgentInventory.__idQueue__[itemTypeStr]) <= 0:\n return\n AgentInventory.__idQueue__[itemTypeStr].pop(0)", "async def remove(self, ctx, cat: str, *, item: str):\n c = check_category(cat)\n if not c[0]:\n await ctx.send(\"`{}` isn't a category you can add things to.\".format(cat))\n return\n else:\n data = c[1]\n datafile = c[2]\n cat = c[3]\n \n user = ctx.author.name + \"#\" + ctx.author.discriminator\n if user not in data:\n await ctx.send(\"You haven't added anything to your {} list.\".format(cat))\n else:\n things = [x.strip() for x in item.split(',')]\n success = \"\"\n failure = \"\"\n for thing in things:\n try:\n data[user].remove(thing.title())\n success += \"`{}` was removed from your {} list.\\n\".format(thing.title(), cat)\n except:\n failure += \"You don't have a `{}` in your {} list.\\n\".format(thing.title(), cat)\n try:\n await ctx.send(success)\n except:\n pass\n try:\n await ctx.send(failure)\n except:\n pass\n pickle.dump(data, open(datafile, \"wb\"))", "def test_remove_item_test_remove_multiple_item():\n sc.menu = sc.default_menu\n sc.current.add_item('Fries', 3)\n sc.current.add_item('Steak', 1)\n sc.current.remove_item('Fries', 2)\n sc.current.receipt == {'subtotal': 3.28, 'Fries': 1, 'Steak': 1}", "def equip(self, command):\n\n if len(command) > 1:\n if not self.weapon:\n for item in self.inventory:\n if item.name == command[1]:\n if command[1] == 'knife' or command[1] == 'rock' or command[1] == 'stick' or command[1] == 'lamp':\n self.inventory.remove(item)\n self.weapon.append(item)\n print(\"You equipped a \" + item.name)\n return\n else:\n print(\"You can't equip that\")\n else:\n print('You cannot equip two items \\nYou must unequip the ' + self.weapon[0].name + ' first.')\n else:\n print(\"Equip what?\")", "def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")", "def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid of an item to add the {} to your inventory?\".format(newItem.name))\n\n if 'yes' in choice:\n dropping = player_inventory.drop()\n print(dedent('Okay, {} was removed from your inventory.'.format(item_name)))\n\n elif 'no' in choice:\n print(dedent('Okay redirecting you back to shop.'))\n return False\n\n else:\n print(dedent('Seems like you did not make a valid choice, aborting ...'))\n return False\n\n else:\n\n if newItem.type == \"food\":\n self.player_inventory[newItem.name] = newItem.health_addition\n elif newItem.type == \"weapon\":\n self.player_inventory[newItem.name] = newItem.quality\n\n print(dedent(\"\"\"\n ##############################################\n Nice, the {} has been added to your inventory!\n 
\"\"\".format(newItem.name)))", "def remove_item(self,itmkey):\n itms = self.get_items_list()\n if itmkey in itms:\n itm = itms[itmkey]\n z = itm['z']\n del itms[itmkey]\n for k,t in itms.items():\n if t['z'] > z:\n t['z'] -= 1\n self.put_items_list(itms)\n self.items -= 1\n return itm['id']\n return None", "def remove_item_page(request):\n validate(instance=request.body, schema=item_schema_remove)\n body = json.loads(request.body)\n Item.remove_item(body['item_id'])\n return HttpResponse('success')", "async def unequip(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n item = ' '.join(args)\n out = ch.unequip_item(ctx.user_object, item.lower())\n await ctx.send(out)", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def del_item(self, item):\n index = self.board[item.pos[0]][item.pos[1]].index(item)\n del self.board[item.pos[0]][item.pos[1]][index]" ]
[ "0.8047145", "0.76232535", "0.7482074", "0.7447906", "0.7443081", "0.7161624", "0.70240676", "0.70180637", "0.6955937", "0.6908558", "0.67728066", "0.6741749", "0.6689494", "0.6678281", "0.6673457", "0.6673457", "0.6643998", "0.6608023", "0.65964246", "0.64536357", "0.6442856", "0.6398812", "0.62600225", "0.62509954", "0.624436", "0.6225526", "0.6224961", "0.62208945", "0.6180342", "0.612896", "0.6074304", "0.6062379", "0.6061759", "0.6020282", "0.60161835", "0.59872115", "0.59820884", "0.5976714", "0.5956503", "0.5953079", "0.591781", "0.5915101", "0.5895824", "0.5883446", "0.58714235", "0.5863716", "0.58549213", "0.58463866", "0.58356184", "0.5824421", "0.5797581", "0.57939744", "0.57925516", "0.57920545", "0.5773104", "0.57721084", "0.57699203", "0.576631", "0.57545733", "0.57456636", "0.57431364", "0.57228744", "0.57224154", "0.57198036", "0.5702314", "0.5696013", "0.5686379", "0.56827223", "0.5678508", "0.5669162", "0.5662163", "0.5656985", "0.56550527", "0.56472343", "0.563876", "0.56156576", "0.56114507", "0.5607999", "0.5602402", "0.55952936", "0.5591427", "0.5588886", "0.5580501", "0.5576453", "0.5570877", "0.5568412", "0.5559845", "0.5558185", "0.55557454", "0.5554844", "0.5553536", "0.55530214", "0.5551343", "0.5537961", "0.55369264", "0.5534458", "0.55343276", "0.5534192", "0.5530095", "0.5527217", "0.55253536" ]
0.0
-1
Retrieve one or more instances of OSLicence with the given key or criteria
def find(key):
    return ItopapiPrototype.find(ItopapiOSLicence, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find(self, key, condition) -> list:\n pass", "def get_objs_with_key_and_typeclass(self, oname, otypeclass_path):\n return self.filter(db_key__iexact=oname).filter(db_typeclass_path__exact=otypeclass_path)", "def getSpecific(self, keyword, key):", "def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()", "def find(cls, key):\r\n return cls.query().get(key)", "def get_assocs(**kwargs):\n if kwargs[\"type\"] == \"first\":\n assoc = Association.query.filter(Association.level >= kwargs[\"level\"],\n Association.users_id == kwargs[\"users_id\"],\n Association.skill_id == kwargs[\"skill_id\"]).first()\n else:\n assoc = Association.query.filter_by(users_id=kwargs[\"users_id\"]).all(\n )\n\n return assoc", "def __getitem__(self, key):\n return self.query(key)", "def __getitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).one()", "def __getitem__(self, (essid, key)):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PYR2_DBObject).join(ESSID_DBObject)\n result = q.filter(sql.and_(ESSID_DBObject.essid == essid, \\\n PYR2_DBObject.key == key)).first()\n if result is None:\n raise KeyError(\"No result for ESSID:Key-combination \" \\\n \"(%s:%s).\" % (essid, key))\n else:\n return result", "def find(key):\n return ItopapiPrototype.find(ItopapiIncident, key)", "def __getitem__(self, key):\n for sen in self.__s:\n if sen.name == key or sen.key == key:\n return sen\n raise KeyError(key)", "def lookup(self, key):", "def _extract_lookup(self, key):\n parts = key.rsplit(\"__\", 1)\n\n if len(parts) > 1 and parts[1] in operators:\n op = parts[1]\n attribute = parts[0]\n else:\n # 'exact' is the default lookup if there was no explicit comparison op in `key`\n op = \"exact\"\n attribute = key\n\n # Construct and assign the lookup class as a filter criteria\n return attribute, self.get_lookup(op)", "def search(cls, **kwargs):\n key = [key for key in kwargs][0]\n objects = cls.get_all()\n if isinstance(objects, dict):\n return objects\n results = []\n for i in objects:\n if is_substring(kwargs[key], getattr(i, key)):\n results.append(i)\n if not results:\n return {\n \"message\": \"No objects match the searched value.\",\n \"help\": \"Ensure arguments are of existent objects.\"\n }\n return results", "def get_criteria(self):\n\n\t\treturn self.__criteria", "def get_criterions(self, **kwargs):\n return self.get('criterions.json', **kwargs)", "def find_all_advanced(cls, keydict):\n return cls.dbm().modelclass_find_all_advanced(cls, keydict)", "def fetch(cls, key):\n return cls(_key=key, **(cls._dbag[key]))", "def search(self, key, headers=Headers()):", "def test_search_attrsonly(self):\n obj = self.conn.search(self.basedn, 2, \"(objectclass=person)\",\n ['cn'], attrsonly=True)[0]\n self.assertIsNotNone(obj)\n self.assertListEqual(obj['cn'], [])", "def get_spices(self, key):\n spices = []\n if key == 'disease':\n spices.append('(disease|symptom|sign)')\n elif key == 'symptom':\n spices.append('(signs|symptoms)')\n elif key == 'treatment':\n spices.append('(treatment|medicine|operation)')\n return spices", "def __getitem__(self, key):\n responses, resolution_map = self._data_dict.__getitem__(key)\n return (self.FilteredResponses(responses, self._path),\n self.FilteredResolution(resolution_map, self._path))", "def find_all_bykey(cls, keydict):\n return cls.dbm().modelclass_find_all_bykey(cls, keydict)", "def get(self, **args ):\n # Make sure its a valid 
argument\n for key in args.keys():\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n query = STD.select('*')\n query = query.where( args )\n item = query.list()\n\n # If a list return make sure there is only one item\n if isinstance(item, collections.Iterable):\n if len(item) > 1:\n raise NotUnique(\"More than one items found\")\n if len(item) == 0:\n print \"No items found\"\n return None\n else:\n item = item[0]\n return item", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def find_one(self, criteria):\n return self.connection.find_one(criteria)", "def _do_get_persons(args):\n u_context = UserContext(user_session, current_user, request)\n if args.get(\"pg\") == \"search\":\n # No scope\n u_context.set_scope_from_request()\n if args.get(\"rule\", \"start\") == \"start\" or args.get(\"key\", \"\") == \"\":\n return {\"rule\": \"start\", \"status\": Status.NOT_STARTED}, u_context\n else: # pg:'all'\n u_context.set_scope_from_request(request, \"person_scope\")\n args[\"rule\"] = \"all\"\n u_context.count = request.args.get(\"c\", 100, type=int)\n\n with PersonReaderTx(\"read_tx\", u_context) as service:\n res = service.get_person_search(args)\n\n return res, u_context", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def __getitem__(self,key):\n # Using [key] syntax on an equipment allows to retrieve a tag directly\n # or a point referred to this particular equipment\n for each in self.tags:\n if key == each:\n return self.tags[key]\n # if key not found in tags... we probably are searching a point\n # self will call __iter__ which will look for points in equipment\n for point in self:\n #partial_results = []\n # Given an ID.... should return the point with this ID\n if key.replace('@','') == str(point.id).replace('@',''):\n return point\n # Given a dis or navName... 
should return equip\n if 'dis' in each.tags:\n if key == each.tags['dis']:\n return each\n if 'navName' in each.tags:\n if key == each.tags['navName']:\n return each\n if 'navNameFormat' in each.tags:\n if key == each.tags['navNameFormat']:\n return each\n else: \n try:\n # Maybe key is a filter_expr\n request = self.find_entity(key)\n return request.result\n except HaystackError as e:\n self._session._log.warning('{} not found'.format(key))", "def get_by(cls, name, value, keys_only=None):\n return cls.query(getattr(cls, name) == value).get(keys_only=keys_only)", "def _fetch_journal_and_issue_data(self, **criteria):\n found_journal_issues = self._scieloapi.issues.filter(\n limit=1, **criteria)\n return self._scieloapi.fetch_relations(self._sapi_tools.get_one(found_journal_issues))", "def _lookup(self, key):\n\n if key in self.position and key in self.info:\n # If the key exists in both position and info, treat it as a list to intersect.\n return self._skill_list(key)\n if key in self.position:\n return self.position[key]\n if key in self.info:\n return self.info[key]\n\n raise KeyError(f\"Invalid Key: {key}\")", "def get(cls, args=None, user=None, session=None):\n if session is None:\n session = db.session\n try:\n instance = session.query(cls).filter_by(key=args, user=user['id']).one()\n except NoResultFound:\n instance = None\n return instance", "def load_criterias():\r\n l = [ (p.id, p.name) for p in StockProperty.objects.all() ]\r\n l.insert(0, ('', 'Select to add criteria ...'))\r\n return l", "def get_sensor(name):\n for sen in SENSORS:\n if sen.name == name or sen.key == name:\n return sen\n return None", "def getResourcesByEntitytype(entitytype, srcentty):\n # Distinction is implemented by python set\n cursor.execute(\n '''SELECT r1.value FROM resource as r1\n JOIN resource as r2 ON r1.content_id = r2.content_id\n JOIN entitytype as e1 ON r1.entitytype_id = e1.id\n JOIN entitytype as e2 ON r2.entitytype_id = e2.id\n JOIN content ON r1.content_id = content.id\n WHERE e1.name = %s\n AND e2.name = %s\n AND in_dump = True\n ''',\n (entitytype, srcentty,)\n )\n return {c['value'] for c in cursor}", "async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]", "def get(self, key, missing=None, deferred=None,\n transaction=None, eventual=False, retry=None, timeout=None, read_time=None, model_type=None):\n start = datetime.datetime.now()\n entities = self.get_multi(keys=[key],\n missing=missing,\n deferred=deferred,\n transaction=transaction,\n eventual=eventual,\n retry=retry,\n timeout=timeout,\n read_time=read_time,\n model_type=model_type)\n if entities:\n end = datetime.datetime.now()\n print('Time taken for get {}'.format(end - start))\n return entities[0]", "def get_entity_by_key(cls, key):\n db_key = \"entity:\" + str(key)\n result = cls.db.hgetall(db_key)\n return (Entity.build(result) if type(result) is dict else None)", "def get(self, key: Any, **kwargs) -> Iterable:\n return self.store.get(key, **kwargs)", "def lookup_rows(self, key, values, fields=None):\n\n s3db = current.s3db\n atable = s3db.project_activity\n aotable = s3db.project_activity_organisation\n\n left = aotable.on((aotable.activity_id == atable.id) & \\\n (aotable.role == 1))\n\n qty = len(values)\n if qty == 1:\n query = (atable.id == values[0])\n limitby = (0, 1)\n else:\n query = (atable.id.belongs(values))\n limitby = (0, qty)\n\n rows = current.db(query).select(atable.id,\n atable.name,\n aotable.organisation_id,\n left = left,\n limitby 
= limitby,\n )\n self.queries += 1\n return rows", "def read_songs_by_criteria(expression):\n logging.debug('{CRUD_operations} BEGIN function read_songs_by_criteria()')\n logging.debug('{CRUD_operations} Data received: expression: %s', expression)\n looking_for = '%{0}%'.format(expression)\n songs = Song.query.filter(or_(\n Song.title.ilike(looking_for),\n Song.artist.ilike(looking_for),\n Song.album.ilike(looking_for),\n Song.release_year.ilike(looking_for)\n ))\n logging.debug('{CRUD_operations} END function read_songs_by_criteria()')\n return songs", "def __getitem__(self, key):\n return self.graph.readExtendedAttributes(self.entityId, key)[0]", "def get_objs_with_key_or_alias(self, ostring, location, exact=False): \n lstring_key, lstring_alias, estring = \"\", \"\", \"icontains\"\n if location:\n lstring_key = \", db_location=location\"\n lstring_alias = \", db_obj__db_location=location\"\n if exact:\n estring = \"__iexact\"\n else:\n estring = \"__istartswith\"\n matches = eval(\"self.filter(db_key%s=ostring%s)\" % (estring, lstring_key))\n if not matches:\n alias_matches = eval(\"self.model.alias_set.related.model.objects.filter(db_key%s=ostring%s)\" % (estring, lstring_alias))\n matches = [alias.db_obj for alias in alias_matches]\n return matches", "def find(self, key):\n visitor = VisitorFind()\n\n self.visit(key, visitor)\n\n return visitor.result", "def __getitem__(self, key) :\n answer = []\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n answer.extend(attrvalue)\n if answer :\n return answer\n raise KeyError, key", "def __getitem__(self, key):\n return self.get_models()[str(key)]", "def get(cls, **kwargs):\n c = None\n for x in kwargs:\n if c:\n c = coll.Intersection(\n c,\n coll.Match(field=x, value=force_unicode(kwargs[x])) # xmmsclient expects both args to be str or unicode\n )\n else:\n c = coll.Match(field=x, value=force_unicode(kwargs[x]))\n result = cls.client.coll_query(['id', 'artist', 'title'], c)\n num = len(result)\n if num == 1:\n return cls.get_from_dict(result[0])\n if not num:\n raise DoesNotExist(\"Song matching query does not exist.\")\n raise MultipleObjectsReturned(\"get() returned more than one Items! 
Lookup parameters were %s\" % kwargs)", "def searchForObjectDetails(self, user, extension, attribute, fltr, attributes, skip_values):\n\n # Extract the the required information about the object\n # relation out of the BackendParameters for the given extension.\n of = ObjectFactory.getInstance()\n be_data = of.getObjectBackendParameters(extension, attribute)\n if not be_data:\n raise GOsaException(C.make_error(\"BACKEND_PARAMETER_MISSING\", extension=extension, attribute=attribute))\n\n # Collection basic information\n otype, oattr, foreignMatchAttr, matchAttr = be_data[attribute] #@UnusedVariable\n\n # Create a list of attributes that will be requested\n if oattr not in attributes:\n attributes.append(oattr)\n attrs = dict([(x, 1) for x in attributes])\n if not \"dn\" in attrs:\n attrs.update({'dn': 1})\n\n # Start the query and brind the result in a usable form\n index = PluginRegistry.getInstance(\"ObjectIndex\")\n res = index.search({\n 'or_': {'_type': otype, '_extensions': otype},\n oattr: '%{}%'.format(fltr) if len(fltr) > 0 else '%'\n }, attrs)\n result = []\n\n # Do we have read permissions for the requested attribute\n env = Environment.getInstance()\n topic = \"%s.objects.%s\" % (env.domain, otype)\n aclresolver = PluginRegistry.getInstance(\"ACLResolver\")\n\n for entry in res:\n\n if not aclresolver.check(user, topic, \"s\", base=entry['dn']):\n continue\n\n item = {}\n for attr in attributes:\n if attr in entry and len(entry[attr]):\n item[attr] = entry[attr] if attr == \"dn\" else entry[attr][0]\n else:\n item[attr] = \"\"\n item['__identifier__'] = item[oattr]\n\n # Skip values that are in the skip list\n if skip_values and item['__identifier__'] in skip_values:\n continue\n\n result.append(item)\n\n return result", "def get(cls, **kwargs):\n # kwergs = map(lambda key, value: f\"{key}={value}\", kwargs.items())\n return cls.query.filter_by(\n **kwargs\n ).one_or_none()", "def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None", "def search(isamAppliance, name, force=False, check_mode=False):\n ret_obj = get_all(isamAppliance)\n return_obj = isamAppliance.create_return_object()\n\n for obj in ret_obj['data']:\n if obj['name'] == name:\n logger.info(\"Found STS Chain {0} id: {1}\".format(name, obj['id']))\n return_obj['data'] = obj['id']\n return_obj['rc'] = 0\n\n return return_obj", "def get_course_index(self, key, ignore_case=False):\r\n case_regex = r\"(?i)^{}$\" if ignore_case else r\"{}\"\r\n return self.course_index.find_one(\r\n son.SON([\r\n (key_attr, re.compile(case_regex.format(getattr(key, key_attr))))\r\n for key_attr in ('org', 'offering')\r\n ])\r\n )", "def test_data_source_soaps_find_one_get(self):\n pass", "def search_from_igraph(self, key):\n results =self.graph.vs.select(name_in=key)\n return json.dumps([{\n 'name': r[\"name\"],\n 'size': r[\"size\"],\n 'parent': r[\"parent\"],\n 'last_accessed': r[\"last_accessed\"],\n 'last_modified': r[\"last_modified\"]} for r in results])", "def _query_by_dict(self, opt: dict):\n return self._collect.find(self._get_query(opt))", "def read(cls, key, mode = FetchMode.All):\n assert isinstance(key, (basestring, Key))\n namespace, kind, member = Schema.Get(cls)\n if isinstance(key, Key):\n assert kind == key.kind, \"Mismatched Model, reading a %s with %s\" % (kind, key.kind)\n return Lisa.read(key, mode)\n else: \n key = Key(namespace, kind, key)\n return Lisa.read(key, mode)", "def search(self, *args, **kwargs):\n # comparison = f\"__{kwargs.get('comparison')}__\" if 
kwargs.get('comparison') else '__eq__'\n comparison = '__{comparison}__'.format(comparison=kwargs.get('comparison')) if kwargs.get('comparison') else '__eq__'\n try:\n key, value = args[0], args[1]\n except IndexError:\n for key in kwargs.keys():\n if '__' in key:\n # comparison = f'__{key.split(\"__\")[1]}__'\n comparison = '__{comparison}__'.format(comparison=key.split(\"__\")[1])\n key, value = key.split(\"__\")[0], kwargs[key]\n return SearchableList(list(filter(lambda x: try_compare(x, key, comparison, value), self)))", "def get_candidates(data):\n return data.groups[\"Candidates\"].objects", "def get_by_id(key):\n key = KeyModel(key)\n key.validate()\n return {\"text\": repository.get_doc(COLLECTION_NAME, key.to_dict())}", "def get(cls, approach):\n raise UnsupportedCriterionError", "def get(self, key):\n result = self.search({\n \"field\": \"identity.key\",\n \"operator\": \"=\",\n \"value\": key})\n if len(result) > 1:\n raise SarasvatiException(\"Entity is not unique {}\".format(key))\n return result[0] if len(result) > 0 else None", "def make_athlete_criteria(team_criteria):\n athlete_criteria = defaultdict(list)\n\n for key, attributes in team_criteria.items():\n for attribute in attributes:\n athlete_criteria[key].append(attribute)\n return athlete_criteria", "def search(self, *args, **keys):\n if not self.service:\n raise dalq.DALServiceError(\n \"resource, {}, is not a searchable service\".format(\n self.short_name))\n\n return self.service.search(*args, **keys)", "def get(cls, **kwargs) -> Dict:\n return cls.where(**kwargs).first()", "def _query(self, p, k):\n raise NotImplementedError(\"This method must be implemented by the subclass\")", "def get_by(cls, name, value):\n return cls.query(getattr(cls, name) == value).get()", "def get_objs_with_attr_match(self, attribute_name, attribute_value, location=None, exact=False): \n from src.objects.models import ObjAttribute\n lstring = \"\"\n if location:\n lstring = \", db_obj__db_location=location\" \n attrs = eval(\"ObjAttribute.objects.filter(db_key=attribute_name%s)\" % lstring)\n if exact: \n return [attr.obj for attr in attrs if attribute_value == attr.value]\n else:\n return [attr.obj for attr in attrs if utils.to_unicode(attribute_value) in str(attr.value)]", "def search(self, *args, **keys):\n service = self.to_service()\n if not service:\n raise RuntimeError(\"resource, %s, is not a searchable service\" % self.shortname)\n\n return service.search(*args, **keys)", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def _fetch_journal_data(self, criteria):\n found_journal = self._scieloapi.journals.filter(\n limit=1, **criteria)\n return self._sapi_tools.get_one(found_journal)", "def find(self, line):\n return self._extract_by_key(line, self._attr_key)", "def test_get_risk_profile_all_using_get(self):\n pass", "def criteria(self) -> Optional[Sequence['outputs.MetadataDependenciesResponse']]:\n return pulumi.get(self, \"criteria\")", "def get_orphans(self, course_key):\r\n # here just to quell the abstractmethod. 
someone could write the impl if needed\r\n raise NotImplementedError", "def find(self, **kwargs):\n return super(LootsTable, self).records('loots', **kwargs)", "def search_stix21_objects(rel_list, object_name, rel_type='any') -> list:\n searched_rel_list = list()\n for relationship in rel_list:\n if relationship[3] == rel_type or rel_type == 'any':\n if relationship[0] == object_name and relationship[0] == relationship[2]:\n searched_rel_list.append(relationship)\n else:\n for position in range(len(relationship)):\n if relationship[position] == object_name:\n searched_rel_list.append(relationship)\n return searched_rel_list", "def get_object(cls, user_name, s_key):\n return Setting.all().ancestor(get_user(user_name)).filter('s_key =', s_key).get()", "def fetch_given_strain_position(strain, position):\n result = {}\n with database.make_connection() as connection:\n result = list(r.table(TABLE).filter({'StrainID': strain, 'Position': position}).run(connection))[0]\n print str(result['Position'])+\",\"+result['LocusTag']+\",\"+result['Product']+\",\"+result['Class']+\",\"+str(result['SubClass'])\n return result", "def find_all():\n return ItopapiPrototype.find_all(ItopapiOSLicence)", "def object_finder(\n self, datatable, objectlist, ralist, declist, radius,\n longquery=True\n ):\n pass", "def get(cls, session: Session = None, **kwargs):\n sess = next(cls.session()) if not session else session\n query = sess.query(cls)\n for key, val in kwargs.items():\n col = getattr(cls, key)\n query = query.filter(col == val)\n\n if query.count() > 1:\n raise Exception(\"Only one row is supposed to be returned, but got more than one.\")\n result = query.first()\n if not session:\n sess.close()\n return result", "def find(self, **kwargs):\n return self.__model__.query.filter_by(**kwargs)", "def searchRef(self, searchStr):\n filter = []\n attr = self.__listAttr()\n for name in attr:\n if searchStr.lower() in name.lower():\n doc = getattr(self, name)\n filter.append([name, doc]) \n # if in gloss, search for synonymes\n elif name in self.__glossIndex.keys():\n for altName in self.__glossIndex[name]['syn']:\n if searchStr in altName or altName in searchStr:\n doc = getattr(self, name)\n filter.append([name, doc])\n break\n \n return filter", "def _get_object(self, **kwargs):\n results = self.salesforce.salesforce_query(self.object_name, **kwargs)\n if len(results) == 0:\n human_friendly_args = \", \".join(\n [\"{}={}\".format(key, kwargs[key]) for key in kwargs]\n )\n raise Exception(\n \"no {} matches {}\".format(self.object_name, human_friendly_args)\n )\n elif len(results) > 1:\n raise Exception(\"Query returned {} objects\".format(len(results)))\n else:\n return results[0]", "def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)", "def getLookups(self, modelSetKey: str, coordSetKey: str,\n lookupTupleType: str) -> Deferred:", "def __getitem__(self, key):\n result = mongo['readable-api'].foo.find_one({\"foo\": key})\n if result:\n return self.make_child(key)\n return None", "def find(self, **kwargs):\n rl = self.findall(**kwargs)\n num = len(rl)\n\n if num == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(msg)\n elif num > 1:\n raise exceptions.NoUniqueMatch\n else:\n return self.get(rl[0].id)", "def search(self, key):\n return self.find_iterative(self.root,key)", "def check_criteria(self, criteria, case_control=False):\n\n if case_control:\n pts_meeting_criteria = {key : [] for key 
in ['case', 'control']}\n else:\n pts_meeting_criteria = []\n\n if len(criteria) == 0: # mostly for exclusion criteria.\n return np.array([])\n\n for name, criterion in criteria.items():\n print(name, criterion)\n feature_inds = self.find_feature(name)\n pts_meeting_criterion = self.search_by_chunk(self.dataset, feature_inds, criterion, case_control)\n \n if case_control:\n pts_meeting_criteria['case'].append(pts_meeting_criterion['case'])\n pts_meeting_criteria['control'].append(pts_meeting_criterion['control'])\n else:\n pts_meeting_criteria.append(pts_meeting_criterion)\n\n if case_control:\n return reduce(np.intersect1d, pts_meeting_criteria['case']), \\\n reduce(np.intersect1d, pts_meeting_criteria['control'])\n else:\n return reduce(np.intersect1d, pts_meeting_criteria)", "def getOntoSimilarity(ontology_id, key):\n # print('getOntoSimilarity() =>', ontology_id)\n url = cfg.ontology_sim + '/query'\n res = requests.post(url, json={'ontologyId': ontology_id, 'key': key})\n res_dictionary = res.json()\n return res_dictionary.get('map', {})", "def __getitem__(self, key):\n for db in self.db:\n if db.name == key:\n return db\n raise IndexError", "def get_by_id(self, model, key_name):\n return model.get_by_id(key_name)", "def attribute_get(self, serial, domain, keys=()):\n\n execute = self.execute\n if keys:\n marks = ','.join('?' for k in keys)\n q = (\"select key, value from attributes \"\n \"where key in (%s) and serial = ? and domain = ?\" % (marks,))\n execute(q, keys + (serial, domain))\n else:\n q = (\"select key, value from attributes where \"\n \"serial = ? and domain = ?\")\n execute(q, (serial, domain))\n return self.fetchall()", "def industryTickers(tickerSym):\n temp = Stock.query.filter_by(ticker=tickerSym).first()\n industry1 = temp.industry\n print industry1\n #querys the SQL database for all the tickers with that industry\n subq = Stock.query.filter_by(industry=industry1).distinct(Stock.ticker).all()\n outputlist=[]\n for i in subq:\n outputlist.append(str(i.ticker))\n return outputlist", "def get_attribute_dict(feature_class, key, attribute, where_clause=None):\n\tcursor = arcpy.da.SearchCursor(feature_class, [key, attribute], where_clause=where_clause)\n\n\toutput_dict = {}\n\n\tfor row in cursor:\n\t\toutput_dict[row[0]] = row[1]\n\n\treturn output_dict", "def _search(self, searchterm, pred, **args):\n # TODO: DRY with sparql_ontol_utils\n searchterm = searchterm.replace('%','.*')\n namedGraph = get_named_graph(self.handle)\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT ?c WHERE {{\n GRAPH <{g}> {{\n ?c {pred} ?l\n FILTER regex(?l,'{s}','i')\n }}\n }}\n \"\"\".format(pred=pred, s=searchterm, g=namedGraph)\n bindings = run_sparql(query)\n return [r['c']['value'] for r in bindings]", "def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]", "def find_own_attributes(cs):\n own_attributes = {}\n for con in cs:\n own_attributes[con] = []\n for attr in con.intent:\n own_attributes[con].append(attr)\n for sub_con in cs:\n if sub_con.intent < con.intent and\\\n attr in sub_con.intent:\n own_attributes[con].pop()\n break\n return own_attributes" ]
[ "0.58318734", "0.5569626", "0.55302393", "0.5516962", "0.5510297", "0.5305333", "0.5302881", "0.5301674", "0.52550256", "0.52309424", "0.517999", "0.51336336", "0.5116383", "0.5070057", "0.5061014", "0.5043086", "0.50324726", "0.5003754", "0.49889943", "0.49371323", "0.49354962", "0.49353358", "0.49284694", "0.49268", "0.49149066", "0.4907102", "0.49041083", "0.48766118", "0.48734948", "0.4802388", "0.47930908", "0.47927558", "0.4786787", "0.47815996", "0.47769856", "0.4763672", "0.47599924", "0.47411972", "0.47408763", "0.4721709", "0.47178036", "0.47166505", "0.47146988", "0.47101986", "0.47088018", "0.4706987", "0.47017735", "0.4692602", "0.46885332", "0.46756402", "0.46730462", "0.46674132", "0.46633813", "0.4663289", "0.46552032", "0.46512595", "0.4649808", "0.46381107", "0.4637782", "0.46285608", "0.46284354", "0.46254736", "0.46250027", "0.46155623", "0.46017915", "0.45990896", "0.45902655", "0.45902053", "0.4574568", "0.45638284", "0.45604044", "0.45602417", "0.45599476", "0.45597163", "0.45587134", "0.45568806", "0.4552178", "0.4548697", "0.45427796", "0.4539916", "0.45324594", "0.45276093", "0.4523722", "0.45187694", "0.45133996", "0.45080996", "0.45065925", "0.45065612", "0.45054567", "0.4504462", "0.4502641", "0.45019057", "0.4499285", "0.4496018", "0.4495702", "0.44924578", "0.4489592", "0.44857076", "0.4485562", "0.44852585" ]
0.54087424
5
Retrieve all instances of OSLicence
def find_all():
    return ItopapiPrototype.find_all(ItopapiOSLicence)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_soma_objects(self):\n\n msg_store = MessageStoreProxy(database=\"soma2data\", collection=\"soma2\")\n objs = msg_store.query(SOMA2Object._type, message_query={\"map_name\":self.soma_map,\"config\":self.soma_conf})\n print \"queried soma2 objects >> \", objs\n self.soma_objects = ce.get_soma_objects()\n print \"hard coded objects >> \", [self.soma_objects[r].keys() for r in self.soma_objects.keys()]", "def get_socios(self):\n return self.__socios", "def list_silos(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.SILOS_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )", "def get_scnlist_all(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n scns = list()\n if query_result is not None:\n for record in query_result:\n scns.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns", "def getobjsense(self): # 3\n res,resargs = self.__obj.getobjsense()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sense_return_value = resargs\n _sense_return_value = objsense(_sense_return_value)\n return _sense_return_value", "def listObjects(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Compose the SQL query to find all the orbits/SSM objects. 
We do this with \n # a simle query to the derivedobjects table since we realy only need the\n # ssm_id values.\n maxMJD = completedPrecoveryMaxDate(instance)\n if(maxMJD == None):\n return([], None)\n \n sql = 'select distinct(ssm_id) from derivedobjects where ssm_id is not null'\n sql += ' and status = \"I\"'\n # sql += ' and updated >= \"%s\"' %(minModifiedDate)\n # <-- end if\n \n nRes = cursor.execute(sql)\n return([x[0] for x in cursor.fetchall()], float(maxMJD))", "def list(self) -> List[Organisation]:\n ...", "def GetObjects(self): \r\n return self.model.GetObjects()", "def find_all():\n return ItopapiPrototype.find_all(ItopapiIncident)", "def objects(self):", "def iter_all(self):\n return self.opportunities.find()", "def ls():\n return dynamodb.ls(OrganizationModel)", "def __init__(self):\n self.incidents_models = {}\n self.risks = []\n self.incidents_models = None", "def get_all_elections(self) -> list:", "def find_all(self):\n pass", "def get_common_food(cls):\n objs = cls.objects\n return objs", "def list_instances(self):\n # list instances\n self._list_instances()", "def get_all(self, name):\n\t\tpass", "def get_instances(cls):\n raise NotImplementedError", "def get_soma_rois(self):\n soma_map = \"collect_data_map_cleaned\"\n # soma_config = \"test\"\n # query = {\"map\":soma_map, \"config\":soma_config}\n all_rois = []\n ret = self.soma_roi_store.query(SOMA2ROIObject._type)\n for (roi, meta) in ret:\n if roi.map_name != soma_map: continue\n if roi.geotype != \"Polygon\": continue\n all_rois.append(roi)\n return all_rois", "def list_circles(request):\n circles = Circle.objects.filter(is_public=True)\n serializers = CircleSerializer(circles, many=True)\n return Response(serializers.data)", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def get_scnlist_con2ard(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True,\n EDDSentinel1ASF.ARDProduct == False,\n EDDSentinel1ASF.Invalid == False).order_by(\n EDDSentinel1ASF.Acquisition_Date.asc()).all()\n\n scns2ard = list()\n if query_result is not None:\n for record in query_result:\n scns2ard.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns2ard", "def get_all_locations(self):", "def find_objs(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return nodes", "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def all(cls):\n return (cls(**client) for client in cls.collection().find({}))", "def all(self):\n return (self.__objects)", "def find_all(self):", "def list_sso(self):\n return self._json_object_field_to_list(\n self._get_sso_json(), self.__MISSION_STRING)", "def getAll(self, o_competition):\n rep = AbstractDAO._read(self, R_READALL, [o_competition.id])\n return self.__fetch_to_object(rep)", "def all(cls):\n api = BuslineAPI()\n try:\n objects = api.all()\n except 
ApiException:\n objects = cls.objects.all()\n return objects", "def list(self):\n return self.objects.all()", "def get_all_associations(self):\n return", "def odors(self, session):\n odors = session.query(Timepoint.odor).filter(\n Timepoint.id.between(self.start_timepoint_id, self.end_timepoint_id))\n return np.array(odors.all()).flatten()", "def _get_all_spectra(self):\n pass", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def getAllAPI():\n list_strain = StrainAPI().get_all()\n schema = StrainSchema()\n results = schema.load(list_strain, many=True)\n return results", "def find_all(self) -> List[Trade]:\n\n pass # pragma: no cover", "def objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def get_all_entities(self):\n return Artifact.get_all()", "def get_all_labs():\n return Lab.query.all()", "def getList(self):", "def getList(self):", "def get_all(cls):\n return db_session.query(cls).order_by(cls.name).all()", "def all(klass):\n return klass.find()", "def all(self):\n print('HELLO')\n return self.__model__.query.all()", "def list(self):", "def get_landkreise(session: Session):\n\n return session.query(models.Landkreis).all()", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "async def find_all_appliances(self):\n #await self.update_rooms()\n #await self.update_heaters()\n await self.update_domain_objects()", "def get_all_cur_site_insts():\n return models.Curation_SiteInstance.objects.all()", "def show_instances():\n return get_instances()", "def getCatalogs():", "def get(self):\r\n\t\treturn list(self)", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def all(self):\n return list(self)", "def all_rooms(self):\n pass", "def query_all():\n\tstudents = session.query(Student).all()\n\treturn students", "def all(self, cls=None):\n if cls:\n objects = self.__session.query(cls).all()\n else:\n classes = [State, City] # , User, Place, Review, Amenity]\n objects = []\n for c in classes:\n objects += self.__session.query(c)\n return {\"{}.{}\".format(type(obj).__name__, obj.id): obj for obj in\n objects}", "def fetch_all(self):\n return list(iter(self))", "def list(self):\n url = self._resource_name\n return self._get(url)", "def get_all_offices():\n return [vars(office) for office in OFFICES]", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results", "def 
get_list_of_ontologies(self):\n try:\n con = self.getOntologyDatabaseConnection()\n column_values = con.cursor()\n con.cursor().callproc('get_list_of_ontologies', [column_values])\n query_results=[]\n for row in column_values:\n if row[0] is None:\n continue\n query_results.append(row)\n return query_results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def _get_all_oshapes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n an_ishape = an_inode.oshapes['loc']\n \n return {'main' : an_ishape,\n 'loc' : an_ishape,\n 'cov' : an_ishape + [an_ishape[-1]]}", "def get_all(cls):\n if Model.data_connector:\n with Model.data_connector.u_lock:\n return Model.data_connector.get_all_objects(cls)\n \n return []", "def get(self):\n ue = UE.query.all()\n #print(ue)\n return ue", "def get(self):\r\n return get_all()", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def all():\n return QueryBuilder(Card).all()", "def get_all(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).all()\n self.lock.release()\n return result", "def list(self):\n return self._list(self._path())", "def do_all(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n objects = models.storage.all()\n if len(tokens) < 1:\n print([str(obj) for obj in objects.values()])\n else:\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n else:\n matches = []\n for obj in objects.values():\n if type(obj) is cls:\n matches.append(str(obj))\n print(matches)", "async def retrieve_all(cls) -> List[ExampleResource]:\n return await ExampleDAO.all()", "def fetch_all(cls):\n return cls.query.all()", "def get_instance_essentials(self):\n ret = []\n for instance in self.all_instances:\n ret.append(instance.get_essentials())\n return ret", "def _get_objects(self, cr, uid, name, args=[], ids=None): \n obj = self.pool.get(name)\n if not ids:\n ids = obj.search(cr, uid, args)\n return obj.browse(cr, uid, ids)", "def get_all(class_name):\n result = class_name.query.all()\n return result", "def getItems(self):\n for object in self.database:\n print(object)", "def get_objects(self):\n return self._objects", "def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Financial').order_by('objective')", "def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2", "def get_objects_from_attribute(self, attribute: str) -> List[TgnObject]:\n pass", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def objects(self):\n\t\treturn self._objects", "def powerline_all(osm_path): \n return retrieve(osm_path,'lines',['power', 'voltage'])", "def index_queryset(self, using=None):\n return self.get_model().objects.select_related('id_compound').all()", "def all(cls):\n return dbsession.query(cls).all()", "def all(cls):\n return dbsession.query(cls).all()", "def getobjsense(self):\n sense_ = ctypes.c_int32()\n res = __library__.MSK_XX_getobjsense(self.__nativep,ctypes.byref(sense_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n 
raise Error(rescode(res),msg)\n _sense_return_value = objsense(sense_.value)\n return (_sense_return_value)", "def hent_alle_sager(self, aktive=True) -> List[Sag]:\n return self.session.query(Sag).all()", "def getAll(self):\n # Get VT\n self.getVT()\n # Process VT data\n self.processVT()\n # Get reverse DNS\n self.getRDNS()\n # Get passivetotal\n self.getPT()\n # Get Geolocation\n self.getGeo()\n # Get Shodan\n self.getShodan()" ]
[ "0.6234363", "0.5961881", "0.595799", "0.59259844", "0.5838429", "0.57797927", "0.5702275", "0.57011425", "0.5666823", "0.5649265", "0.5632733", "0.56231284", "0.5603241", "0.54869276", "0.5477077", "0.5447893", "0.54415894", "0.5405049", "0.540362", "0.5400546", "0.5390554", "0.53836954", "0.53836954", "0.53836954", "0.53836954", "0.53836954", "0.53836954", "0.5383241", "0.5381411", "0.5374819", "0.5368326", "0.5356591", "0.53522646", "0.5321922", "0.53044784", "0.5292143", "0.52874005", "0.5266325", "0.5264121", "0.5260137", "0.52532053", "0.52519655", "0.5242192", "0.52404094", "0.5228516", "0.52281016", "0.52254015", "0.5200185", "0.5200185", "0.5193016", "0.51867086", "0.5182144", "0.5176356", "0.5171351", "0.51640135", "0.5152189", "0.51497597", "0.51428986", "0.51262486", "0.5124694", "0.51240116", "0.51239616", "0.5123777", "0.5114061", "0.5113808", "0.5107418", "0.5104935", "0.5097672", "0.5089406", "0.5088495", "0.50864655", "0.50848365", "0.5084689", "0.5082547", "0.5076718", "0.50635207", "0.50611365", "0.5059493", "0.50545526", "0.50526005", "0.5038696", "0.5037965", "0.5037131", "0.5030073", "0.5021671", "0.50157", "0.50151074", "0.5015023", "0.50136614", "0.50082546", "0.50079024", "0.5005526", "0.49941042", "0.499179", "0.49808693", "0.49766284", "0.49766284", "0.49754834", "0.4973231", "0.49719456" ]
0.60590714
1
Retrieve the ItopapiOSVersion corresponding to this server
def find_os_version(self):
    if self.osversion_id is not None:
        ItopapiPrototype.get_itop_class('OSVersion').find(self.osfamily_id)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)", "def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)", "def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")", "def get_host_os_version(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetHostOsVersion', self.handle)", "def GetVersion(self):\n try:\n return self.server.GetVersionString()\n except dbus.DBusException:\n return None", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def os_version(self) -> Optional[pulumi.Input['WindowsNodeConfigOsVersion']]:\n return pulumi.get(self, \"os_version\")", "def os_version(self):\n version_data = self._raw_version_data()\n if self._os_version is None:\n self._os_version = version_data[\"version\"]\n\n return self._os_version", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_server_version(self):\n return self.client.getServerVersion().decode('utf-8')\n return self.client.getServerVersion().decode('utf-8')", "def version(self):\n response = self._request_call('/version')\n return response.version_etcdserver", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def os_version(self):\n return self._os_version", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")", "def system_api_version(self):\n return self._system_api_version", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_server_version(self):\n return self.__aceQLHttpApi.get_server_version()", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", 
ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, \"platform_version\")", "def _get_version(self):\n version = self.job_config.get(\"os_version\")\n if not version:\n version = DEFAULT_OS_VERSION.get(self.os_type)\n\n return str(version)", "async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]", "def get_version(self):\n return self.api_version", "def last_available_os_version(self) -> str:\n return pulumi.get(self, \"last_available_os_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def get_product_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetProductVersion', self.handle)", "def server_version(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_version\")", "def server_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_version\")", "def version_string(self):\n return self.server_version", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def get_version(self):\r\n return self._arm.get_version()", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version", "def last_installed_os_version(self) -> str:\n return pulumi.get(self, \"last_installed_os_version\")", "def _get_api_version(self):\n with self.nb_session.get(\n self.nb_api_url, timeout=10,\n verify=(not settings.NB_INSECURE_TLS)) as resp:\n result = float(resp.headers[\"API-Version\"])\n log.info(\"Detected NetBox API v%s.\", result)\n return result", "def version(self):\n return self._get(\"version\")", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)", "def _get_ilo_firmware_version(self):\n\n manager, reset_uri = self._get_ilo_details()\n ilo_firmware_version = manager['Firmware']['Current']['VersionString']\n return {'ilo_firmware_version': ilo_firmware_version}", "def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)", "def osi_version() -> str:\n if sp_osi is None:\n return find.find_sp_osi_version()\n\n if sp_osi == \"wip\":\n return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX\n\n return sp_osi", "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "def version(self):\n info = json.loads(self.get_info())\n return FapiInfo(info).version", "def operating_system_version(self):\n return self._operating_system_version", "def version(self):\n done, data = self._request('GV')\n if done:\n return {\n 'firmware': data[0],\n 'protocol': data[1]\n }\n\n raise EvseError", "def version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version\")", "def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version", "def get_api_version(session: \"Session\") -> str:\n component_versions = get_component_versions(session)\n return str(component_versions.get(CoordConsts.KEY_API_VERSION, \"2.0.0\"))", "def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']", "def version(self):\n return self._client.getVersion()", "def get_host_os_major(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMajor', self.handle)", "def get_version(self):\n return self.version", "def api_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) 
-> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_version\")", "def api_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_version\")", "def getVersion(cls):\n cVersion = cls.__getLib().voikkoGetVersion()\n return unicode_str(cVersion, \"UTF-8\")", "def client_version(self) -> str:\n return pulumi.get(self, \"client_version\")" ]
[ "0.8168305", "0.7857166", "0.7728561", "0.76559675", "0.7597805", "0.7398409", "0.7351898", "0.7313755", "0.7304044", "0.7298166", "0.7229817", "0.7214568", "0.7212459", "0.719492", "0.7193201", "0.71634763", "0.7163448", "0.7133119", "0.7087612", "0.7079623", "0.70749295", "0.7063206", "0.7047446", "0.7019772", "0.70145166", "0.6988825", "0.6976239", "0.6956967", "0.69497955", "0.6911627", "0.6902267", "0.689045", "0.6886206", "0.6876975", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6871638", "0.6854248", "0.6835924", "0.6835924", "0.6835924", "0.6835924", "0.6835924", "0.6821409", "0.6807335", "0.6807335", "0.6796426", "0.6786981", "0.6781699", "0.67756563", "0.67756563", "0.67756563", "0.67756563", "0.6769782", "0.6713852", "0.67077655", "0.67034245", "0.66840607", "0.6671419", "0.6620232", "0.66200995", "0.6619019", "0.66138303", "0.66034436", "0.6603429", "0.65924317", "0.65798646", "0.6574923", "0.6572697", "0.657023", "0.6565305", "0.65309733", "0.6520997", "0.65115976", "0.65102124", "0.65102124", "0.65102124", "0.65102124", "0.65102124", "0.6508191", "0.6503924" ]
0.7123767
18
Retrieve the ItopapiOrganization corresponding to this server
def find_organization(self):
    if self.org_id is not None:
        ItopapiPrototype.get_itop_class('Organization').find(self.org_id)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organization(self):\n return self._tower.get_organization_by_id(self._data.get('organization'))", "def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]", "def getOrganization(self):\n return _libsbml.ModelCreator_getOrganization(self)", "def get_organization(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org.get_organization()\n elif pos_or_org.portal_type == 'organization':\n return pos_or_org", "def organization(self, organization_id):\r\n return organizations.Organization(self, organization_id)", "def GetOrganization(**argd):\n flag, ret = CGateway.core.GetOrganizationName(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})", "def organization(self) -> \"Organization\":\n return Organization(connection=self)", "def fetch_organization(organization):\n return fetch_json(organization_url, organization)", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_org(self):\n return Org.deserialize(self._get_single('org', {}, from_results=False))", "def organization(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization\")", "async def get_organization(request: Request, org: str):\n\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n return {org: organizations_obj[org]}", "def get_organization(self, id: str) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(id), params=params)", "def get_organization(\n self, organization_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Organization\":\n\n return communicator.Organization(self.__requester).from_id(\n organization_id=organization_id, parameters=params\n )", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")", "def sub_organization(self) -> object:\n return self._sub_organization", "def organization_id(self):\n return self._organization_id", "def organization_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_id\")", "def test_get_organization(self):\n pass", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)", "def get_org(self, retry_on_rate_exceed=False):\n return Org.deserialize(self._get_raw('org', {}, retry_on_rate_exceed))", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def test_get_organization_from_api_key(self):\n pass", "def organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")", "def 
organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")", "def get_organizations(self, language=None):\n return self.get_direct_related_page_extensions(\n Organization, OrganizationPluginModel, language=language\n )", "def test_retrieve_l_organization(self):\n pass", "def get_main_organization(self):\n return (\n self.get_organizations()\n .order_by(\"extended_object__organization_plugins__cmsplugin_ptr__position\")\n .first()\n )", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def organization(self, value):\n organization = self._tower.get_organization_by_name(value)\n if not organization:\n raise InvalidOrganization(value)\n self._update_values('organization', organization.id)", "def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map", "def get(self, organization_id):\n if organization_id is None:\n # Expose a list of organizations\n organizations = Organization.get_all()\n if organizations is None:\n abort(404)\n if request.args.get('name'):\n # search by name\n org_name = request.args.get('name')\n results = db.session.query(Organization).filter(\n Organization.name.ilike('%{0}%'.format(org_name)))\n organizations = results\n\n response = []\n for org in organizations:\n response.append(org.serialize())\n\n return make_response(jsonify(response)), 200\n\n else:\n # Expose a single organization\n try:\n organization = Organization.query.filter_by(\n id=organization_id).first()\n if not organization:\n abort(404)\n else:\n try:\n response = organization.serialize()\n return make_response(jsonify(response)), 200\n except Exception as e:\n response = {\n \"message\": str(e)\n }\n return make_response(jsonify(response)), 400\n except Exception as e:\n abort(404)", "def organizations(self):\r\n return organizations.Organizations(self)", "def getOrganisation(self):\n return _libsbml.ModelCreator_getOrganisation(self)", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def organizations(self):\n self.elements('organizations')", "def _get_org(self, context, org):\r\n try:\r\n rtn = {'context': context,\r\n 'org': org,\r\n 'space': self._bbreader.cache[context][org]['space'],\r\n 'org_config': self._bbreader.cache[context][org]['org'],\r\n }\r\n except KeyError:\r\n raise RequestError('No such context/org: {}/{}'.format(context, org))\r\n\r\n return rtn", "def organizations(self):\r\n return Organizations(self)", "def organization(self, organization_id_or_name):\r\n return Organization(self, organization_id_or_name)", "def 
organization_get_no_login(self, client, id):\n assert client.get('/organizations/' + id).status == '400 BAD REQUEST'", "def owner(self):\n return Organization.objects.get(id=self.owner_id)", "def clean_organization(self):\n return self.organization", "def test_get_one_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n coAdmin = User.create(name='coAdmin', email='coAdmin@bar.com',\n owned_organizations=[org.uid])\n coAdmin.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users/{}'.format(org.uid, coAdmin.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], coAdmin.uid)", "def _get_org(self, org_name):\n org = SpokeOrg()\n result = org.get(org_name)\n if result == []:\n msg = \"Can't find org %s\" % org_name\n self.log.error(msg)\n raise error.NotFound(msg) \n return result", "def organization_current_get(request):\n if request.organization:\n return request.organization.slug\n else:\n return None", "def organization_name(self):\n if self.organization is not None:\n return self.organization.name\n\n return ''", "def extract_organization(self, root):\n organization = {}\n info = root.xpath('.//li/h4/a')\n if info:\n link = info[0].get('href', None)\n name = info[0].get('title', None)\n if link and name:\n stmt = select([\n func.count(self.organization_table.c.path)\n ]).where(\n self.organization_table.c.path == link\n )\n results = self.connection.execute(stmt).fetchall()\n if results[0][0] > 0:\n self.logger.debug('{} already exists'.format(name))\n return None\n self.logger.debug('Querying {1}: {0}'.format(link, name))\n response = self.session.get(self.PODEROPEDIA_BASE_URL + link)\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n connections = html_tree.xpath('//div[@id=\"conexiones\"]')\n if connections:\n organization_data = self.extract_element_data(connections[0])\n organization['organization_data'] = organization_data if organization_data else {}\n organization['organization_data']['path'] = link\n\n person = self.extract_persons(connections[0])\n organization['member'] = person if person else []\n for item in organization['member']:\n item.update({'source_path': link})\n\n related_organization = self.extract_participation(connections[0])\n organization['organization'] = related_organization if related_organization else []\n for item in organization['organization']:\n item.update({'source_path': link})\n return organization", "def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()", "def organization_id():\n return os.environ[\"GCLOUD_ORGANIZATION\"]", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def organization_unit(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organization_unit\")", "def GetOrganizationSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def org():\n\n settings = current.deployment_settings\n ADMIN = current.session.s3.system_roles.ADMIN\n 
SECTORS = \"Clusters\" if settings.get_ui_label_cluster() \\\n else \"Sectors\"\n stats = lambda i: settings.has_module(\"stats\")\n\n return M(c=\"org\")(\n M(\"Organizations MSW\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n M(\"TestSpiegel\", c=\"org\",f=\"spiegel\")\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\",\n check=stats)(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def get_organization_address(self, obj):\n if obj.organization_address is None:\n return None\n\n serializer = OrganizationAddressSerializer(\n obj.organization_address, read_only=True\n )\n\n return serializer.data", "def test_retrieve_l_organizations(self):\n pass", "def get_one_organization_by_name(ctx, org_name):\n pprint(cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name))", "def org_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"org_id\")", "def common_organization_path(organization: str,) -> str:\n return \"organizations/{organization}\".format(organization=organization,)", "def get_organization_unit(self):\n return self.reference[REF_ORGANIZATION_UNIT][REF_VALUE]", "def find(self, organisation_id: OrganisationId) -> Optional[Organisation]:\n ...", "def get(self, org_name=None): \n if org_name is None: # Return a list of all orgs\n filter = '%s=*' % self.org_attr\n scope = 1\n trueorfalse = False\n else:\n filter = '%s=%s' % (self.org_attr, org_name)\n scope = self.search_scope\n trueorfalse = True \n result = self._get_object(self.base_dn, scope, filter, \\\n unique=trueorfalse)\n self.log.debug('Result: %s' % result)\n return result", "def get_organization_url(self, organization: Dict):\n return f\"{self.site_url}/organization/{organization['name']}\"", "def setOrganization(self, *args):\n return _libsbml.ModelCreator_setOrganization(self, *args)", "def org():\n\n sysroles = current.auth.get_system_roles()\n\n ADMIN = sysroles.ADMIN\n ORG_GROUP_ADMIN = sysroles.ORG_GROUP_ADMIN\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Hierarchy\", m=\"hierarchy\"),\n M(\"Create\", m=\"create\", restrict=(ADMIN, ORG_GROUP_ADMIN)),\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Administration\", restrict=(ADMIN, ORG_GROUP_ADMIN))(\n M(\"Facility Types\", f=\"facility_type\"),\n M(\"Organization Types\", f=\"organisation_type\"),\n M(\"Sectors\", f=\"sector\"),\n )\n )", "def find_by_organization(self, organization, params={}, **options):\n path = \"/organizations/%s/teams\" % (organization)\n return self.client.get_collection(path, params, **options)", "def get_organization(self, check_perm=True):\n try:\n org = Organization.objects.get(name__iexact=self.lhs)\n except Organization.DoesNotExist:\n raise CommandError(\"No organization by the name '%s'.\" % self.lhs)\n if check_perm and not org.access(self.caller, \"favor\"):\n raise 
CommandError(\"You do not have permission to set favor.\")\n return org", "def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def parent_organization(self) -> object:\n return self._parent_organization", "def get_organization_by_id_with_http_info(self, organization_id, **kwargs):\n\n all_params = ['organization_id', 'organizations']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_organization_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'organization_id' is set\n if ('organization_id' not in params) or (params['organization_id'] is None):\n raise ValueError(\"Missing the required parameter `organization_id` when calling `get_organization_by_id`\")\n\n resource_path = '/organizations/{organization-ID}'.replace('{format}', 'json')\n path_params = {}\n if 'organization_id' in params:\n path_params['organization-ID'] = params['organization_id']\n\n query_params = {}\n if 'organizations' in params:\n query_params['organizations'] = params['organizations']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['text/plain'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OrganizationPagedMetadata',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)", "def test_get_cloud_organization_api_key(self):\n pass", "def org():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if current.deployment_settings.get_ui_label_cluster() \\\n else \"Sectors\"\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Service Types\", f=\"service\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n 
restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def get_org_data(org, session=None):\n url = f'{GITHUB_API_URL}/orgs/{org}'\n return get_whole_response_as_json(url, session)", "def organization_unit(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organization_unit\")", "def organization_unit(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organization_unit\")", "def OrganizationSpecificInfoTlv(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.organizationspecificinfotlv_cdbfb68a383cae9df3ba968fba52c095 import OrganizationSpecificInfoTlv\n return OrganizationSpecificInfoTlv(self)", "def vpp_organization_name(self):\n if \"vppOrganizationName\" in self._prop_dict:\n return self._prop_dict[\"vppOrganizationName\"]\n else:\n return None", "def get_organization(login: str, url: str, session):\n _try = 0\n tries = 3\n exception = None\n while _try <= tries:\n try:\n organization = session.query(Organization).get(login)\n if not organization:\n organization = Organization(login, url)\n session.add(organization)\n session.commit()\n return organization\n except IntegrityError as e:\n print(f'Got an Organization IntegrityError, Try {_try} of {tries}')\n _try += 1\n exception = e\n pass\n\n raise exception", "def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn", "def test_client_get_organization(mocker, client_org_input):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=client_org_input)\n\n org = test_client.get_organization(\"Foo Bar\")\n\n test_client.execute_query.assert_called_once_with(\n queries.GET_ORG, {\"orgSlug\": \"foo-bar\"}\n )\n assert org.acronym == \"FOO\"\n assert org.name == \"Foo Bar\"\n assert org.zone == \"FED\"\n assert org.sector == \"TBS\"\n assert org.country == \"Canada\"\n assert org.province == \"Ontario\"\n assert org.city == \"Ottawa\"\n assert org.domain_count == 10\n assert org.verified", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def test_get_test_organization_api_key(self):\n pass", "def get_organization_by_name(self, name: str | None = None) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(\"name\", name), params=params)", "def get_all_organizations_with_http_info(self, **kwargs):\n\n all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_all_organizations\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/organizations'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'organizations' in params:\n query_params['organizations'] = params['organizations']\n if 'offset' in params:\n query_params['offset'] 
= params['offset']\n if 'records' in params:\n query_params['records'] = params['records']\n if 'order_by' in params:\n query_params['order_by'] = params['order_by']\n if 'order' in params:\n query_params['order'] = params['order']\n if 'include_retired' in params:\n query_params['include_retired'] = params['include_retired']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OrganizationPagedMetadata',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def organization_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_arn\")", "def common_organization_path(\n organization: str,\n ) -> str:\n return \"organizations/{organization}\".format(\n organization=organization,\n )", "def get_org(self, name: str):\n org = self._get_org(name)\n if org.keychain:\n assert org.keychain is self\n else:\n org.keychain = self\n return org", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Organization':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = OrganizationArgs.__new__(OrganizationArgs)\n\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"feature_set\"] = None\n __props__.__dict__[\"management_account_arn\"] = None\n __props__.__dict__[\"management_account_email\"] = None\n __props__.__dict__[\"management_account_id\"] = None\n __props__.__dict__[\"root_id\"] = None\n return Organization(resource_name, opts=opts, __props__=__props__)", "def org_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"org_id\")", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def isSetOrganization(self):\n return _libsbml.ModelCreator_isSetOrganization(self)", "def get_organisation_with_role(user, rolecode):\n return get_organisations_with_role(user, rolecode).get()", "def test_getorganizations_item(self):\n pass", "def orgs(self):\n return self._sdk_dependencies.org_client", "def organization_fields(self):\r\n return organizations.OrganizationFields(self)" ]
[ "0.7651209", "0.71187073", "0.7091516", "0.68542224", "0.6810734", "0.680332", "0.67780435", "0.6712279", "0.6706957", "0.66816705", "0.6667797", "0.6625002", "0.65964967", "0.6589382", "0.6561183", "0.6561183", "0.6561183", "0.65521574", "0.6543021", "0.64575344", "0.63788193", "0.63149315", "0.6290847", "0.6290847", "0.6279144", "0.6279081", "0.6276546", "0.6250836", "0.6137893", "0.6137893", "0.61156267", "0.6105276", "0.6085743", "0.6040368", "0.602985", "0.60236025", "0.5973099", "0.5969819", "0.5969501", "0.5936737", "0.5923759", "0.59106886", "0.5906054", "0.59010077", "0.58551997", "0.5851998", "0.58455795", "0.5802396", "0.57901335", "0.57882166", "0.56984335", "0.5680183", "0.5665234", "0.5662923", "0.5656212", "0.5648887", "0.56437004", "0.56163514", "0.5541743", "0.55402553", "0.5510284", "0.55053246", "0.5478975", "0.54634804", "0.5460917", "0.54394746", "0.5430115", "0.5429473", "0.5424231", "0.541231", "0.5409859", "0.54087126", "0.5398098", "0.53957736", "0.53741056", "0.53726804", "0.53452474", "0.53420776", "0.5340223", "0.5340223", "0.53193516", "0.5319003", "0.53167427", "0.5313767", "0.530684", "0.53067327", "0.5305388", "0.5298673", "0.52887595", "0.5274665", "0.5272671", "0.5248832", "0.5248595", "0.5247467", "0.52471966", "0.5225274", "0.52170205", "0.5212234", "0.5210789", "0.52038854" ]
0.75703514
1
Create mock coroutine function.
def mock_coro(return_value=None, **kwargs):
    async def wrapped(*args, **kwargs):
        return return_value
    return MagicMock(wraps=wrapped, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def async(function):\n function = asyncio.coroutine(function)\n\n @wraps(function)\n def wrapper(self, *args, **kwargs):\n @asyncio.coroutine\n def c():\n # Run test\n yield from function(self, *args, **kwargs)\n self.loop.run_until_complete(c())\n return wrapper", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n cr.send(None)\n return cr\n return start", "def test_async_function(self):\n @self.eventloop.wait_for(timeout=0.1)\n async def go():\n self.assertTrue(self.reactor.in_call_from_thread)\n return 17\n\n self.assertEqual((go(), go()), (17, 17))\n self.assertFalse(inspect.iscoroutinefunction(go))", "def _coro_test_wrapper(func, self):\n\tcoro = Coroutine(func(self), is_watched = True)\n\tcoro.start()\n\treactor.run()\n\n\tif coro.state != coro.STATE_COMPLETED:\n\t\tif coro.result and coro.result[1]:\n\t\t\texc_type, exc_value, exc_traceback = coro.result[1]\n\t\t\traise exc_type, exc_value, exc_traceback\n\t\telse:\n\t\t\traise Exception(\"Coroutine did not complete: %r\" % (coro, ))", "def mock_coro(return_value=None, **kwargs):\n async def wrapped(*args, **kwargs):\n return return_value\n return Mock(wraps=wrapped, **kwargs)", "def coroutine(func, replace_cb = True):\n return _make_coroutine_wrapper(func, replace_cb)", "def create_async_mock(data: bytes = None, status: int = None) -> mock.MagicMock:\n\n class AsyncMock(mock.MagicMock):\n \"\"\"\n Mock class that works with an async context manager. Currently used to mock aiohttp.ClientSession.get\n the ClientResponse is a MagicMock with the specified data and status.\n \"\"\"\n\n async def __aenter__(self):\n conn = mock.MagicMock()\n f = asyncio.Future()\n f.set_result(data)\n conn.read = mock.MagicMock(return_value=f)\n type(conn).status = mock.PropertyMock(return_value=status)\n return conn\n\n async def __aexit__(self, *_):\n pass\n\n def __await__(self):\n yield\n\n return AsyncMock()", "def mock_fcn(self, name):\n return MockFunction(self._context, name)", "def coroutine(func):\n def start(*args,**kwargs):\n coro = func(*args,**kwargs)\n coro.next()\n return coro\n return start", "def AsyncMock(*args, **kwargs):\n m = mock.MagicMock(*args, **kwargs)\n\n async def mock_coro(*args, **kwargs):\n return m(*args, **kwargs)\n\n mock_coro.mock = m\n return mock_coro", "def fake_spawn(time_from_now_in_seconds, func, *args, **kw):\n def thread_start():\n # fake_sleep(time_from_now_in_seconds)\n return func(*args, **kw)\n\n cr = Coroutine(thread_start)\n fake_threads.append({'sleep': time_from_now_in_seconds,\n 'greenlet': cr,\n 'name': str(func)})", "def test_async_function(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: None)\n c.no_setup()\n calls = []\n\n @c.run_in_reactor\n async def go():\n self.assertTrue(myreactor.in_call_from_thread)\n calls.append(1)\n return 23\n\n self.assertEqual((go().wait(0.1), go().wait(0.1)), (23, 23))\n self.assertEqual(len(calls), 2)\n self.assertFalse(inspect.iscoroutinefunction(go))", "def async_test(wrapped):\n\n @functools.wraps(wrapped)\n def wrapper(*args, **kwargs):\n return asyncio.run(wrapped(*args, **kwargs))\n return wrapper", "def testBaseCase(self):\n r = []\n async_fn = utils.make_async()(lambda: r.append(\"a\"))\n async_fn()\n time.sleep(1)\n self.assertListEqual(r, [\"a\"])", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", 
"def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def get_coroutine_wrapper(): # real signature unknown; restored from __doc__\n pass", "def coroutine(func):\n\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n\n return start", "def async_test(func):\n # inner import because for Python 3.6+ tests only\n from asgiref.sync import async_to_sync\n\n sync_func = async_to_sync(func)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return sync_func(*args, **kwargs)\n\n return wrapper", "def mock_open_connection(self):\n def create_connection(*args, **kwargs):\n reader = MockStreamReader(loop=self.loop)\n writer = MockStreamWriter(None, None, reader, self.loop)\n writer.write = mock.Mock()\n fut = asyncio.Future(loop=self.loop)\n fut.set_result((reader, writer))\n return fut\n return mock.patch('asyncio.open_connection', side_effect=create_connection)", "async def _executor(self, func):\n return await asyncio.coroutine(func)()", "def coroutine(func):\n if inspect.isgeneratorfunction(func):\n coro = func\n else:\n @functools.wraps(func)\n def coro(*args, **kw):\n res = func(*args, **kw)\n if False:\n yield\n raise Return(res)\n\n # We could potentially add a flag to the returned coroutine object here, to\n # be checked by `Task` however for now this decorator just serves as\n # documentation.\n coro._is_compat_coroutine = True\n return coro", "def make_wrapped_function(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: None)\n c.no_setup()\n\n @c.run_in_reactor\n def passthrough(argument):\n return argument\n\n return passthrough", "def test_await_if_coroutine(coroutine, exp_return, args):\n result = asyncio.run(await_if_coroutine(coroutine, *args))\n\n assert result == exp_return", "def __getattribute__(self, name):\n attr = super().__getattribute__(name)\n if name.startswith('test_') and asyncio.iscoroutinefunction(attr):\n return lambda: asyncio.run(self.async_test_wrapper(attr))\n else:\n return attr", "async def wrapped_coroutine():\n try:\n return await coroutine_function(*coroutine_args)\n finally:\n if len(_tasks) != 0:\n del _tasks[0]", "def awaitable(obj):\n yield from asyncio.sleep(0)\n return obj", "def test_wrap_method(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: None)\n c.no_setup()\n calls = []\n\n class C(object):\n def func(self, a, b, c):\n calls.append((a, b, c))\n\n f = c.run_in_reactor(C().func)\n f(4, 5, c=6)\n self.assertEqual(calls, [(4, 5, 6)])", "async def sleep_fake(*args, **kwargs):\n sleep_sync_mock(*args, **kwargs)", "def coroutine(func):\n wrapped = tornado.gen._make_coroutine_wrapper(func, True)\n ioloop = tornado.ioloop.IOLoop.current()\n # If this isn't a ProfilingIOLoop -- lets do nothing\n if not isinstance(ioloop, tornado_prof.ioloop.ProfilingIOLoop):\n return wrapped\n\n @functools.wraps(wrapped)\n def wrapper(*args, **kwargs):\n if not ioloop.timing_enabled:\n return wrapped(*args, **kwargs)\n start = time.time()\n try:\n ret = wrapped(*args, **kwargs)\n return ret\n finally:\n took = time.time() - start\n key = (\n func.func_code.co_filename,\n func.func_code.co_name,\n func.func_code.co_firstlineno,\n )\n\n # TODO: store method?\n # Store the metrics\n try:\n ioloop._timing[key]['sum'] += took\n ioloop._timing[key]['count'] += 1\n ioloop._timing[key]['max'] = max(ioloop._timing[key]['max'], took)\n except KeyError:\n ioloop._timing[key] = {'sum': took, 'count': 1, 'max': 
took}\n\n return wrapper", "def test_method(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: None)\n c.no_setup()\n calls = []\n\n class C(object):\n @c.run_in_reactor\n def func(self, a, b, c):\n calls.append((self, a, b, c))\n\n o = C()\n o.func(1, 2, c=3)\n self.assertEqual(calls, [(o, 1, 2, 3)])", "def async_test(\n f: Callable[[TestCase], Coroutine[Deferred[object], object, object]]\n) -> Callable[[TestCase], Deferred[None]]:\n\n @inlineCallbacks\n def g(self: object) -> Generator[Deferred[object], object, None]:\n d: Deferred[object] = Deferred.fromCoroutine(f(self))\n yield d\n\n return g", "def test_starts_returned_async(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n returned_async = Mock(spec=Async)\n\n work = Async(target=_fake_async_returning_target,\n args=[returned_async])\n\n with _ExecutionContext(work):\n run_job()\n\n returned_async.start.assert_called_once_with()", "def return_fake_future(f):\n def wrap(*args, **kwargs):\n future = Future()\n future.set_result(f(*args, **kwargs))\n return future\n return wrap", "def set_mock_return_value(magic_mock: MagicMock, return_value: t.Any):\n if magic_mock.__class__.__name__ == \"AsyncMock\":\n # Python 3.8 and above\n magic_mock.return_value = return_value\n else:\n\n async def coroutine(*args, **kwargs):\n return return_value\n\n magic_mock.return_value = coroutine()", "def to_coroutine(func):\n if not iscoroutinefunction(func):\n func = coroutine(func)\n return func", "def public_async_generator(func):\n @functools.wraps(func)\n def function(*args, **kwargs):\n \"Wrapped function\"\n return GeneratorFuture(func(*args, **kwargs)).future\n return function", "def wrapper(coro: CoroutineFunction) -> NoReturn:\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('Callback function must be coroutine function')\n self._callback = coro\n return coro", "async def test_nodeclient_pipe_connect():\n f = asyncio.Future()\n f.set_result(None)\n pipe = Mock()\n pipe.connect.return_value = f\n node_client = NodeClient(pipe, Mock())\n await node_client.connect()\n pipe.connect.assert_called_once()", "def pytest_pyfunc_call(pyfuncitem):\n if _is_coroutine(pyfuncitem.function):\n loop = pyfuncitem.funcargs[LOOP_KEY]\n funcargs = pyfuncitem.funcargs\n testargs = {}\n for arg in pyfuncitem._fixtureinfo.argnames:\n testargs[arg] = funcargs[arg]\n loop.run_until_complete(\n loop.create_task(\n pyfuncitem.obj(**testargs)\n )\n )\n return True", "async def test_rpc(bus: lightbus.BusNode, dummy_api):\n\n async def co_call_rpc():\n await asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='Hello! 😎')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n consume_task.cancel()\n assert call_task.result() == 'value: Hello! 😎'", "async def test_rpc(bus: lightbus.BusNode, dummy_api):\n\n async def co_call_rpc():\n asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='Hello! 😎')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n consume_task.cancel()\n assert call_task.result() == 'value: Hello! 
😎'", "def coroutine(func):\n @wraps(func)\n def primer(*args, **kwargs):\n gen = func(*args, **kwargs)\n next(gen)\n return gen\n return primer", "def cli_coro(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(f(*args, **kwargs))\n\n return wrapper", "def test_timed_coroutine(self):\n import asyncio\n\n source = \"\"\"\n@self.statsd.timed('timed.test')\nasync def print_foo():\n \"docstring\"\n import time\n time.sleep(0.5)\n print(\"foo\")\n \"\"\"\n exec(source, {}, locals())\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(locals()['print_foo']())\n loop.close()\n\n # Assert\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)", "def mock_handler_function_wrapper(ret):\n def mock_handler_function(http_response, response):\n \"\"\"\n mock handler\n :param http_response:\n :param response:\n :return:\n \"\"\"\n return ret\n return mock_handler_function", "def test_py_closure(self):", "def monkey_patch():\n tornado.gen.coroutine = coroutine", "async def awaitable(obj):\n await asyncio.sleep(0)\n return obj", "async def new_coro():\n try:\n await coro\n except asyncio.CancelledError:\n pass", "def run(coroutine):\n\n return asyncio.get_event_loop().run_until_complete(coroutine)", "async def test_coro_from_async(dut):\n v = await produce.coro(Value(1))\n assert v == 1\n\n try:\n await produce.coro(Error(SomeException))\n except SomeException:\n pass\n else:\n assert False", "def test_parsing_args(event_loop) -> None:\n called = False\n\n async def mock_func(hass, provider, args2):\n \"\"\"Mock function to be called.\"\"\"\n nonlocal called\n called = True\n assert provider.hass.config.config_dir == \"/somewhere/config\"\n assert args2 is args\n\n args = Mock(config=\"/somewhere/config\", func=mock_func)\n\n with patch(\"argparse.ArgumentParser.parse_args\", return_value=args):\n script_auth.run(None)\n\n assert called, \"Mock function did not get called\"", "def via_usim(test_case: Callable[..., Coroutine]):\n\n @wraps(test_case)\n def run_test(*args, **kwargs):\n test_completed = False\n\n async def complete_test_case():\n nonlocal test_completed\n await test_case(*args, **kwargs)\n test_completed = True\n\n run(complete_test_case())\n if not test_completed:\n raise UnfinishedTest(test_case)\n\n return run_test", "def mock_tcp_connection() -> Generator[CoroutineMock, Any, None]:\n with patch('aioswitcher.api.open_connection') as conn:\n reader = Mock(StreamReader)\n writer = Mock(StreamWriter)\n conn.return_value = (reader, writer)\n yield reader", "async def coro_proxy():\n try:\n result = await coro\n except (CancelledError, Exception) as e:\n if not fut.cancelled():\n fut.set_exception(e)\n else:\n if not fut.cancelled():\n fut.set_result(result)", "async def test_rpc_ids(bus: lightbus.BusNode, dummy_api, mocker):\n\n async def co_call_rpc():\n await asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='foo')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n mocker.spy(bus.bus_client, 'send_result')\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n _, kw = bus.bus_client.send_result.call_args\n rpc_message = kw['rpc_message']\n result_message = 
kw['result_message']\n consume_task.cancel()\n\n assert rpc_message.rpc_id\n assert result_message.rpc_id\n assert rpc_message.rpc_id == result_message.rpc_id", "async def test_trigger_await_gives_self(dut):\n t = Timer(1)\n t2 = await t\n assert t2 is t", "async def test_rpc_ids(bus: lightbus.BusNode, dummy_api, mocker):\n\n async def co_call_rpc():\n asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='foo')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n mocker.spy(bus.bus_client, 'send_result')\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n _, kw = bus.bus_client.send_result.call_args\n rpc_message = kw['rpc_message']\n result_message = kw['result_message']\n consume_task.cancel()\n\n assert rpc_message.rpc_id\n assert result_message.rpc_id\n assert rpc_message.rpc_id == result_message.rpc_id", "def testSerialExecution(self):\n r = []\n a = lambda: r.append((time.sleep(5), \"a\"))\n b = lambda: r.append((None, \"b\"))\n async_fn = utils.make_async()(lambda f: f())\n async_fn(a)\n async_fn(b).result()\n self.assertListEqual(r, [(None, \"a\"), (None, \"b\")])", "def async_generator(func):\n @functools.wraps(func)\n def function(*args, **kwargs):\n \"Wrapped function\"\n return GeneratorFuture(func(*args, **kwargs))\n return function", "def wrapper(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(method(*args, **kwargs))", "def await_prepared_test(test_fn):\n\n @functools.wraps(test_fn)\n def run(test_class_instance, *args, **kwargs):\n trim_kwargs_from_test_function(test_fn, kwargs)\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(test_fn(test_class_instance, **kwargs))\n\n return run", "def await_prepared_test(test_fn):\n\n @functools.wraps(test_fn)\n def run(test_class_instance, *args, **kwargs):\n trim_kwargs_from_test_function(test_fn, kwargs)\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(test_fn(test_class_instance, **kwargs))\n\n return run", "def asyncio_coro(coro_worker: Worker) -> Coroutine: \n async def wrapper(*args, **kwargs):\n return await await_coro_prim(coro_worker, *args, **kwargs)\n \n coro_worker_sign: Signature = signature(coro_worker)\n wrapper.__signature__ = coro_worker_sign.replace(parameters=tuple(coro_worker_sign.parameters.values())[1:], return_annotation=coro_worker_sign.return_annotation)\n return wrapper", "async def no_sleep_coro():\n pass", "def create_mock_client(self, fake_request_method):\n class FakeHttpLib2(object):\n pass\n\n FakeHttpLib2.request = fake_request_method\n mock_client = self.mox.CreateMock(DNSaasClient)\n mock_client.http_pool = pools.Pool()\n mock_client.http_pool.create = FakeHttpLib2\n mock_client.auth_token = 'token'\n return mock_client", "def test_wrapped_function(self):\n c = EventLoop(lambda: None, lambda f, g: None)\n\n def func():\n pass\n\n wrapper = c.run_in_reactor(func)\n self.assertIdentical(wrapper.__wrapped__, func)", "def RunCoroutineOrFunction(function, args=[]):\r\n if inspect.isgeneratorfunction(function):\r\n coroutine = function(*args)\r\n response = yield coroutine.next()\r\n while True:\r\n response = yield coroutine.send(response)\r\n else:\r\n function(*args)", "def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n 
self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def stubFunc( *args, **keywords ):\n maya.cmds.dynamicLoad( library )\n # call the real function which has replaced us\n return maya.cmds.__dict__[command]( *args, **keywords )", "def taskwrap(fn):\n coroutine = asyncio.coroutine(fn)\n\n @functools.wraps(fn)\n def create_task(*args, **kwargs):\n logger.debug('Create task %s', fn.__name__)\n loop = asyncio.get_event_loop()\n task = asyncio.async(coroutine(*args, **kwargs))\n task.add_done_callback(task_died)\n return task\n return create_task", "def requestsmock():\n with requests_mock.mock() as m:\n yield m", "def spawn_greenlet(func, *args, **kwargs):\n\n g = greenlet.greenlet(func)\n result = g.switch(*args, **kwargs)\n while True:\n if isinstance(result, asyncio.Future):\n result = yield from result\n else:\n break\n return result", "def ensure(\r\n self,\r\n coroutine,\r\n args = [],\r\n kwargs = {},\r\n thread = None,\r\n future = None,\r\n immediately = True\r\n ):\r\n\r\n # tries to determine if the provided callable is really\r\n # a coroutine and uses that condition to determine the\r\n # default value for the thread argument, notice that the\r\n # verification is also performed for the coroutine object\r\n is_coroutine = asynchronous.is_coroutine(coroutine)\r\n is_coroutine_object = asynchronous.is_coroutine_object(coroutine)\r\n is_defined = is_coroutine or is_coroutine_object\r\n if thread == None: thread = False if is_defined else True\r\n\r\n # verifies if a future variable is meant to be re-used\r\n # or if instead a new one should be created for the new\r\n # ensure execution operation\r\n future = future or self.build_future()\r\n\r\n # in case the provided coroutine callable is not really\r\n # a coroutine and instead a \"normal\" function a conversion\r\n # is required so that there's compatibility between the\r\n # coroutine model and the typical sync model\r\n if not is_defined:\r\n # saves the \"original\" callable so that it may be latter\r\n # used as part of the back calling process\r\n coroutine_c = coroutine\r\n\r\n # creates the coroutine that is going to be used to\r\n # encapsulate the callable, note that the result of the\r\n # callable is set as the result of the future (as expected)\r\n def coroutine(future, *args, **kwargs):\r\n yield\r\n result = coroutine_c(*args, **kwargs)\r\n future.set_result(result)\r\n\r\n # creates the function that is going to \"propagate\" the cancel\r\n # operation from the \"parent\" future to the child one, this\r\n # should also close the associated generator\r\n def cleanup(future):\r\n if not future.cancelled(): return\r\n if not hasattr(future, \"child\"): return\r\n if not future.child: return\r\n future.child.cancel()\r\n\r\n # adds the cleanup function as a done callback so that whenever\r\n # the future is canceled a child future is also canceled, this\r\n # propagation of operations allows for proper cleanup\r\n future.add_done_callback(cleanup)\r\n\r\n # verifies if the currently 
provided coroutine is in fact (already)\r\n # a coroutine object, if that's the case the sequence (generator)\r\n # is already present and the coroutine is simply assigned to the\r\n # sequence without any kind of conversion\r\n if is_coroutine_object:\r\n sequence = coroutine\r\n\r\n # otherwise the sequence must be created by calling the coroutine\r\n # (function) with the proper set of arguments, notice that the signature\r\n # is inspected to determine if a future argument is required\r\n else:\r\n # retrieves the argument spec of the provided coroutine to check\r\n # if the provided coroutine requires a future to be passed\r\n spec = legacy.getargspec(coroutine)\r\n is_future = spec[0] and spec[0][0] == \"future\"\r\n\r\n # creates the generate sequence from the coroutine callable\r\n # by calling it with the newly created future instance, that\r\n # will be used for the control of the execution, notice that\r\n # the future is only passed in case the coroutine has been\r\n # determined to be receiving the future as first argument\r\n if is_future: sequence = coroutine(future, *args, **kwargs)\r\n else: sequence = coroutine(*args, **kwargs)\r\n\r\n # calls the ensure generator method so that the provided sequence\r\n # gets properly \"normalized\" into the expected generator structure\r\n # in case the normalization is not possible a proper exception is\r\n # raised indicating the \"critical\" problem\r\n is_generator, sequence = asynchronous.ensure_generator(sequence)\r\n if not is_generator: raise errors.AssertionError(\"Expected generator\")\r\n\r\n # creates the callable that is going to be used to call\r\n # the coroutine with the proper future variable as argument\r\n # note that in case the thread mode execution is enabled the\r\n # callable is going to be executed on a different thread\r\n if thread: callable = lambda f = future: self.texecute(step, [f])\r\n else: callable = lambda f = future: step(f)\r\n\r\n # creates the function that will be used to step through the\r\n # various elements in the sequence created from the calling of\r\n # the coroutine, the values returned from it may be either future\r\n # or concrete values, for each situation a proper operation must\r\n # be applied to complete the final task in the proper way\r\n def step(_future):\r\n # unsets any possible reference to a child element as it must\r\n # have been processed if the control flow reached this point,\r\n # this avoids duplicated approval of child futures\r\n future.child = None\r\n\r\n # iterates continuously over the generator that may emit both\r\n # plain object values or future (delayed executions)\r\n while True:\r\n # in case the future object is considered to be closed,\r\n # (done using a pipeline of callbacks) no more steps are\r\n # going to be taken and the sequence should be closed as\r\n # it's not longer going to be used (for sure), this means\r\n # that the blocked coroutine is not going to be resumed\r\n if future.closed:\r\n sequence.close()\r\n future.cancel()\r\n break\r\n\r\n # determines if the future is ready to receive new work\r\n # this is done using a pipeline of callbacks that must\r\n # deliver a positive value so that the future is considered\r\n # ready, note that in case the future is not ready the current\r\n # iteration cycle is delayed until the next tick\r\n if not future.ready:\r\n self.delay(callable)\r\n break\r\n\r\n # in case the finished future has been canceled propagates\r\n # such cancellation to the parent future\r\n if _future.cancelled():\r\n 
future.cancel()\r\n break\r\n\r\n # in case there's an exception in the future that has just\r\n # been executed propagates such exception to the parent future\r\n if _future.exception():\r\n future.set_exception(_future.exception())\r\n break\r\n\r\n # retrieves the next value from the generator and in case\r\n # value is the last one (stop iteration) verifies if the\r\n # is still considered running (no value or exception) set and\r\n # if that's the case runs the default value set (approve)\r\n # and then breaks the loop, notice that if there's an\r\n # exception raised in the middle of the generator iteration\r\n # it's set on the future (indirect notification)\r\n try: value = next(sequence)\r\n except StopIteration as exception:\r\n result = exception.args[0] if exception.args else None\r\n if future.running(): future.set_result(result)\r\n break\r\n except BaseException as exception:\r\n future.set_exception(exception)\r\n break\r\n\r\n # determines if the value retrieved from the generator is a\r\n # future and if that's the case schedules a proper execution\r\n is_future = asynchronous.is_future(value)\r\n\r\n # in case the current value is a future schedules it for execution\r\n # taking into account the proper thread execution model, note that\r\n # the future is set as a child of the current \"parent\" future\r\n if is_future:\r\n future.child = value\r\n value.add_done_callback(callable)\r\n break\r\n\r\n # otherwise it's a normal value being yielded and should be sent\r\n # to the future object as a partial value (pipelining)\r\n else:\r\n # for a situation where a thread pool should be used the new\r\n # value should be \"consumed\" by adding the data handler operation\r\n # to the list of delayed operations and notifying the task pool\r\n # so that the event loop on the main thread gets unblocked and\r\n # the proper partial value handling is performed (always on main thread)\r\n if thread:\r\n def handler():\r\n future.partial(value)\r\n callable()\r\n\r\n self.delay_s(handler)\r\n break\r\n\r\n # otherwise we're already on the main thread so a simple partial callback\r\n # notification should be enough for the proper consuming of the data\r\n else:\r\n future.partial(value)\r\n\r\n # delays the execution of the callable so that it is executed\r\n # immediately if possible (event on the same iteration)\r\n self.delay(callable, immediately = immediately)\r\n return future", "def simulation_method(simulate_method):\n\n def decorated_simulate_method(self):\n self._start_simulation()\n result = simulate_method(self)\n self._end_simulation()\n self.data = result\n return result\n\n return decorated_simulate_method", "def test_GeneratorBuilt(self):\n generator = Mock()\n genFn = Mock(return_value=generator)\n args = range(3)\n kwargs = {'one': 1, 'two': 2, 'three': 3}\n \n wrapper = KaoGenerator(genFn, *args, **kwargs)\n genFn.assert_called_once_with(*args, **kwargs)\n self.assertEqual(wrapper.generator, generator)", "def test_starts_callback_returned_async(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n returned_async = Mock(spec=Async)\n\n work = Async(target=_fake_async_returning_target,\n args=[returned_async],\n callbacks={'success': _fake_result_returning_callback})\n\n with _ExecutionContext(work):\n run_job()\n\n returned_async.start.assert_called_once_with()", "def test_telnetrmq_after_execute(self, mocked):\n agentconf={}\n telnetconf={\"host\":\"telnet.lan\"}\n 
rmqconf={\"host\":\"rmq.lan\"}\n agent=TelnetRmqAgent(agentconf, telnetconf, rmqconf)\n\n #Setup generic mock for others methods wich are not tested here\n ignoredmocks=Mock()\n agent.telnetclient=ignoredmocks\n agent.rmqclient=ignoredmocks\n \n\n instance = mocked.return_value \n agent.after_execute()\n mocked.assert_called_with(agent)\n mocked.assert_called_with(agent)", "def asyncinit(cls):\r\n __new__ = cls.__new__\r\n\r\n async def init(obj, *arg, **kwarg):\r\n await obj.__init__(*arg, **kwarg)\r\n return obj\r\n\r\n def new(cls, *arg, **kwarg):\r\n obj = __new__(cls, *arg, **kwarg)\r\n coro = init(obj, *arg, **kwarg)\r\n return coro\r\n\r\n cls.__new__ = new\r\n return cls", "def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.handler.handle(MagicMock())", "def test__call__(self):\n mock = Mock()\n factory = Factory(mock)\n factory()\n mock.assert_called_once_with()", "def mock_apiclient() -> Generator[ApiClient, None, None]:\n with patch(\n \"homeassistant.components.kostal_plenticore.helper.ApiClient\",\n autospec=True,\n ) as mock_api_class:\n apiclient = MagicMock(spec=ApiClient)\n apiclient.__aenter__.return_value = apiclient\n apiclient.__aexit__ = AsyncMock()\n mock_api_class.return_value = apiclient\n yield apiclient", "def _stub_generator(self, nargs, body_func, kwargs=None):\n def stub(tyctx):\n # body is supplied when the function is magic'd into life via glbls\n return body(tyctx) # noqa: F821\n if kwargs is None:\n kwargs = {}\n # create new code parts\n stub_code = stub.__code__\n co_args = [stub_code.co_argcount + nargs + len(kwargs)]\n\n new_varnames = [*stub_code.co_varnames]\n new_varnames.extend([f'tmp{x}' for x in range(nargs)])\n new_varnames.extend([x for x, _ in kwargs.items()])\n from numba.core import utils\n if utils.PYVERSION >= (3, 8):\n co_args.append(stub_code.co_posonlyargcount)\n co_args.append(stub_code.co_kwonlyargcount)\n co_args.extend([stub_code.co_nlocals + nargs + len(kwargs),\n stub_code.co_stacksize,\n stub_code.co_flags,\n stub_code.co_code,\n stub_code.co_consts,\n stub_code.co_names,\n tuple(new_varnames),\n stub_code.co_filename,\n stub_code.co_name,\n stub_code.co_firstlineno,\n stub_code.co_lnotab,\n stub_code.co_freevars,\n stub_code.co_cellvars\n ])\n\n new_code = pytypes.CodeType(*co_args)\n\n # get function\n new_func = pytypes.FunctionType(new_code, {'body': body_func})\n return new_func", "def _mock_function(self, obj, func):\n setattr(obj, func.__name__, MethodType(func, self.breaker))", "def _mock_function(self, obj, func):\n setattr(obj, func.__name__, MethodType(func, self.breaker))", "def future_func(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n return make_future(func(*args, **kwargs))\n return func_wrapper", "def fake_open_library(*args):\n\tresult = mock.Mock()\n\tresult.__exit__ = mock.Mock(return_value=False)\n\tresult.__enter__ = mock.Mock(return_value=StringIO.StringIO(\"good\\t1\\nbad\\t-1\"))\n\treturn result", "def test_telnetrmq_before_execute(self, mocked):\n agentconf={}\n telnetconf={\"host\":\"telnet.lan\"}\n rmqconf={\"host\":\"rmq.lan\"}\n agent=TelnetRmqAgent(agentconf, telnetconf, rmqconf)\n\n #Setup generic mock for others methos wich are not tested here\n ignoredmocks=Mock()\n agent.telnetclient=ignoredmocks\n agent.rmqclient=ignoredmocks\n \n\n instance = mocked.return_value \n agent.before_execute()\n mocked.assert_called_with(agent)", "async def test_write_acn_error():\n f = asyncio.Future()\n f.set_result(None)\n pipe = Mock()\n pipe.write.return_value = f\n 
node_client = NodeClient(pipe, Mock())\n await node_client.write_acn_status_error(\"some message\")\n pipe.write.assert_called_once()", "async def test_wrap_async(self):\n result = 987\n wrapped = async_util.wrap_async(result)\n await wrapped\n assert isinstance(wrapped, asyncio.Future)\n assert wrapped.result() == result", "def get_mock_response(status_code, content):\n test_mock = mock.Mock()\n test_mock.status_code = status_code\n test_mock.content = content\n\n def mock_response(api_url, headers, timeout, proxies):\n return test_mock\n\n return mock_response", "def get_mock_response(status_code, content):\n test_mock = mock.Mock()\n test_mock.status_code = status_code\n test_mock.content = content\n\n def mock_response(api_url, headers, timeout, proxies):\n return test_mock\n\n return mock_response", "def provider(reuse=False):\n def decorator(fun):\n jobname = fun.__name__\n\n def wrap(*args, **kwargs):\n job, ex_manager = fun(*args, **kwargs)\n if inspect.isasyncgenfunction(job):\n if DD.HELPERS ^ DD.DEEP:\n print(f'## {jobname} is coroutine')\n return jobname, asynccontextmanager(job), ex_manager, reuse, True\n else:\n if DD.HELPERS ^ DD.DEEP:\n print(f'## {jobname} is not coroutine')\n return jobname, contextmanager(job), ex_manager, reuse, False\n return wrap\n return decorator", "async def test_async_set_preset_mode(\n preset_mode, sleep, auto, manual, plasmawave_off, plasmawave_on\n):\n\n wrapper = build_mock_wrapper()\n\n wrapper.async_ensure_on = AsyncMock()\n wrapper.async_sleep = AsyncMock()\n wrapper.async_auto = AsyncMock()\n wrapper.async_manual = AsyncMock()\n wrapper.async_plasmawave_off = AsyncMock()\n wrapper.async_plasmawave_on = AsyncMock()\n\n await wrapper.async_set_preset_mode(preset_mode)\n assert wrapper.async_ensure_on.call_count == 1\n\n assert wrapper.async_sleep.call_count == sleep\n assert wrapper.async_auto.call_count == auto\n assert wrapper.async_manual.call_count == manual\n assert wrapper.async_plasmawave_off.call_count == plasmawave_off\n assert wrapper.async_plasmawave_on.call_count == plasmawave_on", "def testNonBlocking(self):\n r = []\n async_fn = utils.make_async()(lambda: r.append((time.sleep(5), \"a\")))\n r.append((None, \"b\"))\n async_fn().result()\n self.assertListEqual(r, [(None, \"b\"), (None, \"a\")])", "def _run(coro):\r\n return asyncio.get_event_loop().run_until_complete(coro)", "def _run(coro):\n return asyncio.get_event_loop().run_until_complete(coro)" ]
[ "0.68554175", "0.6837", "0.6691568", "0.6626183", "0.6579949", "0.65167445", "0.6472262", "0.64584243", "0.6420148", "0.6401542", "0.637773", "0.6373077", "0.6311976", "0.62048763", "0.61998427", "0.61998427", "0.61998427", "0.6194855", "0.6184243", "0.6151873", "0.6118671", "0.60522836", "0.6051182", "0.59993905", "0.599108", "0.59469205", "0.5922477", "0.59153014", "0.5896827", "0.5867416", "0.58611685", "0.58278537", "0.57778883", "0.5770288", "0.5765783", "0.57593346", "0.57584256", "0.5747242", "0.5744216", "0.5741433", "0.5737474", "0.5736264", "0.572822", "0.5727358", "0.57160515", "0.5692073", "0.5690354", "0.56870115", "0.5666608", "0.5661423", "0.5656475", "0.56545997", "0.56530976", "0.56460625", "0.56396943", "0.5613264", "0.5603611", "0.5601319", "0.55906177", "0.5586191", "0.55738723", "0.55716866", "0.55694675", "0.5565595", "0.5565595", "0.55611944", "0.556047", "0.55560434", "0.5547612", "0.5542959", "0.5529648", "0.55134636", "0.5513076", "0.550564", "0.5475176", "0.5473507", "0.5467206", "0.54619455", "0.5461769", "0.54376686", "0.5431081", "0.5426516", "0.5418015", "0.53921556", "0.5384204", "0.5379536", "0.5374201", "0.5374201", "0.5361154", "0.5354309", "0.53427213", "0.53316927", "0.5329832", "0.5312539", "0.5312539", "0.5308687", "0.53064036", "0.5302791", "0.5302147", "0.5299261" ]
0.63443387
12
Places the robot on position.
def set_base_xpos(self, pos):
    node = self.worldbody.find("./body[@name='base']")
    node.set("pos", array_to_string(pos - self.bottom_offset))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move():\n Robot.move()", "def locateRobot(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for r, row in enumerate(self.map):\n #print(row)\n for c, cell in enumerate(row):\n if (cell == \"X\"):\n logging.info(\"r={} / c={}\".format(r, c))\n self.robot.posX = c\n self.robot.posY = r", "def setRobotPosition(self, position):\n posx = position.getX()\n posy = position.getY()\n self.position = Position(posx, posy)\n #raise NotImplementedError", "def setRobotPosition(self, position):\n self.position = position", "def setRobotPosition(self, position):\n self.position = position", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def place(placement: str):\n Robot.place(placement)", "def setRobotPosition(self, position):\n self.position = position\n #raise NotImplementedError", "def move(self):\n \n self.position = self.wander()", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def move_robot(self, pose):\n # type: (Pose) -> None\n start_pos = ModelState()\n start_pos.model_name = 'turtlebot3'\n start_pos.pose = pose\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n resp = set_state(start_pos)\n\n except rospy.ServiceException:\n print(\"Move Robot to position failed\")\n\n pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size = 10)\n rospy.sleep(3)\n start_pos = PoseWithCovarianceStamped()\n start_pos.header.frame_id = 'map'\n start_pos.pose.pose = pose \n pub.publish(start_pos)", "def replace_robot(self, x, y, z):\n # min z = 0.1\n arr = [x, y, z]\n vrep.simxSetObjectPosition(self.client_id, self.handles['youBot' + self.postfix], -1, arr, ONE_SHOT_MODE)", "def my_go_to_pose1(robot, x, y, angle_z):\n # Assuming positive y is to the robot's left and positive x is the direction the robot is already facing\n hypotenuse = numpy.sqrt(x*x + y*y)\n angle_offset_of_target_point = numpy.arcsin(y/hypotenuse)*180/math.pi\n my_turn_in_place(robot, angle_offset_of_target_point , 30)\n my_drive_straight(robot, hypotenuse, 50)\n my_turn_in_place(robot, angle_z-angle_offset_of_target_point, 30)\n time.sleep(1)", "def movement(self):", "def prep_robot_location(self):\n x = int(self.robot.odo_x)\n y = int(self.robot.odo_y)\n o = round(self.robot.odo_o, 2)\n location_str = f\"Location (X,Y,O): {str(x)}, {str(y)}, {str(o)}\"\n # Prepare the image and positions it on the screen\n self.location_image = self.font.render(location_str, True, self.text_color, self.bg_color)\n self.location_rect = self.location_image.get_rect()\n self.location_rect.left = self.action_rect.left\n self.location_rect.top = self.action_rect.bottom + self.line_gap", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, 
new_y) # Move ball to newly defined position", "def move_to(self, x_pos, y_pos, z_pos):\n def ik_angles(X_Pos,Y_Pos,Z_Pos,Roll,Pitch,Yaw):\n \"\"\"\n Compute the joint angles needed to place the robot arm in a given pose.\n \"\"\"\n limb_side = 'left'\n ns = \"ExternalTools/\" + limb_side + \"/PositionKinematicsNode/IKService\"\n iksvc = rospy.ServiceProxy(ns, SolvePositionIK)\n ikreq = SolvePositionIKRequest()\n hdr = Header(stamp=rospy.Time.now(), frame_id='base')\n quat = tf.transformations.quaternion_from_euler(float(Roll),float(Pitch),float(Yaw))\n poses = {\n 'left': PoseStamped(\n header=hdr,\n pose=Pose(\n position=Point(\n\t\t x=float(X_Pos),\n y=float(Y_Pos),\n z=float(Z_Pos),\n ),\n orientation=Quaternion(\n\t\t x = quat[0],\n\t\t y = quat[1],\n\t\t z = quat[2],\n\t\t w = quat[3],\n\t\t )\n )\n )\n }\n\n ikreq.pose_stamp.append(poses[limb_side])\n try:\n rospy.wait_for_service(ns, 5.0)\n resp = iksvc(ikreq)\n except (rospy.ServiceException, rospy.ROSException), e:\n rospy.logerr(\"Service call failed: %s\" % (e,))\n return 1\n\n # Check if result valid, and type of seed ultimately used to get solution\n # convert rospy's string representation of uint8[]'s to int's\n resp_seeds = struct.unpack('<%dB' % len(resp.result_type),\n resp.result_type)\n if (resp_seeds[0] != resp.RESULT_INVALID):\n # Format solution into Limb API-compatible dictionary\n limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))\n return limb_joints \n\n else:\n print(\"INVALID POSE - No Valid Joint Solution Found.\")\n\n return 0\n \n roll = 0\n pitch = 3.14\n yaw = 0 #controls roll of gripper\n\n #compute required joint angles\n angles = ik_angles(x_pos,y_pos,z_pos,roll,pitch,yaw)\n\n #move left limb to position\n limb = baxter_interface.Limb('left')\n limb.move_to_joint_positions(angles)\n \n #update current position\n self.x = x_pos\n self.y = y_pos\n self.z = z_pos\n \n return [x_pos, y_pos]", "def set_new_location(self, xPos, yPos):", "def update_position(self, robot_pose):\n x = self.pose[0] + robot_pose[0]\n y = self.pose[1] + robot_pose[1]\n phi = (self.pose[2] + robot_pose[2]) % 360\n self.pose = (x, y, phi)", "def move(self):\n \n self.position = self.explore()", "def set_robot(self, x, y):\n state = ModelState()\n state.model_name = 'turtlebot3_waffle_pi'\n state.reference_frame = 'world'\n # pose\n state.pose.position.x = x\n state.pose.position.y = y\n state.pose.position.z = 0\n quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)\n state.pose.orientation.x = quaternion[0]\n state.pose.orientation.y = quaternion[1]\n state.pose.orientation.z = quaternion[2]\n state.pose.orientation.w = quaternion[3]\n # twist\n state.twist.linear.x = 0\n state.twist.linear.y = 0\n state.twist.linear.z = 0\n state.twist.angular.x = 0\n state.twist.angular.y = 0\n state.twist.angular.z = 0\n\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = self.set_state\n result = set_state(state)\n assert result.success is True\n except rospy.ServiceException:\n print(\"/gazebo/get_model_state service call failed\")", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def move(self):\n possible_steps = 
self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def move(self, p):\r\n self.position.setvalue(p)", "def move_to_position2(self):", "def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)", "def move_to_position1(self):", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def main():\n\n robot2 = rb.DriveSystem()\n robot2.spin_in_place_degrees(50)", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def __init__(self,name,speed,depth_of_view,view_angle,x_coor = \"\",y_coor = \"\"):\n self.name = name\n self.speed = speed # That will the instantenous speed of the robot\n self.depth_of_view = depth_of_view # That will the instantenous depth of view of the robot\n self.view_angle = view_angle # That will the instantenous view angle of the robot\n self.type = \"Robot\" #Specift the object type\n self.x = x_coor # store the position of the robot\n self.y = y_coor # store the position of the robot\n self.kind = name #Store its kind to give the GUI", "def start(self):\n global trackWidth\n trackWidth = self.getTrackWidth()\n print(\"track width = \" + str(trackWidth))\n #motors.moveForward(0,2)\n initTheta = self.getTheta(trackWidth)\n motors.pivot(\"left\", 30, 0.25) #spin left for 1 second\n print(\"Moved\")\n newTheta = self.getTheta(trackWidth)\n #Checks if the robot is pointed even further of course or not, corrects for whichever\n if newTheta < initTheta:\n while self.getTheta(trackWidth) >=rads: #Spins while the robot is pointed more than 0.122 rads from straight\n motors.pivot(\"left\", 30, 0.25) #spin left for 0.25 second\n elif newTheta > initTheta:\n while self.getTheta(trackWidth) >= rads:\n motors.pivot(\"right\", 30, 0.25) #spin right for 0.25 second", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def place(self,x, y, direction):\r\n self.x = x\r\n self.y = y\r\n self.d = direction", "def position(self, position):\n self.move_to(position)", "def move():\n print(\" ------ Execution -----\\n\")\n pyautogui.moveRel(0, 10)\n pyautogui.moveRel(0, -10)\n pyautogui.click()", "def _move(self, pos):\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos", "def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = 
(self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def start(self, robot):\n rospy.loginfo(\"Moving randomly\" + \" - \" + str(robot.robot_id))", "def setRoboPos(self,x,y):\r\n self.RoboPosX=x\r\n self.RoboPosY=y", "def __init__(self):\n self.positionx = 400\n self.positiony = 600\n # direction goes from [0,360)\n self.direction = (45)", "def move(self):\n # moving each of the obstacles\n time = rospy.get_rostime()\n self.vel_msg1.linear.x = math.sin(time.to_sec())\n self.pub1.publish(self.vel_msg1)\n self.vel_msg2.linear.x = math.cos(1.2 * time.to_sec())\n self.pub2.publish(self.vel_msg2)\n self.vel_msg3.linear.x = math.cos(2 * time.to_sec())\n self.pub3.publish(self.vel_msg3)\n self.vel_msg4.linear.x = math.sin(0.85 * time.to_sec())\n self.pub4.publish(self.vel_msg4)", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def runRobot():", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def automove_to(self, x: int, y: int) -> None:\n self.cpu_controlled = True\n self.end_cinematic_x_pos = x\n self.end_cinematic_y_pos = y", "async def update(self, robot):\r\n if self.first:\r\n robot.was_turning = False\r\n robot.was_driving = False\r\n\r\n rotation_rad = math.radians(robot.rotation)\r\n rotation_cos = math.cos(rotation_rad)\r\n rotation_sin = math.sin(rotation_rad)\r\n if robot.was_driving:\r\n speed_delta = robot.delta_time * robot.ROBOT_SPEED\r\n\r\n robot.add_odom_position(robot, (rotation_cos * speed_delta, rotation_sin * speed_delta))\r\n robot.grid.setStart(robot.grid_position)\r\n else:\r\n robot.drive_timer = robot.DRIVE_COOLDOWN\r\n if robot.was_turning:\r\n robot.add_odom_rotation(robot, robot.TURN_YAW * robot.delta_time)\r\n\r\n changed = False\r\n if robot.ball is not None:\r\n if robot.prev_ball is not None:\r\n robot.ball_grid = robot.grid.worldToGridCoords(robot.ball)\r\n robot.ball_prev_grid = robot.grid.worldToGridCoords(robot.prev_ball)\r\n changed = robot.ball_grid != robot.ball_prev_grid\r\n else:\r\n changed = True\r\n \r\n if not changed and robot.prev_grid_position != robot.grid_position:\r\n changed = True\r\n\r\n if self.first:\r\n changed = True\r\n self.first = False\r\n\r\n rounded_grid = (round(robot.grid_position[0]), round(robot.grid_position[1]))\r\n if changed:\r\n robot.grid.clearObstacles()\r\n if robot.ball is not None:\r\n grid_points = getGridPoints(robot.ball_grid[0], robot.ball_grid[1], robot)\r\n for point in grid_points:\r\n if robot.grid.coordInBounds(point):\r\n robot.grid.addObstacle(point)\r\n\r\n # Wall obstacles.\r\n for i in range(0, robot.grid.width):\r\n robot.grid.addObstacle((i, 0))\r\n robot.grid.addObstacle((i, robot.grid.height - 1))\r\n for i in range(1, robot.grid.height - 1):\r\n robot.grid.addObstacle((0, i))\r\n robot.grid.addObstacle((robot.grid.width - 1, i))\r\n\r\n goal_to_ball = np.subtract(robot.ball, robot.goal_position)\r\n goal_distance = np.linalg.norm(goal_to_ball)\r\n if goal_distance == 0:\r\n return\r\n goal_direction = np.divide(goal_to_ball, goal_distance)\r\n goal_direction = np.multiply(goal_direction, (robot.RADIUS + robot.BALL_RADIUS) * 1.2)\r\n robot.target_position = np.add(robot.ball, goal_direction)\r\n robot.target_position = 
robot.grid.worldToGridCoords(robot.target_position)\r\n\r\n if robot.target_position is not None:\r\n robot.grid.clearGoals()\r\n robot.grid.setStart(rounded_grid)\r\n rounded_target = (round(robot.target_position[0]), round(robot.target_position[1]))\r\n robot.grid.addGoal(rounded_target)\r\n astar(robot.grid, heuristic)\r\n\r\n path = robot.grid.getPath()\r\n robot.was_turning = False\r\n if path is not None and len(path) > 1:\r\n robot.next_cell = path[0]\r\n if path[0] == rounded_grid:\r\n robot.next_cell = path[1]\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, rounded_grid, robot.next_cell)\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n robot.stop_all_motors()\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n robot.was_driving = False\r\n else:\r\n await robot.drive_wheels(robot.ROBOT_SPEED, robot.ROBOT_SPEED, robot.ROBOT_ACCELERATION, robot.ROBOT_ACCELERATION)\r\n robot.was_driving = True\r\n else:\r\n robot.was_driving = False\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.target_position)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n\r\n robot.stop_all_motors()\r\n distance = grid_distance(robot.grid_position[0], robot.grid_position[1], robot.target_position[0], robot.target_position[1]) * robot.grid.scale\r\n await robot.drive_straight(distance_mm(distance), speed_mmps(robot.HIT_SPEED), should_play_anim = False).wait_for_completed()\r\n robot.add_odom_forward(robot, distance)\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.ball_grid)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n return goto_ball.HitBall()", "def move_robot(command):\n global robot_type\n hexapod_motions = {'up' : ['9', 'wd', 'w'],\n 'left_up' : ['9', 'w', 'sd'],\n 'left_down' : ['7', 'wa', 'w', 'd'],\n 'right_up' : [''],\n 'right_down' : [''],\n 'down' : ['10', 'sa', '2', 'a', 'w']}\n\n vikingbot1_motions = {'up' : ['1.35', 'a', '1.5', 'w'],\n 'left_up' : ['0.68', 'a', '1.3', 'w', '0.74', 'a', '0.4', 's'],\n 'left_down' : ['0.8', 'd', '1.1', 'w', '0.8', 'd', '0.6', 's'],\n 'right_up' : ['0.7', 'd', '1.1', 'w', '0.7', 'd', '0.2', 's'],\n 'right_down' : ['0.7', 'a', '1.1', 'w', '0.7', 'a', '0.5', 's'], \n 'down' : ['1.35', 'a', '1.35', 'w']}\n vikingbot0_motions = {'up' : ['1.05', 'a', '2.2', 'w'],\n 'left_up' : ['0.6', 'a', '1.5', 'w', '0.6', 'a', '0.9', 's'],\n 'left_down' : ['0.6', 'd', '1.5', 'w', '0.6', 'd', '0.9', 's'],\n 'right_up' : ['0.6', 'd', '1.5', 'w', '0.6', 'd', '0.9', 's'],\n 'right_down' : ['0.6', 'a', '1.5', 'w', '0.6', 'a', '0.9', 's'],\n 'down' : ['1.05', 'a', '2.2', 'w']}\n\n print (\"Robot type is \" + robot_type)\n if robot_type == 'hexapod':\n correct_for_drift()\n send_motion_command(command, hexapod_motions)\n elif robot_type == 'vikingbot0':\n send_motion_command(command, vikingbot0_motions) \n elif robot_type == 'vikingbot1':\n send_motion_command(command, vikingbot1_motions)", "def move(self):\n if random.random() < 0.5:\n 
self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def setPosition(position):", "def robot_start():\n \n global history\n\n global position_x, position_y, current_direction_index\n \n robot_name = get_robot_name()\n output(robot_name, \"Hello kiddo!\")\n\n position_x = 0\n position_y = 0\n current_direction_index = 0\n\n command = get_command(robot_name)\n keep_history(command) \n while handle_command(robot_name, command):\n command = get_command(robot_name)\n keep_history(command)\n output(robot_name, \"Shutting down..\")\n\n history = []", "def AeroMove(self, pos):\r\n\r\n pass", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def getRobotPosition(self):\n return self.position", "def getRobotPosition(self):\n return self.position", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def place(self,y,x):\n self.y = y\n self.x = x", "def enter_parking_lot(self):\n\n self.start_driving()\n time.sleep(2)\n\n # drive back into gap with strong angle\n self.angle = 25\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n self.distance = 35\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n\n # drive back until close to wall\n self.angle = 0\n self.velocity = -8\n self.distance = 150\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear > 60:\n time.sleep(0.2)\n \n # get into straight position\n self.angle = -25\n self.velocity = -8\n self.distance = 40\n self.drive_thread.driven_distance = 0\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n \n # drive backwards up to end of gap\n self.angle = 0\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear >= 10:\n print(self.sensor_manager.rear)\n time.sleep(0.5)\n \n self.stop_driving()", "def cozmo_turn_in_place(robot, angle, speed):\n\trobot.turn_in_place(degrees(angle), speed=degrees(speed)).wait_for_completed()", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += 
self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def move_to(self, x, y):\r\n self.__current_room = x, y", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def _drive_player_position(self) -> None:\n player = self._player\n if player:\n assert self.node\n assert player.node\n self.node.connectattr('torso_position', player.node, 'position')", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def move(self):\n # using a formula of axis coordinates and speed modulus delta of the\n # screen axis plus the minimal screen size\n self.x_coord = \\\n (self.x_speed + self.x_coord - Screen.SCREEN_MIN_X) % delta_x + \\\n Screen.SCREEN_MIN_X\n self.y_coord = \\\n (self.y_speed + self.y_coord - Screen.SCREEN_MIN_Y) % delta_y + \\\n Screen.SCREEN_MIN_Y", "def my_go_to_pose2(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the robot.drive_wheels() function to jointly move and rotate the \n\t# robot to reduce distance between current and desired pose (Approach 2).\n\t# ####\n\t\n\tabsoluteTargetPosition = (robot.pose.position.x + x, robot.pose.position.y + y, robot.pose.rotation.angle_z.degrees + angle_z)\n\t\n\twhile(math.sqrt(x*x + y*y) > 50.0):\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\t\tleftMotor = 10 * (2 * x - angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\trightMotor = 10 * (2 * x + angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\t# print(\"(leftMotor, rightMotor) = (%i,%i)\" % (leftMotor, rightMotor))\n\t\tangle_delta = get_front_wheel_radius() * (rightMotor - leftMotor) / get_distance_between_wheels()\n\t\tx_delta = get_front_wheel_radius() * math.cos(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\ty_delta = get_front_wheel_radius() * math.sin(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"angle_delta %i\" % angle_delta)\n\t\t# x = x - get_front_wheel_radius() * math.cos(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# y = y - get_front_wheel_radius() * math.sin(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\t# angle_z = angle_z + angle_delta * (360.0/(2 * math.pi))\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.drive_wheels(leftMotor, rightMotor, duration = 1)\n\t\trobot.stop_all_motors()\n\t\t# time.sleep(1)\n\t\tx = absoluteTargetPosition[0] - robot.pose.position.x\n\t\ty = absoluteTargetPosition[1] - robot.pose.position.y\n\t\tangle_z = absoluteTargetPosition[2] - robot.pose.rotation.angle_z.degrees\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.stop_all_motors()\n\t\t# robot.drive_wheels(0,0)", "def positioning(self):\n pass", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def move(self, window):\r\n self.save_pos = (self.center_x, self.center_y) # sauvegarde la position avant de bouger\r\n self.center_x = math.cos(self.angle) * self.velocity + self.center_x\r\n self.center_y = math.sin(self.angle) * self.velocity + self.center_y\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius) # update le rectangle\r", "def __init__(self, robot, x, y, rz, radius=0.15, frame_id=\"map\"):\n super(NavigateToPose, self).__init__(robot, lambda: pose_constraints(x, y, rz, radius, frame_id))", "def cozmo_turn_in_place(robot, angle, speed):\n robot.turn_in_place(degrees(angle), speed=degrees(speed)).wait_for_completed()", "def my_go_to_pose2(robot, x, y, angle_z):\n # ####\n # TODO: Implement a function that makes the robot move to a desired pose\n # using the robot.drive_wheels() function to jointly move and rotate the \n # robot to reduce distance between current and desired pose (Approach 2).\n # ####\n pass", "def my_go_to_pose1(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the my_drive_straight and my_turn_in_place functions. 
This should\n\t# include a sequence of turning in place, moving straight, and then turning\n\t# again at the target to get to the desired rotation (Approach 1).\n\t# ####\n\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\tfirstRotation = firstRotationInRadians * 360.0/ (2.0 * math.pi)\n\tmy_turn_in_place(robot, firstRotation, 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_drive_straight(robot, math.sqrt(x*x + y*y), (-1 if x < 0 else 1) * 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_turn_in_place(robot, angle_z - firstRotation , 30)\n\ttime.sleep(1)", "def _moveTo(self, pt):\n self._handleAnchor()\n t = \"M%s\" % (pointToString(pt))\n self._commands.append(t)\n self._lastCommand = \"M\"\n self._lastX, self._lastY = pt", "def tick_move(self):\n if self.velocity[0] > 0 and self.pos[0] > SCREEN_WIDTH:\n # Moving right, reposition to off the left of the screen\n new_pos = (-self.width, self.pos[1])\n elif self.velocity[0] < 0 and self.pos[0] < -self.width:\n # Moving left, reposition to off the right of the screen\n new_pos = (SCREEN_WIDTH + self.width, self.pos[1])\n else:\n # Car not offscreen, move as normal\n new_pos = (\n self.pos[0] + (self.velocity[0] * self.controller.engine.last_tick),\n self.pos[1]\n )\n\n self.pos = new_pos", "def move_turtle(self):\n self.forward(self.move_speed)", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def __init__(self, pos=(SCREEN_X//2, SCREEN_Y//2)):\n self.heading = \"right\"\n self.speed = 4\n self.length = 32\n self.size = 16\n self.color = COLOR\n self.pos = pos\n (self.x_coord, self.y_coord) = ([], [])\n self.displacement = 0\n for _ in range(self.length):\n self.x_coord.append(self.pos[0] - self.displacement)\n self.y_coord.append(self.pos[1])\n self.displacement += 4", "def move(self):\n pass", "def advance(self):\n #x and y coordinates move and advance by adding the randomly generated velocity \n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n return", "def spawn_orb(self):\n x_pos = random.randint(0, self.config.arena_size[0] - 1)\n y_pos = random.randint(0, self.config.arena_size[1] - 1)\n self.arena[x_pos][y_pos] = Tile.ORB", "def move(self):\n \n self.rect.move_ip(0,self.speed) # Funcion para mover el enemigo especificando la velocidad xy\n \n if (self.rect.top > SCREEN_HEIGHT): # Condicion cuando llega a la parte inferior y no colisiono con el jugador\n del self.surf #Libera memoria\n del self.rect\n self.randomNumber = random.choice([70,64,32]) # Su tamaño se asigna nuevamente\n self.size = (self.randomNumber,self.randomNumber) #Se genera su tamaño como un cuadrado de lado aleatorio\n self.surf = pygame.Surface(self.size) #Se genera la superficie que aparecera la pantalla\n self.surf.fill(RED)\n self.rect = self.surf.get_rect(center = (random.randint(40,SCREEN_WIDTH-40),0))# me da info de las coordenadas de surf\n if(self.randomNumber == 32):\n self.surf = self.imagen\n elif(self.randomNumber ==64):\n self.surf = self.imagen2\n elif self.randomNumber ==70 :\n self.surf = self.imagen3", "def move(x,y):\r\n pass", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n 
self.move_to(newpos)", "def Position(V, W, X, Y, Beta, LC, RC, r):\n d = 23.5 # distance between wheels (cm)\n rad = 3.6 # radius of wheels (cm)\n\n # iCreate Robot library uses left and right wheel VELOCITY values\n LeftWheelVel = ((V - ((W * d) / 2)) / rad) * 3.6 / (2 * math.pi) * 100\n RightWheelVel = (V * 2 + W * d / (2 * rad)) * 3.6 / (2 * math.pi) * 100\n\n # r is robot object from create library\n r.driveDirect(LeftWheelVel, RightWheelVel) # assign calculated velocities\n \n LCnew = r.getSensor(\"LEFT_ENCODER\") # recieve updated sensor \n RCnew = r.getSensor(\"RIGHT_ENCODER\")\n DLC = LCnew - LC # update sensor values from previously record\n DRC = RCnew - RC\n # L is Left?\n DLWheel = DLC * (72.0 * math.pi / 508.8) / 10\n DRWheel = DRC * (72.0 * math.pi / 508.8) / 10\n BetaNew = ((DRWheel - DLWheel) / d) + Beta\n XNew = ((1/2 * (DRWheel + DLWheel)) * math.cos(BetaNew)) + X\n YNew = ((1/2 * (DRWheel + DLWheel)) * math.sin(BetaNew)) + Y\n return(XNew, YNew, BetaNew, LCnew, RCnew)", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def set_robot(self, robot):\n self.robot = robot", "def main():\r\n robot = ROBOT()\r\n print(\"COMMANDS AVAILABLE:\", commands_avail)\r\n print(\"'PLACE' must be used in the form: PLACE x,y,direction\")\r\n print(\"VALID DIRECTIONS:\", direction_tuple)\r\n print(\"MOVE: moves the robot one unit in the direction its facing, unless it would fall of the tabletop\")\r\n print(\"LEFT and RIGHT: rotates the robot 90 degrees anti-clockwise or clockwise respectively\")\r\n print(\"REPORT: prints the position on the tabletop and which direction the robot is facing\")\r\n print(\"NOTE: position 0,0 is south-west corner and 4,4 is north-east corner. Size can be changed in source code\")\r\n print(\"NOTE: if REPORT in input, all previous commands processed and reported. Option to enter more input given.\")\r\n while True:\r\n commands = robot.input_discard()\r\n try:\r\n for i in commands:\r\n if commands_avail[0] in i:\r\n xyd = i.split(' ')\r\n xy = xyd[1].split(',')\r\n x = int(xy[0])\r\n y = int(xy[1])\r\n d = xy[2]\r\n robot.place(x, y, d)\r\n elif i == commands_avail[1]:\r\n robot.move()\r\n elif i == commands_avail[2]:\r\n robot.left()\r\n elif i == commands_avail[3]:\r\n robot.right()\r\n elif i == commands_avail[4]:\r\n robot.report()\r\n except TypeError:\r\n print(\"NO VALID PLACE OR EXISTING ROBOT POSITION\")\r\n print(\"Would you like to enter more input: 'Y' for yes or 'N' to quit\")\r\n cont = input()\r\n while cont != 'N' and cont != 'Y':\r\n print(\"INVALID COMMAND!\\nWould you like to enter more input: 'Y' for yes or 'N' to quit\")\r\n cont = input()\r\n if cont == 'N':\r\n break\r\n print(\"Exiting Program\")" ]
[ "0.7640588", "0.7587472", "0.72154766", "0.71969634", "0.71969634", "0.719446", "0.69888204", "0.6888003", "0.6881876", "0.6855027", "0.6762757", "0.67011625", "0.66731703", "0.6627563", "0.66041356", "0.6589483", "0.6580823", "0.65348256", "0.65167373", "0.65115154", "0.6504705", "0.64993846", "0.64767486", "0.6447751", "0.6438819", "0.64323634", "0.6426791", "0.64153147", "0.6408552", "0.6397143", "0.6373032", "0.6352126", "0.6344638", "0.63353825", "0.6328043", "0.63192683", "0.63177395", "0.6317042", "0.6315598", "0.63117796", "0.63088596", "0.6308147", "0.6302293", "0.629546", "0.6292169", "0.62835497", "0.6281779", "0.6279483", "0.62513906", "0.62437063", "0.62330806", "0.6226005", "0.6225823", "0.6219705", "0.6205888", "0.6194696", "0.61873347", "0.6184482", "0.61731744", "0.61731744", "0.616957", "0.616957", "0.616411", "0.6161569", "0.6161569", "0.6157771", "0.615693", "0.6152692", "0.6146118", "0.6144687", "0.613529", "0.6126928", "0.6122284", "0.61085975", "0.6097988", "0.6092024", "0.6091192", "0.6089778", "0.60857075", "0.60847825", "0.6083036", "0.6077261", "0.6076712", "0.6064774", "0.6049772", "0.604768", "0.6031642", "0.6024821", "0.60240823", "0.60229796", "0.6022339", "0.60214", "0.60205555", "0.6019933", "0.6015802", "0.6013689", "0.60102993", "0.60073185", "0.5996488", "0.5984638", "0.5981391" ]
0.0
-1
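The document in the record above follows a MuJoCo/MJCF-style pattern: the robot's base is a <body name="base"> element under the model's worldbody, and placement is done by rewriting that body's pos attribute with the target position minus a bottom offset. A minimal self-contained sketch of the same pattern is shown below; the worldbody fragment, the bottom_offset value, and the array_to_string helper are all assumptions standing in for the enclosing class, which is not part of this record.

import xml.etree.ElementTree as ET
import numpy as np

def array_to_string(arr):
    # Assumed helper: MJCF attributes are space-separated number strings.
    return " ".join(str(x) for x in arr)

# Assumed minimal worldbody standing in for self.worldbody.
worldbody = ET.fromstring('<worldbody><body name="base" pos="0 0 0"/></worldbody>')
bottom_offset = np.array([0.0, 0.0, -0.01])  # assumed offset from base frame to robot bottom
pos = np.array([0.5, 0.0, 0.8])              # desired placement position

node = worldbody.find("./body[@name='base']")
node.set("pos", array_to_string(pos - bottom_offset))
print(ET.tostring(worldbody, encoding="unicode"))  # the base body's pos attribute now holds the adjusted position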
Places the robot on position.
def set_base_xquat(self, quat):
    node = self.worldbody.find("./body[@name='base']")
    node.set("quat", array_to_string(quat))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move():\n Robot.move()", "def locateRobot(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for r, row in enumerate(self.map):\n #print(row)\n for c, cell in enumerate(row):\n if (cell == \"X\"):\n logging.info(\"r={} / c={}\".format(r, c))\n self.robot.posX = c\n self.robot.posY = r", "def setRobotPosition(self, position):\n posx = position.getX()\n posy = position.getY()\n self.position = Position(posx, posy)\n #raise NotImplementedError", "def setRobotPosition(self, position):\n self.position = position", "def setRobotPosition(self, position):\n self.position = position", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def place(placement: str):\n Robot.place(placement)", "def setRobotPosition(self, position):\n self.position = position\n #raise NotImplementedError", "def move(self):\n \n self.position = self.wander()", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def move_robot(self, pose):\n # type: (Pose) -> None\n start_pos = ModelState()\n start_pos.model_name = 'turtlebot3'\n start_pos.pose = pose\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n resp = set_state(start_pos)\n\n except rospy.ServiceException:\n print(\"Move Robot to position failed\")\n\n pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size = 10)\n rospy.sleep(3)\n start_pos = PoseWithCovarianceStamped()\n start_pos.header.frame_id = 'map'\n start_pos.pose.pose = pose \n pub.publish(start_pos)", "def replace_robot(self, x, y, z):\n # min z = 0.1\n arr = [x, y, z]\n vrep.simxSetObjectPosition(self.client_id, self.handles['youBot' + self.postfix], -1, arr, ONE_SHOT_MODE)", "def my_go_to_pose1(robot, x, y, angle_z):\n # Assuming positive y is to the robot's left and positive x is the direction the robot is already facing\n hypotenuse = numpy.sqrt(x*x + y*y)\n angle_offset_of_target_point = numpy.arcsin(y/hypotenuse)*180/math.pi\n my_turn_in_place(robot, angle_offset_of_target_point , 30)\n my_drive_straight(robot, hypotenuse, 50)\n my_turn_in_place(robot, angle_z-angle_offset_of_target_point, 30)\n time.sleep(1)", "def movement(self):", "def prep_robot_location(self):\n x = int(self.robot.odo_x)\n y = int(self.robot.odo_y)\n o = round(self.robot.odo_o, 2)\n location_str = f\"Location (X,Y,O): {str(x)}, {str(y)}, {str(o)}\"\n # Prepare the image and positions it on the screen\n self.location_image = self.font.render(location_str, True, self.text_color, self.bg_color)\n self.location_rect = self.location_image.get_rect()\n self.location_rect.left = self.action_rect.left\n self.location_rect.top = self.action_rect.bottom + self.line_gap", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, 
new_y) # Move ball to newly defined position", "def move_to(self, x_pos, y_pos, z_pos):\n def ik_angles(X_Pos,Y_Pos,Z_Pos,Roll,Pitch,Yaw):\n \"\"\"\n Compute the joint angles needed to place the robot arm in a given pose.\n \"\"\"\n limb_side = 'left'\n ns = \"ExternalTools/\" + limb_side + \"/PositionKinematicsNode/IKService\"\n iksvc = rospy.ServiceProxy(ns, SolvePositionIK)\n ikreq = SolvePositionIKRequest()\n hdr = Header(stamp=rospy.Time.now(), frame_id='base')\n quat = tf.transformations.quaternion_from_euler(float(Roll),float(Pitch),float(Yaw))\n poses = {\n 'left': PoseStamped(\n header=hdr,\n pose=Pose(\n position=Point(\n\t\t x=float(X_Pos),\n y=float(Y_Pos),\n z=float(Z_Pos),\n ),\n orientation=Quaternion(\n\t\t x = quat[0],\n\t\t y = quat[1],\n\t\t z = quat[2],\n\t\t w = quat[3],\n\t\t )\n )\n )\n }\n\n ikreq.pose_stamp.append(poses[limb_side])\n try:\n rospy.wait_for_service(ns, 5.0)\n resp = iksvc(ikreq)\n except (rospy.ServiceException, rospy.ROSException), e:\n rospy.logerr(\"Service call failed: %s\" % (e,))\n return 1\n\n # Check if result valid, and type of seed ultimately used to get solution\n # convert rospy's string representation of uint8[]'s to int's\n resp_seeds = struct.unpack('<%dB' % len(resp.result_type),\n resp.result_type)\n if (resp_seeds[0] != resp.RESULT_INVALID):\n # Format solution into Limb API-compatible dictionary\n limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))\n return limb_joints \n\n else:\n print(\"INVALID POSE - No Valid Joint Solution Found.\")\n\n return 0\n \n roll = 0\n pitch = 3.14\n yaw = 0 #controls roll of gripper\n\n #compute required joint angles\n angles = ik_angles(x_pos,y_pos,z_pos,roll,pitch,yaw)\n\n #move left limb to position\n limb = baxter_interface.Limb('left')\n limb.move_to_joint_positions(angles)\n \n #update current position\n self.x = x_pos\n self.y = y_pos\n self.z = z_pos\n \n return [x_pos, y_pos]", "def set_new_location(self, xPos, yPos):", "def update_position(self, robot_pose):\n x = self.pose[0] + robot_pose[0]\n y = self.pose[1] + robot_pose[1]\n phi = (self.pose[2] + robot_pose[2]) % 360\n self.pose = (x, y, phi)", "def move(self):\n \n self.position = self.explore()", "def set_robot(self, x, y):\n state = ModelState()\n state.model_name = 'turtlebot3_waffle_pi'\n state.reference_frame = 'world'\n # pose\n state.pose.position.x = x\n state.pose.position.y = y\n state.pose.position.z = 0\n quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)\n state.pose.orientation.x = quaternion[0]\n state.pose.orientation.y = quaternion[1]\n state.pose.orientation.z = quaternion[2]\n state.pose.orientation.w = quaternion[3]\n # twist\n state.twist.linear.x = 0\n state.twist.linear.y = 0\n state.twist.linear.z = 0\n state.twist.angular.x = 0\n state.twist.angular.y = 0\n state.twist.angular.z = 0\n\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = self.set_state\n result = set_state(state)\n assert result.success is True\n except rospy.ServiceException:\n print(\"/gazebo/get_model_state service call failed\")", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def move(self):\n possible_steps = 
self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def move(self, p):\r\n self.position.setvalue(p)", "def move_to_position2(self):", "def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)", "def move_to_position1(self):", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def main():\n\n robot2 = rb.DriveSystem()\n robot2.spin_in_place_degrees(50)", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def __init__(self,name,speed,depth_of_view,view_angle,x_coor = \"\",y_coor = \"\"):\n self.name = name\n self.speed = speed # That will the instantenous speed of the robot\n self.depth_of_view = depth_of_view # That will the instantenous depth of view of the robot\n self.view_angle = view_angle # That will the instantenous view angle of the robot\n self.type = \"Robot\" #Specift the object type\n self.x = x_coor # store the position of the robot\n self.y = y_coor # store the position of the robot\n self.kind = name #Store its kind to give the GUI", "def start(self):\n global trackWidth\n trackWidth = self.getTrackWidth()\n print(\"track width = \" + str(trackWidth))\n #motors.moveForward(0,2)\n initTheta = self.getTheta(trackWidth)\n motors.pivot(\"left\", 30, 0.25) #spin left for 1 second\n print(\"Moved\")\n newTheta = self.getTheta(trackWidth)\n #Checks if the robot is pointed even further of course or not, corrects for whichever\n if newTheta < initTheta:\n while self.getTheta(trackWidth) >=rads: #Spins while the robot is pointed more than 0.122 rads from straight\n motors.pivot(\"left\", 30, 0.25) #spin left for 0.25 second\n elif newTheta > initTheta:\n while self.getTheta(trackWidth) >= rads:\n motors.pivot(\"right\", 30, 0.25) #spin right for 0.25 second", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def place(self,x, y, direction):\r\n self.x = x\r\n self.y = y\r\n self.d = direction", "def position(self, position):\n self.move_to(position)", "def move():\n print(\" ------ Execution -----\\n\")\n pyautogui.moveRel(0, 10)\n pyautogui.moveRel(0, -10)\n pyautogui.click()", "def _move(self, pos):\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos", "def start(self, robot):\n rospy.loginfo(\"Moving randomly\" + \" - \" + str(robot.robot_id))", 
"def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = (self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def setRoboPos(self,x,y):\r\n self.RoboPosX=x\r\n self.RoboPosY=y", "def __init__(self):\n self.positionx = 400\n self.positiony = 600\n # direction goes from [0,360)\n self.direction = (45)", "def move(self):\n # moving each of the obstacles\n time = rospy.get_rostime()\n self.vel_msg1.linear.x = math.sin(time.to_sec())\n self.pub1.publish(self.vel_msg1)\n self.vel_msg2.linear.x = math.cos(1.2 * time.to_sec())\n self.pub2.publish(self.vel_msg2)\n self.vel_msg3.linear.x = math.cos(2 * time.to_sec())\n self.pub3.publish(self.vel_msg3)\n self.vel_msg4.linear.x = math.sin(0.85 * time.to_sec())\n self.pub4.publish(self.vel_msg4)", "def runRobot():", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def automove_to(self, x: int, y: int) -> None:\n self.cpu_controlled = True\n self.end_cinematic_x_pos = x\n self.end_cinematic_y_pos = y", "async def update(self, robot):\r\n if self.first:\r\n robot.was_turning = False\r\n robot.was_driving = False\r\n\r\n rotation_rad = math.radians(robot.rotation)\r\n rotation_cos = math.cos(rotation_rad)\r\n rotation_sin = math.sin(rotation_rad)\r\n if robot.was_driving:\r\n speed_delta = robot.delta_time * robot.ROBOT_SPEED\r\n\r\n robot.add_odom_position(robot, (rotation_cos * speed_delta, rotation_sin * speed_delta))\r\n robot.grid.setStart(robot.grid_position)\r\n else:\r\n robot.drive_timer = robot.DRIVE_COOLDOWN\r\n if robot.was_turning:\r\n robot.add_odom_rotation(robot, robot.TURN_YAW * robot.delta_time)\r\n\r\n changed = False\r\n if robot.ball is not None:\r\n if robot.prev_ball is not None:\r\n robot.ball_grid = robot.grid.worldToGridCoords(robot.ball)\r\n robot.ball_prev_grid = robot.grid.worldToGridCoords(robot.prev_ball)\r\n changed = robot.ball_grid != robot.ball_prev_grid\r\n else:\r\n changed = True\r\n \r\n if not changed and robot.prev_grid_position != robot.grid_position:\r\n changed = True\r\n\r\n if self.first:\r\n changed = True\r\n self.first = False\r\n\r\n rounded_grid = (round(robot.grid_position[0]), round(robot.grid_position[1]))\r\n if changed:\r\n robot.grid.clearObstacles()\r\n if robot.ball is not None:\r\n grid_points = getGridPoints(robot.ball_grid[0], robot.ball_grid[1], robot)\r\n for point in grid_points:\r\n if robot.grid.coordInBounds(point):\r\n robot.grid.addObstacle(point)\r\n\r\n # Wall obstacles.\r\n for i in range(0, robot.grid.width):\r\n robot.grid.addObstacle((i, 0))\r\n robot.grid.addObstacle((i, robot.grid.height - 1))\r\n for i in range(1, robot.grid.height - 1):\r\n robot.grid.addObstacle((0, i))\r\n robot.grid.addObstacle((robot.grid.width - 1, i))\r\n\r\n goal_to_ball = np.subtract(robot.ball, robot.goal_position)\r\n goal_distance = np.linalg.norm(goal_to_ball)\r\n if goal_distance == 0:\r\n return\r\n goal_direction = np.divide(goal_to_ball, goal_distance)\r\n goal_direction = np.multiply(goal_direction, (robot.RADIUS + robot.BALL_RADIUS) * 1.2)\r\n robot.target_position = np.add(robot.ball, goal_direction)\r\n robot.target_position = 
robot.grid.worldToGridCoords(robot.target_position)\r\n\r\n if robot.target_position is not None:\r\n robot.grid.clearGoals()\r\n robot.grid.setStart(rounded_grid)\r\n rounded_target = (round(robot.target_position[0]), round(robot.target_position[1]))\r\n robot.grid.addGoal(rounded_target)\r\n astar(robot.grid, heuristic)\r\n\r\n path = robot.grid.getPath()\r\n robot.was_turning = False\r\n if path is not None and len(path) > 1:\r\n robot.next_cell = path[0]\r\n if path[0] == rounded_grid:\r\n robot.next_cell = path[1]\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, rounded_grid, robot.next_cell)\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n robot.stop_all_motors()\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n robot.was_driving = False\r\n else:\r\n await robot.drive_wheels(robot.ROBOT_SPEED, robot.ROBOT_SPEED, robot.ROBOT_ACCELERATION, robot.ROBOT_ACCELERATION)\r\n robot.was_driving = True\r\n else:\r\n robot.was_driving = False\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.target_position)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n\r\n robot.stop_all_motors()\r\n distance = grid_distance(robot.grid_position[0], robot.grid_position[1], robot.target_position[0], robot.target_position[1]) * robot.grid.scale\r\n await robot.drive_straight(distance_mm(distance), speed_mmps(robot.HIT_SPEED), should_play_anim = False).wait_for_completed()\r\n robot.add_odom_forward(robot, distance)\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.ball_grid)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n return goto_ball.HitBall()", "def move_robot(command):\n global robot_type\n hexapod_motions = {'up' : ['9', 'wd', 'w'],\n 'left_up' : ['9', 'w', 'sd'],\n 'left_down' : ['7', 'wa', 'w', 'd'],\n 'right_up' : [''],\n 'right_down' : [''],\n 'down' : ['10', 'sa', '2', 'a', 'w']}\n\n vikingbot1_motions = {'up' : ['1.35', 'a', '1.5', 'w'],\n 'left_up' : ['0.68', 'a', '1.3', 'w', '0.74', 'a', '0.4', 's'],\n 'left_down' : ['0.8', 'd', '1.1', 'w', '0.8', 'd', '0.6', 's'],\n 'right_up' : ['0.7', 'd', '1.1', 'w', '0.7', 'd', '0.2', 's'],\n 'right_down' : ['0.7', 'a', '1.1', 'w', '0.7', 'a', '0.5', 's'], \n 'down' : ['1.35', 'a', '1.35', 'w']}\n vikingbot0_motions = {'up' : ['1.05', 'a', '2.2', 'w'],\n 'left_up' : ['0.6', 'a', '1.5', 'w', '0.6', 'a', '0.9', 's'],\n 'left_down' : ['0.6', 'd', '1.5', 'w', '0.6', 'd', '0.9', 's'],\n 'right_up' : ['0.6', 'd', '1.5', 'w', '0.6', 'd', '0.9', 's'],\n 'right_down' : ['0.6', 'a', '1.5', 'w', '0.6', 'a', '0.9', 's'],\n 'down' : ['1.05', 'a', '2.2', 'w']}\n\n print (\"Robot type is \" + robot_type)\n if robot_type == 'hexapod':\n correct_for_drift()\n send_motion_command(command, hexapod_motions)\n elif robot_type == 'vikingbot0':\n send_motion_command(command, vikingbot0_motions) \n elif robot_type == 'vikingbot1':\n send_motion_command(command, vikingbot1_motions)", "def move(self):\n if random.random() < 0.5:\n 
self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def setPosition(position):", "def robot_start():\n \n global history\n\n global position_x, position_y, current_direction_index\n \n robot_name = get_robot_name()\n output(robot_name, \"Hello kiddo!\")\n\n position_x = 0\n position_y = 0\n current_direction_index = 0\n\n command = get_command(robot_name)\n keep_history(command) \n while handle_command(robot_name, command):\n command = get_command(robot_name)\n keep_history(command)\n output(robot_name, \"Shutting down..\")\n\n history = []", "def AeroMove(self, pos):\r\n\r\n pass", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def getRobotPosition(self):\n return self.position", "def getRobotPosition(self):\n return self.position", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def enter_parking_lot(self):\n\n self.start_driving()\n time.sleep(2)\n\n # drive back into gap with strong angle\n self.angle = 25\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n self.distance = 35\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n\n # drive back until close to wall\n self.angle = 0\n self.velocity = -8\n self.distance = 150\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear > 60:\n time.sleep(0.2)\n \n # get into straight position\n self.angle = -25\n self.velocity = -8\n self.distance = 40\n self.drive_thread.driven_distance = 0\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n \n # drive backwards up to end of gap\n self.angle = 0\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear >= 10:\n print(self.sensor_manager.rear)\n time.sleep(0.5)\n \n self.stop_driving()", "def place(self,y,x):\n self.y = y\n self.x = x", "def cozmo_turn_in_place(robot, angle, speed):\n\trobot.turn_in_place(degrees(angle), speed=degrees(speed)).wait_for_completed()", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += 
self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def move_to(self, x, y):\r\n self.__current_room = x, y", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def _drive_player_position(self) -> None:\n player = self._player\n if player:\n assert self.node\n assert player.node\n self.node.connectattr('torso_position', player.node, 'position')", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def move(self):\n # using a formula of axis coordinates and speed modulus delta of the\n # screen axis plus the minimal screen size\n self.x_coord = \\\n (self.x_speed + self.x_coord - Screen.SCREEN_MIN_X) % delta_x + \\\n Screen.SCREEN_MIN_X\n self.y_coord = \\\n (self.y_speed + self.y_coord - Screen.SCREEN_MIN_Y) % delta_y + \\\n Screen.SCREEN_MIN_Y", "def my_go_to_pose2(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the robot.drive_wheels() function to jointly move and rotate the \n\t# robot to reduce distance between current and desired pose (Approach 2).\n\t# ####\n\t\n\tabsoluteTargetPosition = (robot.pose.position.x + x, robot.pose.position.y + y, robot.pose.rotation.angle_z.degrees + angle_z)\n\t\n\twhile(math.sqrt(x*x + y*y) > 50.0):\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\t\tleftMotor = 10 * (2 * x - angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\trightMotor = 10 * (2 * x + angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\t# print(\"(leftMotor, rightMotor) = (%i,%i)\" % (leftMotor, rightMotor))\n\t\tangle_delta = get_front_wheel_radius() * (rightMotor - leftMotor) / get_distance_between_wheels()\n\t\tx_delta = get_front_wheel_radius() * math.cos(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\ty_delta = get_front_wheel_radius() * math.sin(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"angle_delta %i\" % angle_delta)\n\t\t# x = x - get_front_wheel_radius() * math.cos(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# y = y - get_front_wheel_radius() * math.sin(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\t# angle_z = angle_z + angle_delta * (360.0/(2 * math.pi))\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.drive_wheels(leftMotor, rightMotor, duration = 1)\n\t\trobot.stop_all_motors()\n\t\t# time.sleep(1)\n\t\tx = absoluteTargetPosition[0] - robot.pose.position.x\n\t\ty = absoluteTargetPosition[1] - robot.pose.position.y\n\t\tangle_z = absoluteTargetPosition[2] - robot.pose.rotation.angle_z.degrees\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.stop_all_motors()\n\t\t# robot.drive_wheels(0,0)", "def positioning(self):\n pass", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def move(self, window):\r\n self.save_pos = (self.center_x, self.center_y) # sauvegarde la position avant de bouger\r\n self.center_x = math.cos(self.angle) * self.velocity + self.center_x\r\n self.center_y = math.sin(self.angle) * self.velocity + self.center_y\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius) # update le rectangle\r", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def __init__(self, robot, x, y, rz, radius=0.15, frame_id=\"map\"):\n super(NavigateToPose, self).__init__(robot, lambda: pose_constraints(x, y, rz, radius, frame_id))", "def cozmo_turn_in_place(robot, angle, speed):\n robot.turn_in_place(degrees(angle), speed=degrees(speed)).wait_for_completed()", "def my_go_to_pose2(robot, x, y, angle_z):\n # ####\n # TODO: Implement a function that makes the robot move to a desired pose\n # using the robot.drive_wheels() function to jointly move and rotate the \n # robot to reduce distance between current and desired pose (Approach 2).\n # ####\n pass", "def my_go_to_pose1(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the my_drive_straight and my_turn_in_place functions. 
This should\n\t# include a sequence of turning in place, moving straight, and then turning\n\t# again at the target to get to the desired rotation (Approach 1).\n\t# ####\n\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\tfirstRotation = firstRotationInRadians * 360.0/ (2.0 * math.pi)\n\tmy_turn_in_place(robot, firstRotation, 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_drive_straight(robot, math.sqrt(x*x + y*y), (-1 if x < 0 else 1) * 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_turn_in_place(robot, angle_z - firstRotation , 30)\n\ttime.sleep(1)", "def _moveTo(self, pt):\n self._handleAnchor()\n t = \"M%s\" % (pointToString(pt))\n self._commands.append(t)\n self._lastCommand = \"M\"\n self._lastX, self._lastY = pt", "def tick_move(self):\n if self.velocity[0] > 0 and self.pos[0] > SCREEN_WIDTH:\n # Moving right, reposition to off the left of the screen\n new_pos = (-self.width, self.pos[1])\n elif self.velocity[0] < 0 and self.pos[0] < -self.width:\n # Moving left, reposition to off the right of the screen\n new_pos = (SCREEN_WIDTH + self.width, self.pos[1])\n else:\n # Car not offscreen, move as normal\n new_pos = (\n self.pos[0] + (self.velocity[0] * self.controller.engine.last_tick),\n self.pos[1]\n )\n\n self.pos = new_pos", "def move_turtle(self):\n self.forward(self.move_speed)", "def __init__(self, pos=(SCREEN_X//2, SCREEN_Y//2)):\n self.heading = \"right\"\n self.speed = 4\n self.length = 32\n self.size = 16\n self.color = COLOR\n self.pos = pos\n (self.x_coord, self.y_coord) = ([], [])\n self.displacement = 0\n for _ in range(self.length):\n self.x_coord.append(self.pos[0] - self.displacement)\n self.y_coord.append(self.pos[1])\n self.displacement += 4", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def advance(self):\n #x and y coordinates move and advance by adding the randomly generated velocity \n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n return", "def move(self):\n pass", "def spawn_orb(self):\n x_pos = random.randint(0, self.config.arena_size[0] - 1)\n y_pos = random.randint(0, self.config.arena_size[1] - 1)\n self.arena[x_pos][y_pos] = Tile.ORB", "def move(self):\n \n self.rect.move_ip(0,self.speed) # Funcion para mover el enemigo especificando la velocidad xy\n \n if (self.rect.top > SCREEN_HEIGHT): # Condicion cuando llega a la parte inferior y no colisiono con el jugador\n del self.surf #Libera memoria\n del self.rect\n self.randomNumber = random.choice([70,64,32]) # Su tamaño se asigna nuevamente\n self.size = (self.randomNumber,self.randomNumber) #Se genera su tamaño como un cuadrado de lado aleatorio\n self.surf = pygame.Surface(self.size) #Se genera la superficie que aparecera la pantalla\n self.surf.fill(RED)\n self.rect = self.surf.get_rect(center = (random.randint(40,SCREEN_WIDTH-40),0))# me da info de las coordenadas de surf\n if(self.randomNumber == 32):\n self.surf = self.imagen\n elif(self.randomNumber ==64):\n self.surf = self.imagen2\n elif self.randomNumber ==70 :\n self.surf = self.imagen3", "def move(x,y):\r\n pass", "def Position(V, W, X, Y, Beta, LC, RC, r):\n d = 23.5 # distance between wheels (cm)\n rad = 3.6 # radius of wheels (cm)\n\n # iCreate Robot library 
uses left and right wheel VELOCITY values\n LeftWheelVel = ((V - ((W * d) / 2)) / rad) * 3.6 / (2 * math.pi) * 100\n RightWheelVel = (V * 2 + W * d / (2 * rad)) * 3.6 / (2 * math.pi) * 100\n\n # r is robot object from create library\n r.driveDirect(LeftWheelVel, RightWheelVel) # assign calculated velocities\n \n LCnew = r.getSensor(\"LEFT_ENCODER\") # recieve updated sensor \n RCnew = r.getSensor(\"RIGHT_ENCODER\")\n DLC = LCnew - LC # update sensor values from previously record\n DRC = RCnew - RC\n # L is Left?\n DLWheel = DLC * (72.0 * math.pi / 508.8) / 10\n DRWheel = DRC * (72.0 * math.pi / 508.8) / 10\n BetaNew = ((DRWheel - DLWheel) / d) + Beta\n XNew = ((1/2 * (DRWheel + DLWheel)) * math.cos(BetaNew)) + X\n YNew = ((1/2 * (DRWheel + DLWheel)) * math.sin(BetaNew)) + Y\n return(XNew, YNew, BetaNew, LCnew, RCnew)", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def set_robot(self, robot):\n self.robot = robot", "def main():\r\n robot = ROBOT()\r\n print(\"COMMANDS AVAILABLE:\", commands_avail)\r\n print(\"'PLACE' must be used in the form: PLACE x,y,direction\")\r\n print(\"VALID DIRECTIONS:\", direction_tuple)\r\n print(\"MOVE: moves the robot one unit in the direction its facing, unless it would fall of the tabletop\")\r\n print(\"LEFT and RIGHT: rotates the robot 90 degrees anti-clockwise or clockwise respectively\")\r\n print(\"REPORT: prints the position on the tabletop and which direction the robot is facing\")\r\n print(\"NOTE: position 0,0 is south-west corner and 4,4 is north-east corner. Size can be changed in source code\")\r\n print(\"NOTE: if REPORT in input, all previous commands processed and reported. Option to enter more input given.\")\r\n while True:\r\n commands = robot.input_discard()\r\n try:\r\n for i in commands:\r\n if commands_avail[0] in i:\r\n xyd = i.split(' ')\r\n xy = xyd[1].split(',')\r\n x = int(xy[0])\r\n y = int(xy[1])\r\n d = xy[2]\r\n robot.place(x, y, d)\r\n elif i == commands_avail[1]:\r\n robot.move()\r\n elif i == commands_avail[2]:\r\n robot.left()\r\n elif i == commands_avail[3]:\r\n robot.right()\r\n elif i == commands_avail[4]:\r\n robot.report()\r\n except TypeError:\r\n print(\"NO VALID PLACE OR EXISTING ROBOT POSITION\")\r\n print(\"Would you like to enter more input: 'Y' for yes or 'N' to quit\")\r\n cont = input()\r\n while cont != 'N' and cont != 'Y':\r\n print(\"INVALID COMMAND!\\nWould you like to enter more input: 'Y' for yes or 'N' to quit\")\r\n cont = input()\r\n if cont == 'N':\r\n break\r\n print(\"Exiting Program\")" ]
[ "0.76408446", "0.7587763", "0.7215272", "0.71969175", "0.71969175", "0.7194924", "0.6987033", "0.68881804", "0.68799406", "0.68528825", "0.6761995", "0.6702392", "0.66734", "0.66272604", "0.6603478", "0.658863", "0.65802443", "0.65351194", "0.651352", "0.65125144", "0.6502403", "0.6500763", "0.64749974", "0.64464104", "0.6437031", "0.6429298", "0.6428721", "0.64121145", "0.64064264", "0.63950956", "0.637456", "0.63492465", "0.6345895", "0.6336674", "0.63255906", "0.63171446", "0.6314533", "0.6314444", "0.63135374", "0.6310555", "0.63101155", "0.630769", "0.6302616", "0.6295046", "0.6292722", "0.6283201", "0.6280948", "0.6277489", "0.62502456", "0.62464005", "0.62338996", "0.622413", "0.62234104", "0.62167764", "0.620731", "0.61942697", "0.6185669", "0.6184137", "0.61744314", "0.61744314", "0.61705", "0.61705", "0.6162698", "0.6162698", "0.61623526", "0.615625", "0.6153638", "0.61535424", "0.61465937", "0.61435866", "0.6133042", "0.61240166", "0.61210763", "0.6105825", "0.60966796", "0.60929984", "0.6088798", "0.6087531", "0.6084386", "0.6082992", "0.60816455", "0.60784376", "0.6077179", "0.60653746", "0.6050091", "0.6044217", "0.60315335", "0.60245705", "0.6022888", "0.60226643", "0.60205054", "0.60204375", "0.6020243", "0.6018312", "0.6013978", "0.6011736", "0.60108453", "0.60047483", "0.5995508", "0.5987439", "0.59817874" ]
0.0
-1
Determine whether a Roman token is the next logical Roman token. This test is for Roman levels 3 or 6, and checks whether the next token is both a Roman numeral and the next bigger Roman numeral. For instance, 'v' is a valid Roman numeral, but unless the current Roman token evaluates to 4, the 'v' must be a level-1 alpha marker.
def roman_surf_test(self, token, next_token):
    if not token:
        return False
    for each in [token, next_token]:
        if not roman_to_int(each):
            return False
    return roman_to_int(next_token) == roman_to_int(token) + 1
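To illustrate the check described by the query above, here is a minimal, self-contained sketch. The roman_to_int helper is an assumption (the entry only references it by name); it is stubbed here with a small lookup table of lowercase numerals up to 'x', returning None for anything that is not a Roman numeral.

# Illustrative sketch only; roman_to_int is a hypothetical stand-in for the
# helper the documented method relies on.
ROMAN_VALUES = {'i': 1, 'ii': 2, 'iii': 3, 'iv': 4, 'v': 5,
                'vi': 6, 'vii': 7, 'viii': 8, 'ix': 9, 'x': 10}

def roman_to_int(token):
    # Return the integer value of a lowercase Roman numeral, or None.
    return ROMAN_VALUES.get(token)

def roman_surf_test(token, next_token):
    # Standalone version of the documented check: both tokens must be Roman
    # numerals, and next_token must be exactly one step larger than token.
    if not token:
        return False
    for each in [token, next_token]:
        if not roman_to_int(each):
            return False
    return roman_to_int(next_token) == roman_to_int(token) + 1

print(roman_surf_test('iv', 'v'))   # True: 'v' directly follows 'iv'
print(roman_surf_test('iii', 'v'))  # False: 'v' is not the next numeral after 'iii'
print(roman_surf_test('b', 'v'))    # False: 'b' is not a Roman numeral

The last case mirrors the query's example: a lone 'v' only counts as the next Roman token when the preceding token evaluates to 4; otherwise it would be treated as an alpha marker.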
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_roman_numeral(s: str) -> bool:\n if not isinstance(s, str):\n raise TypeError(\"Only strings may be tested \")\n return bool(_romanNumeralPattern.match(s))", "def roman_numerals_decoder(roman):\n roman_numerals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n result = 0\n for i, c in enumerate(roman):\n if (i + 1) == len(roman) or roman_numerals[c] >= roman_numerals[roman[i + 1]]:\n result += roman_numerals[c]\n else:\n result -= roman_numerals[c]\n return result", "def roman_to_arabic_previous(roman):\n result = [0]\n previous_number = 4000\n p_previous_number = 4001\n # we store 2 previous numbers in order to check is\n # this number still correct\n for i, char in enumerate(roman):\n if char in MAP_ROMAN:\n number = MAP_ROMAN[char]\n # Chars in Roman numbers should decrease if not 3 same chars in line\n if p_previous_number <= number and previous_number != number:\n raise ValueError('Wrong Roman Number (...1)')\n if number > previous_number:\n # minus previous number if current > previous\n # IV: 5 - 1, IX: 10 - 1, XC: 100 - 10\n if number % previous_number < 5:\n sign = -1\n else:\n raise ValueError('Wrong Roman number (...2)')\n else:\n sign = 1\n\n print_debug(i, roman, char, number, previous_number, sign)\n\n result[-1] *= sign\n result.append(number)\n p_previous_number = previous_number\n previous_number = number\n else:\n raise ValueError('Unknown char \"%s\" in input roman number' % char)\n counts = defaultdict(int)\n\n # test for same multiple Roman numbers\n for number in result:\n num = abs(number)\n counts[num] += 1\n if counts[num] > 3:\n raise ValueError('Wrong Roman number (...3)')\n\n return sum(result)", "def solution(roman):\n r = roman.upper()\n nums = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n total = 0\n for i, c in enumerate(r):\n if i < len(r) - 1:\n if nums[r[i]] < nums[r[i+1]]:\n total -= nums[c]\n else:\n total += nums[c]\n else:\n total += nums[c]\n return total", "def toRoman(n):\n pass", "def is_ror(val):\n return ror_regexp.match(val)", "def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS", "def fromRoman(s):\n if not s:\n raise InvalidRomanNumeralError, 'Input can not be blank'\n if not romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s\n\n result = 0\n index = 0\n for numeral, integer in romanNumeralMap:\n while s[index:index+len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result", "def islogicseparator(token):\n\n # Token is a logic separator\n return token and token.lower() in Token.LOGIC_SEPARATORS", "def to_roman(an_arabic):\n result = \"\"\n\n for level, symbol in [(1000,\"M\"),\n (900,\"CM\"),\n (500,\"D\"),\n (400,\"CD\"),\n (100,\"C\"),\n (90,\"XC\"),\n (50,\"L\"),\n (40,\"XL\"),\n (10,\"X\"),\n (9,\"IX\"),\n (5,\"V\"),\n (4,\"IV\"),\n (1,\"I\")]:\n\n while an_arabic >= level:\n result += symbol\n an_arabic -= level\n \n return result", "def isBinaryOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = prevIsWS and nextIsWS or (not prevIsWS and not nextIsWS)\n text = tokens.getText(start, stop)\n return result", "def romanize(digit, glyphs):\n if 1 <= digit <= 3:\n return digit*glyphs[0]\n elif digit == 4:\n return 
glyphs[0] + glyphs[1]\n elif digit >= 5 and digit <= 8:\n return glyphs[1] + ((digit - 5) * glyphs[0])\n elif digit == 9:\n return glyphs[0]+glyphs[2]\n else:\n return ''", "def fromRoman(s):\n pass", "def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)", "def check_polarity(vertex, lexicon):\n if vertex.lang != \"EN\":\n return False\n return vertex.w in lexicon", "def isRegexPossible(self):\n if self._lastToken is None:\n # No token has been produced yet: at the start of the input,\n # no division is possible, so a regex literal _is_ possible.\n return True\n\n if self._lastToken.type == ECMAScriptLexer.Identifier or \\\n self._lastToken.type == ECMAScriptLexer.NullLiteral or \\\n self._lastToken.type == ECMAScriptLexer.BooleanLiteral or \\\n self._lastToken.type == ECMAScriptLexer.This or \\\n self._lastToken.type == ECMAScriptLexer.CloseBracket or \\\n self._lastToken.type == ECMAScriptLexer.CloseParen or \\\n self._lastToken.type == ECMAScriptLexer.OctalIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.DecimalLiteral or \\\n self._lastToken.type == ECMAScriptLexer.HexIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.StringLiteral or \\\n self._lastToken.type == ECMAScriptLexer.PlusPlus or \\\n self._lastToken.type == ECMAScriptLexer.MinusMinus:\n # After any of the tokens above, no regex literal can follow.\n return False\n else:\n # In all other cases, a regex literal _is_ possible.\n return True", "def num2roman(num):\n roman = ''\n while num > 0:\n for i, r in ROMAN_MAP:\n while num >= i:\n roman += r\n num -= i\n return roman", "def check_sym(ikjl, nmo, sym):\n if sym == 1:\n return True\n else:\n i, k, j, l = ikjl\n if sym == 4:\n kilj = (k,i,l,j)\n jlik = (j,l,i,k)\n ljki = (l,j,k,i)\n if (ikjl > jlik) or (ikjl > kilj) or (ikjl > ljki):\n return False\n else:\n return True\n else:\n ik = i + k*nmo\n jl = j + l*nmo\n return (i >= k and j >= l) and ik >= jl", "def toRoman(dec):\t\t\n if dec <=0:\n\t raise ValueError, \"It must be a positive\"\n # to avoid MMMM\n\telif dec>=4000: \n\t raise ValueError, \"It must be lower than MMMM(4000)\"\n \n\treturn decToRoman(dec,\"\",decimalDens,romanDens)", "def roman_number(value):\n try:\n value = to_roman(value)\n except RomanError as e:\n raise TemplateSyntaxError(\"roman_number error: %s\" % str(e))\n return value", "def toRoman(n):\n result = \"\"\n for numeral, integer in romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def isRNANucleotide(letter):\n if letter == 'A' or letter == 'C' or letter == 'G' or letter == 'U':\n return True\n return False", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def romanify(num):\n result = \"\"\n return result", "def formatRomanNumeral(rn, key):\n # Something of \"I\" and \"I\" of something\n if rn == \"I/I\":\n rn = \"I\"\n return rn", "def should_lex(cls, char):\n return char == '{' or char == '}'", "def has_room(r_l, out, char_bud):\n if r_l == \"R\":\n if len(out) + len(sentence[\"tokens\"][counter_r][0]) < char_bud:\n return True\n else:\n return False\n if r_l == \"L\":\n if len(out) + len(sentence[\"tokens\"][counter_l][0]) < char_bud:\n return True\n else:\n return False", 
"def check(i):\r\n return (has_palindrome(i, 2, 4) and\r\n has_palindrome(i+1, 1, 5) and\r\n has_palindrome(i+2, 1, 4) and\r\n has_palindrome(i+3, 0, 6))", "def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index", "def roman_numerals(text):\n return re.findall(r\"\\b([IVXLCDM]+)\\b\", text)", "def is_horn(clause) -> bool:\n from logic_formula_generator.syntax_tree.first_order_logic import Atom\n positive_literals = 0\n for atom in clause:\n atom: Atom\n if LogicalConnective.NOT in atom.unary_connectives:\n positive_literals += 1\n return positive_literals <= 1", "def __le__(self, other):\n try:\n lhs = (self._num * other._den)\n rhs = (other._num * self._den)\n return (lhs <= rhs)\n except AttributeError:\n return (self <= Rational.parse_number(other))", "def testFromRomanKnownValues(self):\n for integer, numeral in self.knownValues:\n result = roman.fromRoman(numeral)\n self.assertEqual(integer, result)", "def if2symbols(symbol1, symbol2, reel):\n for i in range(len(reel)-2):\n if reel[i] == symbol1 and reel[i+1] == symbol2:\n return True\n return False", "def toRoman(n):\n if not isinstance(n, int):\n raise NorIntegerError(\"decimals can not be converted\")\n if not (0 < n < 5000):\n raise OutOfRangeError(\"number out of range (must be 1..4999)\")\n \n result = \"\"\n for numeral, integer in romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def to_roman(numeral):\n mapping = {\n 'M': 1000,\n 'CM': 900,\n 'D': 500,\n 'CD': 400,\n 'C': 100,\n 'XC': 90,\n 'L': 50,\n 'XL': 40,\n 'X': 10,\n 'IX': 9,\n 'V': 5,\n 'IV': 4,\n 'I': 1\n }\n romans = {v:k for k,v in mapping.items()}\n result = ''\n\n for divisor, symbol in romans.items():\n count = numeral // divisor\n remainder = numeral % divisor\n numeral = remainder\n result += symbol * count\n\n return result", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False \n #remove punctuations for all sentences\n words_sentence1 = word_tokenize(sentences[0].translate(None, string.punctuation).lower())\n words_sentence2 = word_tokenize(sentences[1].translate(None, string.punctuation).lower())\n words_sentence3 = word_tokenize(sentences[2].translate(None, string.punctuation).lower())\n words_sentence4 = word_tokenize(sentences[3].translate(None, string.punctuation).lower())\n words_sentence5 = word_tokenize(sentences[4].translate(None, string.punctuation).lower())\n \n #check rhymes for AAA BB and not rhymes for AB\n ret_flag = (self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence2[len(words_sentence2) - 1]) and\n self.rhymes(words_sentence3[len(words_sentence3) - 1 ],\n words_sentence4[len(words_sentence4) - 1 ]) and\n self.rhymes(words_sentence2[len(words_sentence2) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and\n self.rhymes(words_sentence1[len(words_sentence1) - 1 ],\n 
words_sentence5[len(words_sentence5) - 1 ]) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence4[len(words_sentence4) - 1])))\n \n if ret_flag == False: return False\n \n \n # Check additional constraints\n \n sum_of_syl1 = 0\n for word in words_sentence1 : sum_of_syl1 += self.num_syllables(word)\n \n if sum_of_syl1 < 4 : return False\n sum_of_syl2 = 0\n for word in words_sentence2 : sum_of_syl2 += self.num_syllables(word)\n \n if sum_of_syl2 < 4 : return False\n \n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl2 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl2\n else : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl3 = 0\n for word in words_sentence3 : sum_of_syl3 += self.num_syllables(word)\n \n if sum_of_syl3 < 4 : return False\n sum_of_syl4 = 0\n for word in words_sentence4 : sum_of_syl4 += self.num_syllables(word)\n \n if sum_of_syl4 < 4 : return False\n \n \n sum_of_syl_B_diff = 0\n if sum_of_syl3 > sum_of_syl4 : sum_of_syl_B_diff = sum_of_syl3 - sum_of_syl4\n else : sum_of_syl_B_diff = sum_of_syl4 - sum_of_syl3\n \n if sum_of_syl_B_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl1 and sum_of_syl3 > sum_of_syl2 \n and sum_of_syl4 > sum_of_syl1 and sum_of_syl4 > sum_of_syl2) : return False\n \n \n sum_of_syl5 = 0\n for word in words_sentence5 : sum_of_syl5 += self.num_syllables(word) \n \n if sum_of_syl5 < 4 : return False\n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl_A_diff = 0\n if sum_of_syl2 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl2\n \n \n if sum_of_syl_A_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl5 and sum_of_syl4 > sum_of_syl5) : return False\n \n \n return ret_flag", "def isRational(self):\n return _libsbml.ASTNode_isRational(self)", "def roman_to_int(roman_string):\n\n NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])\n roman_string = roman_string.upper()\n if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:\n raise ValueError('{0} does not seem to be a roman numeral'.format(\n roman_string))\n i = result = 0\n for integer, numeral in NUMERAL_MAP:\n while roman_string[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n if result < 1:\n raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))\n return result", "def is_miller_rabin_prime(n):\n if n <= 1:\n return False\n elif n == 2:\n return True\n elif n % 2 == 0:\n return False\n \n witnesses = get_witnesses(n)\n if witnesses is None:\n msg = 'No definite Miller-Rabin test is available for %d' % n\n raise ValueError(msg)\n \n d, s = factorN(n-1)\n for w in witnesses:\n if is_composite(w, n, d, s):\n return False\n \n return True", "def 
is_miller_rabin_prime(n):\n if n <= 1:\n return False\n elif n == 2:\n return True\n elif n % 2 == 0:\n return False\n \n witnesses = get_witnesses(n)\n if witnesses is None:\n msg = 'No definite Miller-Rabin test is available for %d' % n\n raise ValueError(msg)\n \n d, s = factorN(n-1)\n for w in witnesses:\n if is_composite(w, n, d, s):\n return False\n \n return True", "def from_roman(s: str) -> Integral:\n if not isinstance(s, str):\n raise TypeError(\"The argument to from_roman must be a string.\")\n if not _romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError(f\"Invalid Roman numeral: {s}\")\n\n result = 0\n index = 0\n for numeral, integer in _romanNumeralMap:\n while s[index : index + len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result", "def residue_ramachandran_type(residue) :\n if residue_amino(residue)==\"GLY\" :\n return rama_GLYCINE\n elif residue_amino(residue)==\"PRO\" :\n return rama_PROLINE\n elif residue_amino(next_residue(residue))==\"PRO\" :\n #exlcudes those that are Pro or Gly\n return rama_PRE_PRO\n else :\n return rama_GENERAL", "def isECGLeadAVL(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == 'X113-1'", "def is_antipalindrome(n):\n digits = [int(num) for num in str(n)]\n reverseDigits = digits[::-1]\n return all([reverseDigits[i]==(9-digits[i]) for i in range(len(digits))])", "def romanify(num):\n result = \"\"\n onesDict = {1:\"I\", 2: \"II\", 3: \"III\", 4: \"IV\", 5: \"V\", 6: \"VI\", 7: \"VII\", 8: \"VIII\", 9: \"IX\", 0:\"\"}\n ones = num%10\n num-=num%10\n result = onesDict[ones] + result\n tensDict = {10:\"X\", 20: \"XX\", 30: \"XXX\", 40:\"XL\", 50:\"L\", 60:\"LX\", 70: \"LXX\", 80: \"LXXX\", 90: \"XC\", 0:\"\"}\n tens = num%100\n num-=num%100\n result = tensDict[tens] + result\n hunsDict = {100:\"C\", 200: \"CC\", 300: \"CCC\", 400:\"CD\", 500:\"D\", 600:\"DC\", 700: \"DCC\", 800: \"DCCC\", 900: \"CM\", 0:\"\"}\n huns = num%1000\n num-=num%1000\n result = hunsDict[huns] + result\n thous = num/1000\n result = \"M\"*thous + result\n \n return result", "def _check_grammar(seq, accepted_tokens):\r\n if len(seq) == 0:\r\n if accepted_tokens == [0, 3]:\r\n return True\r\n else:\r\n return False\r\n if seq[0] in accepted_tokens:\r\n curr_token = seq[0]\r\n if curr_token in [0, 2]:\r\n next_possible_tokens = [1, 2]\r\n elif curr_token in [1, 3]:\r\n next_possible_tokens = [0, 3]\r\n else:\r\n raise ValueError\r\n return Model._check_grammar(seq[1:], next_possible_tokens)\r\n return False", "def is_alternating(self):\n _is_alt = self._is_alt\n if _is_alt is not None:\n return _is_alt\n\n n = self.degree\n if n >= 8:\n if self.is_transitive():\n _is_alt_sym = self._eval_is_alt_sym_monte_carlo()\n if _is_alt_sym:\n if all(g.is_even for g in self.generators):\n self._is_sym, self._is_alt = False, True\n return True\n\n self._is_sym, self._is_alt = True, False\n return False\n\n return self._eval_is_alt_sym_naive(only_alt=True)\n\n self._is_sym, self._is_alt = False, False\n return False\n\n return self._eval_is_alt_sym_naive(only_alt=True)", "def is_emirp(n) -> bool:\r\n if not is_prime(n):\r\n return False\r\n if not is_palindromic_number(n):\r\n return is_prime(int(str(n)[::-1]))\r\n return False", "def _biconditional_symbol(self):\n # First symbol was read in get_next_token()\n self._expect('=')\n self._expect('>')\n\n return BiconditionalToken()", "def _is_lexsorted(self) -> bool:\n return self._lexsort_depth == self.nlevels", "def decToRoman(numStr):\n try:\n n = int(numStr)\n if n >= 4000:\n 
return 'Error!'\n romans = [\n (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),\n (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),\n (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'),\n (1, 'I')\n ]\n result = ''\n for value, letters in romans:\n while n >= value:\n result += letters\n n -= value\n return result\n except:\n result = 'Error!'\n return result", "def millerRabin(n, r):\n\n if n < 2: # 0, 1 and negative numbers are considered not prime\n return False\n\n ############ CALCULATING d AND i #########\n # find the values d and i s.t. 2^i * d = n - 1\n d = n - 1\n i = 0\n\n while not d & 1:\n d >>= 1\n i += 1\n\n ############ TEST ONE WITNESS FOR EACH MR-ROUND #########\n for _ in range(r):\n\n # get random witness\n w = secrets.SystemRandom().randrange(2, n - 1)\n\n # use power-remainder method\n z = powerRemainder(w, d, n)\n\n # if z is 1 or n -1 then w is not a witness for n being a composite number\n if z not in (1, n - 1):\n\n # check no j s.t. (w^(2^j)) ^ d = -1 (mod n)\n for j in range(i):\n\n # get next z\n z = powerRemainder(w, 2 ** j * d, n)\n\n if z == 1: # n is definitely composite\n return False # return False\n elif z == n -1 : # n is prime or the witness is a strong liar\n break # break to next witness\n\n else:\n return False # if the inner loop didn't break, n is composite\n\n return True # if no witness can be found for n being composite, it is a probable prime", "def intToRoman(self, num: int) -> str:\n\n # Last remainder\n remainder = num\n\n # Initial string\n roman = \"\"\n\n # Loops through all remainder values\n for v in self.values:\n division = remainder // v\n remainder = remainder % v\n\n # Adds to the string only if division is not empty.\n if division != 0:\n roman += self.symbols[v] * division\n\n return roman", "def compare_LR(value, val_L=0, val_R=0, side='LR'):\n\n if len(side) == 2:\n is_beyond = (value < val_L) | (value > val_R)\n elif side == 'L':\n is_beyond = (value < val_L)\n elif side == 'R':\n is_beyond = (value > val_R)\n else:\n raise ValueError('Invalid side given.')\n\n return is_beyond", "def testToRomanKnownValues(self):\n for integer, numeral in self.knownValues:\n result = roman.toRoman(integer)\n self.assertEqual(numeral, result)", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def to_roman(n: Union[Integral, np.integer]) -> str:\n if not isinstance(n, (Integral, np.integer)):\n raise TypeError(f\"{n} cannot be converted to a Roman numeral.\")\n if not (0 < n < 5000):\n raise OutOfRangeError(\"Number is out of range (need 0 < n < 5000)\")\n\n result = \"\"\n for numeral, integer in _romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def meets_criteria2(num):\n output = True\n if not exactly_two_same_digits(num):\n output = False\n if not digits_increase(num):\n output = False\n return output", "def is_librating(triple):\n return triple.CKL <= 0", "def eol(self):\n return self.pos == len(self.tokens)", "def has_right_rauzy_move(self, winner):\n winner = interval_conversion(winner)\n loser = self._labels[1-winner][-1]\n\n # the same letter at the right-end (False)\n if self._labels[0][-1] == self._labels[1][-1] :\n return False\n\n # the winner (or loser) letter is repeated on the other interval (True)\n if self._labels[0][-1] in self._labels[1]: return True\n if self._labels[1][-1] in self._labels[0]: return True\n\n # the loser letters is the only letter repeated in the loser\n # interval (False)\n for i,c in 
enumerate((self._labels[1-winner])):\n if c != loser and c in self._labels[1-winner][i+1:]:\n return True\n\n return False", "def continues_to_right(self):\n if self.col_num == len(self.master_grid.matrix[0])-1:\n return False\n return (self.master_grid.matrix[self.row_num][self.col_num+1] \n == self.character)", "def convert(roman_num: str) -> int:\n result = 0\n roman_num = roman_num.upper()\n rome_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n\n for i, _ in enumerate(roman_num):\n if i + 1 < len(roman_num) and rome_dict[roman_num[i]] < rome_dict[roman_num[i + 1]]:\n result -= rome_dict[roman_num[i]]\n else:\n result += rome_dict[roman_num[i]]\n\n return result", "def is_unary(s):\n return s == '~'", "def is_lval(t):\n if not t:\n return False\n i = iter(t)\n if i.next() not in IDENTIFIER_START:\n return False\n return all(e in IDENTIFIER_PART for e in i)", "def lineTerminatorAhead(self):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n if ahead.channel != Lexer.HIDDEN:\n # We're only interested in tokens on the HIDDEN channel.\n return False\n\n if ahead.type == ECMAScriptParser.LineTerminator:\n # There is definitely a line terminator ahead.\n return True\n\n if ahead.type == ECMAScriptParser.WhiteSpaces:\n # Get the token ahead of the current whitespaces.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 2\n ahead = self._input.get(possibleIndexEosToken)\n\n # Get the token's text and type.\n text = ahead.text\n type = ahead.type\n\n # Check if the token is, or contains a line terminator.\n return (type == ECMAScriptParser.MultiLineComment and \\\n ('\\r' in text or '\\n' in text)) or \\\n (type == ECMAScriptParser.LineTerminator)", "def is_po2(n) -> bool:\n return not (n & (n - 1))", "def equivalent(h, r, monomial_generator, progress_meter=1<<14):\n order = len(h)\n h = np.array(h)\n r = np.array(r)\n for p in monomial_generator(order):\n # check if r^-1 * p^-1 * h is monomial\n # r is hadamard, so r^-1 = 1/order r^T\n r_inv = (1/order) * r.T\n if is_monomial(r_inv.dot(np.linalg.inv(p)).dot(h)):\n return True\n if progress_meter: print_dot(progress_meter)\n return False", "def is_terminal(self, u1):\n\t\treturn (u1 in self.T) # returns True if in array, else False", "def is_operator(obj):\n return isinstance(obj, Token) and obj[0] not in '/01234567890+-.<[('", "def isLogical(self):\n return _libsbml.ASTNode_isLogical(self)", "def is_numeral(self, symbol: str) -> bool:\n return symbol in self.numerals", "def _token_splittable(token_name: str) -> bool:\n if '_' in token_name:\n return False\n try:\n return not unicodedata.lookup('GREEK SMALL LETTER ' + token_name)\n except KeyError:\n return len(token_name) > 1", "def is_antipalindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] == v[len(v)-i-1]:\n return False\n return True", "def check_pronominal(self):\n pronom_tags = [\"PRP\", \"PRP$\", \"WDT\", \"WP\", \"WP$\"]\n token_procs = self.get_processed_tokens()\n all_pronom = all(\n t.tag_ in pronom_tags for t in token_procs\n ) # True if all tokens are pronom_tags\n # print(f\"{' '.join(t.text + '.' 
+ t.tag_ for t in token_procs)}: Pronominal = {all_pronom}\")\n return all_pronom", "def isOperand(self, token):\n if len(token) == 1:\n if token in self.operands:\n return True\n elif len(token) > 1:\n validChars = self.operands + '+-'\n for eachChar in token:\n if eachChar not in validChars:\n return False\n return True", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def seq_exceeds_homopolymers(curr_seq, max_len=6):\r\n for base in 'ATGC':\r\n curr = base * (max_len + 1)\r\n if curr in curr_seq:\r\n return True\r\n return False", "def is_lms(i, t):\n return t[i] == S_TYPE and t[i - 1] == L_TYPE", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def is_next_separated(self, segPre, segNext, timexPre=None, timexNext=None):\n ##: Search for seperator\n match = re.findall(';|and| & ', segNext, re.I)\n if match:\n return True\n \n match = re.findall(';|and', segPre, re.I)\n #match = re.findall(';|and| & ', segPre, re.I)\n if match:\n return False \n \n rankPre = 0\n rankNext = 0\n ##: a date with all year, month and day has rank 3\n if timexPre:\n rankPre = timexPre.getDateCompleteness()\n if timexNext:\n rankNext = timexNext.getDateCompleteness()\n \n ##: Compare number of comma\n nCommasPre = len(re.findall(', ', segPre))\n nCommasNext = len(re.findall(', ', segNext))\n rankPre -= nCommasPre\n rankNext -= nCommasNext\n \n if rankPre > rankNext:\n return True\n elif rankPre < rankNext:\n return False\n \n ##: Compare location\n if len(segPre) <= len(segNext):\n return True\n else:\n return False\n \n return False", "def if3symbols(symbol1, symbol2, symbol3, reel):\n for i in range(len(reel)-3):\n if reel[i] == symbol1 and reel[i+1] == symbol2 and reel[i+2] == symbol3:\n return True\n return False", "def bigram(l1, l2):\n # This was an attempt to implement a less naive way of intersection...\n # it1 = iter(l1)\n # it2 = iter(l2)\n # sol = False\n #\n # n1 = next(it1)\n # n2 = next(it2)\n # while True:\n # try:\n # if n1 == n2 + 1:\n # return True\n # else:\n # if n1 < n2:\n # n1 = next(it1)\n # else:\n # n2 = next(it2)\n # except StopIteration:\n # return sol\n\n # Below is a naive way of intersection. 
However, it works...\n sol = False\n try:\n # naive implementation\n for p in l1:\n for j in l2:\n if p == j + 1:\n sol = True\n print(sol)\n\n except TypeError:\n print(sol)", "def is_right_angle(a, b, c):\n if a == 0 or b == 0 or c == 0:\n return False\n else :\n return (a == b + c) or (b == c + a) or (c == a + b)", "def _is_equal(self, symbol):\n if symbol.type == self.scanner.EQUALS:\n return True\n else:\n return False", "def is_operator(formula):\n return is_binary_operator(formula) or isinstance(formula, Not)", "def __le__(self, rs):\n Number.comparisons += 1\n result = self.data <= rs.data\n return result", "def check_non_singletons(token: str, line_num: int) -> Union[Tuple[Tuple[int, str], str, int], str, Tuple[str, str]]:\n\n if re.match(Lexer.ID[1], token):\n return (Lexer.ID[0], Lexer.ID[2]), token, line_num\n elif re.match(Lexer.STRING[1], token):\n return (Lexer.STRING[0], Lexer.STRING[2]), token, line_num\n elif re.match(Lexer.INT[1], token):\n return (Lexer.INT[0], Lexer.INT[2]), token, line_num\n elif re.match(Lexer.REAL[1], token):\n return (Lexer.REAL[0], Lexer.REAL[2]), token, line_num\n elif re.match(Lexer.COMMENT[1], token):\n return \"COMMENT\"\n else:\n return \"ILLEGAL\", 'Invalid character sequence \"' + token + '\" on line: ' + str(line_num)", "def decToRoman(num,s,decs,romans):\n\tif decs:\n\t if (num < decs[0]):\n\t # deal with the rest denomination\n\t return decToRoman(num,s,decs[1:],romans[1:])\t\t \n\t else:\n\t # deduce this denomation till num<desc[0]\n\t return decToRoman(num-decs[0],s+romans[0],decs,romans)\t \n\telse:\n\t # we run out of denomination, we are done \n\t return s", "def decToRoman(num,s,decs,romans):\n\tif decs:\n\t if (num < decs[0]):\n\t # deal with the rest denomination\n\t return decToRoman(num,s,decs[1:],romans[1:])\t\t \n\t else:\n\t # deduce this denomation till num<desc[0]\n\t return decToRoman(num-decs[0],s+romans[0],decs,romans)\t \n\telse:\n\t # we run out of denomination, we are done \n\t return s", "def __valid_token_format(self, token):\n if len(token) != self.TOKEN_LENGTH * 2:\n return False\n for c in token:\n if c not in '01234567890abcdef':\n return False\n return True", "def to_roman(n):\n if not isinstance(n, int):\n try:\n n = int(n)\n except ValueError:\n raise NotIntegerError(\"non-integers cannot be converted\")\n\n if not (0 < n < 4000):\n raise OutOfRangeError(\"number out of range (must be 1..3999)\")\n\n result = \"\"\n for numeral, integer in ROMAN_NUMBER_MAP:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def check_pra_symbol(symbol):\n # Platts\n if len(symbol) == 7 and symbol[:2] in [\n 'PC', 'PA', 'AA', 'PU', 'F1', 'PH', 'PJ', 'PG', 'PO', 'PP', ]:\n return True\n\n # Argus\n if '.' 
in symbol:\n sm = symbol.split('.')[0]\n if len(sm) == 9 and sm.startswith('PA'):\n return True\n\n return False", "def __le__(self, other):\n return int(self.rank) <= int(other.rank)", "def miller_rabin_base_2(n):\n d, s = n - 1, 0\n while not d & 1:\n d, s = d >> 1, s + 1\n\n x = pow(2, d, n)\n if (x == 1) or (x == n - 1):\n return True\n\n for i in range(s - 1):\n x = pow(x, 2, n)\n if x == 1:\n return False\n elif x == n - 1:\n return True\n\n return False", "def check_token(self, *args) -> bool:\n if len(args) == 1:\n if isinstance(args[0], str):\n return self.token_name == args[0]\n elif isinstance(args[0], _Enum):\n return self.token_name == args[0].name\n elif isinstance(args[0], _Sequence):\n return self.token_name in args[0]\n raise TypeError(\"_check_token() taking 1 argument, type: str, Enum or Sequence\")", "def CheckToken(self, token, state):\n # Store some convenience variables\n first_in_line = token.IsFirstInLine()\n last_in_line = token.IsLastInLine()\n last_non_space_token = state.GetLastNonSpaceToken()\n\n type = token.type\n\n # Process the line change.\n if not self._is_html and FLAGS.strict:\n # TODO(robbyw): Support checking indentation in HTML files.\n indentation_errors = self._indentation.CheckToken(token, state)\n for indentation_error in indentation_errors:\n self._HandleError(*indentation_error)\n\n if last_in_line:\n self._CheckLineLength(token, state)\n\n if type == Type.PARAMETERS:\n # Find missing spaces in parameter lists.\n if self.MISSING_PARAMETER_SPACE.search(token.string):\n self._HandleError(errors.MISSING_SPACE, 'Missing space after \",\"',\n token)\n\n # Find extra spaces at the beginning of parameter lists. Make sure\n # we aren't at the beginning of a continuing multi-line list.\n if not first_in_line:\n space_count = len(token.string) - len(token.string.lstrip())\n if space_count:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"(\"',\n token, Position(0, space_count))\n\n elif (type == Type.START_BLOCK and\n token.metadata.context.type == Context.BLOCK):\n self._CheckForMissingSpaceBeforeToken(token)\n\n elif type == Type.END_BLOCK:\n # This check is for object literal end block tokens, but there is no need\n # to test that condition since a comma at the end of any other kind of\n # block is undoubtedly a parse error.\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of object literal', last_code,\n Position.All(last_code.string))\n\n if state.InFunction() and state.IsFunctionClose():\n is_immediately_called = (token.next and\n token.next.type == Type.START_PAREN)\n if state.InTopLevelFunction():\n # When the function was top-level and not immediately called, check\n # that it's terminated by a semi-colon.\n if state.InAssignedFunction():\n if not is_immediately_called and (last_in_line or\n not token.next.type == Type.SEMICOLON):\n self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,\n 'Missing semicolon after function assigned to a variable',\n token, Position.AtEnd(token.string))\n else:\n if not last_in_line and token.next.type == Type.SEMICOLON:\n self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,\n 'Illegal semicolon after function declaration',\n token.next, Position.All(token.next.string))\n\n if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):\n self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,\n 'Interface methods cannot contain code', last_code)\n\n elif (state.IsBlockClose() and\n 
token.next and token.next.type == Type.SEMICOLON):\n self._HandleError(errors.REDUNDANT_SEMICOLON,\n 'No semicolon is required to end a code block',\n token.next, Position.All(token.next.string))\n\n elif type == Type.SEMICOLON:\n if token.previous and token.previous.type == Type.WHITESPACE:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \";\"',\n token.previous, Position.All(token.previous.string))\n\n if token.next and token.next.line_number == token.line_number:\n if token.metadata.context.type != Context.FOR_GROUP_BLOCK:\n # TODO(robbyw): Error about no multi-statement lines.\n pass\n\n elif token.next.type not in (\n Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after \";\" in for statement',\n token.next,\n Position.AtBeginning())\n\n last_code = token.metadata.last_code\n if last_code and last_code.type == Type.SEMICOLON:\n # Allow a single double semi colon in for loops for cases like:\n # for (;;) { }.\n # NOTE(user): This is not a perfect check, and will not throw an error\n # for cases like: for (var i = 0;; i < n; i++) {}, but then your code\n # probably won't work either.\n for_token = tokenutil.CustomSearch(last_code,\n lambda token: token.type == Type.KEYWORD and token.string == 'for',\n end_func=lambda token: token.type == Type.SEMICOLON,\n distance=None,\n reverse=True)\n\n if not for_token:\n self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',\n token, Position.All(token.string))\n\n elif type == Type.START_PAREN:\n if token.previous and token.previous.type == Type.KEYWORD:\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"(\"',\n token, Position.AtBeginning())\n elif token.previous and token.previous.type == Type.WHITESPACE:\n before_space = token.previous.previous\n if (before_space and before_space.line_number == token.line_number and\n before_space.type == Type.IDENTIFIER):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"(\"',\n token.previous, Position.All(token.previous.string))\n\n elif type == Type.START_BRACKET:\n if (not first_in_line and token.previous.type == Type.WHITESPACE and\n last_non_space_token and\n last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"[\"',\n token.previous, Position.All(token.previous.string))\n # If the [ token is the first token in a line we shouldn't complain\n # about a missing space before [. 
This is because some Ecma script\n # languages allow syntax like:\n # [Annotation]\n # class MyClass {...}\n # So we don't want to blindly warn about missing spaces before [.\n # In the the future, when rules for computing exactly how many spaces\n # lines should be indented are added, then we can return errors for\n # [ tokens that are improperly indented.\n # For example:\n # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =\n # [a,b,c];\n # should trigger a proper indentation warning message as [ is not indented\n # by four spaces.\n elif (not first_in_line and token.previous and\n not token.previous.type in (\n [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +\n Type.EXPRESSION_ENDER_TYPES)):\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"[\"',\n token, Position.AtBeginning())\n\n elif type in (Type.END_PAREN, Type.END_BRACKET):\n # Ensure there is no space before closing parentheses, except when\n # it's in a for statement with an omitted section, or when it's at the\n # beginning of a line.\n if (token.previous and token.previous.type == Type.WHITESPACE and\n not token.previous.IsFirstInLine() and\n not (last_non_space_token and last_non_space_token.line_number ==\n token.line_number and\n last_non_space_token.type == Type.SEMICOLON)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"%s\"' %\n token.string, token.previous, Position.All(token.previous.string))\n\n if token.type == Type.END_BRACKET:\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of array literal', last_code,\n Position.All(last_code.string))\n\n elif type == Type.WHITESPACE:\n if self.ILLEGAL_TAB.search(token.string):\n if token.IsFirstInLine():\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace before \"%s\"' % token.next.string,\n token, Position.All(token.string))\n else:\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace after \"%s\"' % token.previous.string,\n token, Position.All(token.string))\n\n # Check whitespace length if it's not the first token of the line and\n # if it's not immediately before a comment.\n if last_in_line:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position.All(token.string))\n elif not first_in_line and not token.next.IsComment():\n if token.length > 1:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"%s\"' %\n token.previous.string, token,\n Position(1, len(token.string) - 1))\n\n elif type == Type.OPERATOR:\n last_code = token.metadata.last_code\n\n if not self._ExpectSpaceBeforeOperator(token):\n if (token.previous and token.previous.type == Type.WHITESPACE and\n last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before \"%s\"' % token.string, token.previous,\n Position.All(token.previous.string))\n\n elif (token.previous and\n not token.previous.IsComment() and\n token.previous.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space before \"%s\"' % token.string, token,\n Position.AtBeginning())\n\n # Check that binary operators are not used to start lines.\n if ((not last_code or last_code.line_number != token.line_number) and\n not token.metadata.IsUnaryOperator()):\n self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,\n 'Binary operator should go on previous line \"%s\"' % 
token.string,\n token)\n\n elif type == Type.DOC_FLAG:\n flag = token.attached_object\n\n if flag.flag_type == 'bug':\n # TODO(robbyw): Check for exactly 1 space on the left.\n string = token.next.string.lstrip()\n string = string.split(' ', 1)[0]\n\n if not string.isdigit():\n self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,\n '@bug should be followed by a bug number', token)\n\n elif flag.flag_type == 'suppress':\n if flag.type is None:\n # A syntactically invalid suppress tag will get tokenized as a normal\n # flag, indicating an error.\n self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,\n 'Invalid suppress syntax: should be @suppress {errortype}. '\n 'Spaces matter.', token)\n elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:\n self._HandleError(errors.INVALID_SUPPRESS_TYPE,\n 'Invalid suppression type: %s' % flag.type,\n token)\n\n elif FLAGS.strict and flag.flag_type == 'author':\n # TODO(user): In non strict mode check the author tag for as much as\n # it exists, though the full form checked below isn't required.\n string = token.next.string\n result = self.AUTHOR_SPEC.match(string)\n if not result:\n self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,\n 'Author tag line should be of the form: '\n '@author foo@somewhere.com (Your Name)',\n token.next)\n else:\n # Check spacing between email address and name. Do this before\n # checking earlier spacing so positions are easier to calculate for\n # autofixing.\n num_spaces = len(result.group(2))\n if num_spaces < 1:\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after email address',\n token.next, Position(result.start(2), 0))\n elif num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space after email address',\n token.next,\n Position(result.start(2) + 1, num_spaces - 1))\n\n # Check for extra spaces before email address. Can't be too few, if\n # not at least one we wouldn't match @author tag.\n num_spaces = len(result.group(1))\n if num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before email address',\n token.next, Position(1, num_spaces - 1))\n\n elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and\n not self._limited_doc_checks):\n if flag.flag_type == 'param':\n if flag.name is None:\n self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,\n 'Missing name in @param tag', token)\n\n if not flag.description or flag.description is None:\n flag_name = token.type\n if 'name' in token.values:\n flag_name = '@' + token.values['name']\n self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,\n 'Missing description in %s tag' % flag_name, token)\n else:\n self._CheckForMissingSpaceBeforeToken(flag.description_start_token)\n\n # We want punctuation to be inside of any tags ending a description,\n # so strip tags before checking description. See bug 1127192. 
Note\n # that depending on how lines break, the real description end token\n # may consist only of stripped html and the effective end token can\n # be different.\n end_token = flag.description_end_token\n end_string = htmlutil.StripTags(end_token.string).strip()\n while (end_string == '' and not\n end_token.type in Type.FLAG_ENDING_TYPES):\n end_token = end_token.previous\n if end_token.type in Type.FLAG_DESCRIPTION_TYPES:\n end_string = htmlutil.StripTags(end_token.string).rstrip()\n\n if not (end_string.endswith('.') or end_string.endswith('?') or\n end_string.endswith('!')):\n # Find the position for the missing punctuation, inside of any html\n # tags.\n desc_str = end_token.string.rstrip()\n while desc_str.endswith('>'):\n start_tag_index = desc_str.rfind('<')\n if start_tag_index < 0:\n break\n desc_str = desc_str[:start_tag_index].rstrip()\n end_position = Position(len(desc_str), 0)\n\n self._HandleError(\n errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,\n ('%s descriptions must end with valid punctuation such as a '\n 'period.' % token.string),\n end_token, end_position)\n\n if flag.flag_type in state.GetDocFlag().HAS_TYPE:\n if flag.type_start_token is not None:\n self._CheckForMissingSpaceBeforeToken(\n token.attached_object.type_start_token)\n\n if flag.type and flag.type != '' and not flag.type.isspace():\n self._CheckJsDocType(token)\n\n if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):\n if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and\n token.values['name'] not in FLAGS.custom_jsdoc_tags):\n self._HandleError(errors.INVALID_JSDOC_TAG,\n 'Invalid JsDoc tag: %s' % token.values['name'], token)\n\n if (FLAGS.strict and token.values['name'] == 'inheritDoc' and\n type == Type.DOC_INLINE_FLAG):\n self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,\n 'Unnecessary braces around @inheritDoc',\n token)\n\n elif type == Type.SIMPLE_LVALUE:\n identifier = token.values['identifier']\n\n if ((not state.InFunction() or state.InConstructor()) and\n not state.InParentheses() and not state.InObjectLiteralDescendant()):\n jsdoc = state.GetDocComment()\n if not state.HasDocComment(identifier):\n # Only test for documentation on identifiers with .s in them to\n # avoid checking things like simple variables. We don't require\n # documenting assignments to .prototype itself (bug 1880803).\n if (not state.InConstructor() and\n identifier.find('.') != -1 and not\n identifier.endswith('.prototype') and not\n self._limited_doc_checks):\n comment = state.GetLastComment()\n if not (comment and comment.lower().count('jsdoc inherited')):\n self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,\n \"No docs found for member '%s'\" % identifier,\n token);\n elif jsdoc and (not state.InConstructor() or\n identifier.startswith('this.')):\n # We are at the top level and the function/member is documented.\n if identifier.endswith('_') and not identifier.endswith('__'):\n if jsdoc.HasFlag('override'):\n self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,\n '%s should not override a private member.' % identifier,\n jsdoc.GetFlag('override').flag_token)\n # Can have a private class which inherits documentation from a\n # public superclass.\n if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):\n self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,\n '%s should not inherit from a private member.' 
% identifier,\n jsdoc.GetFlag('inheritDoc').flag_token)\n if (not jsdoc.HasFlag('private') and\n not ('underscore' in jsdoc.suppressions)):\n self._HandleError(errors.MISSING_PRIVATE,\n 'Member \"%s\" must have @private JsDoc.' %\n identifier, token)\n if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:\n self._HandleError(errors.UNNECESSARY_SUPPRESS,\n '@suppress {underscore} is not necessary with @private',\n jsdoc.suppressions['underscore'])\n elif jsdoc.HasFlag('private'):\n self._HandleError(errors.EXTRA_PRIVATE,\n 'Member \"%s\" must not have @private JsDoc' %\n identifier, token)\n\n if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))\n and not identifier.startswith('MSG_')\n and identifier.find('.MSG_') == -1):\n # TODO(user): Update error message to show the actual invalid\n # tag, either @desc or @hidden.\n self._HandleError(errors.INVALID_USE_OF_DESC_TAG,\n 'Member \"%s\" should not have @desc JsDoc' % identifier,\n token)\n\n # Check for illegaly assigning live objects as prototype property values.\n index = identifier.find('.prototype.')\n # Ignore anything with additional .s after the prototype.\n if index != -1 and identifier.find('.', index + 11) == -1:\n equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)\n next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)\n if next_code and (\n next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or\n next_code.IsOperator('new')):\n self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,\n 'Member %s cannot have a non-primitive value' % identifier,\n token)\n\n elif type == Type.END_PARAMETERS:\n # Find extra space at the end of parameter lists. We check the token\n # prior to the current one when it is a closing paren.\n if (token.previous and token.previous.type == Type.PARAMETERS\n and self.ENDS_WITH_SPACE.search(token.previous.string)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \")\"',\n token.previous)\n\n jsdoc = state.GetDocComment()\n if state.GetFunction().is_interface:\n if token.previous and token.previous.type == Type.PARAMETERS:\n self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,\n 'Interface constructor cannot have parameters',\n token.previous)\n elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')\n and not jsdoc.InheritsDocumentation()\n and not state.InObjectLiteralDescendant() and not\n jsdoc.IsInvalidated()):\n distance, edit = jsdoc.CompareParameters(state.GetParams())\n if distance:\n params_iter = iter(state.GetParams())\n docs_iter = iter(jsdoc.ordered_params)\n\n for op in edit:\n if op == 'I':\n # Insertion.\n # Parsing doc comments is the same for all languages\n # but some languages care about parameters that don't have\n # doc comments and some languages don't care.\n # Languages that don't allow variables to by typed such as\n # JavaScript care but languages such as ActionScript or Java\n # that allow variables to be typed don't care.\n if not self._limited_doc_checks:\n self.HandleMissingParameterDoc(token, params_iter.next())\n\n elif op == 'D':\n # Deletion\n self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,\n 'Found docs for non-existing parameter: \"%s\"' %\n docs_iter.next(), token)\n elif op == 'S':\n # Substitution\n if not self._limited_doc_checks:\n self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,\n 'Parameter mismatch: got \"%s\", expected \"%s\"' %\n (params_iter.next(), docs_iter.next()), token)\n\n else:\n # Equality - just advance the iterators\n params_iter.next()\n 
docs_iter.next()\n\n elif type == Type.STRING_TEXT:\n # If this is the first token after the start of the string, but it's at\n # the end of a line, we know we have a multi-line string.\n if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,\n Type.DOUBLE_QUOTE_STRING_START) and last_in_line:\n self._HandleError(errors.MULTI_LINE_STRING,\n 'Multi-line strings are not allowed', token)\n\n\n # This check is orthogonal to the ones above, and repeats some types, so\n # it is a plain if and not an elif.\n if token.type in Type.COMMENT_TYPES:\n if self.ILLEGAL_TAB.search(token.string):\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in comment \"%s\"' % token.string, token)\n\n trimmed = token.string.rstrip()\n if last_in_line and token.string != trimmed:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position(len(trimmed), len(token.string) - len(trimmed)))\n\n # This check is also orthogonal since it is based on metadata.\n if token.metadata.is_implied_semicolon:\n self._HandleError(errors.MISSING_SEMICOLON,\n 'Missing semicolon at end of line', token)" ]
[ "0.5724986", "0.55127585", "0.53830534", "0.52930975", "0.5107975", "0.5093595", "0.49485403", "0.49321708", "0.4916438", "0.49055704", "0.48494673", "0.48381567", "0.48101753", "0.47975814", "0.47125", "0.47098485", "0.46954077", "0.46896818", "0.46814248", "0.46712714", "0.46646854", "0.4634048", "0.46017638", "0.45813024", "0.4556283", "0.450957", "0.4505944", "0.44940138", "0.44937068", "0.448682", "0.44841897", "0.4480688", "0.44770658", "0.44486344", "0.44347048", "0.4433851", "0.4432114", "0.44232985", "0.44221944", "0.44050238", "0.4401158", "0.43982983", "0.43982983", "0.439545", "0.4394934", "0.43893278", "0.43805104", "0.43786457", "0.4375495", "0.4374358", "0.4365047", "0.43635517", "0.43620822", "0.43481934", "0.43462273", "0.43298227", "0.43263862", "0.43227544", "0.43226266", "0.43175876", "0.43153307", "0.43098682", "0.43016654", "0.42973614", "0.42917404", "0.42834297", "0.4278487", "0.42715386", "0.42637384", "0.4259345", "0.42566186", "0.42543638", "0.4252329", "0.42507994", "0.42482257", "0.42468795", "0.42316186", "0.42294696", "0.4216914", "0.42143717", "0.42037082", "0.41969696", "0.41947836", "0.41937393", "0.4189317", "0.4186726", "0.41850019", "0.41822073", "0.41690153", "0.4164484", "0.4161467", "0.41565073", "0.41565073", "0.4156455", "0.41553688", "0.41542065", "0.41539723", "0.41533074", "0.41502684", "0.41466373" ]
0.6816487
0
Initializes an FDSN Web Service client. >>> client = Client("TAPS")
def __init__(self, base_url="TAPS", major_versions=None, user=None, password=None, user_agent=DEFAULT_USER_AGENT, debug=False, timeout=120, service_mappings=None, jwt_access_token=None, jwt_refresh_token=None): self.debug = debug self.user = user self.timeout = timeout # Cache for the webservice versions. This makes interactive use of # the client more convenient. self.__version_cache = {} if base_url.upper() in URL_MAPPINGS: url_mapping = base_url.upper() base_url = URL_MAPPINGS[url_mapping] url_subpath = URL_DEFAULT_SUBPATH else: if base_url.isalpha(): msg = "The FDSN service shortcut `{}` is unknown."\ .format(base_url) raise ValueError(msg) url_subpath = URL_DEFAULT_SUBPATH # Make sure the base_url does not end with a slash. base_url = base_url.strip("/") # Catch invalid URLs to avoid confusing error messages if not self._validate_base_url(base_url): msg = "The FDSN service base URL `{}` is not a valid URL."\ .format(base_url) raise ValueError(msg) self.base_url = base_url self.url_subpath = url_subpath self._set_opener(user, password) self.request_headers = {"User-Agent": user_agent} # Avoid mutable kwarg. if major_versions is None: major_versions = {} # Make a copy to avoid overwriting the default service versions. self.major_versions = DEFAULT_SERVICE_VERSIONS.copy() self.major_versions.update(major_versions) # Avoid mutable kwarg. if service_mappings is None: service_mappings = {} self._service_mappings = service_mappings if self.debug is True: print("Base URL: %s" % self.base_url) if self._service_mappings: print("Custom service mappings:") for key, value in self._service_mappings.items(): print("\t%s: '%s'" % (key, value)) print("Request Headers: %s" % str(self.request_headers)) self.services = DEFAULT_SERVICES self.jwt_access_token = jwt_access_token self.jwt_refresh_token = jwt_refresh_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_setup(self):\n self.client = Client()", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def init_client(self, client):\n self.client = client", "def __init__(self, client=None):\n self._client = client", "def __init__(self, client):\n self._client = client", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def __init__(self, client):\n\n self.client = client", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def __init__(self, client):\n self.client = client", "def create_client(self) -> None:\n pass", "def __init__(self, username=None, password=None):\n self._username = username\n self._password = password\n self._suds_client = None", "def client():\n\n client = Client()\n return client", "def __init__(\n self,\n clientID,\n secretID,\n redirctURI,\n username\n ):\n\n print('SpotifClient starts...')\n \n self.client_id = clientID\n self.secret_id = secretID\n self.redirect_uri = redirctURI\n self.username = username\n self._isConnected = False\n\n #self.Connect()", "def __init__(self, client):\n self.client = client\n self.call_params = {\n }", "def __init__(self, uri, service_id, api_key=None):\n DSSBaseClient.__init__(self, \"%s/%s\" % (uri, \"public/api/v1/%s\" % service_id), api_key)", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()", "def __init__(self, auth_args, name, desc,\n srv_chain, flow_conf):\n logger = logging.getLogger(__name__)\n self.conn = connection.Connection(**auth_args)\n self.pc_client = netsfc_clt.SFCClient(auth_args, logger)\n\n self.name = name\n self.desc = desc\n self.srv_chain = srv_chain\n self.flow_conf = flow_conf", "def _init_keystone_client(self, username, password, tenant_id, auth_url):\n\n __logger__.debug(\"Init Keystone Client\")\n self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,\n auth_url=auth_url)", "def __init__(self):\n self.service = Client(key=GEO_LOCATION_API_KEY)", "def __init__(self, app_key=None, app_sid=None, base_url=None,\n api_version=None, debug=False, proxy=None):\n configuration = Configuration(app_key=app_key,\n app_sid=app_sid,\n base_url=base_url,\n api_version=api_version,\n debug=debug,\n\t\t\t\t\t\t\t\t\t proxy=proxy)\n self.api_client = ApiClient(configuration)", "def __init__(self, client, name):\n if not isinstance(client, couch.Client):\n raise Exception(\"'client' arg must be instance of couch.Client\")\n\n self.client = client\n self.name = name", "def __init__(self, api_key, client=Fetcher(FANART_URL)):\n self.api_key = api_key\n self.client = client", "def get_client():\n return Client(__address, authkey='strumamor')", "def _init_http_client(service_id=None, opts=None):\n if service_id:\n opts = _get_trs_opts(service_id)\n\n http_client = RequestsClient()\n\n http_client.set_api_key(host=opts['host'],\n api_key=opts['auth'],\n param_in='header')\n return http_client", "def _establish_client():\n logger.debug('SoapService - _establish_client()')\n try:\n client = zeep.Client(wsdl=settings.WSDL)\n except Exception as e:\n message = 'Unable to create soap client from wsdl file, error: {}'.format(e)\n logger.error(message)\n raise IOError(message)\n\n return client", "def __init__(self, **kwargs):\r\n super(Client, self).__init__()\r\n self.httpclient = 
client.HTTPClient(**kwargs)\r\n self.version = '2.0'\r\n self.format = 'json'\r\n self.action_prefix = \"/v%s\" % (self.version)\r\n self.retries = 0\r\n self.retry_interval = 1", "def __init__(self, conn, iTag, srvType, cb):\r\n super(ServiceClient, self).__init__(conn, iTag, srvType)\r\n\r\n self._cb = cb", "def __init__(self):\n self.client.ssl = True\n self.client.http_client_debug = False\n self.createBaseFolder()", "def build_client(url=None, port_name=None, **kwargs):\n if url is None and port_name is None:\n mirror = get_online_vso_url()\n if mirror is None:\n raise ConnectionError(\"No online VSO mirrors could be found.\")\n url = mirror['url']\n port_name = mirror['port']\n elif url and port_name:\n if not check_connection(url):\n raise ConnectionError(f\"Can't connect to url {url}\")\n else:\n raise ValueError(\"Both url and port_name must be specified if either is.\")\n\n if \"plugins\" not in kwargs:\n kwargs[\"plugins\"] = [SunPyLoggingZeepPlugin()]\n\n client = zeep.Client(url, port_name=port_name, **kwargs)\n client.set_ns_prefix('VSO', 'http://virtualsolar.org/VSO/VSOi')\n return client", "def __init__(self, api_key, client_id = \"ucsd_sysnet_group\", client_version = \"1.0\"):\n self.client_id = client_id\n self.client_version = client_version\n # Google API client\n # https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.discovery-module.html\n self.service = build('safebrowsing', 'v4', developerKey=api_key, cache_discovery=False)\n self.next_request_no_sooner_than = None", "def initialize_watson_client(self):\n naturalLanguageUnderstandingClient = NaturalLanguageUnderstandingV1(\n version='2017-02-27',\n username='<username>',\n password='<password>')\n return naturalLanguageUnderstandingClient", "def InitClient(options):\n client = gdata.spreadsheet.service.SpreadsheetsService()\n client.email = options.username\n client.password = options.password\n client.source = 'Spread Sheet'\n client.account_type = options.account_type\n print 'Logging in as %s (%s)' % (client.email, client.account_type)\n client.ProgrammaticLogin()\n return client", "def init_client():\n init_config()\n begin_sending_packets()", "def client():\n return Client(**common_data.AUTH_ARGS)", "def __init__(self):\n\n\t\tself.account_sid = os.environ['TWILIO_ACCOUNT_SID']\n\t\tself.auth_token = os.environ['TWILIO_AUTH_TOKEN']\n\t\tself.twilio_phone_number = os.environ['TWILIO_PHONE_NUMBER']\n\t\tself.client = Client(self.account_sid, self.auth_token)\n\n\t\tself.call_domain = 'http://twimlets.com/echo?Twiml='", "def __init__(self, wsdlSource, namespace, username, password):\n self.__username = username\n self.__password = password\n\n self.sfdc = None # container for the session. 
Used for all calls.\n\n self.userInfo = {}\n self.mapHdrRaw = {} # map of soap headers to set.\n\n self.__config()\n\n self.wsdlSource = wsdlSource\n self.sfdc = WSDL.Proxy(self.wsdlSource)\n\n # Assigning sforce namespace to each sforce method\n #\n # For some reason WSDLTools does not assign\n # \"urn:partner.soap.sforce.com\"\n # namespace to operations when it parses binding\n for method in self.sfdc.methods.itervalues():\n method.namespace = namespace\n continue\n\n self.__login()\n\n return", "def __init__(self, client_id: str):\n\n self._cs = aiohttp.ClientSession(\n loop=asyncio.get_event_loop(),\n raise_for_status=True,\n headers={\"Client-ID\": client_id},\n )", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def __init__(self, name, client):\n self.name = name\n self.client = client", "def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def __init__(self, zk_client):\n self._settings = SearchServiceSettings(zk_client)\n self.solr = SolrAPI(zk_client, SOLR_ZK_ROOT, self._settings)", "def CreateClient():\n client = gdata.docs.client.DocsClient(source=SampleConfig.APP_NAME)\n client.http_client.debug = SampleConfig.DEBUG\n # Authenticate the user with CLientLogin, OAuth, or AuthSub.\n try:\n gdata.sample_util.authorize_client(\n client,\n service=client.auth_service,\n source=client.source,\n scopes=client.auth_scopes\n )\n except gdata.client.BadAuthentication:\n exit('Invalid user credentials given.')\n except gdata.client.Error:\n exit('Login Error')\n return client", "def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = 'bernard@disgui.se'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))", "def __init__(self, use_datetime=0,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None,\r\n ignoreAbruptClose=False):\r\n\r\n # self._connection is new in python 2.7, since we're using it here,\r\n # we'll add this ourselves too, just in case we're pre-2.7\r\n self._connection = (None, None)\r\n xmlrpclib.Transport.__init__(self, use_datetime)\r\n self.ignoreAbruptClose = ignoreAbruptClose\r\n ClientHelper.__init__(self,\r\n username, password, \r\n certChain, privateKey,\r\n checker,\r\n settings)", "def __init__(self, address, username=None, password=None, debug=False):\n self.container_id = \"eventhub.pysdk-\" + str(uuid.uuid4())[:8]\n self.address = urlparse(address)\n url_username = unquote_plus(self.address.username) if self.address.username else None\n username = username or url_username\n url_password = unquote_plus(self.address.password) if self.address.password else None\n password = password or url_password\n if not username or not password:\n raise 
ValueError(\"Missing username and/or password.\")\n auth_uri = \"sb://{}{}\".format(self.address.hostname, self.address.path)\n self.auth = self._create_auth(auth_uri, username, password)\n self.connection = None\n self.debug = debug\n\n self.clients = []\n self.stopped = False\n log.info(\"{}: Created the Event Hub client\".format(self.container_id))", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def __init__(self, config_obj, wsdl_name, *args, **kwargs):\r\n self.logger = logging.getLogger('fedex')\r\n \"\"\"@ivar: Python logger instance with name 'fedex'.\"\"\"\r\n self.config_obj = config_obj\r\n \"\"\"@ivar: The FedexConfig object to pull auth info from.\"\"\"\r\n\r\n # If the config object is set to use the test server, point\r\n # suds at the test server WSDL directory.\r\n if config_obj.use_test_server:\r\n self.logger.info(\"Using test server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path,\r\n 'test_server_wsdl', wsdl_name)\r\n else:\r\n self.logger.info(\"Using production server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path, wsdl_name)\r\n\r\n self.client = Client('file:///%s' % self.wsdl_path.lstrip('/'))\r\n\r\n #print self.client\r\n\r\n self.VersionId = None\r\n \"\"\"@ivar: Holds details on the version numbers of the WSDL.\"\"\"\r\n self.WebAuthenticationDetail = None\r\n \"\"\"@ivar: WSDL object that holds authentication info.\"\"\"\r\n self.ClientDetail = None\r\n \"\"\"@ivar: WSDL object that holds client account details.\"\"\"\r\n self.response = None\r\n \"\"\"@ivar: The response from Fedex. You will want to pick what you\r\n want out here here. 
This object does have a __str__() method,\r\n you'll want to print or log it to see what possible values\r\n you can pull.\"\"\"\r\n self.TransactionDetail = None\r\n \"\"\"@ivar: Holds customer-specified transaction IDs.\"\"\"\r\n\r\n self.__set_web_authentication_detail()\r\n self.__set_client_detail()\r\n self.__set_version_id()\r\n self.__set_transaction_detail(*args, **kwargs)\r\n self._prepare_wsdl_objects()", "def __init__(self):\r\n self._zendesk_instance = zendesk.Zendesk(\r\n settings.ZENDESK_URL,\r\n settings.ZENDESK_USER,\r\n settings.ZENDESK_API_KEY,\r\n use_api_token=True,\r\n api_version=2,\r\n # As of 2012-05-08, Zendesk is using a CA that is not\r\n # installed on our servers\r\n client_args={\"disable_ssl_certificate_validation\": True}\r\n )", "def __init__(self):\n self.client = language.LanguageServiceClient()", "def __init__(self, contacts_client):\n self.contacts_client = contacts_client", "def __init__(self, server_address):\n if sys.version_info[0] > 2:\n self.__target = server_address\n else:\n self.__target = server_address.encode('ascii')\n self.__service_name = \"twirp.twirptest.Haberdasher\"", "def get_apiclient():\n api_server = [(fgt_info['address'], fgt_info['port'],\n 'https' == fgt_info['protocol'])]\n return client.FortiosApiClient(\n api_server, fgt_info['username'], fgt_info['password'])", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(\n self,\n message_serializer: 'Serializer',\n timeout: int = 60,\n headers_callback: Optional[Callable[[], Dict[str, str]]] = None):\n self._client = DaprHttpClient(message_serializer, timeout, headers_callback)", "def __init__(self, client):\n self._client = client\n self._argument_converter = ArgumentConverter()", "def __init__(self, client):\n super().__init__(client)", "def __init__(self, client):\n\n self.__route_tag = \"wallet-ng\"\n self.__path = \"wallet\"\n self.__client= client", "def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret", "def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret", "def create_client(self) -> None:\n self._client = gapic.JobServiceClient(\n client_options=dict(api_endpoint=self._region + _UCAIP_ENDPOINT_SUFFIX))", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def __init__(\n self,\n *,\n credentials: Optional[ga_credentials.Credentials] = None,\n transport: Union[str, 
DataLabelingServiceTransport] = \"grpc_asyncio\",\n client_options: Optional[ClientOptions] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n self._client = DataLabelingServiceClient(\n credentials=credentials,\n transport=transport,\n client_options=client_options,\n client_info=client_info,\n )", "def __init__(self, client, **kwargs):\n self._ac = client\n self._wrapped = kwargs", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n self.client_socket = open_client_socket()", "def hello_svc_client():\n from clients.hello_svc import HelloServiceClient\n return HelloServiceClient()", "def __init__(self, username=None, password=None, account_sid=None,\n http_client=None, environment=None):\n environment = environment or os.environ\n \n self.username = username or environment.get('TWILIO_ACCOUNT_SID')\n \"\"\" :type : str \"\"\"\n self.password = password or environment.get('TWILIO_AUTH_TOKEN')\n \"\"\" :type : str \"\"\"\n self.account_sid = account_sid or self.username\n \"\"\" :type : str \"\"\"\n \n if not self.username or not self.password:\n raise TwilioException(\"Credentials are required to create a TwilioClient\")\n \n self.auth = (self.username, self.password)\n \"\"\" :type : tuple(str, str) \"\"\"\n self.http_client = http_client or Httplib2Client()\n \"\"\" :type : HttpClient \"\"\"\n \n # Domains\n self._api = None\n self._conversations = None\n self._ip_messaging = None\n self._lookups = None\n self._monitor = None\n self._pricing = None\n self._taskrouter = None\n self._trunking = None", "def test_query_client_instantiated():\n client = ConfigureClients()\n assert client.query_client", "def __init__(\n self,\n username=None,\n password=None,\n api_timeout=API_TIMEOUT,\n wss_timeout=WSS_TIMEOUT,\n client_session=None,\n ssl_verify=True,\n ssl_cafile=\"\",\n device_id=None,\n ):\n self._client_session = client_session or aiohttp.ClientSession()\n\n super().__init__(\n username=username,\n password=password,\n api_timeout=api_timeout,\n wss_timeout=wss_timeout,\n ssl_verify=ssl_verify,\n ssl_cafile=ssl_cafile,\n device_id=device_id,\n )", "def __init__(self, client, name):\n self._client = client\n self._attr_name = name", "def setUp(self):\r\n super(SSLClientTest, self).setUp()\r\n self.client = Client()\r\n self.factory = RequestFactory()\r\n self.mock = Mock()", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def serviceClient(self, iTag, srvType, cb=None):\r\n if cb and not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n return ServiceClient(self, iTag, srvType, cb)", "def test_client(test_username, test_api_key):\n return ViClient(username=test_username, api_key=test_api_key,\n url=\"https://vectorai-development-api-vectorai-test-api.azurewebsites.net/\")", "def __init__(self, hostname: str, port: int):\n # Create a dictionary of topics and callbacks\n self.callback_dict = dict()\n\n self.client = mqtt.Client(userdata=self.callback_dict)\n self.client.on_message = _on_message_handler\n self.client.connect(hostname, port, 60)", "def __init__(self, client):\n\n self.__route_tag = \"wallet-ng\"\n self.__path = \"poe\"\n self.__client = client", "def setUp(self):\n self.client = DummyClient()", "def client():\n from csuibot import app\n app.config['TESTING'] = True\n return app.test_client()", "def client():", "def __init__(self, client: Union[\"ConnectionType\", \"SessionType\"]) -> None:\n self.client: 
Union[\"ConnectionType\", \"SessionType\"] = client", "def __init__(self, client: Union[\"ConnectionType\", \"SessionType\"]) -> None:\n self.client: Union[\"ConnectionType\", \"SessionType\"] = client", "def __init__(self, client: Union[\"ConnectionType\", \"SessionType\"]) -> None:\n self.client: Union[\"ConnectionType\", \"SessionType\"] = client", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def typesense_client():\n client = typesense.Client({\n 'nodes': [{\n 'host': settings.TYPESENSE_HOST,\n 'port': settings.TYPESENSE_PORT,\n 'protocol': settings.TYPESENSE_PROTOCOL,\n }],\n 'api_key': settings.TYPESENSE_API_KEY,\n 'connection_timeout_seconds': settings.TYPESENSE_CONN_TIMEOUT,\n })\n return client", "def build_client(module):\n return drac.DRACClient(module.params['address'],\n module.params['username'],\n module.params['password'])", "def create_client(email, password):\n gd_client = gdata.contacts.service.ContactsService()\n gd_client.email = email\n gd_client.password = password\n gd_client.source = 'syncContacts'\n gd_client.ProgrammaticLogin()\n return gd_client", "def __init__(self, service_name):\n self.service_name = service_name", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def setup_class(cls):\n cls.client = APP.test_client()", "def get_feeds_client(sync_config: SyncConfig) -> FeedServiceClient:\n\n logger.debug(\n \"Initializing a feeds client: url=%s, user=%s, conn_timeout=%s, read_timeout=%s\",\n sync_config.url,\n sync_config.username,\n sync_config.connection_timeout_seconds,\n sync_config.read_timeout_seconds,\n )\n\n return FeedServiceClient(\n feeds_endpoint=sync_config.url,\n http_client=HTTPBasicAuthClient(\n username=sync_config.username,\n password=sync_config.password,\n connect_timeout=sync_config.connection_timeout_seconds,\n read_timeout=sync_config.read_timeout_seconds,\n verify=sync_config.ssl_verify,\n ),\n )", "def test_create_client(self):\n pass", "def setup_client(url: str, username: str, password: str, verify_ssl: bool) -> Client:\n client = Client(url, verify=verify_ssl)\n client.login(username, password)\n # Get an arbitrary attribute to test if connection succeeds\n client.get_alternative_speed_status()\n return client", "def __init__(self, config, **kwargs):\n validate_config(config, signer=kwargs.get('signer'))\n if 'signer' in kwargs:\n signer = kwargs['signer']\n else:\n signer = Signer(\n tenancy=config[\"tenancy\"],\n user=config[\"user\"],\n fingerprint=config[\"fingerprint\"],\n private_key_file_location=config.get(\"key_file\"),\n pass_phrase=get_config_value_or_default(config, \"pass_phrase\"),\n private_key_content=config.get(\"key_content\")\n )\n\n base_client_init_kwargs = {\n 'regional_client': True,\n 'service_endpoint': kwargs.get('service_endpoint'),\n 'timeout': kwargs.get('timeout'),\n 'base_path': '/20160918',\n 'skip_deserialization': kwargs.get('skip_deserialization', False)\n }\n 
self.base_client = BaseClient(\"identity\", config, signer, identity_type_mapping, **base_client_init_kwargs)\n self.retry_strategy = kwargs.get('retry_strategy')" ]
[ "0.68235403", "0.67337793", "0.65410775", "0.6467698", "0.6393337", "0.63824844", "0.6354962", "0.6354742", "0.632095", "0.63021624", "0.61219805", "0.6116711", "0.60593694", "0.5988456", "0.59749275", "0.5955179", "0.5946236", "0.594365", "0.59188706", "0.59016854", "0.5899715", "0.5895551", "0.58736527", "0.5864592", "0.5843246", "0.5835595", "0.58292127", "0.5820486", "0.58094114", "0.57819426", "0.5770049", "0.57681775", "0.5761714", "0.57593423", "0.57582486", "0.5745219", "0.5728852", "0.5719978", "0.5716214", "0.5689097", "0.5687887", "0.5687012", "0.56860954", "0.56853485", "0.56848085", "0.56645197", "0.5661109", "0.5655017", "0.56544846", "0.56544244", "0.5646141", "0.564595", "0.5639696", "0.56358016", "0.56358016", "0.56358016", "0.56358016", "0.56358016", "0.56346905", "0.56294376", "0.5613893", "0.5605477", "0.5584948", "0.55836", "0.55690444", "0.55574614", "0.5552558", "0.55501485", "0.5550099", "0.55494654", "0.55437046", "0.55437046", "0.55425847", "0.55365896", "0.5528643", "0.55156744", "0.5512799", "0.55080783", "0.5500816", "0.5500107", "0.54888815", "0.54834116", "0.5482449", "0.5482283", "0.5477745", "0.54741716", "0.5473671", "0.5473671", "0.5473671", "0.5460991", "0.54606056", "0.54601413", "0.5458653", "0.54557914", "0.5455742", "0.5453873", "0.5449499", "0.5449117", "0.5444395", "0.5443927" ]
0.6459557
4
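Editorial note (illustrative sketch, not a dataset row): the query/document pair above describes an FDSN web service client whose __init__ accepts either a shortcut key present in URL_MAPPINGS (e.g. "TAPS", as used in the query) or a full base URL, plus optional user/password credentials. A minimal usage sketch under those assumptions — the import path below is hypothetical and not confirmed anywhere in this document:

    # Hypothetical import path; the class is assumed to match the __init__
    # shown in the row above (shortcut lookup via URL_MAPPINGS, optional auth).
    from fdsn.client import Client

    client = Client("TAPS", debug=True, timeout=60)      # resolve the "TAPS" shortcut
    authed = Client("https://service.example.org",       # explicit base URL (must be a valid URL)
                    user="demo", password="demo")        # credentials handed to _set_opener()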
Set user and password resulting in subsequent web service requests for waveforms being authenticated for potential access to restricted data. This will overwrite any previously set up credentials/authentication.
def set_credentials(self, user, password): self.user = user self._set_opener(user, password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_credentials():", "def setAuthenticationCredentials(self, username, password):\n self.PDFreactorConfiguration.in1[\"authenticationUsername\"] = username\n self.PDFreactorConfiguration.in1[\"authenticationPassword\"] = password", "def set_credentials(self, *args, **kwargs):\n pass", "def set_auth_credentials():\n import os\n from passlib.apps import custom_app_context as pwd_context\n\n os.environ[\"AUTH_USERNAME\"] = \"testme\"\n os.environ[\"AUTH_PASSWORD\"] = pwd_context.hash(\"foobar\")", "def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value", "def basic_authentication(self, username: str, password: str) -> None:\n self.api_session.auth = (username, password)", "def set_credentials(self, ipv4, user, passwd):\n self.ip = ipv4\n self.username = user\n self.password = passwd", "def testUpdateCredentials(self):\r\n \r\n credentials = dict()\r\n credentials[\"username\"] = \"\"\r\n credentials[\"password\"] = \"\"\r\n self._factory.updateCredentials(credentials)", "def setCredentials( self, login=MissingValue, password=MissingValue ):\n if login is not MissingValue:\n self._login = login\n if password is not MissingValue:\n self._password = password", "def set_credentials(self, authenticator):\n pass", "def set_user_credentials(self):\n\n self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, self.selectors['input_username_css'])))\n\n self.driver.find_element_by_css_selector(self.selectors['input_username_css'])\\\n .send_keys(os.environ.get('QKNOWS_USER'))\n self.driver.find_element_by_css_selector(self.selectors['input_password_css'])\\\n .send_keys(os.environ.get('QKNOWS_PASSWORD'))", "def winhttp_WinHttpSetCredentials(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"AuthTargets\", \"AuthScheme\", \"pwszUserName\", \"pwszPassword\", \"pAuthParams\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def SetCredentials(self, credentials):\n self._session[_CREDENTIAL_KEY] = credentials\n self._ReCreateUserInfo(credentials)", "def set_password(self, password):\n self.authentication.password = password", "def change_user(self, username, password):\n self.creds['username'] = username\n self.creds['password'] = password", "def set_credentials(self, username, password, url):\n # remove trailing slash off URL\n url = url.rstrip('/')\n # save variables to object\n self.url = url\n self.username = username\n self.password = password\n self.xml_rpc = '%s/server/xml.server.php' % (self.url)", "def _set_user_authenticated(user_id: int, device_id: int, value: bool = True) -> None:\n client = net_interface.get_user()\n client.is_authenticated = value\n client.user_id = user_id\n client.device_id = device_id", "def setCredentials(self,api_id,api_secret):\n self.api_id = api_id\n self.api_secret = api_secret", "def __init__(__self__, *,\n password: str,\n username: str):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"username\", username)", "def setup_user():\n if 'auth_user' in flask.session:\n user = models.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and 
args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def login(self):\n request = self.REQUEST\n response = request['RESPONSE']\n\n login = request.get('__ac_name', '')\n password = request.get('__ac_password', '')\n\n pas_instance = self._getPAS()\n\n if pas_instance is not None:\n pas_instance.updateCredentials(request, response, login, password)", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def impersonate_user(self, username, password):", "def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None", "def set_basic_auth(self, host, username, password):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def credentials(self, credentials):\n\n self._credentials = credentials", "def __set_web_authentication_detail(self):\r\n # Start of the authentication stuff.\r\n WebAuthenticationCredential = self.client.factory.create('WebAuthenticationCredential')\r\n WebAuthenticationCredential.Key = self.config_obj.key\r\n WebAuthenticationCredential.Password = self.config_obj.password\r\n\r\n # Encapsulates the auth credentials.\r\n WebAuthenticationDetail = self.client.factory.create('WebAuthenticationDetail')\r\n WebAuthenticationDetail.UserCredential = WebAuthenticationCredential\r\n self.WebAuthenticationDetail = WebAuthenticationDetail", "def _Authenticate(self):\n super(HttpRpcServer, self)._Authenticate()\n if self.save_cookies:\n StatusUpdate(\"Saving authentication cookies to %s\" % self.cookie_file)\n self.cookie_jar.save()", "def setUp(self) -> None:\n self.user = get_user_model().objects.create_user(\n 'mail@mail.com',\n 'password1'\n )\n self.client = APIClient()\n self.client.force_authenticate(self.user)", "def credentials(self) -> HTTPBasicAuth:\n if self.user is None or self.password is None:\n return None\n else:\n return HTTPBasicAuth(self.user, self.password)", "def setpassword(self, pwd):\n pass", "def _Authenticate(self):\r\n super(HttpRpcServer, self)._Authenticate()\r\n if self.save_cookies:\r\n StatusUpdate(\"Saving authentication cookies to %s\" % self.cookie_file)\r\n self.cookie_jar.save()", "def set_password(self, password):\n self.cloudserver.change_password(password)", "def setUp(self):\n self.credentials = {\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"test_bob@test.com\",\n \"password\": \"fglZfYmr%?,\",\n }", "def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. 
prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)", "def set_user_from_oauth(event):\n request = event.request\n request.verify_request()\n request.environ['REMOTE_USER'] = getattr(request, 'user', None)", "def set_password(self, password):\n from kalon.auth import encrypt_password\n self.document.password = encrypt_password(password)", "def authenticate(self):\n\n LOGGER.info(f\"Authenticating as {self.user['apple_id']}\")\n\n data = dict(self.user)\n\n # We authenticate every time, so \"remember me\" is not needed\n #data.update({\"extended_login\": False})\n data.update({\"extended_login\": True})\n\n try:\n req = self.session.post(\n self._base_login_url, params=self.params, data=json.dumps(data)\n )\n except PyiCloudAPIResponseException as error:\n msg = \"Invalid email/password combination.\"\n raise PyiCloudFailedLoginException(msg, error)\n\n self.data = req.json()\n self.params.update({\"dsid\": self.data[\"dsInfo\"][\"dsid\"]})\n self._webservices = self.data[\"webservices\"]\n\n if not path.exists(self._cookie_directory):\n mkdir(self._cookie_directory)\n self.session.cookies.save()\n LOGGER.debug(f\"Cookies saved to {self._get_cookiejar_path()}\")\n\n LOGGER.info(\"Authentication completed successfully\")\n LOGGER.debug(self.params)", "def _make_sure_credentials_are_set(self):\n if self.backend_options:\n if not os.environ.get('APCA_API_KEY_ID') and \\\n self.backend_options['key_id']:\n os.environ['APCA_API_KEY_ID'] = self.backend_options['key_id']\n if not os.environ.get('APCA_API_SECRET_KEY') and \\\n self.backend_options['secret']:\n os.environ['APCA_API_SECRET_KEY'] = self.backend_options[\n 'secret']\n if not os.environ.get('APCA_API_BASE_URL') and \\\n self.backend_options['base_url']:\n os.environ['APCA_API_BASE_URL'] = self.backend_options[\n 'base_url']", "def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def set_connection(cls, user_name, password, end_point, session_verify):\n if not session_verify:\n requests.packages.urllib3.disable_warnings()\n\n cls.user_name = user_name\n cls.password = password\n cls.end_point = end_point\n\n cls.session = requests.Session()\n cls.session.auth = HTTPBasicAuth(user_name, password)\n cls.session.verify = session_verify", "def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()", "def login_permitted_user(self):\n self.grant_permission()\n self.client.login(username=\"john\", password=\"pass\")", "def basic_auth(\n monkeypatch: pytest.MonkeyPatch,\n username: str = \"test_user\",\n password: str = \"r4ndom_bUt_memorable\",\n) -> tuple:\n monkeypatch.setenv(\"BASIC_AUTH_USERNAME\", username)\n monkeypatch.setenv(\"BASIC_AUTH_PASSWORD\", password)\n assert os.getenv(\"BASIC_AUTH_USERNAME\") == username\n assert os.getenv(\"BASIC_AUTH_PASSWORD\") == password\n return username, password", "def set_api_credentials(self):\n SCOPES = 'https://www.googleapis.com/auth/calendar'\n store = file.Storage('credentials.json')\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n # Create a flow object. This object holds the client_id,\n # client_secret, and\n # SCOPES. 
It assists with OAuth 2.0 steps to get user\n # authorization and credentials.\n flow = OAuth2WebServerFlow(\n os.getenv('OOATH2_CLIENT_ID'),\n os.getenv('OOATH2_CLIENT_SECRET'),\n SCOPES)\n credentials = tools.run_flow(flow, store)\n api_key = os.getenv('API_KEY')\n service = build('calendar', 'v3', developerKey=api_key,\n http=credentials.authorize(Http()))\n return service", "def auth_password(self, auth_password):\n\n self._auth_password = auth_password", "def set_headers(username, password):\n REQUESTS_HEADERS[\"username\"] = username\n REQUESTS_HEADERS[\"password\"] = password\n REQUESTS_HEADERS[\"Content-Type\"] = \"application/json\"", "def set_runtime_params(self, **kwargs):\n username = kwargs.get('username', None)\n password = kwargs.get('password', None)\n\n execution_type = kwargs.get('execution_type', None)\n client = self.get_client(execution_type=execution_type)\n\n if username:\n client.parent.username = username\n if password:\n client.parent.password = password", "def initialize(self):\n self.login()", "def __init__(__self__, *,\n password: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def set_credentials(\n self, credentials: firebase_admin.credentials.Certificate\n ) -> None:\n with self.__synch_mutex:\n self.__app = firebase_admin.initialize_app(\n credentials, self.__app_initialize_dict\n )\n self.__bucket = storage.bucket(app=self.__app)\n self.__db_ref = db.reference(f\"/{self.UNPROCESSED_MAPS_PARENT}\")\n self.__were_credentials_set = True", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def resetCredentials(self, request, response):\n response.expireCookie('.ASPXAUTH', path='/', domain=COOKIE_DOMAIN)\n response.expireCookie('username', path='/', domain=COOKIE_DOMAIN)", "def set_password(self, password):\n self.password = password", "def SetCredentials(self,\n api_key,\n ):\n self._api_key = api_key", "def change_authentication(self, client_id=None, client_secret=None,\n access_token=None, refresh_token=None):\n # TODO: Add error checking so you cannot change client_id and retain\n # access_token. 
Because that doesn't make sense.\n self.client_id = client_id or self.client_id\n self.client_secret = client_secret or self.client_secret\n self.access_token = access_token or self.access_token\n self.refresh_token = refresh_token or self.refresh_token", "def set_credentials_file(uname=None, password=None, client_id=None, client_secret=None):\n if not is_kaa_dir_permissions_valid():\n raise UnauthorizedFileAccessException(KAA_DIR)\n\n creds = get_credentials()\n creds['username'] = uname\n creds['password'] = password\n creds['client_id'] = client_id\n creds['client_secret'] = client_secret\n\n with open(KAA_CREDENTIALS_FILE, 'w') as f:\n json.dump(creds, f, indent=4)", "def test_set_user_password(self):\n pass", "def setOwnerPassword(self,value):\n self.PDFreactorConfiguration.in1[\"ownerPassword\"] = value", "def reset_credentials(self):\n credentials = {}\n with open(self.credentials_file, 'w') as fh_credentials:\n fh_credentials.write(json.dumps(credentials))", "def set(self, username, password):\n\n # if there are no args, give them the form\n if not username and not password:\n return render('/login_form.html')\n\n # get the user by the username\n user_data = o.User.get_data(key=username)\n\n # no user data?\n if not user_data:\n add_flash('error','User not found')\n return render('/login_form.html')\n\n # check their password\n if not o.User.check_password(user_data,password):\n add_flash('error','Incorrect password')\n return render('/login_form.html')\n\n # set them as active user\n set_active_user(user_data.get('_hash'))", "async def prepare(self):\n\n # Read the secure cookie which exists if we are in an authenticated\n # context (though not if the caimira webservice is running standalone).\n session = json.loads(self.get_secure_cookie('session') or 'null')\n\n if session:\n self.current_user = AuthenticatedUser(\n username=session['username'],\n email=session['email'],\n fullname=session['fullname'],\n )\n else:\n self.current_user = AnonymousUser()", "def __authenticate(self):\n try:\n self.creds = self.client.login(self.username, self.password, self.environment)\n except Thrift.TException as e:\n raise e", "def setup(self):\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time. 
ONLY NEED To AUTH Once\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as self.token:\n self.creds = pickle.load(self.token)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n self.creds = self.flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as self.token:\n pickle.dump(self.creds, self.token)\n\n self.service = build('calendar', 'v3', credentials=self.creds)", "def set_password(self, password):\n self.__init__(password=password)", "def _setstaff_login(self):\r\n GlobalStaff().add_users(self.user)\r\n self.client.login(username=self.user.username, password='foo')", "def set_password(self, password):\n self.PASS = password", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)", "def set_password(self, password):\n self.PASSWORD = password", "def __init__(__self__, *,\n password: pulumi.Input[str],\n server: pulumi.Input[str],\n type: pulumi.Input[str],\n username: pulumi.Input[str]):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"server\", server)\n pulumi.set(__self__, \"type\", 'BasicAuth')\n pulumi.set(__self__, \"username\", username)", "def setUp(self):\n self.new_credentials = Credentials(\"Facebook\",\"Josphato\",\"jose!!otieno@45\")", "def _setup_current_user():\n admin_user = set_admin_current_user(server.app)\n login_manager = server.app.extensions['security'].login_manager\n login_manager.anonymous_user = MagicMock(return_value=admin_user)", "def __init__(self, my_data, my_auth):\n self.user = my_auth.user\n self.password = my_auth.password\n self.my_data = my_data", "def set_sign_in(self, username, password):\n params = [\n ('username', username),\n ('password', password),\n ]\n\n self.get(COMMAND_CPM, 'SetSignIn', params)", "def perform_authentication(self):\n\n if not self.authenticators:\n return\n\n request.user = None\n request.auth = None\n\n for authenticator in self.authenticators:\n auth_tuple = authenticator.authenticate()\n\n if auth_tuple:\n request.user = auth_tuple[0]\n request.auth = auth_tuple[1]\n break", "def set_password(self, raw_password: str):\n self.new_password = raw_password", "def setUp(self):\n\n credentials = {'email': 'testuser@gmail.com', 'password': 'Testpass12'}\n self.user = get_user_model().objects.create_user(**credentials)\n self.client = APIClient()\n self.client.force_authenticate(self.user)", "def setUp(self):\n\n credentials = {'email': 'testuser@gmail.com', 'password': 'Testpass12'}\n self.user = get_user_model().objects.create_user(**credentials)\n\n self.client = APIClient()\n self.client.force_authenticate(self.user)", "def _authenticate_from_web(self, save_credentials):\n # Create local webserver and auto handles authentication.\n self._gauth.LocalWebserverAuth()\n # Save the current credentials to a file\n if save_credentials:\n self._save_credentials()", "def fill_authorization_basic(user, pass_word):\n return user, pass_word", "def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % 
system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True", "def newcred(self):\n return {'login': input('username: '),\n 'password': getpass.getpass()}", "def allow_credentials(self, allow_credentials):\n\n self._allow_credentials = allow_credentials", "def test_credentialsSetResponse(self):\n cred = imap4.PLAINCredentials()\n cred.setResponse(b'\\0testuser\\0secret')\n self.assertEqual(cred.username, b'testuser')\n self.assertEqual(cred.password, b'secret')", "def _set_password(self, password):\n self._password = generate_password_hash(password)", "def authentication_hook(self):\n pass", "def __init__(self, **kwargs):\n self._username = kwargs.get('username', current_app.config.get('WORDAI_API_EMAIL', None))\n self._password = kwargs.get('password', current_app.config.get('WORDAI_API_PASSWORD', None))\n self._hash = kwargs.get('hash', current_app.config.get('WORDAI_API_KEY', None))", "def set_auth_state(self, data):\n raise NotImplementedError()", "def __init__(self):\n self.user = \"\"\n self.password = \"\"", "def _set_authenticator(self):\n pass", "def __init__(self,\n identifier=None,\n password=None,\n silent=False):\n self.data_source = RestApi()\n self.user_id = self.data_source.login(identifier, password, silent)", "def set_input_data(self, request, auth_data):\n request.auth_data = auth_data", "def __init__(self, username=None, password=None, apitoken=None):\n self.__credentials = None\n self.__headers = {}\n if apitoken:\n self.authenticate_by_token(apitoken)\n if username and password:\n self.authenticate(username, password)", "def step_impl(context):\n\n from django.contrib.auth.models import User\n u = User(username='test_user', email='testuser@test.com')\n u.set_password('admin')", "def __init__(self, username, password=False):\n self.username = username\n self.cookies, self.token = get_cookies_and_token()\n self._login(password)", "def identity_authentication(realm, blacklist=[\"nobody\"]):\n def checkpassword(realm, username, password):\n return username and password and username == password and username not in blacklist\n cherrypy.lib.auth_basic.basic_auth(realm, checkpassword)\n cherrypy.request.security = { \"user\" : cherrypy.request.login, \"name\" : cherrypy.request.login, \"roles\": [] }", "def updateCredentials(self, request, response, login, new_password):\n\n setAuthCookie = getattr(self, 'setAuthCookie', None)\n \n if setAuthCookie:\n \n cookie_val = '%s:%s' % (login, new_password)\n cookie_val = self._get_denc().encrypt(cookie_val)\n cookie_val = cookie_val.rstrip()\n 
setAuthCookie(response, self.cookie_name, quote(cookie_val))\n \n else:\n \n BasePlugin.updateCredentials(self, request, response, login, new_password)", "def _login(self, environ, start_response):\n response = HTTPUnauthorized()\n response.www_authenticate = ('Basic', {'realm': self._realm})\n return response(environ, start_response)" ]
[ "0.75870895", "0.7177658", "0.67700595", "0.6669403", "0.64879966", "0.6485973", "0.6440404", "0.6395458", "0.6339234", "0.6270898", "0.62019813", "0.6158831", "0.6129935", "0.60635895", "0.6046939", "0.6014322", "0.59898496", "0.59827876", "0.5884749", "0.5865817", "0.58531785", "0.58309764", "0.5817622", "0.58071", "0.579636", "0.5757854", "0.57495254", "0.5748886", "0.5731355", "0.57311296", "0.57281643", "0.57251495", "0.5722923", "0.5722699", "0.57207835", "0.5717093", "0.5710375", "0.57007444", "0.5681914", "0.568092", "0.5666079", "0.5658767", "0.5646547", "0.56438196", "0.56382585", "0.56315535", "0.5631411", "0.56308645", "0.563041", "0.5617516", "0.56166947", "0.5612323", "0.5608409", "0.5607518", "0.56039864", "0.56030434", "0.5601995", "0.559361", "0.5586384", "0.5585189", "0.55837446", "0.5574908", "0.5574094", "0.5559595", "0.55403614", "0.5537867", "0.5537168", "0.55353945", "0.5521508", "0.5520727", "0.5520645", "0.5517527", "0.55169046", "0.5513506", "0.5509419", "0.5505998", "0.5505024", "0.548473", "0.54833007", "0.54677826", "0.546682", "0.54609585", "0.5452678", "0.54520905", "0.5450354", "0.54493606", "0.5447838", "0.54406905", "0.54333705", "0.54245687", "0.54241025", "0.5410497", "0.54088366", "0.54021955", "0.5396661", "0.5391967", "0.53865266", "0.5386499", "0.5385694", "0.53844255" ]
0.69228536
2
Fetch token from the server using the provided user, password resulting in subsequent web service requests for waveforms being authenticated for potential access to restricted data.
def _retrieve_jwt_token(self, user, password):\n    # force https so that we don't send around tokens unsecurely\n    url = 'https://{}/api/token'.format(urlparse(self.base_url).netloc)\n    # paranoid: check again that we only send the token to https\n    if urlparse(url).scheme != \"https\":\n        msg = 'This should not happen, please file a bug report.'\n        raise Exception(msg)\n    # convert to json\n    data = json.dumps({\"username\": user, \"password\": password})\n    # encode\n    data = bytes(data, \"utf-8\")\n    headers = {\"Content-Type\": \"application/json\"}\n    html = urllib_request.Request(url, data=data, headers=headers)\n    # decode('utf-8')\n    result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n    dic = json.loads(result)\n    # get token\n    self.jwt_access_token = dic['access']\n    self.jwt_refresh_token = dic['refresh']\n    if self.debug:\n        print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))\n    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_token(self, user_id, password):\n url = buildCommandUrl(self.server, \"/as/user/token\")\n result = json_request(\"POST\", url, {\n \"userId\": user_id,\n \"password\": password\n })\n return result[\"token\"]", "def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)", "def get_token(user, password):\n url = urljoin(PivotalTrackerService.URI, \"me\")\n auth = (user, password)\n response = PivotalTrackerService.get_response(\"get\", url, auth=auth)\n\n try:\n response.raise_for_status()\n data = response.json()\n ret_val = data[\"api_token\"]\n except RequestException:\n ret_val = None\n\n return ret_val", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_auth_token(self, username, password):\n url = '/'.join([self.base_url, self.TOKEN_ENDPOINT])\n r = requests.get(url, auth=(username, password))\n if r.status_code == 200:\n return r.content\n return r", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def getUser(self, authenticationToken):\r\n pass", "def get_token(client, email_or_username, password):\n\turl = 'account/token'\n\tbasic_auth = (email_or_username, password)\n\treturn client._request(url, Request.GET, basic_auth=basic_auth)", "def get_auth_token(username, password):\n url = get_auth_token_url()\n user_credentials = {\"username\": username, \"password\": password}\n r = requests.post(url, json=user_credentials)\n return r", "def get_token():\n json = request.get_json(force=True)\n\n user = User.query.filter_by(username=json['username']).first()\n if user is None:\n raise UserDoesNotExistException()\n\n if not user.passhash == json['password']:\n raise InvalidPasswordException()\n\n return jsonify(status='OK',\n token=user.get_token())", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def get_api_token(self, app, user, pwd):\n authorization = 
('Basic ' + base64.b64encode(user + \":\" + pwd))\n api_token_resp = app.post('/v1/api_token', headers={'Authorization': authorization})\n if api_token_resp.status != '200 OK':\n raise ValueError(api_token_resp.status)\n api_token = json.loads(api_token_resp.data)['api_token']\n return api_token", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n if not username:\n abort(400, \"Invalid username or password\")\n if not password:\n abort(400, \"Invalid username or password\")\n users = app.data.driver.db[config.DOMAIN['user']['datasource']['source']]\n user = users.find_one({'email':username})\n # validate the user in the user's service\n if not user:\n abort(401, \"Invalid username or password\")\n if not check_password_hash(user.get('password'), password):\n abort(401, \"Invalid username or password\")\n role = user.get('role', 'user')\n user_id = str(user.get('_id'))\n user = User(user_id, username, role)\n access_token, refresh_token = create_token(user)\n return jsonify(\n token=access_token,\n type='bearer',\n roles=role,\n user=username,\n refreshToken=refresh_token), 200", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata = {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))", "def get_token(self, tenant_name, user_name, password):\n _url = \"http://\" + self.host_ip + \":5000/v2.0/tokens\"\n _headers = {\"content-type\": \"application/json\"}\n _token_info = {\"auth\": {\"tenantName\": tenant_name,\n \"passwordCredentials\":\n {\"username\": user_name,\n \"password\": password}}\n }\n\n _body = json.dumps(_token_info)\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting token for\"\n \" tenant: %s\" % tenant_name)\n return response\n if response.status not 
in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Request of token for %s tenant Failed with\"\n \" status %s \" % (tenant_name, response.status))\n return response.status\n output = json.loads(response.data)\n token_id = output['access']['token']['id']\n LOG_OBJ.debug(\"Token ID for tenant %s is %s\" % (tenant_name, token_id))\n\n return token_id", "def UserToken(self) -> object:", "def get_auth_token(self):\r\n\r\n self._authenticating = True\r\n\r\n auth_data = {\r\n \"auth\": {\r\n \"identity\": {\r\n \"methods\": [\r\n \"password\"\r\n ],\r\n \"password\": {\r\n \"user\": {\r\n \"domain\": {\r\n \"name\": self._config['user_domain'] if 'user_domain' in self._config else self._config[\r\n 'domain']\r\n },\r\n \"name\": self._config['user'],\r\n\r\n \"password\": self._config['password']\r\n }\r\n }\r\n },\r\n \"scope\": {\r\n \"project\": {\r\n \"domain\": {\r\n \"name\": self._config['domain']\r\n },\r\n \"name\": self._config['project'],\r\n }\r\n }\r\n }\r\n }\r\n\r\n # profile = prof,\r\n # user_agent = 'toil',\r\n # auth_url = self._config['auth_url'],\r\n # project_name = self._config['project'],\r\n # project_domain_name = self._config['domain'],\r\n # user_domain_name = self._config['domain'],\r\n # username = self._config['user'],\r\n # password = self._config['password']\r\n\r\n response = self.post(None, self.URL_AUTH_TOKEN, data=json.dumps(auth_data))\r\n\r\n self._authenticating = False\r\n\r\n json_response = response.json()\r\n self._token = json_response['token']\r\n self._token_x_subject = response.headers['x-subject-token']\r\n\r\n catalog = json_response['token']['catalog']\r\n\r\n for service in catalog:\r\n self._services[service['name']] = service", "def get_token(username, password):\n\t\ttoken = cf.get_token(username, password)\n\t\treturn token", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def _get_token(self):\n return user.get_token()", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token = json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def _get_token(self, client):\n\n url = self._url('token')\n data = {'grant_type': 'password',\n 'username': self.user,\n 'password': self.password,\n 'scope': 'PRODUCTION'}\n client_data = self.clients[client]\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)", "def _get_token(self, 
username, password, user_domain, project_id=None):\n request = {\n \"auth\": {\n \"identity\": {\n \"methods\": [\n \"password\"\n ],\n \"password\": {\n \"user\": {\n \"domain\": {\n \"name\": user_domain\n },\n \"name\": username,\n \"password\": password\n }\n }\n }\n }\n }\n\n if project_id:\n request['auth']['scope'] = {\n \"project\": {\n \"id\": project_id\n }\n }\n response = self.client.post(TOKEN_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 201:\n return response.headers['X-Subject-Token']\n else:\n msg = \"Failed to authenticate %s user. Status %s\" % (username,\n response.status_code)\n raise SystemExit(msg)", "def RetreiveToken(self, email, password):\n data = urllib.urlencode(dict(\n service=_SERVICE,\n source=_SOURCE,\n accountType='HOSTED_OR_GOOGLE',\n Email=email,\n Passwd=password))\n response = urllib2.urlopen(\n 'https://www.google.com/accounts/ClientLogin', data)\n self.auth_token_ = None\n result = []\n for line in response:\n result.append(line)\n if line.startswith('Auth='):\n self.auth_token_ = line[len('Auth='):].strip()\n break\n if not self.auth_token_:\n raise RuntimeError('Could not login: ' + ''.join(result))", "def request_authentication(user: str, password: str) -> bool:\n # request URL using GET method adding authorization header\n response = requests.get(url=\"https://httpbin.org/basic-auth/correlaid/password\",\n auth=(user, password))\n # parse response\n parsed_response = response.json()\n # return authenticated key\n return parsed_response[\"authenticated\"]", "def auth_token(self):", "def get_token(client_id, client_secret, username, password):\r\n try:\r\n if oauth2db.check_client(client_id, client_secret):\r\n if oauth2db.check_user(username, password):\r\n token, refresh = oauth2db.generate_token(client_id, username)\r\n res = { \"token\": token }\r\n except:\r\n res = { \"error\": \"\" }\r\n \r\n if 'token' in res:\r\n return res['token']\r\n else:\r\n return None", "def getToken(email, password):\n r = requests.post(r\"https://opendata.hopefully.works/api/login\", json={\"email\":email, \"password\":password})\n if r.status_code == 200: \n return r.json()[\"accessToken\"]\n else:\n return \"\"", "def basic_auth(user, password):\n return AuthToken(\"basic\", user, password)", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': 'thundoss@gmail.com', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def get_token(self) -> None:\n with self._lock:\n if not self._endpoint:\n raise AuthenticationTokenError(\n 'Token is invalid and endpoint (auth_endpoint) for obtaining is not set.')\n\n url = self._endpoint + '/app'\n data = {\n \"client_id\": self._client_id,\n \"client_secret\": self._client_secret,\n \"username\": self._username,\n \"password\": self._password\n }\n\n res = self.post(url, data)\n self._token_info.parse_token_result(res, 'Get token')", "def do_login(user):\n\n access_token = create_access_token(identity=user)\n return (jsonify(token=access_token), 200)", "def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n 
auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id", "def login():\n username = request.json.get('username')\n password = request.json.get('password')\n\n if verify_password(username, password):\n token = g.user.generate_auth_token()\n status = \"token generated successfully\"\n else:\n status = \"Invalid username or password\"\n token = None\n\n return {'status': status, 'token': token}", "def login_require(request):\n\n if request.method == \"GET\":\n data = request.GET\n else:\n data = request.POST\n user = authenticate(username=data[\"username\"], password=data[\"password\"])\n if user and user.is_active:\n ret = Response(SUCCESS, error_code[SUCCESS])\n else: \n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n\n # Generate a token for authentication\n token = token_generator(30)\n try:\n user_token = Token.objects.get(username=data[\"username\"])\n user_token.token = token\n user_token.start_time = datetime.now()\n except: \n user_token = Token(token=token, username=data[\"username\"])\n user_token.save()\n ret.set_ret(\"auth_token\", token) \n user = User.objects.get(username=data[\"username\"])\n ret.set_ret(\"data\", UserSerializer(user.appuser).serialize())\n return HttpResponse(ret.serialize(f))", "def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']", "async def token(request: Request):\n return get_token()", "def login_user(self):\n response = self.client.post(self.login_url, self.login_data, format='json')\n return response.data['token']", "def auth(self):\n return self.api(self.token)", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. 
Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def getToken(self, username, password):\n\n response = requests.post(self.url + \"/user/\" + username + \"/profile/loginToken\",\n auth=(username, password))\n\n return response", "def login():\n print(request.get_json())\n user = request.get_json()['username']\n passwd = request.get_json()['passwd']\n user_check = storage.get_user(User, user)\n if not user:\n return jsonify(message='missing value'), 401\n if not user_check:\n return jsonify(message='error'), 401\n if user == user_check.username and passwd == user_check.passwd:\n token = jwt.encode(\n {\n 'user_id': user_check.id,\n 'exp': datetime.utcnow() + timedelta(minutes=60)\n },\n current_app.config['SECRET_KEY']\n )\n token = token.decode('UTF-8')\n return jsonify(token=token), 200\n if user == user_check.username and passwd != user_check.passwd:\n return jsonify(message='authorization failed'), 403\n return jsonify(message='authorization failed'), 403", "async def login(form_data: OAuth2PasswordRequestForm = Depends()):\n user = get_user_info(form_data.username)\n if user == None:\n raise HTTPException(status_code=404, detail=\"Incorrect username or password\")\n hashed_password = simple_hash(form_data.username, form_data.password)\n if not hashed_password == user.password:\n raise HTTPException(status_code=400, detail=\"Incorrect username or password\")\n\n return {\"access_token\": user.name, \"token_type\": \"bearer\"}", "def get_keeper_token(host: str, username: str, password: str) -> str:\n token_endpoint = urljoin(host, \"/token\")\n r = requests.get(token_endpoint, auth=(username, password))\n if r.status_code != 200:\n raise KeeperError(\n \"Could not authenticate to {0}: error {1:d}\\n{2}\".format(\n host, r.status_code, r.json()\n )\n )\n return r.json()[\"token\"]", "def login() -> Any:\n user_dict = UserSchema().load(\n request.json, partial=(\"id\", \"qualifications\") + PERMISSIONS\n )\n username = user_dict[\"username\"]\n password = user_dict[\"password\"]\n\n if is_password_correct(username, password):\n user = fetch_user(username)\n session[\"user_id\"] = user[\"id\"]\n response = make_response(user)\n response.set_cookie(\"is_authenticated\", \"1\")\n return response\n\n raise APIError(reason=\"invalid_user_or_password\", status_code=403)", "def login(self, user_name, password):\n end_point = '/'.join([self.host, 'api', 'rest-auth', 'login', ''])\n resp = requests.post(end_point, {'username': user_name, 'password': password})\n token = resp.json()['key']\n self.token = token\n return token", "def get_token(self, user):\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n return token", "def get_token(email, password):\n data = {'username': email, 'password': password}\n res = requests.post(TOKEN_ENDPOINT, data=data)\n\n return res.json()['token']", "def get_token(self, request_data):\n data = {\n \"grant_type\": \"password\",\n \"client_id\": CLIENT_ID,\n \"client_secret\": CLIENT_SECRET,\n \"username\": request_data.get(\"username\"),\n \"password\": request_data.get(\"password\"),\n }\n\n # create keycloak uri for token login\n url = URI + REALM_PREFIX + REALM + AUTH_ENDPOINT\n\n response = requests.post(url, data=data)\n\n # handle error if its anything more than a 200 as a 200 response is the\n # only expected response\n if response.status_code != 200:\n raise 
AppException.KeyCloakAdminException(\n context={\"message\": \"Error in username or password\"},\n status_code=response.status_code,\n )\n\n tokens_data = response.json()\n result = {\n \"access_token\": tokens_data[\"access_token\"],\n \"refresh_token\": tokens_data[\"refresh_token\"],\n }\n\n return result", "def login():\n body = request.json\n try:\n password = md5(body.get('password').encode('utf-8')).hexdigest()\n user = User.get(\n (User.username == body.get('username')) &\n (User.password == password))\n token = generate_token(body.get('username'))\n ret = {\"token\":token.decode('utf-8'), \"user_id\":user.id}\n return HTTPResponse(status=200, body=ret)\n except User.DoesNotExist:\n ret = json.dumps({'message':'Error on login.'})\n return HTTPResponse(status=500, body=ret)", "def get_auth_token(controller_ip=DNAC, username=DNAC_USER, password=DNAC_PASSWORD):\n\n login_url = \"https://{0}:{1}/dna/system/api/v1/auth/token\".format(controller_ip, DNAC_PORT)\n# Change verify to TRUE\n result = requests.post(url=login_url, auth=HTTPBasicAuth(DNAC_USER, DNAC_PASSWORD), verify=True)\n result.raise_for_status()\n\n token = result.json()[\"Token\"]\n# print(resultss)\n tokens = result.json()\n # print(result.headers)\n # print(token) \n # print(tokens)\n return {\n \"controller_ip\": controller_ip,\n \"token\": token\n }", "def login(**kwargs):\n data = request.get_json()\n print(\"data={}\".format(data))\n login = data.get('username')\n password = data.get('password')\n\n if not login or not password:\n raise Unauthorized('Wrong username and/or password.')\n else:\n user = app.data.driver.session.query(User).get(login)\n if user and user.check_password(password):\n token = user.generate_auth_token()\n return jsonify({'token': token.decode('ascii')})\n raise Unauthorized('Wrong username and/or password.')", "def generate_auth_token(self, username: str, password: str) -> None:\n data = {'username': username, 'password': password}\n\n response = self._http_request('POST', 'login', json_data=data)\n try:\n token = response.get('token')\n if not token:\n raise DemistoException(f'Could not retrieve token from server: {response.get(\"message\")}', res=response)\n except ValueError as exception:\n raise DemistoException('Could not parse API response.', exception=exception) from exception\n\n self._headers[REQUEST_CSPM_AUTH_HEADER] = token", "def login():\n data = request.get_json()\n user = User.authenticate(**data)\n\n if not user:\n return jsonify({ 'message': 'Invalid credentials', 'authenticated': False }), 401\n \n token = jwt.encode(\n {\n 'exp': datetime.now() + timedelta(minutes=90),\n 'iat': datetime.now(),\n 'sub': user.user_id\n },\n current_app.config['SECRET_KEY'],\n algorithm='HS256')\n #print(token)\n user_id = data['user_id']\n user = User.query.get(user_id)\n return jsonify({ 'user': user.to_dict(), 'token': token.decode('UTF-8') }), 200", "def _auth(self):\n url = 'https://forsight.crimsonhexagon.com/api/authenticate?'\n\n payload = {\n 'username': self.username,\n 'password': self.password\n }\n\n r = self.session.get(url, params=payload)\n j_result = r.json()\n self.auth_token = j_result[\"auth\"]\n #print('-- Crimson Hexagon Authenticated --')\n return", "def login(self, *, app, user):\n method = 'POST'\n path = self.path('login')\n app = extract_id(app)\n user = extract_name(user)\n data = {'app_id': app,\n 'user_id': user}\n\n token = yield from authenticate(self.req_handler,\n method,\n path,\n json=data)\n return token", "async def login_for_access_token(\n form_data: 
OAuth2PasswordRequestForm = Depends()\n):\n user = authenticate_user(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": user.username}, expires_delta=access_token_expires\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "async def get_token(self):\n # TODO: turn this into a custom auth engine\n body = {\n \"applicationKey\": self.application_key,\n \"applicationSecret\": self.application_secret,\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"resourceOwnerId\": self.application_key,\n \"requestUId\": uuid.uuid4().hex,\n \"accept-language\": \"EN\",\n }\n\n auth_url = self.base_url / \"v1/oauth/token\"\n r = httpx.post(\n auth_url.url,\n json=body,\n headers=headers,\n # auth=(self.consumer_id, self.consumer_secret),\n cert=self.cert,\n )\n\n if r.status_code == 200:\n self.creds = SCBCredentialsResponse.parse_raw(r.content)\n return self.creds\n else:\n raise ConnectionError(r.json())", "async def login(form_data: OAuth2PasswordRequestForm = Depends()):\n db = get_database()\n\n user = await crud.user.authenticate(\n db, username=form_data.username, password=form_data.password\n )\n\n if not user:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST, detail=\"Incorrect email or password\"\n )\n elif not crud.user.is_active(user):\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST, detail=HTTP_400_BAD_REQUEST_INACTIVE_USER\n )\n\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n\n return {\n \"access_token\": create_access_token(\n data={\"username\": user.username}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def _requestSwiftToken(self):\n oauth_access_token = self.accessTokenManager.token\n c, r = http._get(\n self.auth_package.HUBIC_API+'account/credentials/',\n headers={\n 'Authorization': 'Bearer '+oauth_access_token\n }\n )\n result = json.loads(r.read())\n c.close()\n\n if r.status != 200:\n try:\n err =result\n err['code'] = r.status\n except Exception as e:\n err = {}\n\n raise Exception(\"Unable to get swift token, \"\n \"(%s)\"%str(err))\n\n self._endpoint = result['endpoint']\n self._token = result['token']\n self._expire = datetime.strptime( result['expires'][:-6], \"%Y-%m-%dT%H:%M:%S\" ) - timedelta(seconds=10)", "def authenticate_and_get_user():\n try:\n gauth_token = request.form['gauth_token']\n response = authenticate_with_users_service(gauth_token)\n\n if response.status_code == 201:\n # authentication successful, store login in cookies\n session['user_id'] = response.json()['user_id']\n session['name'] = response.json()['name']\n session['gauth_token'] = gauth_token\n return response.content, response.status_code\n except (BadRequestKeyError, 
requests.exceptions.ConnectionError) as error:\n return f'Error: {error}.', 400", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def login():\n data = request.get_json()\n if 'username' in data and 'password' in data:\n username = data['username']\n password = data['password']\n access_token = authenticate(username, password)\n if access_token is not None:\n print('access token: ' + access_token)\n return jsonify({'access_token': access_token})\n else:\n abort(403)\n else:\n abort(400)", "def get_token():\n\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\",\n scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"],\n client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")\n raise Exception", "def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result", "def get(self, url, user):\n token = self.login(user)\n response = requests.get(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code", "def login():\n request_data = request.get_json()\n\n if User.authenticate(request_data['username'], request_data['password']):\n expiration = datetime.datetime.now() + datetime.timedelta(minutes=20)\n token = jwt.encode(\n {'exp': expiration},\n app.config['SECRET_KEY'],\n algorithm='HS256'\n ).decode()\n return jsonify({'token': token}), 200\n\n return Response(\n json.dumps({'error': 'Invalid username / password'}),\n 400,\n mimetype='application/json'\n )", "def login():\n req = flask.request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def getUser(self, authenticationToken):\r\n self.send_getUser(authenticationToken)\r\n return self.recv_getUser()", "def get_token():\n 
params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get_token(request):\n capability = TwilioCapability(\n settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN)\n \"\"\"Allow our users to make outgoing calls with Twilio Client\"\"\"\n capability.allow_client_outgoing(settings.TWIML_APPLICATION_SID)\n\n \"\"\"Allow our users to accept incoming calls from pyphon\"\"\"\n capability.allow_client_incoming('caller')\n\n \"\"\"Generate the capability token\"\"\"\n token = capability.generate()\n\n return JsonResponse({'token': token})", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def auth_authenticate():\n data = {'LoginName': username, 'Password': password}\n parameters = data_to_json(data)\n url = base_url + 'general/authentication/authenticate'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Token']\n return r_value", "def _request_token(self, data):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n response = requests.post(\n \"{site}{token_url}\".format(\n site=self.auth_site,\n token_url=self._token_url\n ),\n data=data,\n headers=headers\n )\n\n return response", "def get_oauth_token():\n return session.get('remote_oauth')", "def _login_token(self):\n data = {\n 'cmd': 'login',\n 'login': self.username,\n 'password': self.password,\n }\n \n token = self.helper._post_request(\n self.basename,\n self.basic_auth,\n data, \n self.headers)\n\n if token.status_code == 200:\n xml_response = BeautifulSoup(token.content, 'lxml')\n self.token = xml_response.find('token').get_text()\n self.cookies = token.cookies.get_dict()\n else:\n raise Exception('[FAIL] Could not login to OpenVAS')", "def post(self):\n data = request.get_json()\n is_verified = actions.verify(data['username'], data['password'])\n if not is_verified:\n abort(404, message='A user with matching credentials does not exist.')\n else:\n token = actions.create_token(data['username'], data['password'])\n token = token.decode('utf-8')\n return{'token': token}, 200\n pass", "def login():\n\n if not config.requires_auth():\n abort(403,\n \"authentication not permitted since service is in insecure mode\")\n\n info = request.get_json() or {}\n username = info.get('username')\n password = info.get('password')\n user_domain_name = info.get('user_domain_name', 'Default')\n token = _authenticate(CONF.keystone_authtoken.auth_url,\n username,\n password,\n user_domain_name)\n return jsonify(token)", "def authTest(token=None):\n if not token:\n token = bottle.request.get_header('X-Auth-Token')\n\n data = bottle.request.json\n if not token:\n user = data.get('user')\n password = data.get('password')\n\n query = odict(bottle.request.query.items())\n if not user or not password:\n user = query.get('user')\n password = query.get('password')\n\n if not token and (not user or not password):\n bottle.abort(400, \"Authentication credentials missing.\")\n\n result = odict(token=token,\n user=user,\n password=password,\n headers=odict(bottle.request.headers.items()),\n query=query,\n data=data,\n )\n return result", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = 
requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def get_juicebox_token(self, save=False):\n logger.debug('Getting JB token from Public API')\n url = '{}/token/'.format(get_public_api())\n data = {\n 'data': {\n 'attributes': {\n 'username': self.username,\n 'password': self.password,\n 'endpoint': self.endpoint\n },\n 'type': 'auth'\n }\n }\n headers = {'content-type': 'application/json'}\n response = jb_requests.post(url, data=json.dumps(data),\n headers=headers)\n if response.status_code != 201:\n logger.debug(response)\n raise AuthenticationError('I was unable to authenticate you with '\n 'those credentials')\n token = response.json()['data']['attributes']['token']\n self.token = token\n logger.debug('Successfully retrieved JB token')\n\n if save:\n logger.debug('Saving token to netrc')\n self.update_netrc()", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='test@test.com', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)", "def handler(self):\r\n credentials = self.get_credentials()\r\n return credentials.authorize(httplib2.Http())", "def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "def login_to_api(self):\n\n # set the API endpoint and POST the username/password to it\n endpoint = app.config['API']['url'] + 'login'\n response = requests.post(\n endpoint,\n verify = app.config['API']['verify_ssl'],\n json = {\n 'username': self.username,\n 'password': self.password\n }\n )\n\n # if the response is good, return True\n if response.status_code == 200:\n user = response.json()\n self._id = ObjectId(user['_id'])\n self.token = user['access_token']\n return True", "def access():\n access_token = session.get('access_token')\n user_data_url = '%s/api/user_data' % SERVER_HOST\n resp = requests.get(user_data_url, params={'access_token': access_token})\n return render_template('client/access.html', url=user_data_url, response=resp)" ]
[ "0.68086314", "0.6705352", "0.660126", "0.6583421", "0.6583421", "0.6573778", "0.645508", "0.6440794", "0.64196825", "0.64065206", "0.6396614", "0.63819987", "0.63676745", "0.63418996", "0.63418996", "0.6296961", "0.62778217", "0.62672204", "0.6247356", "0.6242239", "0.61941713", "0.6193632", "0.61840963", "0.61683995", "0.61664414", "0.6151358", "0.614308", "0.6142724", "0.6123825", "0.6081841", "0.6064245", "0.6061088", "0.6042407", "0.6039065", "0.60289556", "0.6025293", "0.5998648", "0.599392", "0.5991204", "0.5984433", "0.5972707", "0.59700936", "0.59670496", "0.59588546", "0.59503573", "0.59488744", "0.59305346", "0.59248734", "0.59195375", "0.59150755", "0.5904187", "0.58914465", "0.58817273", "0.58648086", "0.5858864", "0.58548963", "0.58458763", "0.58431125", "0.584191", "0.58406854", "0.58294624", "0.58286", "0.58241445", "0.5818478", "0.58170694", "0.58114135", "0.58114135", "0.5799701", "0.5797107", "0.57965094", "0.57951546", "0.5785358", "0.5784977", "0.57812715", "0.57680655", "0.57597136", "0.5757723", "0.5749539", "0.5745975", "0.57352823", "0.57337505", "0.5732928", "0.5728009", "0.5724333", "0.5722667", "0.5721117", "0.571844", "0.5709762", "0.570611", "0.57053566", "0.57051694", "0.57026154", "0.5702578", "0.56979305", "0.5695657", "0.5693591", "0.5689709", "0.568649", "0.56849885", "0.56802404" ]
0.6948478
0
A check if the jwt token is valid
def _validate_jwt_token(self):\n    # force https so that we don't send around tokens unsecurely\n    url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)\n    # paranoid: check again that we only send the token to https\n    if urlparse(url).scheme != \"https\":\n        msg = 'This should not happen, please file a bug report.'\n        raise Exception(msg)\n    if not self.jwt_access_token:\n        raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n                                        \"required.\", )\n    # convert to json\n    data = json.dumps({\"token\": self.jwt_access_token})\n    # encode\n    data = bytes(data, \"utf-8\")\n    headers = {\"Content-Type\": \"application/json\"}\n    html = urllib_request.Request(url, data=data, headers=headers)\n    # decode('utf-8')\n    try:\n        result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n        dic = json.loads(result)\n        valid = not bool(dic)\n        if self.debug:\n            print('Valid token : {}'.format(valid))\n        return valid\n    except urllib_error.HTTPError as e:\n        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isValid(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY)\n return True\n except:\n return False", "def __token_is_valid(self):\n\n if not self.__login_token or len(self.__login_token) < 10:\n # Token is not set or totally invalid\n return False\n\n try:\n jwt.decode(self.__login_token, verify = False)\n return True\n except:\n # Most likely the token is expired as `exp` is in the past\n return False", "def check_if_token_is_valid(token):\n if token is None:\n return\n try:\n jwt.decode(\n token,\n key=current_app.config['JWT_KEY'],\n audience=current_app.config['AUTH0_BASE_URL'] + '/api/v2/',\n issuer=current_app.config['AUTH0_BASE_URL'] + '/')\n except (jwt.JWTError,\n jwk.JWKError,\n jwt.ExpiredSignatureError,\n jwt.JWTClaimsError,\n AttributeError,\n AssertionError,\n IndexError):\n return False\n else:\n return True", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def validate_token(user, tkn):\n try:\n decoded = jwt.decode(tkn, KEY)\n if decoded['user'] == user:\n stored_token = User.get(User.username == user).token\n if stored_token == tkn:\n return True\n return False\n except jwt.ExpiredSignatureError:\n return HTTPResponse(status=400, body={\"msg\":\"Validation error.\"})", "def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )", "async def validate_token(self, token):", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def test_validate_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key, algorithm='HS256')\n\n self.assertTrue(\n validate_token(token)[0],\n 'Failed to validate token.'\n )", "def verify_token(self, token):\n return False", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. 
Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def is_token_valid(self):\n try:\n token_details = jwt.decode(self.__token, verify=False)\n self.__admin_id = token_details[\"id\"]\n self.__username = token_details[\"username\"]\n expiry = token_details[\"expiry\"]\n if time.time() > expiry:\n raise TokenExpiredException\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select password from neutron_admin_credential where admin_id=%s and username=%s\",\n (self.__admin_id, self.__username)\n )\n result = cursor.fetchone()\n if result is None:\n self.__message = \"Invalid id details\"\n return False\n passsword = result[\"password\"]\n admin_secret = passsword + get_admin_credential()\n jwt.decode(self.__token, key=admin_secret, verify=True)\n return True\n except jwt.DecodeError:\n self.__message = \"Invalid Token\"\n return False\n except KeyError:\n self.__message = \"Insecure Token\"\n return False\n except ValueError:\n self.__message = \"Insecure Token\"", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def _validar_token(self):\n\n\t\ttoken = request.headers.get(\"Authorization\").split(\" \")[1]\n\n\t\tres = self.autenticador.validarToken(token)\n\t\tif(not res):\n\t\t\treturn False\n\t\treturn True", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def check_token_validate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/Validate/\"\n return self._lr_object._get_json(url, payload)", "def validate_token(self, payload, headers, request):\n token = headers.get(self.TOKEN_NAME, \"\")\n\n # no token\n if self.verify == VerificationMethod.NONE:\n # do nothing as no method was chosen\n pass\n\n # static token\n elif self.verify == VerificationMethod.TOKEN:\n if not compare_digest(token, self.token):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n # hmac token\n elif self.verify == VerificationMethod.HMAC:\n 
digest = hmac.new(self.secret.encode('utf-8'), request.body, hashlib.sha256).digest()\n computed_hmac = base64.b64encode(digest)\n if not hmac.compare_digest(computed_hmac, token.encode('utf-8')):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n return True", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def check_token_structure(data):\n assert \"token\" in data\n token_structure = data[\"token\"]\n\n assert \"access_token\" in token_structure\n assert \"token_type\" in token_structure\n assert \"expires_in\" in token_structure", "def _validate_exp(self):\n now = timegm(datetime.utcnow().utctimetuple())\n\n if self.authtype == 'jwt':\n if not hasattr(self, 'token'):\n # I pass here only one time, when I request a token\n self.token = None\n return True\n payload = jwt.decode(self.token, verify=False)\n try:\n exp = int(payload['exp'])\n except ValueError:\n raise jwt.DecodeError('Expiration Time claim (exp) must be an'\n ' integer.')\n\n if exp < now:\n # raise jwt.ExpiredSignatureError('Signature has expired')\n return False\n else:\n self.s.auth = JWTAuth(self.token)\n return True\n else:\n return True", "def validate(cls, token, user, service):\n expected = cls.generate(user, service)\n return token == expected", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def verify_jwt(self, token: str):\n try:\n unverified_token = jwt.decode(token, verify=False)\n except DecodeError:\n logger.warning(f\"Failed to decode JWT without verification: {token}\", exc_info=True)\n raise NonDecodableTokenError(token)\n\n try:\n issuer = unverified_token['iss']\n except KeyError:\n raise InvalidTokenError(token)\n\n if not self.is_valid_issuer(issuer):\n logger.warning(f\"Detected a JWT with UNKNOWN ISSUER. 
({issuer})\", exc_info=True)\n raise InvalidTokenError(token)\n\n public_keys = self.get_public_keys(issuer)\n token_header = jwt.get_unverified_header(token)\n\n try:\n public_key_id = token_header[\"kid\"]\n except KeyError:\n raise InvalidTokenError(token)\n\n public_key = public_keys[public_key_id]\n verification_options = dict(key=public_key,\n issuer=issuer,\n audience=config.access_token_audience_list,\n algorithms=('RS256',))\n\n try:\n return jwt.decode(token, **verification_options)\n except PyJWTError:\n logger.warning('Detected a JWT with INVALID SIGNATURE.', exc_info=True)\n raise InvalidTokenError(token)", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def validate(validator: JwtValidator, raw_jwt: _raw_jwt.RawJwt) -> None:\n if validator.has_fixed_now():\n now = validator.fixed_now()\n else:\n now = datetime.datetime.now(tz=datetime.timezone.utc)\n if (raw_jwt.has_expiration() and\n raw_jwt.expiration() <= now - validator.clock_skew()):\n raise _jwt_error.JwtInvalidError('token has expired since %s' %\n raw_jwt.expiration())\n if (raw_jwt.has_not_before() and\n raw_jwt.not_before() > now + validator.clock_skew()):\n raise _jwt_error.JwtInvalidError('token cannot be used before %s' %\n raw_jwt.not_before())\n if validator.has_issuer():\n if not raw_jwt.has_issuer():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected issuer %s.' % validator.issuer())\n if validator.issuer() != raw_jwt.issuer():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; expected issuer %s, but got %s' %\n (validator.issuer(), raw_jwt.issuer()))\n if validator.has_subject():\n if not raw_jwt.has_subject():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected subject %s.' % validator.subject())\n if validator.subject() != raw_jwt.subject():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; expected subject %s, but got %s' %\n (validator.subject(), raw_jwt.subject()))\n if validator.has_audience():\n if (not raw_jwt.has_audiences() or\n validator.audience() not in raw_jwt.audiences()):\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected audience %s.' 
% validator.audience())\n else:\n if raw_jwt.has_audiences():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; token has audience set, but validator not.')", "def verify_token(token):\n if config.API_TOKEN is None:\n logger.error(\n 'API token is not configured, auth will fail!')\n return token == config.API_TOKEN", "def validateAgentJWTToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def check_token(fn):\n def response(self, *args, **kw):\n if not JWT_DISABLED:\n intoken = get_token_from_header()\n try:\n jwt.decode(intoken, SECRET_KEY)\n except jwt.exceptions.DecodeError:\n raise Error(FORBIDDEN)\n except jwt.ExpiredSignatureError:\n raise Error(UNAUTHORIZED, msg=\"Signature expired.\")\n except jwt.InvalidTokenError:\n raise Error(UNAUTHORIZED, msg=\"Invalid token.\")\n return fn(self, *args, **kw)\n return response", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def check_token(self, user, token):\n try:\n data = signing.loads(token, max_age=properties.TOKEN_LOGIN_TIMEOUT)\n except signing.BadSignature:\n return False\n\n return (\n (\n (data['last_login'] is None and user.last_login is None) or\n data['last_login'] == user.last_login.strftime('%s')\n ) and\n data['user_id'] == user.pk\n )", "def testIsValidToken(self):\n self.assertTrue(TokenResource.isValidToken('aValidToken'),\n msg='Expected isValidToken to accept a valid token.')\n self.assertTrue(TokenResource.isValidToken(TokenResource.VALID_TOKEN_CHARS),\n msg='Expected isValidToken to accept a valid token.')\n self.assertFalse(TokenResource.isValidToken('Token!'),\n msg='Expected isValidToken to accept an invalid token.')\n self.assertFalse(TokenResource.isValidToken('an invalid Token'),\n msg='Expected isValidToken to accept an invalid token.')", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def validate_jwt_svid(self, token: str, audience: str) -> JwtSvid:", "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n try:\n return jwt.decode(encoded_token,self._SECRET_KEY,algorithms=self._ALGORITHM)\n except jwt.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuedAtError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.MissingRequiredClaimError 
as err:\n raise HTTPException(status_code=422,detail=str(err))", "def jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "async def check_token_works(self) -> bool:\n async with self.web_session.get(url=self._user_endpoint, headers=self._headers) as resp:\n self._expired_token = not resp.status == 200\n return not self._expired_token", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def is_token_valid(self,pk,request):\n\n pass", "def verify_jwt(*args, **kwargs):\n auth = request.headers.get('Authorization', None)\n\n if auth is None:\n raise ProcessingException('Authorization header was missing', 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'Bearer'.lower():\n raise ProcessingException('Unsupported authorization type', 400)\n elif len(parts) == 1:\n raise ProcessingException('Token missing', 400)\n elif len(parts) > 2:\n raise ProcessingException('Token contains spaces', 400)\n\n try:\n payload = jwt.decode(\n parts[1],\n current_app.config['SECRET_KEY'],\n options=dict(verify_exp=False)\n )\n user = User.query.filter_by(id=payload['user_id']).first()\n\n if user is None:\n raise ProcessingException('User does not exist', 401)\n\n except jwt.InvalidTokenError:\n raise ProcessingException('Token is invalid', 400)", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def validate_token(token):\n # first, decode the token data to determine the tenant associated with the token. 
We are not able to\n # check the signature until we know which tenant, and thus, which public key, to use for validation.\n try:\n data = jwt.decode(token, verify=False)\n except Exception as e:\n logger.debug(f\"got exception trying to parse data from the access_token jwt; exception: {e}\")\n raise errors.AuthenticationError(\"could not parse the access token.\")\n # get the tenant out of the jwt payload and get associated public key\n token_tenant_id = data['tenant_id']\n try:\n public_key_str = get_tenant_config(token_tenant_id)['public_key']\n except errors.BaseTapisError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; unexpected tenant_id.\")\n except KeyError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; no public key associated with the \"\n \"tenant_id.\")\n # try:\n # pub_key = get_pub_rsa_key(public_key_str)\n # except Exception as e:\n # logger.error(f\"got exception trying to create public RSA key object; e: {e} \")\n # raise errors.ServiceConfigError(\"Unable to process public key associated with tenant.\")\n try:\n return jwt.decode(token, public_key_str, algorithm='RS256')\n except Exception as e:\n logger.debug(f\"Got exception trying to decode token; exception: {e}\")\n raise errors.AuthenticationError(\"Invalid Tapis token.\")", "def jwt_token_verify(auth_header):\n # Hug do not extract Bearer prefix\n auth_token, payload = parse_header(auth_header)\n return payload", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def verify_token(token: str):\n # Generate JWT signer.\n jws = JWS(current_app.config[\"SECRET_KEY\"], current_app.config[\"TOKEN_EXPIRY\"])\n try:\n data = jws.loads(token)\n except Exception as err:\n logger.debug(f\"{err}\")\n return False\n\n # Set flask global state.\n set_globals(token_used=True)\n\n # Return active user.\n user = User.user_from_token_props(data)\n\n if user is not None:\n logger.debug(\"Authorized with Token.\")\n else:\n logger.warning(\"Authentication failed.\")\n\n return user", "def verify_access_token(self, token: str) -> bool:\n try:\n data = crypt.verify_token(token)\n except crypt.jwt_exceptions.PyJWTError as e:\n raise FileAccessError() from e\n if data['uuid'] != str(self.pk) or data['space_id'] != str(self.space_id):\n raise FileAccessError()\n\n return True", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/readiness'\n response = self.perform_get_request(endpoint)\n\n if response.status_code != 200:\n self.print_error_response(response, \"error\")\n return response.status_code == 200", "def checkToken( self ):\n\n if ( self.token == None ):\n return False\n else :\n d = {\n \"auth_token\" : str(self.token) ,\n \"method\" : \"flickr.auth.checkToken\",\n \"format\" : \"json\",\n \"nojsoncallback\" : \"1\"\n }\n sig = self.signCall( d )\n\n url = self.urlGen( api.rest, d, sig )\n try:\n res = self.getResponse( url )\n if ( self.isGood( res ) ):\n self.token = res['auth']['token']['_content']\n self.perms = res['auth']['perms']['_content']\n return True\n else :\n self.reportError( res )\n except:\n print(str(sys.exc_info()))\n return False", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, 
\"detail\")\n return response.status_code == 200", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def _is_oauth_token_valid(token: dict, time_key=\"expires_on\") -> bool:\n if \"access_token\" not in token or token.get(\"token_type\", \"\") != \"Bearer\" or time_key not in token:\n raise AirflowException(f\"Can't get necessary data from OAuth token: {token}\")\n\n return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)", "def verify_token(auth_token):\n blacklisted_token = TokenBlacklisting.query.filter_by(\n token=str(auth_token)).first()\n if blacklisted_token:\n return True\n return False", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def check_token(token):\n token = db.session.query(Token).filter(Token.token==token).first()\n if token == None:\n return False\n #TODO token lifetime\n #if (datetime.datetime.now() - token.date >= datetime.timedelta(day=2)):\n # return False \n return True", "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}), 401\n except jwt.InvalidSignatureError:\n return jsonify({\"message\": \"Signature verification failed\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid Token verification failed\"}), 401\n except KeyError:\n return jsonify({\"message\": \"Missing token\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def _check_token_data(self, jwt_token_data):\n try:\n self.user = get_user_model().objects.get(pk=jwt_token_data['id'])\n except (TypeError, KeyError):\n return self.render_api_error_response('Not authenticated - Bad authorization header data', status=401)\n except get_user_model().DoesNotExist:\n return self.render_api_error_response('Not authenticated - User not found', status=401)\n self.jwt_token_data = jwt_token_data\n return None", "def token_validation(self, realm=None, token=None):\n token_url = 'sessions/' + token + '?_action=validate'\n uri = self._uri_realm_creator(realm=realm, uri=token_url)\n data = self._post(uri=uri, data='{}', headers=self.headers)\n if data.status_code == 200:\n return data.json()\n else:\n return False", "def test_is_token_json_temporally_valid(self):\n payload_list = []\n\n # Test that we reject a payload without 'iat' or 'exp'\n # as the tokens should have a lifetime\n payload_list.append({\n 'sub': CLIENT_ID,\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '714892f5-014f-43ad-bea0-fa47579db222'\n })\n\n # Test that we reject a payload without 'exp'\n # as such a token would never expire\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID\n })\n\n # Test that we reject a payload without 'iat'\n # as all tokens should 
indicate when they were issued\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 200000\n })\n\n # Test that we reject a payload with an 'iat' and 'exp'\n # in the past (e.g. they have expired)\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) - 200000\n })\n\n # Test that we reject a payload with an 'iat' and 'exp'\n # in the future (as we should as they are not yet valid)\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) + 200000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 2000000\n })\n\n for payload in payload_list:\n # Assert the underlying helper method reponsible for\n # checking temporal validity returns False when passed\n # temporally invalid payloads\n self.assertFalse(\n self._token_checker._is_token_json_temporally_valid(payload),\n \"Payload %s should not be accepted!\" % payload\n )\n\n # Assert the wrapper method valid_token_to_id returns\n # None when passed temporally invalid tokens\n token = self._create_token(payload, PRIVATE_KEY)\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), None,\n \"Token with payload %s should not be accepted!\" % payload\n )", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def check_permission(self, token):\n decoded_token = jwt.decode(token, os.getenv('SECRET_KEY'))\n if decoded_token['roles'] != ['Admin']:\n return True\n return False", "def is_expired(self, token: str) -> bool:\n try:\n decoded_token = jwt.decode(token, options=self._options)\n except jwt.ExpiredSignatureError: # type: ignore\n return True\n else:\n if decoded_token['exp'] - time.time() >= self.renew_buffer:\n # If the token will expire in less than cls._renew_buffer amount of time in seconds, the token is\n # considered expired.\n return True\n else:\n return False", "def verify_auth_token(token):\n s = Serializer(mscolab_settings.SECRET_KEY)\n try:\n data = s.loads(token)\n except SignatureExpired:\n logging.debug(\"Signature Expired\")\n return None # valid token, but expired\n except BadSignature:\n logging.debug(\"Bad Signature\")\n return None # invalid token\n user = User.query.filter_by(id=data['id']).first()\n return user", "def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True", "def check_token(token, secret, message=None):\n\n config = prologin.config.load('timeauth')\n\n if not config['enabled']:\n return True\n\n if token is None:\n return False\n\n # Reject badly formatted tokens.\n chunks = token.split(':')\n if len(chunks) != 2:\n return False\n try:\n timestamp = int(chunks[0])\n except ValueError:\n return False\n\n # Reject outdated tokens.\n if time.time() - timestamp > TOKEN_TIMEOUT:\n return False\n\n # Check if the token is valid.\n return hmac.compare_digest(\n get_hmac(secret, str(message) + chunks[0]), chunks[1]\n )", "def validate_token(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Tries to decode the JWT 
token using the SECRET KEY.\n\n Executes the original function if token is valid.\n\n Otherwise returns HTTP 401 to the Client.\n\n \"\"\"\n token = request.headers.get('token')\n\n try:\n jwt.decode(token, app.config['SECRET_KEY'])\n return func(*args, **kwargs)\n except jwt.DecodeError:\n message = 'Token is missing / invalid'\n except jwt.exceptions.ExpiredSignatureError:\n message = 'Token has expired'\n\n\n return Response(\n json.dumps({'error': message}),\n 401,\n mimetype='application/json'\n )\n\n return wrapper", "def test_validate_token(self, mock_xsrf_validate_token):\n self.handler.validate_token('test token', 'user@example.com')\n mock_xsrf_validate_token.assert_called_once_with(\n 'test token', 'user@example.com',\n timeout=xsrf.REFRESH_TOKEN_TIMEOUT_SEC)", "def verify_token(vial_http: urllib3.connectionpool.ConnectionPool) -> bool:\n verify_resp = vial_http.request(\"GET\", \"/api/verifyToken\")\n return verify_resp.status == 200", "def test_valid_token(self, mock_check_token_not_revoked,\n mock_get_issuer_public_key):\n # Mock the external call to retrieve the IAM public key\n # used in the _verify_token and valid_token_to_id call\n mock_get_issuer_public_key.return_value = PUBLIC_KEY\n # Mock the external call to check the token has not been rejected\n # used in the valid_token_to_id call\n mock_check_token_not_revoked.return_value = CLIENT_ID\n\n # This payload will be valid as we will sign it with PRIVATE_KEY\n payload = self._standard_token()\n\n token = self._create_token(payload, PRIVATE_KEY)\n\n with self.settings(IAM_HOSTNAME_LIST=['iam-test.idc.eu']):\n client_id = payload['sub']\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), client_id,\n \"Token with payload %s should be accepted!\" % payload\n )", "def test_verifies_bearer_token(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n self.assertEqual(badgr._token_data['token_type'], \"Bearer\")\n self.assertEqual(badgr._token_data['access_token'],\n self._sample_token)", "def fresh_jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")\n\n if not self.get_raw_jwt()['fresh']:\n raise HTTPException(status_code=401,detail=\"Fresh token required\")", "def test_generate_and_validate_token(self):\n\n audience = 'http://www.service.teletracking.com/'\n roles = {'role': ['admin', 'user'], 'audience': audience}\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n token = AuthenticationHandler.generate_auth_token(roles, secret)\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'admin')\n self.assertTrue(decoded_token['role'][1] == 'user')", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. 
Please log in again.'", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def validate_eve_jwt(self):\n\n\t\tres = self.session.get(self.settings['jwks_url'])\n\t\tres.raise_for_status()\n\n\t\tdata = res.json()\n\n\t\ttry:\n\t\t\tjwk_sets = data[\"keys\"]\n\t\texcept KeyError as e:\n\t\t\tself.p(\"Something went wrong when retrieving the JWK set. The returned \"\n\t\t\t\t\"payload did not have the expected key {}. \\nPayload returned \"\n\t\t\t\t\"from the SSO looks like: {}\".format(e, data))\n\t\t\treturn None\n\n\t\tjwk_set = next((item for item in jwk_sets if item[\"alg\"] == \"RS256\"))\n\n\t\ttry:\n\t\t\treturn jwt.decode(\n\t\t\t\tself.access_token,\n\t\t\t\tjwk_set,\n\t\t\t\talgorithms=jwk_set[\"alg\"],\n\t\t\t\tissuer=self.settings['login_host']\n\t\t\t)\n\t\texcept ExpiredSignatureError:\n\t\t\tself.p(\"The JWT token has expired: {}\")\n\t\t\treturn None\n\t\texcept JWTError as e:\n\t\t\tself.p(\"The JWT signature was invalid: {}\").format(str(e))\n\t\t\treturn None\n\t\texcept JWTClaimsError as e:\n\t\t\ttry:\n\t\t\t\treturn jwt.decode(\n\t\t\t\t\t\t\tself.access_token,\n\t\t\t\t\t\t\tjwk_set,\n\t\t\t\t\t\t\talgorithms=jwk_set[\"alg\"],\n\t\t\t\t\t\t\tissuer=urllib.parse.urlunparse([self.settings['esi_proto'],self.settings['login_host'],'','','',''])\n\t\t\t\t\t\t)\n\t\t\texcept JWTClaimsError as e:\n\t\t\t\tself.p(\"The issuer claim was not from login.eveonline.com or \"\n\t\t\t\t\t\"https://login.eveonline.com: {}\".format(str(e)))\n\t\t\t\treturn None", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def validate_credentials(self, data):\n try:\n boolean_param_list = []\n get_service_data = app.config.get('JWT_CONFIG').get('CREDENTIAL')\n token_identity_param = app.config.get('JWT_CONFIG').get('TOKEN_IDENTITY_PARAM')\n expires_delta = app.config.get('JWT_CONFIG').get('TOKEN_EXPIRY')\n expires_delta = eval(expires_delta) if isinstance(expires_delta, str) else expires_delta\n credentials = data.get('credentials')\n identity_credentials_keys = list(get_service_data.keys())\n for key in identity_credentials_keys:\n if get_service_data[key] != credentials[key]:\n boolean_param_list.append(False)\n else:\n boolean_param_list.append(True)\n\n if False in boolean_param_list:\n return {'msg': \"Incorrect Credentials\"}, 401\n else:\n access_token = self.auth_token_generate(\n identity_param_val=credentials[token_identity_param], expires_delta=expires_delta)\n return {'access_token': access_token}, 200\n except Exception as e:\n print(e)\n return {'msg': \"Incorrect Credentials\"}, 401", "def verify_token(event):\n if event['token'] != VERIFICATION_TOKEN:\n print('Presented with invalid token - ignoring message...')\n return False\n return True", "def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = 
zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False", "def test_create_token_valid(self):\n create_mock_user(**self.mock_user)\n res = self.client.post(TOKEN_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('token', res.data)", "def verify_auth_token(token):\n # In case the token so wrong that it's None\n if not token:\n raise BadSignatureToken\n\n gen_token = Serializer(app.config['API_SECRET_KEY'])\n try:\n data = gen_token.loads(token)\n except SignatureExpired:\n raise ExpiredToken() # valid token, but expired\n except BadSignature:\n raise BadSignatureToken() # invalid token\n user = User.query.get(data['id'])\n return user", "def verifyToken():\n if request:\n data = json.dumps(request.json)\n reqToken = json.loads(data)[\"token\"]\n if len(reqToken) >= 8 and len(reqToken) <= 32:\n found = Token.query.filter(Token.token == f'{reqToken}').first()\n print(found)\n if found:\n message = \"Success! It's an older code, sir, but it checks out.\" # noqa\n else:\n message = \"Code not found.\"\n else:\n message = 'Invalid token length.'\n else:\n message = 'Invalid JSON request'\n return jsonify(status=message)", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def verify_token(token):\n return AuthToken.query.filter_by(auth_token=token).first()", "def jwt_optional(self) -> None:\n if self._TOKEN and self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n\n try:\n data = s.loads(token)\n except SignatureExpired:\n print \"EXP\", token\n return None\n except BadSignature:\n print \"BAD\", token\n return None\n\n user = User.query.get(data['id'])\n return user", "def test_token(self):\r\n expected = 
\"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3N1ZWRBdCI6ICIyMDE0LTAyLTI3VDE3OjAwOjQyLjQwNjQ0MSswOjAwIiwgImNvbnN1bWVyS2V5IjogImZha2Vfc2VjcmV0IiwgInVzZXJJZCI6ICJ1c2VybmFtZSIsICJ0dGwiOiA4NjQwMH0.Dx1PoF-7mqBOOSGDMZ9R_s3oaaLRPnn6CJgGGF2A5CQ\"\r\n response = retrieve_token(\"username\", \"fake_secret\")\r\n\r\n # because the middle hashes are dependent on time, conly the header and footer are checked for secret key\r\n self.assertEqual(expected.split('.')[0], response.split('.')[0])\r\n self.assertNotEqual(expected.split('.')[2], response.split('.')[2])", "def check_token(self):\n return config.outlook_token is not None", "def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True", "def validate_token(self, token):\n try:\n self._verification = models.EmailVerification.objects.get(\n token=token,\n )\n except models.EmailVerification.DoesNotExist:\n raise serializers.ValidationError(\n code='invalid_token',\n detail=_('The provided token does not exist or has expired.'),\n )\n\n return token", "def _assert_valid(self, token_id, token_ref):\n current_time = timeutils.normalize_time(timeutils.utcnow())\n expires = token_ref.get('expires')\n if not expires or current_time > timeutils.normalize_time(expires):\n raise exception.TokenNotFound(token_id=token_id)", "async def validate_token(self, token: bytes, audience=None) -> Dict[str, str]:\n\n try:\n header = jwt.get_unverified_header(token)\n if \"kid\" not in header:\n raise InvalidToken(\"Missing kid in header\")\n return jwt.decode(token, await self.retrieve_public_key(self._decode_public_key_identifier(header[\"kid\"])), algorithms='RS256', issuer=tedious.config.CONFIG[\"TOKEN\"][\"issuer\"], audience=audience)\n except DecodeError:\n raise InvalidToken(\"Unable to decode token.\")\n except Exception as e:\n raise InvalidToken(str(type(e)) + \" \" + str(e))", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def test_authtoken_is_valid(self):\n auth_client = self.fixtures.auth_client\n # scenario 1: when validity is unlimited (0)\n tomriddle = models.User(username='voldemort', fullname='Tom Riddle')\n scope = ['id', 'email']\n tomriddle_token = models.AuthToken(\n auth_client=auth_client, user=tomriddle, 
scope=scope, validity=0\n )\n self.assertTrue(tomriddle_token.is_valid())\n\n # scenario 2: when validity has not been given\n draco = models.User(username='draco', fullname='Draco Malfoy')\n draco_token = models.AuthToken(auth_client=auth_client, user=draco, scope=scope)\n with self.assertRaises(TypeError):\n draco_token.is_valid()\n\n # scenario 3: when validity is limited\n harry = models.User(username='harry', fullname='Harry Potter')\n harry_token = models.AuthToken(\n auth_client=auth_client,\n user=harry,\n scope=scope,\n validity=3600,\n created_at=utcnow(),\n )\n self.assertTrue(harry_token.is_valid())\n\n # scenario 4: when validity is limited *and* the token has expired\n cedric = models.User(username='cedric', fullname='Cedric Diggory')\n cedric_token = models.AuthToken(\n auth_client=auth_client,\n user=cedric,\n scope=scope,\n validity=1,\n created_at=utcnow() - timedelta(1),\n )\n self.assertFalse(cedric_token.is_valid())", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)" ]
[ "0.8520996", "0.8204527", "0.80866903", "0.7793878", "0.7776991", "0.7762956", "0.77588475", "0.7680332", "0.7579834", "0.7543709", "0.74756145", "0.74731255", "0.74194485", "0.7321448", "0.7219745", "0.71922845", "0.7190927", "0.71824664", "0.7138779", "0.7125586", "0.7120983", "0.7116968", "0.7099627", "0.70848614", "0.70518714", "0.70331436", "0.70223606", "0.70134187", "0.70128095", "0.70103943", "0.7008096", "0.6998699", "0.69673896", "0.6956695", "0.6950336", "0.69458187", "0.6927721", "0.692462", "0.6923537", "0.6920046", "0.6920006", "0.6890512", "0.68848693", "0.6869634", "0.68631136", "0.6835655", "0.6821348", "0.68013865", "0.6794732", "0.6791715", "0.67910045", "0.6779422", "0.6773845", "0.6773845", "0.67684746", "0.67587227", "0.6741654", "0.67391545", "0.67263967", "0.67261153", "0.67149454", "0.6702281", "0.6698924", "0.6692904", "0.6665855", "0.66349256", "0.66207546", "0.66183317", "0.65985453", "0.65946823", "0.6584057", "0.6562737", "0.65504616", "0.65494156", "0.65387946", "0.65307206", "0.65276617", "0.65236866", "0.6517927", "0.6516944", "0.6500442", "0.6477226", "0.647543", "0.6474052", "0.64646155", "0.6457906", "0.6457088", "0.64502865", "0.6448047", "0.6441448", "0.6438858", "0.6435281", "0.64302075", "0.64153975", "0.6414963", "0.64133555", "0.6391629", "0.63903886", "0.63749194", "0.63656634" ]
0.8363303
1
Get access token from refresh token
def _refresh_access_token(self):
        # force https so that we don't send around tokens unsecurely
        url = 'https://{}/api/token/refresh'.format(urlparse(self.base_url).netloc)
        # paranoid: check again that we only send the token to https
        if urlparse(url).scheme != "https":
            msg = 'This should not happen, please file a bug report.'
            raise Exception(msg)
        if not self.jwt_refresh_token:
            raise FDSNUnauthorizedException("Unauthorized, authentication "
                                            "required.", )
        # convert to json
        data = json.dumps({"refresh": self.jwt_refresh_token})
        # encode
        data = bytes(data, "utf-8")
        headers = {"Content-Type": "application/json"}
        html = urllib_request.Request(url, data=data, headers=headers)
        # decode('utf-8')
        try:
            result = urllib_request.urlopen(html).read().decode("utf-8")
            dic = json.loads(result)
            self.jwt_access_token = dic['access']
            if self.debug:
                print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))
            return
        except:
            raise FDSNUnauthorizedException("Unauthorized, authentication "
                                            "expired. Please set your credentials again.", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_access_token(self, refresh=False):\n return self._token_man.get_access_token(refresh)", "def get_access_token(client_id, refresh_token):\n h = {\n \"content-type\": 'application/x-www-form-urlencoded'\n }\n\n d = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n \"client_id\": client_id\n }\n\n r = requests.post(\"https://api.tdameritrade.com/v1/oauth2/token\", data=d, headers=h)\n\n return json.loads(r.text)[\"access_token\"]", "def getAccessToken( refresh_token):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=ApiJWTAuthentication.expirationTime_Access),\n 'refresh_token': refresh_token\n }\n jwttoken= jwt.encode(\n payload,\n ApiJWTAuthentication.secretKey_access,\n algorithm='HS256'\n )\n token=jwttoken.decode('utf-8')\n return {\"message\": \"success\", \"access_token\": token}\n except Exception as e:\n return {\"message\": \"exception\",\"Exception\": str(e)}", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def refresh_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n refresh_token_obj = RefreshToken.valid_token(refresh_token)\n if not refresh_token_obj:\n return msg.errors.unauthorized('Provided refresh token is not valid')\n access_token = generate_token(refresh_token_obj.user_id)\n return msg.success(\n message='New access token generated',\n access_token=access_token)", "def refreshAccessToken(self):\r\n\r\n assert hasattr(self.oauthToken, \"getRefreshToken\")\r\n\r\n #turn the response into json\r\n\r\n response = self._oauth.refreshAccessToken(self.oauthToken)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = token.Token(responseBody)\r\n except TypeError:\r\n print (\"Bad response when refreshing the token \" + str(responseBody))\r\n sys.exit()\r\n\r\n return oauthToken", "def refresh_token(self, refresh_token):\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': refresh_token,\n 'redirect_uri': self.redirect_uri,\n 'scope': 'identify email connections'\n }\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n access_token = self.http_client.post(\n f'{self.api_endpoint}/oauth2/token', headers, data=data)\n return access_token", "def get(self):\n\n user = context_property.request_user\n Log.info(\"Refresh access token for %i\" % user.id)\n\n return {\n \"accessToken\" : create_access_token(user.id)\n }, 200", "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_code >= 200:\n self.API_TOKEN = api_response.content.decode()\n\n return self.API_TOKEN\n else:\n return None", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = json.loads(response)\n except 
urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def refresh_token(self, refresh_token):\r\n params = (base.get_params(None, locals()))\r\n params.update({'client_id': self.client_id,\r\n 'client_secret': self.client_secret,\r\n 'grant_type': 'refresh_token'})\r\n\r\n request = http.Request('POST', self.get_url('token'), params)\r\n\r\n return request, parsers.parse_json", "def _refresh_token(self):\n token_url = self._base_url + '/api/oauth2/token'\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self._client_id,\n 'client_secret': self._client_secret\n }\n headers = {'accept': 'application/json'}\n response = requests.post(token_url,proxies = self._proxy,params= params,headers = headers)\n logging.debug(response.text)\n parsed = response.json()\n self._access_token = parsed['access_token']\n self._refresh_token = parsed['refresh_token']\n expires_in = parsed['expires_in']\n ## Keep a buffer of 120 seconds to refresh token before expiry\n self._expires_at = datetime.now() + timedelta(seconds=(expires_in - 120))\n\n logging.debug('access_token %s expires at %s', self._access_token, self._expires_at)\n\n return", "def refresh_token(self, iam_client_id, iam_client_secret, refresh_token):\n\n data = HTTPHeaderDict()\n data.add('client_id', iam_client_id)\n data.add('client_secret', iam_client_secret)\n data.add('grant_type', 'refresh_token')\n data.add('refresh_token', refresh_token)\n \n self.log.info(\"refresh_token. data: %s\" % data)\n\n response = requests.post(self.token_endpoint, data=data, verify=True)\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n # Whoops it wasn't a 200\n self.log.error(\"refresh_token() Error: %s \" %str(e))\n self.log.error(\"http error:\" + response.status_code)\n return response.status_code\n\n result = json.loads(response.content)\n return result[\"access_token\"]", "def refreshAccessToken(self):\n params = {\"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refreshToken}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET]:\n params[i] = self.conf[i]\n data = urllib.parse.urlencode(params).encode(\"utf-8\")\n request = urllib.request.Request(self.conf[self.TOKEN_ENDPOINT])\n request.add_header(\"Content-Type\", \"application/x-www-form-urlencoded; charset=utf-8\")\n f = urllib.request.urlopen(request, data)\n root = json.loads(f.read().decode(\"utf-8\"))\n self.accessToken = root[self.ACCESS_TOKEN]\n self.__saveCacheTokens()\n return self.accessToken", "def refresh_access_token(self):\n parameters = {'client_id': self.CLIENT_ID,\n 'auth_code': self.auth_code,\n 'client_secret': self.CLIENT_SECRET,\n 'grant_type': 'authorization_code'}\n url = self.ACCESS_TOKEN_URL % parameters\n data = self._get_refresh_data()\n logging.info('url: %s, data: %s', url, data)\n\n try:\n # empty data to trigger a post\n req = urllib2.Request(url, data)\n req.add_header('Content-Type', 'application/x-www-form-urlencoded')\n result = urllib2.urlopen(req)\n result = json.load(result)\n logging.info('result: %s', result)\n except urllib2.HTTPError, err:\n result = json.load(err)\n logging.info(result)\n raise err\n\n self.access_token = result['access_token']\n self.expires = int(time.time() + result['expires_in'])\n self.refresh_token = result.get('refresh_token', None)", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = 
deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def RefreshToken():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['client_secret'] = Constants.USER['CLIENT_SECRET']\n params['refresh_token'] = Constants.AUTH['REFRESH']\n params['grant_type'] = 'refresh_token'\n\n data = urllib.urlencode(params)\n\n headers = {\n 'User-Agent': 'LogoCert Client',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html, */*',\n }\n\n request_url = Constants.OAUTH_TOKEN\n\n request = urllib2.Request(request_url, data, headers)\n res = urllib2.urlopen(request)\n response = res.read()\n return json.loads(response)", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def get_access_token(self):\n if self.token.is_expired():\n logging.debug('Requesting a new access token')\n self.token.load_from_json(json=self.__get_token_data__())\n else:\n logging.debug('Access token still valid')\n\n return self.token.access_token", "def _get_access_token(self):\n\n self._access_token = None\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n doc = minidom.Document()\n root = doc.createElement('tokenAuthRequest')\n doc.appendChild(root)\n aki = doc.createElement('accessKeyId')\n aki.appendChild(doc.createTextNode(self.publicAccessKey))\n root.appendChild(aki)\n pak = doc.createElement('privateAccessKey')\n pak.appendChild(doc.createTextNode(self.privateAccessKey))\n root.appendChild(pak)\n rt = doc.createElement('refreshToken')\n rt.appendChild(doc.createTextNode(self._refresh_token))\n root.appendChild(rt)\n data = doc.toprettyxml()\n\n resp = requests.post(BASE_URL + \"authorization\", data=data, headers=self._default_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to claim access token: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n self._access_token = resp.headers.get('Location', None)\n if not self._access_token:\n raise ValueError(\"Unable to get access token\")\n\n self._user_id = os.path.basename(vals.get('authorization').get('user'))\n\n # Always set the expiry 30 minutes from now so we dont have to deal with parsing timezones\n # self._access_token_expiry = dateutil_parser.parse(vals.get('authorization').get('expiration'))\n 
self._access_token_expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)", "def refresh_token(refresh_token):\r\n \r\n return None", "def exchange_refresh_token(refresh_token):\n body = {'grant_type': 'refresh_token',\n 'client_id': current_app.config['AUTH0_CLIENT_ID'],\n 'client_secret': current_app.config['AUTH0_CLIENT_SECRET'],\n 'audience': current_app.config['AUTH0_AUDIENCE'],\n 'refresh_token': refresh_token\n }\n req = requests.post(\n current_app.config['AUTH0_BASE_URL'] + '/oauth/token',\n json=body)\n req.raise_for_status()\n return req.json()['access_token']", "def refresh_access_token(self):\n if self.client_secret is None:\n raise Exception(\"client_secret must be set to execute \"\n \"refresh_access_token.\")\n if self.refresh_token is None:\n raise Exception(\"refresh_token must be set to execute \"\n \"refresh_access_token.\")\n params = {'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': self.refresh_token}\n result = self._send_request(REFRESH_URL, params=params, method='POST',\n data_field=None)\n self.access_token = result['access_token']\n return self.access_token", "def get_token():\n\turl = SPOTIFY_ACCOUNT_HOST + 'token'\n\tcurrent_refresh_token = config.get('spotify_credentials', 'refresh_token')\n\tbody = {'grant_type': 'refresh_token', 'refresh_token': current_refresh_token}\n\tauth_header = 'Basic ' + b64encode('{0}:{1}'.format(SPOTIFY_CLIENT_ID, \n\t\tSPOTIFY_CLIENT_SECRET))\n\theaders = {'Authorization': auth_header}\n\n\tresponse = requests.post(url, headers=headers, data=body).json()\n\tif response.has_key('refresh_token'):\n\t\tlogging.debug('Received new refresh token')\n\t\tconfig.set('spotify_credentials', 'refresh_token', \n\t\t\tresponse['refresh_token'])\n\treturn response['access_token']", "def decodeAccesshTokenForRefreshToken( accessToken):\n try:\n payload = jwt.decode(accessToken, ApiJWTAuthentication.secretKey_access)\n return {\"message\": \"success\",\"refresh_token\": payload['refresh_token']}\n except jwt.ExpiredSignatureError:\n return {\"message\": \"Expired Access Token\"}\n except jwt.InvalidTokenError:\n return {\"message\": \"Invalid access Token\"}", "def refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200", "def get_access_token(self,\n client_id=settings.OPENHUMANS_CLIENT_ID,\n client_secret=settings.OPENHUMANS_CLIENT_SECRET):\n # Also refresh if nearly expired (less than 60s remaining).\n delta = timedelta(seconds=60)\n if arrow.get(self.token_expires) - delta < arrow.now():\n self._refresh_tokens(client_id=client_id,\n client_secret=client_secret)\n return self.access_token", "def get_access_token():\n\n scopes = [\n 'https://www.googleapis.com/auth/cloud-platform', 'email', 'profile'\n ]\n\n credentials, _ = default()\n credentials = auth.delegated_credentials(credentials, scopes=scopes)\n\n request = req.Request()\n credentials.refresh(request)\n access_token = credentials.token\n\n return access_token", "def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}", "def 
get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token", "def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res", "def refreshToken(self, token):\n \n postData = { 'refresh_token': token.refreshToken,\n 'client_id': self.clientId,\n 'client_secret': self.clientSecret,\n 'grant_type': self.refreshGrantType }\n postFields = urlencode(postData)\n\n \n buffer = BytesIO()\n c = pycurl.Curl()\n try:\n c.setopt(c.URL, self.refreshServer)\n c.setopt(c.POSTFIELDS, postFields)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n \n responsecode = c.getinfo(c.RESPONSE_CODE)\n reqResp = json.loads(buffer.getvalue().decode('iso-8859-1'))\n except pycurl.error as err:\n msgData = { 'error_code': GDataOAuthError.ERR_NETWORK, 'error_string': c.errstr() }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n return\n finally:\n c.close()\n\n\n if(responsecode == 200):\n expiration = int(time.time()) + int(reqResp['expires_in'])\n token.accessToken = reqResp['access_token']\n token.expiration = expiration\n token.tokenType = reqResp['token_type']\n self.applicationCallback(MessageTypes.MSG_OAUTH_SUCCESS, token);\n elif(responsecode == 401):\n msgData = { 'error_code': GDataOAuthError.ERR_CREDENTIALS, 'error_string': reqResp['error'] }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n elif(responsecode == 400):\n msgData = { 'error_code': GDataOAuthError.ERR_PROTOCOL, 'error_string': reqResp['error'] + \": \" + reqResp['error_description']}\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n else:\n msgData = { 'error_code': GDataOAuthError.ERR_UNKNOWN, 'error_string': reqResp['error'] + \": \" + reqResp['error_description'] }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)", "def get_sts_token(current_refresh_token, url=None):\n\n if url is None:\n url = auth_url\n\n if not current_refresh_token: # First time through, send password\n data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,\n 'scope': scope}\n print(\"Sending authentication request with password to\", url, \"...\")\n else: # Use the given refresh token\n data = {'username': user, 'client_id': clientid, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}\n print(\"Sending authentication request with refresh token to\", url, \"...\")\n if client_secret != '':\n data['client_secret'] = client_secret;\n \n try:\n # Request with auth for https protocol \n r = requests.post(url,\n headers={'Accept': 'application/json'},\n data=data,\n auth=(clientid, client_secret),\n verify=cert_file,\r\n proxies={\r\n 'http':'http://'+proxy_hostname+':'+proxy_port,\r\n 'https':'http://'+proxy_hostname+':'+proxy_port\r\n },\n allow_redirects=False)\n\n except requests.exceptions.RequestException as e:\n print('Refinitiv Data Platform authentication exception failure:', e)\n return None, None, None\n\n if r.status_code == 200:\n auth_json = r.json()\n print(\"Refinitiv Data Platform Authentication succeeded. 
RECEIVED:\")\n print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']\n elif r.status_code == 301 or r.status_code == 302 or r.status_code == 307 or r.status_code == 308:\n # Perform URL redirect\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n new_host = r.headers['Location']\n if new_host is not None:\n print('Perform URL redirect to ', new_host)\n return get_sts_token(current_refresh_token, new_host)\n return None, None, None\n elif r.status_code == 400 or r.status_code == 401:\n # Retry with username and password\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n if current_refresh_token:\n # Refresh token may have expired. Try using our password.\n print('Retry with username and password')\n return get_sts_token(None)\n return None, None, None\n elif r.status_code == 403 or r.status_code == 451:\n # Stop retrying with the request\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n print('Stop retrying with the request')\n return None, None, None\n else:\n # Retry the request to Refinitiv Data Platform \n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n print('Retry the request to Refinitiv Data Platform')\n return get_sts_token(current_refresh_token)", "def refresh():\n print(\"refresh request\")\n old_token = request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "def get_token(self, access_token):\n if access_token:\n return access_token\n elif self.default_access_token:\n return self.default_access_token\n else:\n return ''", "def refresh_token(self):\n data = {\n \"client_id\": self._client_id,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self._refresh_token,\n }\n token_resp = requests.post(self._token_server_uri, data=data)\n\n self._logging.info(token_resp.headers)\n self._logging.info(json.loads(token_resp.content.decode(\"utf-8\")))\n\n if token_resp.status_code == 200:\n auth_token_json = token_resp.json()\n\n self._logging.info(auth_token_json)\n\n self._token = auth_token_json[\"access_token\"]\n self._refresh_token = auth_token_json[\"refresh_token\"]\n\n self._token_expire = auth_token_json[\"expires_in\"]\n self._token_expire_refresh = auth_token_json[\"refresh_expires_in\"]\n self._token_expire_time = (\n self._token_expire + (datetime.today()).timestamp()\n )\n self._logging.info(\"Token Refreshed.\")\n else:\n raise Exception(\"Token Refreshed Failed.\")", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def access_token(self):\n access_token = 
self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')", "def refresh(self):\n self._request_token(grant_type='client_credentials')", "async def get_access_token(self):\n async with self._access_token_lock:\n if (not self._access_token\n or (not self._access_token_checked\n and not await self.check_access_token(\n self._access_token))):\n await self.receive_new_access_token()\n return self._access_token", "async def async_get_access_token(self):\n if not self._oauth_session.valid_token:\n await self._oauth_session.async_ensure_token_valid()\n\n return self._oauth_session.token[\"access_token\"]", "def refresh_token(self):\n token = json.loads(get_metadata(\n 'instance/service-accounts/%s/token' % self.service_account,\n ))\n seconds = token['expires_in'] - 60\n self._expiration_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n )\n self._token = token['access_token']", "def re_authenticate(self):\n url = URLS['token']\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n r = requests.post(url, data=data)\n r.raise_for_status()\n j = r.json()\n self.access_token = j['access_token']\n self.refresh_token = j['refresh_token']\n self._set_token_expiration_time(expires_in=j['expires_in'])\n return r", "def refresh_authorization_token(client_id: str, client_secret: str, redirect_uri: str, refresh_token: str,) -> Dict[str, str]:\n parameters = {\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n \"redirect_uri\": redirect_uri,\n \"refresh_token\": refresh_token,\n \"grant_type\": \"refresh_token\"\n }\n header = {\n \"user-agent\": get_user_agent()\n }\n r = requests.post(\"https://login.live.com/oauth20_token.srf\", data=parameters, headers=header)\n return r.json()", "def get_token(self, refresh_if_expired=False):\n if refresh_if_expired and self.test_token() is False:\n return self.refresh_token()\n\n if self.API_TOKEN is None:\n # try and get one\n return self.refresh_token()\n else:\n return self.API_TOKEN", "def refresh_token(self,refresh_token=None,client_id=None, client_secret=None):\n\t\tif not refresh_token and not client_id:\n\t\t refresh_token = self.credentials.refresh_token\n\t\t client_id = self.credentials.client_id\n\t\t client_secret = self.credentials.client_secret\n\n\t\turl = 'https://accounts.google.com/o/oauth2/token'\n\t\tvalues = {\"refresh_token\":refresh_token, \"client_id\":client_id, \"client_secret\":client_secret, \"grant_type\":\"refresh_token\"}\n\t\tprint 'refresh_token POST values: ' + str(values)\n\t\t# encode data\n\t\tdata = urllib.urlencode(values)\n\t\tprint 'changed'\n\t\tprint 'data:' + str(data)\n\t\timport traceback\n\t\timport sys\n\t\t# post request for refresh token\n\t\ttry:\n\t\t\treq = urllib2.Request(url, data)\n\t\t\tprint req.get_full_url()\n\t\t\tresponse = urllib2.urlopen(req)\n\t\t\tprint 'response: ' + str(response)\n\t\t\tresponse_json = json.loads(response.read())\n\t\t\tprint 'google refresh token response json: ' + str(response_json)\n\n\t\texcept Exception, err:\n\t\t\tprint traceback.format_exc()\n\t\tnew_access_token = response_json[\"access_token\"]\n\t\tself.credentials.access_token = 
new_access_token\n\t\tnew_expiration_date = datetime.now() + timedelta(hours=1)\n\t\tself.credentials.token_expiry = new_expiration_date\n\t\tdb.session.add(self.credentials)\n\t\tdb.session.commit()\n\t\tprint 'done getting values from fresh_token'", "def refresh():\n current_user = get_jwt_identity()\n\n user = get_user_by_username(current_user)\n\n if not user:\n return make_response(CONST_LOGIN_MSG, 401, {\n 'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})\n\n if user.is_admin:\n claims = {'is_admin': True}\n else:\n claims = {'is_admin': False}\n\n now = datetime.datetime.now(datetime.timezone.utc)\n access_expires = (now + jwt_config.access_expires).timestamp()\n refresh_expires = (now + jwt_config.refresh_expires).timestamp()\n\n response = {\n 'access_token': create_access_token(identity=current_user,\n user_claims=claims),\n 'access_expires': access_expires,\n 'refresh_expires': refresh_expires,\n 'refresh_token': create_refresh_token(identity=current_user),\n 'user': get_user_details(user)\n\n }\n return jsonify(response), 200", "def refreshAccessToken(self, token):\r\n header = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}\r\n url = self._config['OAUTH2ENDPOINT']['huddleAccessTokenServer']\r\n\r\n body = {\"grant_type\": \"refresh_token\",\r\n \"client_id\": self._config['OAUTH2']['clientID'],\r\n \"refresh_token\": token.getRefreshToken()\r\n }\r\n\r\n return self._adapter.postRequest(url, header, parse.urlencode(body))", "def refresh(refreshtoken):\n if not refreshtoken:\n raise Exception('refresh() requires refreshtoken parameter')\n\n p = {\n 'client_id': c.client_id,\n 'client_secret': c.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': refreshtoken\n }\n\n return r._post('/token/', p, '/oauth/v2', False)", "def access_token(self):\n return self.access_token_str", "def get_access_token(*args, **kwargs):\n return get_access_token_async(*args, **kwargs).get_result()", "def get_access_token(self, token_url):\n # type: (str) -> str\n\n payload = {\n \"grant_type\" : \"client_credentials\",\n \"client_id\" : self.client_id,\n \"client_secret\" : self.client_secret,\n \"scope\" : self.client_scope,\n }\n headers = {\n \"accept\" : \"application/json\",\n }\n resp = requests.post(f\"{self.base_url}/{token_url}\", data=payload, headers=headers)\n try:\n if (resp.ok):\n return resp.json().get('access_token')\n except (ValueError):\n self.__log.error (\"Error obtaining access token with credentials\")", "def get_user_tokens_via_refresh(client_id, \n client_secret, refresh_token):\n # Setup params\n params = {'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token'}\n url = \"https://api.imgur.com/oauth2/token\"\n\n # POST to server using requests\n r = requests.post(url, data=params)\n\n # Get JSON response and parse\n j = r.json()\n return j_handler.parse_user_token_json(j)", "def refresh_token(self, first_time=False):\n\n with REFRESH_CONDITION:\n request_data = {}\n if first_time:\n request_data['grant_type'] = 'authorization_code'\n request_data['code'] = self._refresh_token\n request_data['redirect_uri'] = CALLBACK_ENDPOINT\n request_data['client_id'] = self.client_id\n request_data['client_secret'] = self.client_secret\n r = requests.post(url=TOKEN_URL, data=request_data)\n else:\n request_data['grant_type'] = 'refresh_token'\n request_data['refresh_token'] = self._refresh_token\n client_id_secret = '{}:{}'.format(\n self.client_id, 
self.client_secret)\n headers = {'Authorization': 'Basic {}'.format(\n base64.b64encode(client_id_secret.encode()).decode())}\n r = requests.post(\n url=TOKEN_URL, headers=headers, data=request_data)\n\n r.raise_for_status()\n refresh_response = r.json()\n\n # logger.debug('refresh_token answer from server: %s',\n # refresh_response)\n\n if first_time:\n self._refresh_token = refresh_response.get(\n 'refresh_token', None)\n self._access_token = refresh_response.get('access_token', None)\n self._expires_in = refresh_response.get('expires_in', None)\n\n # logger.info('refresh token expires in: %s', self._expires_in)\n # logger.info('new access token %s', self._access_token)", "def _get_access_token(self) -> dict:\n demisto.debug('CDL - Fetching access token')\n try:\n oproxy_response = self._http_request('POST',\n '/cdl-token',\n json_data={'token': get_encrypted(self.refresh_token, self.enc_key)},\n timeout=(60 * 3, 60 * 3),\n retries=3,\n backoff_factor=10,\n status_list_to_retry=[400])\n except DemistoException as e:\n if re.match(BAD_REQUEST_REGEX, str(e)):\n demisto.error('The request to retrieve the access token has failed with 400 status code.')\n demisto.setIntegrationContext(self._cache_failure_times(demisto.getIntegrationContext()))\n raise e\n\n self.reset_failure_times()\n return oproxy_response", "def get_access_token(self, request) -> str or Exception:\n pass", "def access_token(self):\n return self._authentication.access_token", "def access_token(self):\n return self._authentication.access_token", "def _get_refresh_token(self):\n\n doc = minidom.Document()\n root = doc.createElement('appAuthorization')\n doc.appendChild(root)\n user = doc.createElement('username')\n user.appendChild(doc.createTextNode(self.username))\n root.appendChild(user)\n pw = doc.createElement('password')\n pw.appendChild(doc.createTextNode(self.password))\n root.appendChild(pw)\n application = doc.createElement('application')\n application.appendChild(doc.createTextNode(self.appId))\n root.appendChild(application)\n aki = doc.createElement('accessKeyId')\n aki.appendChild(doc.createTextNode(self.publicAccessKey))\n root.appendChild(aki)\n pak = doc.createElement('privateAccessKey')\n pak.appendChild(doc.createTextNode(self.privateAccessKey))\n root.appendChild(pak)\n data = doc.toprettyxml()\n\n resp = requests.post(BASE_URL + \"app-authorization\", data=data, headers=self._default_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to authorize app: {}\".format(resp))\n\n # Save off the refresh token\n self._refresh_token = resp.headers.get('Location', None)", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def accessToken(self):\n if self.isExpired:\n self.refresh()\n\n return self._accessToken", "def refresh_token(self):\n now = timezone.now()\n limit = now - timedelta(days=20)\n # TODO: use expires_in from response data?\n print(self.token_refresh_date)\n print(limit)\n if self.token_refresh_date < limit:\n url = '{}refresh_access_token'.format(conf.INSTAGRAM_API)\n params = {\n 'grant_type': 'ig_refresh_token',\n 'access_token': self.token\n }\n response = requests.get(url, params=params)\n data = response.json()\n else:\n print('no need to get a fresch token yet')\n return\n if response.status_code == 200 and data:\n self.token = data.get('access_token')\n self.token_refresh_date = now\n self.token_ok = True\n self.save()\n elif settings.DEBUG:\n self.token_ok = False\n self.save()\n print('could not refresh token')\n 
return", "def get_auth_token():\n \n form_fields = {\n \"client_id\": client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response", "def _refresh_access_token(self):\n url = self._get_url(subpath=\"auth\", route=\"refresh\")\n refresh_token = get_refresh_token()\n payload = {\"refresh_token\": refresh_token}\n response = self.session.post(url, json=payload)\n response.raise_for_status()\n access_token = response.json()[\"access_token\"]\n set_process_execution_user_token(access_token)\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"", "def refresh_access_token(self):\n self._access_token = self.generate_access_token()", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def access_token(config, token):\n response = call_api('post', 'oauth/access_token', config,\n params={'oauth_token': token['oauth_token']},\n data={'oauth_verifier': token['oauth_verifier']})\n return dict([(k, v[0]) for k,v in urlparse.parse_qs(response.text).items()])", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "def _refresh_token(self, client):\n\n url = self._url('token')\n client_data = self.clients[client]\n refresh_token = client_data['token']['refresh_token']\n data = {'grant_type': 'refresh_token',\n 'scope': 'PRODUCTION',\n 'refresh_token': refresh_token}\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)", "def get_token(client):\n # Begin by looking in token cache, first arg is for scopes,\n # because token is for app rather than user, second arg is None.\n result = client.acquire_token_silent(\n [\"https://graph.microsoft.com/.default\"], account=None\n )\n\n if not 
result:\n logger.info(\"No suitable token exists in cache. Get new one from Azure AD\")\n result = client.acquire_token_for_client(\n scopes=[\"https://graph.microsoft.com/.default\"]\n )\n\n # If we can't get access token, see what went wrong, otherwise return it.\n if \"access_token\" not in result:\n logger.exception(f'{result[\"error_description\"]} - {result[\"correlation_id\"]}')\n else:\n return result[\"access_token\"]", "def get_token(self, legs=2):\n if legs == 2:\n\n headers = {}\n\n headers.update({ 'Content-Type' : 'application/x-www-form-urlencoded' })\n\n data = {}\n\n data.update({'client_id' : self.clientId})\n data.update({'client_secret' : self.clientSecret})\n data.update({'grant_type' : 'client_credentials'})\n data.update({'scope' : self.scopes})\n\n resp = self.http.post(self.webAddress, headers=headers, data=data)\n\n if resp.status_code == 200:\n cont = resp.json()\n return (cont['access_token'], cont['expires_in'])\n\n raise ConnectionError(\"Request failed with code {}\".format(resp.status_code) +\n \" and message : {}\".format(resp.content) +\n \" during authentication.\")\n else:\n raise NotImplementedError(\"3-legged authentication has not been implemented.\")", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def access_token(self):\n social_auth = self.social_auth.get()\n return social_auth.tokens", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()", "def create_access_token(oauth):\n #create parameters for API authorization\n\tredirect_uri = 'oob'\n\tparams = {'client_secret': oauth.client_secret,\n\t\t\t 'redirect_uri': redirect_uri,\n\t\t\t 'response_type': 'code'}\n\t#store the access code\n\turl = oauth.get_authorize_url(**params)\n\n\t#open a web browser to get access token and then store it via manual input\n\twebbrowser.open(url)\n\tcode = input('Enter code: ')\n\t#create credentials item\n\tstart_time = time.time()\n\t#create dictionary to hold credentials and store beginning time\n\tcredentials = {'token_time': start_time}\n\n\t#NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE\n\t#\n\t\n\t#create parameters\n\tdata = {'code': code,\n\t\t\t'redirect_uri': redirect_uri,\n\t\t\t'grant_type': 'authorization_code'}\n\t#build the headers\n\theaders = oauth_headers(oauth)\n\t#create the raw access token\n\traw_access = oauth.get_raw_access_token(data=data, headers=headers)\n\t#parse the raw access token and add to credentials variable\n\tcredentials.update(access_parse(raw_access))\n\n\t#parse access token from credentials\n\taccess_token = credentials['access_token']\n\t#return access token\n\treturn access_token", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 
'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))", "def Access(self):\n if datetime.now() < self.access_exp:\n pass\n elif datetime.now() > self.access_exp and datetime.now() < self.refresh_exp:\n grant = 'refresh_token'\n self._postRequest(grant=grant)\n elif datetime.now() > self.refresh_exp:\n grant = 'authorization_code'\n self._getURLcode()\n self._postRequest(grant=grant)", "def refresh_token(self):\n url = 'https://www.yikyak.com/api/auth/token/refresh'\n token = self._request('POST', url)\n self.session.headers.update({'x-access-token': token})", "def get_token(client_id, client_secret, username, password):\r\n try:\r\n if oauth2db.check_client(client_id, client_secret):\r\n if oauth2db.check_user(username, password):\r\n token, refresh = oauth2db.generate_token(client_id, username)\r\n res = { \"token\": token }\r\n except:\r\n res = { \"error\": \"\" }\r\n \r\n if 'token' in res:\r\n return res['token']\r\n else:\r\n return None", "def get_token(client_id, client_secret, token, renewal):\n url = \"https://www.strava.com/api/v3/oauth/token\"\n\n if renewal:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': token,\n 'grant_type': 'refresh_token'}\n message = \"Successfully refreshed Strava token.\"\n else:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'code': token,\n 'grant_type': 'authorization_code'}\n message = \"Successfully authenticated with Strava using access code.\"\n\n response = return_json(url, \"POST\", payload=payload)\n helper.log_info(message)\n return response", "def request_access_token(self, *args, **kwargs):\n response = super().request_access_token(*args, **kwargs)\n if \"access_token\" not in response:\n response[\"access_token\"] = response[\"id_token\"]\n return response", "def fetch_oauth_access_token(consumer_token, request_token):\n url = get_oauth_access_token_url(consumer_token, request_token)\n request = urllib2.urlopen(url)\n token = _oauth_parse_response(request.read())\n request.close()\n return token", "def token_refresh_handler(refresh):\n ser = TokenRefreshSerializer(data={'refresh': refresh})\n ser.is_valid(raise_exception=True)\n res = dict(refresh=ser.validated_data.get('refresh'),\n access=ser.validated_data.get('access')\n )\n return res", "def get_access_token(self, callback_uri, request_token):\n verifier = dict(urldecode(urlparse.urlparse(callback_uri).query))\n self.client.verifier = 
verifier.get('oauth_verifier')\n self.client.resource_owner_key = request_token.get('oauth_token')\n self.client.resource_owner_secret = request_token.get('oauth_token_secret')\n uri, headers, body = self.client.sign(self.access_token_url)\n response = requests.request(self.token_method, uri, headers=headers, data=body)\n self.client.verifier = None\n response.raise_for_status()\n token = dict(urldecode(response.text))\n self.set_token(token)\n return self.normalize_token_data(token)", "def get_access_token(self):\n signed_jwt = self.generate_jwt(os.path.join(FILE_DIR, KEYFILE))\n if signed_jwt is None:\n return False\n url = HOMEGRAPH_TOKEN_URL\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = 'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&assertion=' + signed_jwt.decode(\n 'utf-8')\n\n r = requests.post(url, headers=headers, data=data)\n\n if r.status_code == requests.codes.ok:\n token_data = json.loads(r.text)\n self._access_token = token_data['access_token']\n return token_data['access_token']\n\n r.raise_for_status()\n return", "def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)", "def get_refresh_token(email, password):\n body = {'grant_type': 'password',\n 'username': email,\n 'password': password,\n 'client_id': current_app.config['AUTH0_CLIENT_ID'],\n 'client_secret': current_app.config['AUTH0_CLIENT_SECRET'],\n 'audience': current_app.config['AUTH0_AUDIENCE'],\n 'scope': 'offline_access'\n }\n req = requests.post(\n current_app.config['AUTH0_BASE_URL'] + '/oauth/token',\n json=body\n )\n req.raise_for_status()\n return req.json()['refresh_token']", "def refresh():\n print(\"refresh request\")\n old_token = flask.request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def obtain_access_token():\n\tpost_data = {'grant_type': 'client_credentials',\n\t\t\t\t 'client_id': conos_config['client_id'],\n\t\t\t\t 'client_secret': conos_config['client_secret']}\n\n\ttry:\n\t\tresponse = requests.post(url=conos_config['sts_url'], data=post_data, timeout=60) # 60 seconds\n\t\tif response.ok:\n\t\t\treturn 'Bearer ' + response.json()['access_token']\n\t\telse:\n\t\t\tprint('\\nERROR: Can not obtain access token')\n\t\t\tprint('\\nResponse error: ', response.json())\n\t\t\tresponse.raise_for_status()\n\texcept requests.exceptions.RequestException as e:\n\t\t# All exceptions that Requests explicitly raises inherit from requests.exceptions.RequestException\n\t\tprint(\"Root cause: \", e)\n\t\tsys.exit(1)", "def refresh_token(self):\n # type: () -> Token\n token = self._request(\n self._client.refresh_token,\n self._token_endpoint,\n self.token[\"refresh_token\"],\n )\n self.set_token(token)\n return token", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': 
context.vendor_config['versioned_auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def get_access_token(self) -> Optional[Text]:\n return self.access_token", "def get_token(self):\n logging.debug(\"In the Token get_token() class method.\")\n\n if datetime.datetime.now() > self.token_expiry:\n logging.info(\"Token Expired.\")\n self.generate_tokens()\n return self.access_token", "async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])" ]
[ "0.8227711", "0.80836374", "0.79436207", "0.7837815", "0.7809161", "0.77488285", "0.7722438", "0.7707735", "0.7651722", "0.76375973", "0.76296955", "0.7606603", "0.7567231", "0.7512205", "0.74993056", "0.7455479", "0.7370668", "0.7347706", "0.7344518", "0.7326517", "0.7323279", "0.7312945", "0.73030126", "0.7234234", "0.721434", "0.7163277", "0.7154395", "0.7150042", "0.7144854", "0.7122201", "0.7116842", "0.7080227", "0.7073514", "0.7072968", "0.701259", "0.6990641", "0.6965409", "0.6956444", "0.6941075", "0.6938947", "0.6928595", "0.6905615", "0.6896261", "0.68844515", "0.6879967", "0.685805", "0.6849474", "0.6847235", "0.6843147", "0.6837833", "0.683291", "0.6830286", "0.68192875", "0.6806154", "0.67933226", "0.6788323", "0.6780807", "0.67800343", "0.67784995", "0.6772263", "0.6772263", "0.6766425", "0.67600316", "0.6738986", "0.6734957", "0.6708707", "0.6694361", "0.6683992", "0.6660967", "0.6657656", "0.6650583", "0.6641695", "0.66416055", "0.66388273", "0.6633644", "0.6633133", "0.6631507", "0.66236", "0.6607633", "0.66050714", "0.65682745", "0.6561273", "0.65594536", "0.65577614", "0.6557032", "0.65541136", "0.65502584", "0.6535416", "0.6527367", "0.6525135", "0.6522391", "0.6515247", "0.65091443", "0.65059704", "0.6504422", "0.64950275", "0.64943993", "0.64939034", "0.64924634", "0.6482428" ]
0.68015915
54
Query the dataselect service of the client.
def get_waveforms(self, network, station, location, channel, starttime, endtime, quality=None, minimumlength=None, longestonly=None, filename=None, attach_response=False, **kwargs): if "dataselect" not in self.services: msg = "The current client does not have a dataselect service." raise ValueError(msg) locs = locals() setup_query_dict('dataselect', locs, kwargs) # Special location handling. Convert empty strings to "--". if "location" in kwargs and not kwargs["location"]: kwargs["location"] = "--" url = self._create_url_from_parameters( "dataselect", DEFAULT_PARAMETERS['dataselect'], kwargs) # Gzip not worth it for MiniSEED and most likely disabled for this # route in any case. if not self._validate_jwt_token(): self._refresh_access_token() data_stream = self._download(url, use_gzip=False, use_jwt=self.jwt_access_token) data_stream.seek(0, 0) if filename: self._write_to_file_object(filename, data_stream) data_stream.close() else: st = obspy.read(data_stream, format="MSEED") data_stream.close() if attach_response: self._attach_responses(st) self._attach_dataselect_url_to_stream(st) st.trim(starttime, endtime) return st
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self):\n pass", "def select(self):\n pass", "def _run_select(self):\n return self._connection.select(\n self.to_sql(),\n self.get_bindings(),\n not self._use_write_connection\n )", "def select(self):\n return", "def query(self) -> None:\n raise NotImplementedError()", "def select(self):\r\n pass", "def get_data(self, key, **kwargs):\n with self.read():\n return self.handle.select(key, **kwargs)", "def select(self, query):\n\n if query.isId():\n # simple\n url = '%s/%s/%i' % (self.uri, query.table(), query._where[0].value)\n else:\n # real query\n url = '%s/%s/filter?%s' % (self.uri, query.table(), query.encode())\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data", "def query(self):\r\n raise NotImplementedError", "def select(self, db):\n self.connect()\n self._write('SELECT %s\\r\\n' % db)\n return self._get_simple_response()", "async def queried(self, value=None):\n pass", "def select_data_item(self, data_items):\n raise NotImplementedError", "def select(collectionID,userID,timeIntervalStart=None,timeIntervalEnd=None,inputData=[]):\n\n\treturn pull(\n\t\tcollectionID=collectionID,\n\t\tuserID=userID,\n\t\ttimeIntervalStart=timeIntervalStart,\n\t\ttimeIntervalEnd=timeIntervalEnd\n\t)", "async def test_run_select_service(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n cmd_topic = \"test/select/set\"\n state_topic = \"test/select\"\n\n mqtt_mock = await mqtt_mock_entry()\n\n async_fire_mqtt_message(hass, state_topic, \"beer\")\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"beer\"\n\n await hass.services.async_call(\n SELECT_DOMAIN,\n SERVICE_SELECT_OPTION,\n {ATTR_ENTITY_ID: \"select.test_select\", ATTR_OPTION: \"milk\"},\n blocking=True,\n )\n mqtt_mock.async_publish.assert_called_once_with(cmd_topic, \"milk\", 0, False)\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"beer\"", "def select(cls, *flst):\n cls.runtime.set_select(flst)\n return SelectQuery(cls.runtime)", "def query(self):\n pass", "def selectOpt(self, sql): # select\n # apply connection rescource\n dbp_opt = dbPool()\n results = dbp_opt.opSelect(sql)\n # release connection rescource\n dbp_opt.dispose()\n return results", "def select(self, domain_or_name, query='', next_token=None,\r\n consistent_read=False):\r\n domain, domain_name = self.get_domain_and_name(domain_or_name)\r\n params = {'SelectExpression' : query}\r\n if consistent_read:\r\n params['ConsistentRead'] = 'true'\r\n if next_token:\r\n params['NextToken'] = next_token\r\n try:\r\n return self.get_list('Select', params, [('Item', self.item_cls)],\r\n parent=domain)\r\n except SDBResponseError, e:\r\n e.body = \"Query: %s\\n%s\" % (query, e.body)\r\n raise e", "def query(self):\r\n self.reportDrivers()", "def query(self, **kwargs):", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()", "def _select_datagrams(self, params):", "def dbselect(cxn, query, payload):\n\tcursor = cxn.cursor()\n\tif not payload:\n\t\trows = cursor.execute(query)\n\telse:\n\t\trows = cursor.execute(query,payload)\n\tresults = []\n\tfor row in rows:\n\t\tresults.append(row)\n\tcursor.close()\n\treturn results", "def query(self):", "def select(self, domain_or_name, query='', next_token=None,\n consistent_read=False):\n domain, domain_name = self.get_domain_and_name(domain_or_name)\n params = {'SelectExpression': query}\n if consistent_read:\n params['ConsistentRead'] = 'true'\n if next_token:\n params['NextToken'] = next_token\n 
try:\n return self.get_list('Select', params, [('Item', self.item_cls)],\n parent=domain)\n except SDBResponseError as e:\n e.body = \"Query: %s\\n%s\" % (query, e.body)\n raise e", "async def get(self):\r\n try:\r\n query = System.select().execute()\r\n table = []\r\n for facility in query:\r\n table.append(facility)\r\n return web.Response(body=str(table), status=200)\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(error_message), status=404)", "def show_db_combobox(self):\n self.database_chosen[\"values\"] = self.sql_database.show_database()\n if len(self.database_chosen[\"values\"]) > 0:\n self.database_chosen.current(0)", "def sel(\n self,\n **kwargs,\n ) -> \"Dataset\":\n res = [da.sel(**kwargs) for da in self]\n return Dataset(data=res, validate=False)", "async def test_run_select_service_optimistic(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n fake_state = State(\"select.test_select\", \"milk\")\n mock_restore_cache(hass, (fake_state,))\n\n mqtt_mock = await mqtt_mock_entry()\n\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"milk\"\n assert state.attributes.get(ATTR_ASSUMED_STATE)\n\n await hass.services.async_call(\n SELECT_DOMAIN,\n SERVICE_SELECT_OPTION,\n {ATTR_ENTITY_ID: \"select.test_select\", ATTR_OPTION: \"beer\"},\n blocking=True,\n )\n\n mqtt_mock.async_publish.assert_called_once_with(\"test/select_cmd\", \"beer\", 0, False)\n mqtt_mock.async_publish.reset_mock()\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"beer\"", "def salesforce_query(self, obj_name, **kwargs):\n query = \"SELECT \"\n if \"select\" in kwargs:\n query += kwargs[\"select\"]\n else:\n query += \"Id\"\n query += \" FROM {}\".format(obj_name)\n where = []\n for key, value in kwargs.items():\n if key == \"select\":\n continue\n where.append(\"{} = '{}'\".format(key, value))\n if where:\n query += \" WHERE \" + \" AND \".join(where)\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query).get(\"records\", [])", "def select(self, location, name):\n if self._transfer is not None:\n return\n\n self._client.proxy.Select(location, name)", "def get_data(self, user, password, table):\n self.my_connect = SetData.GetData(self.host, self.database, self.charset)\n self.my_connect.connect(user, password)\n self.my_connect.select(\"SELECT * FROM {}\".format(table))\n self.result = self.my_connect.result", "def query(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='query', **kwargs)", "def query(self, dataset):\n\n host = self.options.host\n debug = self.options.verbose\n idx = self.options.idx\n limit = self.options.limit\n \n def check(ds):\n query = 'dataset=%s' % ds\n result = Das.get_data(host, query, idx, limit, debug)\n result = result.replace('null','None')\n result = result.replace('true','True')\n result = result.replace('false','False')\n data = eval(result)\n if data['status'] != 'ok':\n raise Exception(\"Das query failed: Output is '%s'\" % data)\n return (data['data'],data)\n\n data = None\n exists = False\n \n if self.options.name is None:\n #guess the dataset name in DBS\n tokens = [t for t in dataset.split(os.sep) if t]\n if len(tokens) >= 3:\n #DBS names always have three entries\n ds = os.sep + os.sep.join(tokens[0:3])\n if ds:\n exists, data = check(ds)\n self.options.name = ds\n else:\n exists, data = check(self.options.name)\n if not exists:\n raise 
Exception(\"Specified dataset '%s' not found in Das. Please check.\" % self.options.name)\n \n if data is None:\n raise Exception(\"Dataset '%s' not found in Das. Please check.\" % self.dataset)\n return data", "def query(self, qpath):\n return data.Query(self, qpath)", "def get_clients():\n data = DataTable(document=ClientDocument, schema=ClientSchema).get_data\n return Response(data=data).send()", "def select_request_client_id_access_item(self):\n self.driver.click(\"request_client_advertising_id_access_btn\")", "def selectData(self, sql: str) -> List:\n try:\n connection = self.connect()\n cursor = connection.cursor() \n data = cursor.execute(sql)\n result = data.fetchall() \n return result\n except Exception as e:\n logging.error(f'{self.cn} Exception: {e}', exc_info=1)\n logging.error(f'{self.cn} SQL: {sql}')", "def fetch_data(self):\n if not self.json_query:\n self.generate_json_query()\n\n response = search_graphql(self.json_query)\n\n if \"errors\" in response:\n print(\"ERROR encountered in fetch_data().\")\n for error in response['errors']:\n print(error['message'])\n\n return\n\n self.response = response\n\n if len(self.response['data'][self.data_type.value]) != len(self.id):\n print(\"WARNING: one or more IDs not found in the PDB.\")", "def _run_query(self):", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def _select(self):\r\n readable = [self.socket.handle.fileno(), self._read.fileno()]\r\n writable = []\r\n for i, connection in self.clients.items():\r\n if connection.is_readable():\r\n readable.append(connection.fileno())\r\n if connection.is_writeable():\r\n writable.append(connection.fileno())\r\n if connection.is_closed():\r\n del self.clients[i]\r\n return select.select(readable, writable, readable)", "def query(self, q, **kwargs):\n return self._client.query(self._db_name, q, **kwargs)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)", "def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet", "def query(self) -> dict:\n raise NotImplementedError()", "def query(self,query):\n if self.data is not None:\n qData = cPickle.loads(query)\n results = self.handleQuery(qData)\n qResults = cPickle.dumps(results)\n else:\n results = None\n qResults = cPickle.dumps(results)\n return qResults", "def send_select(self, query):\n # TODO: Add exception handling from bad queries\n\n # We use regex to find table name and remove and ':' from the name\n select_pattern = r\"SELECT .+ FROM ([\\w_:]+)\"\n match = re.search(select_pattern, query, re.IGNORECASE)\n table_name = match.group(1)\n if \":\" in table_name:\n query = self.clean_table_name(query, 
table_name)\n self.send_sql_query2(query)\n return self.cursor.fetchall()", "def service(self):\n self.serviceConnects()\n self.serviceQueries()", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def _on_select(self, object):\n pass", "def fetch_data(self):", "def query_fetch(self, **kwargs):\n return iterate_with_exp_backoff(self._client.query(**kwargs).fetch())", "def _query_implementation(cls, cb, **kwargs):\n return USBDeviceQuery(cls, cb)", "def get_queryset(self):\n return Data.objects.all()", "async def test_run_select_service_with_command_template(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n cmd_topic = \"test/select/set\"\n state_topic = \"test/select\"\n\n mqtt_mock = await mqtt_mock_entry()\n\n async_fire_mqtt_message(hass, state_topic, \"beer\")\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"beer\"\n\n await hass.services.async_call(\n SELECT_DOMAIN,\n SERVICE_SELECT_OPTION,\n {ATTR_ENTITY_ID: \"select.test_select\", ATTR_OPTION: \"milk\"},\n blocking=True,\n )\n mqtt_mock.async_publish.assert_called_once_with(\n cmd_topic, '{\"option\": \"milk\"}', 0, False\n )", "def _selected_data(self):\n for items in self.ui.data_list.selectedItems():\n yield self._data[str(items.text())]", "def select_server(self):\n pass", "def ng_query(self, request, *args, **kwargs):\r\n return self.build_json_response(self.get_queryset())", "def _select_device(call: ServiceCall) -> None:\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)", "def select(self, host, port, db, password=None):\r\n self.connection = self.get_connection(host, port, db, password)", "def run_select_examples():\n table = \"actors\"\n select_fields = ['name', 'last_name', 'country']\n select_conds1 = {}\n select_conds2 = {'id': 3}\n select_conds3 = {'id': 3, 'name': \"Matt\"}\n print querify.select_from_dict(table, select_fields)\n print querify.select_from_dict(table, select_fields, select_conds1)\n print querify.select_from_dict(table, select_fields, select_conds2)\n print querify.select_from_dict(table, select_fields, select_conds3)", "def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result", "async def test_run_select_service_optimistic_with_command_template(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n fake_state = State(\"select.test_select\", \"milk\")\n mock_restore_cache(hass, (fake_state,))\n\n mqtt_mock = await mqtt_mock_entry()\n\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"milk\"\n assert state.attributes.get(ATTR_ASSUMED_STATE)\n\n await hass.services.async_call(\n SELECT_DOMAIN,\n SERVICE_SELECT_OPTION,\n {ATTR_ENTITY_ID: \"select.test_select\", ATTR_OPTION: \"beer\"},\n blocking=True,\n )\n\n mqtt_mock.async_publish.assert_called_once_with(\n \"test/select_cmd\", '{\"option\": \"beer\"}', 0, False\n )\n 
mqtt_mock.async_publish.reset_mock()\n state = hass.states.get(\"select.test_select\")\n assert state.state == \"beer\"", "def query(self, query):", "def select_client_id_availability_item(self):\n self.driver.click(\"get_client_advertising_id_availability_btn\")", "def data_from_ucr_query(self):\n raise NotImplementedError", "def _get_data_selection(self, event):\n data = None\n # selection = event.GetSelection()\n id, _, _ = self.FindFocus().GetSelection().GetData()\n data_list, theory_list = \\\n self.parent.get_data_manager().get_by_id(id_list=[id])\n if data_list:\n data = data_list.values()[0]\n if data is None:\n data = theory_list.values()[0][0]\n return data", "def _run_async_query(self, context):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def select_from_bd(self, query):\n try:\n self.cursor.fetchall()\n except psycopg2.ProgrammingError:\n pass\n self.cursor.execute(query)\n return self", "def test_do_select(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n _function = DUT.do_select(1)\r\n\r\n assert isinstance(_function, RAMSTKFunction)\r\n assert _function.function_id == 1\r\n assert _function.availability_logistics == 1.0", "def test_execution_select_success(self, pool):\n command = stellr.SelectCommand(TEST_HTTP, name='test select')\n self.assertEquals(command.pool, pool)\n response = self._create_execution_mocks(pool, 200)\n\n command.add_param('fq', 'field:filter')\n data = command.execute()\n\n # check the mock\n hdrs = {'connection': 'keep-alive',\n 'content-type': ('application/x-www-form-urlencoded; '\n 'charset=utf-8')}\n pool.urlopen.assert_called_once_with('GET',\n 'http://localhost:8983/solr/select?wt=json&fq=field%3Afilter',\n body=None, headers=hdrs, timeout=15,\n assert_same_host=False)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data['key'], 'value')\n self.assertEqual(data['number'], 42)\n\n # verify name is returned\n data, name = command.execute(return_name=True)\n self.assertEqual(name, 'test select')\n self.assertEqual(len(data), 2)\n self.assertEqual(data['key'], 'value')\n self.assertEqual(data['number'], 42)", "def test_request_do_select(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n _function = DUT.request_do_select(1)\r\n\r\n assert isinstance(_function, RAMSTKFunction)", "def run_select_query(query, args = None):\n cursor = db.get_cursor()\n cursor.execute(query, args)\n return cursor.fetchall()", "def get(self):\n self.set_action(\"select\")\n result = self.connection.query(self.to_qmark(), self._bindings)\n relations = self.eager_load_model(result)\n return self.owner.new_collection(result).map_into(\n self.owner, \"hydrate\", relations=relations\n )", "def query_all(self):\n return multisearch.queries.QueryAll().connect(self)", "def query_sel(self, job, filters, options):\n results = []\n job.set_progress(50, 'Enumerating extended event log')\n cp = run(['ipmitool', '-c', 'sel', 'elist'], capture_output=True) # this is slowwww\n if cp.returncode == 0 and cp.stdout:\n job.set_progress(95, 'Parsing extended event log')\n for record in parse_ipmitool_output(cp.stdout.decode()):\n results.append(record._asdict())\n job.set_progress(100, 'Parsing extended event log 
complete')\n\n return filter_list(results, filters, options)", "def select(message, items=[], timeout=0, buttons=DIALOGBUTTON_OK):\n box = Combobox(__name__, message, buttons)\n box.timeout = timeout\n box.items = items\n return _retcode2bool(box.show())", "def do_select(self, line):\n # Available data sources\n options = \"-csv\", \"-db\"\n args = list (arg.lower () for arg in str (line).split ())\n\n try:\n # Check if the input data source is available in this program or not\n if args[0] not in options:\n raise ValueError (\"The data resource is not available.\")\n else:\n # Code for initialise CSV data source\n if args[0] == \"-csv\":\n try:\n if len (args) == 1:\n self._shw.select_source (args[0][1:], \"employeeinfo.csv\")\n View.warning (\n \"No CSV file path specified. A default file \\\"employeeinfo.csv\\\" will be used.\")\n elif len (args) == 2:\n self._shw.select_source (args[0][1:], args[1])\n elif len (args) == 3:\n if args[1] == \"-a\":\n self._shw.select_source (args[0][1:], args[2], True)\n except (CSVError, OSError) as e:\n View.error (e)\n except Exception as e:\n View.error (e)\n else:\n View.success (\"Data source CSV is selected.\")\n\n # Code for initialise database source\n elif args[0] == \"-db\":\n try:\n self._shw.select_source (args[0][1:])\n except (ConnectionError, TypeError) as e:\n View.error (e)\n except Exception as e:\n View.error (e)\n else:\n View.success (\"Data source Database is selected.\")\n\n # Code for initialise XXXX data source\n else:\n pass\n # Catch and display error message\n except ValueError as e:\n View.error (str (e) + \"\\n\")\n View.help_select ()\n except Exception as e:\n View.error (e)", "def __selectMS(self):\n \n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis']) \n else:\n self._msTool.reset()\n \n # It returns a dictionary if there was any selection otherwise None\n self.__selectionFilter = self.__getSelectionFilter()\n\n if self.__selectionFilter is not None:\n self._msTool.msselect(self.__selectionFilter)", "def select(self):\n if not self._selected:\n \tself._selected = True\n\t\tself.log(\"device {} is now selected\".format(self._secondary_address))", "def api_query(self, **kwargs):\n with self._api_lock:\n return self._api_query(kwargs)", "def test_select():\n assert_that(users.select(), all_of(\n instance_of(SelectQuery),\n has_properties({\n 'collection': users,\n 'model': User,\n\n 'state': has_entries({\n 'properties': None\n })\n })\n ))", "def select(*args):", "def select(*args):", "def select(self, query='', next_token=None, consistent_read=False, max_items=None):\r\n return SelectResultSet(self, query, max_items=max_items, next_token=next_token,\r\n consistent_read=consistent_read)", "def select(self, aid: bytes) -> bool:\n ...", "def dataset_choice(self):\n # while running:\n\n # select the dataset file for this cycle\n dataset = self.which_dataset()\n # print('A2. 
dataset = ', dataset)\n\n # send to list making function\n self.data_list = self.dataparsing(dataset)\n\n # how long to read a dataset file for this cycle\n dataset_choice_dur = (random.randrange(6000, 26000) / 1000) * self.glob_speed\n if self.debug_choose:\n print(f'A4 dataset choice duration = {dataset_choice_dur} seconds')\n\n # wait for this process to timeout 6-26 seconds\n # time.sleep(dataset_choice_dur)\n for _ in range (int(dataset_choice_dur) * 100):\n if config.affect_interrupt:\n continue\n else:\n time.sleep(0.01)", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def _attach_dataselect_url_to_stream(self, st):\n url = self._build_url(\"dataselect\", \"query\")\n for tr in st:\n tr.stats._fdsnws_dataselect_url = url", "def query(self, uri, projection, selection, selectionArgs, sortOrder=None, cancellationSignal=None):\n pass", "def execselect(self, sql, vals=()):\n self.conn.ping()\n c = self.conn.cursor()\n c.execute(sql, vals)\n return c.fetchone()", "def curr_selection(self):\n\n self.domain = self.row[0]\n abstract = self.row[5]\n self.data_type = self.row[1]\n self.object_id = self.row[3]\n self.service = self.row[2]\n self.layer_title = self.row[4]\n crs_options = self.row[6]\n self.dlg.uCRSCombo.clear()\n if self.data_type != \"table\":\n self.dlg.uCRSCombo.addItems(crs_options)\n curr_crs = self.map_crs()\n if curr_crs in crs_options:\n idx = self.dlg.uCRSCombo.findText(curr_crs)\n self.dlg.uCRSCombo.setCurrentIndex(idx)\n self.dlg.uTextDescription.setText(abstract)", "def _fetch_data(self):\n pass", "def test_zmq_execution_select_success(self, pool):\n s, c = self._create_zmq_execution_mocks(pool)\n command = stellr.SelectCommand(TEST_ZMQ)\n command.add_param('fq', 'field:filter')\n data = command.execute()\n\n # check the mocks\n s.send.assert_called_once_with('/select?wt=json&fq=field%3Afilter')\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data['responseHeader']['status'], 0)\n\n # verify name is returned\n data, name = command.execute(return_name=True)\n self.assertEqual(name, 'select')", "def request_meteodata(request: str):\n import MySQLdb\n import platform\n if platform.system() == \"Windows\":\n MySQLParams = {\n 'host' : \"192.168.5.1\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n else:\n MySQLParams = {\n 'host' : \"localhost\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n try:\n con = MySQLdb.connect(**MySQLParams)\n cur = con.cursor()\n cur.execute(request)\n con.commit()\n data = cur.fetchall()\n except MySQLdb.Error as err:\n print(str(err))\n return []\n except Exception as err:\n print(str(err))\n return []\n con.close()\n return data", "def select(self,\n query_dict,\n groups=False,\n facets=False,\n stats=False,\n **kwargs\n ):\n\n if kwargs:\n query_dict.update(kwargs)\n\n response = self.client.post(\n self._get_collection_url('select'),\n body=json.dumps({'params': query_dict})\n )\n\n data = {}\n if groups and 'grouped' in response:\n data['groups'] = response['grouped']\n\n if facets and 'facet_counts' in response:\n data['facets'] = response['facet_counts']\n\n if stats and 'stats' in response:\n data['stats'] = response['stats']\n\n if 'response' in response and 'docs' in response['response']:\n response_data = response['response']\n data['docs'] = response_data['docs']\n data['total'] = response_data.get('numFound', len(data['docs']))\n\n return data", "def handleTableSelectionChange(self):\n self.selectEntireRow()\n 
self.showSelectedDataset()" ]
[ "0.5689811", "0.5689811", "0.5676971", "0.56546783", "0.56363803", "0.56242454", "0.5599584", "0.5577287", "0.5566766", "0.55223155", "0.54716337", "0.54475677", "0.5439183", "0.543774", "0.5430872", "0.54283375", "0.5352924", "0.53037834", "0.52550447", "0.524659", "0.5246172", "0.5243796", "0.52417547", "0.5231555", "0.52282244", "0.52021986", "0.51863647", "0.51747555", "0.51535374", "0.512447", "0.5121683", "0.5119774", "0.50977665", "0.5094958", "0.5024197", "0.501067", "0.50055116", "0.4994488", "0.4991824", "0.49793518", "0.49736166", "0.49601462", "0.49369273", "0.4936678", "0.4936678", "0.4936678", "0.492645", "0.49243614", "0.4903995", "0.4902447", "0.49022228", "0.48996377", "0.48966423", "0.48904628", "0.48812288", "0.48782414", "0.4878171", "0.4876643", "0.48713565", "0.48695913", "0.48613948", "0.48550677", "0.48516086", "0.48509768", "0.4850071", "0.48430327", "0.48415422", "0.48392934", "0.48366734", "0.4825383", "0.4824073", "0.48207796", "0.48147842", "0.48136532", "0.480719", "0.4786828", "0.47866845", "0.47866148", "0.47731486", "0.47729126", "0.47622165", "0.4761782", "0.47603893", "0.47598356", "0.47476038", "0.4744264", "0.47438803", "0.47438803", "0.47432128", "0.47374907", "0.47355357", "0.4733988", "0.47335863", "0.47306538", "0.47290838", "0.4720104", "0.47179216", "0.47092524", "0.47091207", "0.4709069", "0.47061813" ]
0.0
-1
Helper method to fetch responses via get_stations() and attach them to each trace in the stream.
def _attach_responses(self, st):
        # Record the overall time span covered by each unique trace id
        # (network.station.location.channel) present in the stream.
        netids = {}
        for tr in st:
            if tr.id not in netids:
                netids[tr.id] = (tr.stats.starttime, tr.stats.endtime)
                continue
            netids[tr.id] = (
                min(tr.stats.starttime, netids[tr.id][0]),
                max(tr.stats.endtime, netids[tr.id][1]))

        # Request a response-level inventory for every id over its time span;
        # failures are reported as warnings rather than aborting the whole run.
        inventories = []
        for key, value in netids.items():
            net, sta, loc, chan = key.split(".")
            starttime, endtime = value
            try:
                inventories.append(self.get_stations(
                    network=net, station=sta, location=loc, channel=chan,
                    starttime=starttime, endtime=endtime, level="response"))
            except Exception as e:
                warnings.warn(str(e))
        # Attach the collected response information to the stream in place.
        st.attach_response(inventories)
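A minimal usage sketch (an assumption: this helper reads like the ObsPy FDSN client code that backs the attach_response=True option of get_waveforms(), so it is exercised here only indirectly through that public API; the IRIS endpoint and the IU.ANMO channel are illustrative placeholders, not taken from the source):

from obspy import UTCDateTime
from obspy.clients.fdsn import Client

client = Client("IRIS")
t = UTCDateTime("2010-02-27T06:45:00")
# attach_response=True asks the client to fetch response-level inventories
# (as the helper above does) and attach them to every trace it returns.
st = client.get_waveforms("IU", "ANMO", "00", "BHZ", t, t + 600,
                          attach_response=True)
st.remove_response(output="VEL")  # instrument correction uses the attached responses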
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_stations(response: Response,\n source: StationSourceEnum = StationSourceEnum.UNSPECIFIED):\n try:\n logger.info('/stations/')\n\n weather_stations = await get_stations_as_geojson(source)\n response.headers[\"Cache-Control\"] = no_cache\n\n return WeatherStationsResponse(features=weather_stations)\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "async def get_detailed_stations(response: Response,\n toi: datetime = None,\n source: StationSourceEnum = StationSourceEnum.WILDFIRE_ONE,\n __=Depends(audit),\n _=Depends(authentication_required)):\n try:\n logger.info('/stations/details/')\n response.headers[\"Cache-Control\"] = no_cache\n if toi is None:\n # NOTE: Don't be tempted to move this into the function definition. It's not possible\n # to mock a function if it's part of the function definition, and will cause\n # tests to fail.\n toi = get_utc_now()\n else:\n toi = get_hour_20(toi)\n weather_stations = await fetch_detailed_stations_as_geojson(toi, source)\n return DetailedWeatherStationsResponse(features=weather_stations)\n\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise", "def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line", "async def _fetch_raw_stations(session: ClientSession, headers: dict, query_builder: BuildQuery) -> dict:\n # We don't know how many pages until our first call - so we assume one page to start with.\n total_pages = 1\n page_count = 0\n while page_count < total_pages:\n # Build up the request URL.\n url, params = query_builder.query(page_count)\n LOGGER.debug('loading station page %d...', page_count)\n async with session.get(url, headers=headers, params=params) as response:\n station_json = await response.json()\n LOGGER.debug('done loading station page %d.', page_count)\n # Update the total page count.\n total_pages = station_json['page']['totalPages']\n for station in station_json['_embedded']['stations']:\n yield station\n # Keep track of our page count.\n page_count = page_count + 1", "def stations(self):\n try:\n stations_api = requests.get(self._stations_url)\n stations = {}\n for station in stations_api.json():\n station_id = station['id']\n station_name = station['name']\n stations[station_id] = station_name\n\n return stations\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None", "def stations():\n\n return station_list", "def lineup_xml() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n xml = render_template('lineup.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port,\n watch=watch).encode(\"utf-8\")\n return Response(xml, mimetype='text/xml')", "def epg() -> Response:\n return jsonify(locast_service.get_stations())", "def prep_stations(url):\n stations = []\n _stations = requests.get(url).json()\n\n for _station in _stations['stationBeanList']:\n if 
_station['statusKey'] == 1:\n stations.append([_station['stationName'], _station['id'],\n _station['availableDocks'], _station['totalDocks'],\n _station['latitude'], _station['longitude']])\n\n return stations", "def view_station(request,station_id):\n station_url = settings.SODOR_ENDPOINT + 'station/' + str(int(station_id)) + '.json'\n context = {}\n try:\n station_data = client.load(station_url)\n except KeyError:\n return HttpResponseNotFound('Station not found')\n\n context['station'] = station_data.content\n\n # check children callsigns\n # do NOT assume flagship is (all) that we want - that is a bad assumption\n # e.g. WFSU has two children callsigns\n flagship_obj = station_data.related('flagship')\n flagship_callsign = flagship_obj.content.callsign\n children_callsigns = station_data.related('children')\n\n feeds = []\n callsigns = []\n context['callsign'] = flagship_callsign\n context['callsigns'] = []\n updated_callsigns = []\n\n for callsign_obj in children_callsigns.items():\n \"\"\"iterate thru callsigns\"\"\"\n if callsign_obj.content.callsign == flagship_callsign:\n callsign_obj.is_flagship = 'True'\n else:\n callsign_obj.is_flagship = None\n\n updated_callsigns.append(callsign_obj)\n callsigns.append(callsign_obj.content.callsign)\n\n children_feeds = callsign_obj.related('children')\n\n if children_feeds.self:\n for feed in children_feeds.items():\n feed_obj = {}\n # over the air channel\n # aka subchannel\n ota_channel = feed.related('summary').content\n feed_obj['ota_channel'] = ota_channel\n if callsign_obj.content.callsign == flagship_callsign:\n feed_obj['is_callsign'] = 'True'\n else:\n feed_obj['is_callsign'] = None\n feeds.append(feed_obj)\n\n feeds_by_flagship = sorted(feeds, key=itemgetter('is_callsign'),\n reverse=True)\n callsigns_by_flagship = sorted(updated_callsigns,\n key=attrgetter('is_flagship'), reverse=True)\n context['feeds'] = feeds_by_flagship\n context['callsigns'] = callsigns_by_flagship\n context = render_todays_listings(request, context, callsigns)\n\n return render_to_response(\n 'view_station.html',\n context,\n context_instance = RequestContext(request)\n )", "def get_events_stations(\n fname_all_geoNet_stats=None,\n loc_all_geoNet_stats=None,\n loc_Vol1=\"/\".join([os.getcwd(), \"Vol1\"]),\n save_stats=False,\n fname=None,\n loc=os.getcwd(),\n):\n\n all_geoNet_stats = read_statsll(loc_all_geoNet_stats, fname_all_geoNet_stats)\n\n event_stats_V1A = glob(\"/\".join([loc_Vol1, \"data\", \"*.V1A\"]))\n event_stats_V1A = [os.path.basename(_) for _ in event_stats_V1A]\n\n event_stats = {}\n for V1A_file in event_stats_V1A:\n # year, event_id, stat_code, V1A = V1A_file.split(\".\")[0].split(\"_\")\n split_file_name = V1A_file.split(\".\")[0].split(\"_\")\n year, event_id, stat_code = split_file_name[0:3]\n event_stats[stat_code] = (None, None)\n if all_geoNet_stats.has_key(stat_code):\n event_stats[stat_code] = all_geoNet_stats[stat_code]\n\n if save_stats == True:\n # assert fname is not None, \"Specify name of station file to save\"\n # assert loc is not None, \"Specify location for station file to save\"\n if fname is None:\n fname = \"_\".join([year, event_id, \"eventStats\", str(datetime.date.today())])\n fname += \".ll\"\n with open(\"/\".join([loc, fname]), \"w\") as f:\n for key, value in event_stats.items():\n if value[0] is None:\n print(\n \"{:10s} not found in all_geoNet_stats, add this manually to event_stats.ll\".format(\n key\n )\n )\n else:\n line = \"{:10.4f} {:10.4f} {:10s}\".format(\n value[0], value[1], key\n )\n f.write(line + 
\"\\n\")\n\n return event_stats, fname", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def get_stations(self):\n return self.__request('stations')['stations']", "def __init__ (self, msname, inverse = False, useElementResponse = True,\n useArrayFactor = True, useChanFreq = False):\n self._response = _stationresponse.StationResponse(msname, inverse,\n useElementResponse, useArrayFactor, useChanFreq)", "def parse(self, response):\n theater_list = response.xpath('//div[@class=\"theater_info\"]//li/a')\n for theater_element in theater_list:\n curr_cinema_url = theater_element.xpath(\n './@href').extract_first()\n cinema_name = theater_element.xpath('./text()').extract_first()\n if not cinema_name:\n # partner theater element is different\n cinema_name = ''.join(theater_element.xpath(\n './/text()').extract())\n else:\n curr_cinema_url = response.urljoin(curr_cinema_url)\n data_proto = ShowingLoader(response=response)\n data_proto.add_cinema_name(cinema_name)\n cinema_name = data_proto.get_output_value('cinema_name')\n data_proto.add_cinema_site(curr_cinema_url, cinema_name)\n data_proto.add_value('source', self.name)\n if not self.is_cinema_crawl([cinema_name]):\n continue\n request = scrapy.Request(\n curr_cinema_url, callback=self.parse_cinema)\n request.meta[\"data_proto\"] = data_proto.load_item()\n yield request", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']", "def add_stations(stations, pool):\n\n for station in stations:\n\n print(add_station(pool=pool, name=station.get('name'), latitude=station.get('latitude'),\n longitude=station.get('longitude'), station_type=station.get('station_type'),\n description=station.get('description')))\n print(station.get('name'))", "async def stations():\n with open(\"/data/station_list.json\") as j:\n data = json.load(j)\n return data", "def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations", "async def _get_stations_remote() -> List[WeatherStation]:\n LOGGER.info('Using WFWX to retrieve station list')\n async with 
ClientSession() as session:\n # Get the authentication header\n header = await _get_auth_header(session)\n stations = []\n # Iterate through \"raw\" station data.\n async for raw_station in _fetch_raw_stations(session, header, BuildQueryAllStations()):\n # If the station is valid, add it to our list of stations.\n if _is_station_valid(raw_station):\n LOGGER.info('Processing raw_station %d',\n int(raw_station['stationCode']))\n stations.append(_parse_station(raw_station))\n LOGGER.debug('total stations: %d', len(stations))\n return stations", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def trailers_received(self, event):\n super().trailers_received(event)\n\n stream_id = event.stream_id\n response_stream = self.receive_streams.get(stream_id)\n if response_stream is None:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n return\n\n trailers = response_stream.trailers\n\n if int(trailers.get(\"grpc-status\", 0)) > 0:\n error = GrpcError.from_headers(trailers)\n response_stream.close(error)\n del self.receive_streams[stream_id]", "def _getGather(self):\r\n if self.gather is None:\r\n print ('loading traces')\r\n if DEBUG:\r\n start_time = time.time()\r\n\r\n nChannels = len(self.channelRange)\r\n print(self.channelRange)\r\n traceList = [None]*nChannels\r\n #\r\n if METHOD==1:\r\n #demean all traces\r\n self.st.detrend('constant')\r\n #detrend\r\n self.st.detrend('linear')\r\n #\r\n #taper all traces on both sides\r\n #self.st.taper(max_percentage=0.05, type='cosine')\r\n print ('original sample rate is', self.st[0].stats.sampling_rate)\r\n self.sampRate = self.st[0].stats.sampling_rate /self.dsfactor\r\n print ('new sample rate is ', self.sampRate)\r\n #self.st.decimate(self.dsfactor)\r\n #process traces in parallel\r\n \r\n with Parallel(n_jobs=12) as parallelPool:\r\n traceList = parallelPool(delayed(getSingleTrace)\r\n (self.st[channelNo], \r\n self.sampRate,\r\n self.isIntegrate)\r\n for channelNo in self.channelRange)\r\n\r\n self.traceList = traceList\r\n self.st = obspy.Stream(traceList) \r\n elif METHOD==2:\r\n #do simple filtering as in Ariel Lellouch paper\r\n #self.st = utils.medianSubtract(self.st)\r\n self.st.detrend('constant')\r\n self.st.detrend('linear')\r\n self.st.filter('bandpass',freqmin=10,freqmax=150)\r\n if self.dsfactor>1:\r\n self.sampRate = self.st[0].stats.sampling_rate /self.dsfactor\r\n self.st.decimate(self.dsfactor, no_filter=True)\r\n 
print(self.channelRange)\r\n self.traceList=[self.st[channelNo] for channelNo in self.channelRange]\r\n \r\n if DEBUG:\r\n print ('processing time is ', time.time()-start_time)", "def lineup_json() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n\n return jsonify([{\n \"GuideNumber\": station.get('channel_remapped') or station['channel'],\n \"GuideName\": station['name'],\n \"URL\": f\"http://{host_and_port}/{watch}/{station['id']}\"\n } for station in locast_service.get_stations()])", "def stations():\n results = session.query(Station.station,Station.name).all()\n key=[results[i][0] for i in range(len(results))]\n values=[results[i][1] for i in range(len(results))]\n results=dict(zip(key,values))\n print(f\"Route /api/v1.0/stations is being visited\")\n return jsonify(results)", "async def get_stations() -> List[WeatherStation]:\n # Check if we're really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()", "def stations(update, context):\n db_helper.insert_chat_id(update.effective_chat.id)\n message = processor.process_stations_chat(update, context)\n processor.send_message(update, context, message)", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "async def _get_spot_feed(self):\n self._logger.debug(\"Polling Spot API\")\n async with aiohttp.ClientSession() as session:\n response = await session.request(\n method=\"GET\",\n url=self.spot_url,\n params=self.params\n )\n json_resp = await response.json()\n _response = json_resp.get(\"response\")\n\n if \"errors\" in _response:\n self._logger.error(\"Error from Spot API: '%s'\", _response)\n else:\n await self.handle_response(_response)", "def _handle_station(self, value):\n value_json = value if isinstance(value, dict) else json.loads(value)\n if value_json[\"line\"] != self.color:\n return\n self.stations[value_json[\"station_id\"]] = Station.from_message(value_json)", "def connection():\n\n\tclient = boto3.client('firehose', region_name=\"us-east-1\")\n\n\tuber_json = {}\n\tlyft_json = {}\n\n\tfor start,end in rides.items():\n\t\tuber_json[start] = {}\n\t\tlyft_json[start] = {}\n\n\t\tuber_json[start][end] = get_uber(start,end)\n\t\tlyft_json[start][end] = get_lyft(start,end)\n\t\tuber_json[start][\"time\"] = int(round(time.time()/60))\n\t\tlyft_json[start][\"time\"] = int(round(time.time()/60))\n\n\tresponse = client.put_record(\n DeliveryStreamName='uber_stream',\n Record={'Data': json.dumps(uber_json) + \"\\n\"})\n\n\tresponse = client.put_record(\n DeliveryStreamName='lyft_stream',\n Record={'Data': json.dumps(lyft_json) + \"\\n\"})", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n 
req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def populate_satellites_array():\n total_tles = 0\n tles = storage.get_tles_from_cache()\n metadata = storage.get_metadata()\n last_updated.append(metadata.get('last_updated'))\n if len(last_updated) > 1:\n del last_updated[0] \n if not tles:\n print('Fetching from spacetrack')\n cron_refresh_spacetrack_cache()\n tles = storage.get_tles_from_cache()\n for tle in tles:\n total_tles += 1\n s = Satellite(tle)\n if s.is_valid():\n satellites.append(s)\n print('Loaded {} of {} satellites'.format(len(satellites), total_tles))", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def stations():\n \n # Query all the stations\n results = session.query(Station).all()\n\n # Create a dictionary to append the station data\n stations_info = []\n for stations in results:\n stations_dict = {}\n stations_dict[\"Station\"] = stations.station\n stations_dict[\"Station Name\"] = stations.name\n stations_dict[\"Latitude\"] = stations.latitude\n stations_dict[\"Longitude\"] = stations.longitude\n stations_dict[\"Elevation\"] = stations.elevation\n all_stations.append(stations_dict)\n \n return jsonify(stations_info)", "def __save_all():\n \n # Use directory listing from stilt-web data. Ignore stations that\n # may be in the queue but are not finished yet.\n allStations = [s for s in os.listdir(CPC.STILTPATH) if os.path.exists(CPC.STILTPATH + s)]\n\n \n # read lis of ICOS stations\n icosStations = cpstation.getIdList()\n icosStations = list(icosStations['id'][icosStations.theme=='AS'])\n \n # dictionary to return\n stations = {}\n\n # fill dictionary with ICOS station id, latitude, longitude and altitude\n for ist in tqdm(sorted(allStations)):\n \n stations[ist] = {}\n # get filename of link (original stiltweb directory structure) and extract location information\n \n loc_ident = os.readlink(CPC.STILTPATH+ist)\n clon = loc_ident[-13:-6]\n lon = float(clon[:-1])\n if clon[-1:] == 'W':\n lon = -lon\n clat = loc_ident[-20:-14]\n lat = float(clat[:-1])\n if clat[-1:] == 'S':\n lat = -lat\n alt = int(loc_ident[-5:])\n\n stations[ist]['lat']=lat\n stations[ist]['lon']=lon\n stations[ist]['alt']=alt\n stations[ist]['locIdent']=os.path.split(loc_ident)[-1]\n \n # set the name and id\n stations[ist]['id'] = ist\n \n # set a flag if it is an ICOS station\n stn = ist[0:3].upper()\n if stn in icosStations:\n stations[ist]['icos'] = cpstation.get(stn).info()\n lat = stations[ist]['icos']['lat']\n lon = stations[ist]['icos']['lon']\n else:\n stations[ist]['icos'] = False \n lat = stations[ist]['lat']\n lon = stations[ist]['lon']\n \n stations[ist]['geoinfo'] = country.get(latlon=[lat,lon])\n \n return stations", "def dataLoader(stationDict, startDate, endDate):\n\n # Generate a URL\n url = ('https://waterservices.usgs.gov/nwis/dv/?format=json' +\n # Specify the sites to download\n '&sites=' + stationDict['DatasetExternalID'] +\n # Specify the start date\n '&startDT=' + datetime.strftime( startDate, '%Y-%m-%d' ) +\n #Specify the end data\n '&endDT=' + datetime.strftime( endDate, '%Y-%m-%d' ) +\n # Specify that we want streamflow\n '&parameterCd=00060' +\n # Specify that we want daily means\n '&statCd=00003' +\n # Allow all sites\n 
'&siteStatus=all' )\n \n # Get the data\n response = requests.get(url)\n\n # Check the status code\n if response.status_code != 200:\n return \n else:\n response = response.json()\n \n # Create a dataframe from the data\n df = pd.DataFrame(response['value']['timeSeries'][0]['values'][0]['value'])\n\n # Set the index to the dateTime index\n df.set_index(pd.DatetimeIndex(pd.to_datetime(df['dateTime'])), inplace = True)\n del df['dateTime'] # Delete the redundant column\n\n # Replace missing data with NaN's\n df['value'].replace(to_replace = '-999999', value = np.nan, inplace = True)\n\n # Convert to numeric\n df['value'] = pd.to_numeric(df['value'])\n \n # Remove any duplicate data in the dataset\n df = df[~df.index.duplicated(keep='last')] # Remove duplicates from the dataset\n df = df[~df.index.isnull()]\n\n # Rename the columns\n df.columns = ['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag', 'USGS | ' + stationDict['DatasetExternalID'] + ' | Streamflow | CFS']\n del df['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag']\n\n # Return the data frame\n return df", "def event_info_data(event, station):\n origin = event.preferred_origin() or event.origins[0]\n latter = origin.latitude\n lonter = origin.longitude\n startev = origin.time\n depth = origin.depth * 0.001\n\n # set station and channel information\n if station == 'FUR':\n net_s = 'GR'\n sta_s = 'FUR'\n loc_s = ''\n chan2 = 'BHE'\n chan3 = 'BHN'\n chan4 = 'BHZ'\n\n # broadband station signal\n acE = download_data(startev, net_s, sta_s, loc_s, chan2)\n acN = download_data(startev, net_s, sta_s, loc_s, chan3)\n acZ = download_data(startev, net_s, sta_s, loc_s, chan4)\n ac = Stream(traces=[acE[0], acN[0], acZ[0]])\n\n for ca in [ac[0], ac[1], ac[2]]:\n ca.stats.coordinates = AttribDict()\n ca.stats.coordinates['longitude'] = 11.275\n ca.stats.coordinates['latitude'] = 48.163\n ca.stats['starttime'] = startev - 180\n ca.stats['sampling_rate'] = 20.\n\n # theoretical event backazimuth and distance\n baz = gps2dist_azimuth(latter, lonter, ac[0].stats.coordinates.latitude,\n ac[0].stats.coordinates.longitude)\n # great circle distance\n gcdist = locations2degrees(latter, lonter,\n ac[0].stats.coordinates.latitude,\n ac[0].stats.coordinates.longitude)\n\n return latter, lonter, depth, startev, ac, baz, gcdist, \\\n net_s, chan2, chan3, chan4, sta_s, loc_s", "def all_stations(self, provider: ID) -> List[StationInfo]:\n srv_key = self.__stations_key(provider=provider)\n value = self.get(name=srv_key)\n if value is None:\n return []\n js = utf8_decode(data=value)\n array = json_decode(string=js)\n return StationInfo.convert(array=array)", "def fetch_stations(parameter: str, resolution: str, period: str):\n log.info(\n f\"Requesting stations for \"\n f\"parameter={parameter}, \"\n f\"resolution={resolution}, \"\n f\"period={period}\"\n )\n try:\n stations = DwdObservationRequest(\n parameter=DwdObservationDataset(parameter),\n resolution=DwdObservationResolution(resolution),\n period=DwdObservationPeriod(period),\n ).all()\n except (requests.exceptions.ConnectionError, InvalidParameterCombination) as ex:\n log.warning(ex)\n # raise PreventUpdate\n log.error(\"Unable to connect to data source\")\n return empty_frame\n\n df = stations.df\n\n log.info(f\"Propagating stations data frame with {frame_summary(df)}\")\n\n return df.to_json(date_format=\"iso\", orient=\"split\")", "def stations_call():\n # Query all stations\n stations_call = session.query(Station.station).all()\n all_stations = list(np.ravel(stations_call))\n \n return 
jsonify(all_stations)", "def mock_get_all_stations(__):\n return all_station_codes", "def get_streaming_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #gathering information about where the show's available online\n all_streaming_sources = guidebox_streaming_sources_info(guidebox_id)\n\n return jsonify(all_streaming_sources)", "def list_stations(intent, session):\n stations = location.get_stations(config.bikes_api)\n street_name = intent['slots']['street_name']['value']\n possible = location.matching_station_list(stations,\n street_name,\n exact=True)\n street_name = street_name.capitalize()\n\n if len(possible) == 0:\n return reply.build(\"I didn't find any stations on %s.\" % street_name,\n is_end=True)\n elif len(possible) == 1:\n sta_name = location.text_to_speech(possible[0]['name'])\n return reply.build(\"There's only one: the %s \"\n \"station.\" % sta_name,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=(\"One station on %s: %s\" %\n (street_name, possible[0]['name'])),\n is_end=True)\n else:\n last_name = location.text_to_speech(possible[-1]['name'])\n speech = \"There are %d stations on %s: \" % (len(possible),\n street_name)\n speech += (', '.join([location.text_to_speech(p['name'])\n for p in possible[:-1]]) +\n ', and %s' % last_name)\n card_text = (\"The following %d stations are on %s:\\n%s\" %\n (len(possible), street_name,\n '\\n'.join(p['name'] for p in possible)))\n return reply.build(speech,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=card_text,\n is_end=True)", "async def get_station_groups(response: Response, _=Depends(authentication_required)):\n logger.info('/stations/groups')\n groups = await wfwx_api.get_station_groups()\n response.headers[\"Cache-Control\"] = no_cache\n return WeatherStationGroupsResponse(groups=groups)", "def xmlStationGetPlatforms(stationObj, stationElement, whenCreated):\n for platformElement in \\\n stationElement.iter(\"{http://trackernet.lul.co.uk}P\"):\n name = platformElement.get(\"N\")\n number = platformElement.get(\"Num\")\n trackCode = platformElement.get(\"TrackCode\")\n platformObj = stationObj.addPlatform(number, name, trackCode)\n xmlPlatformGetTrains(platformObj, platformElement, whenCreated)", "def stations():\n # Create a link to the session\n session = Session(engine)\n \n # Query all station records\n results = session.query(Stations.station, Stations.name).all()\n \n session.close()\n\n # Create a dictionary from the query results\n all_stations = []\n for station, name in results:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n all_stations.append(station_dict)\n \n return jsonify(all_stations)", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n tweet = None\n includes = {}\n errors = []\n matching_rules = []\n\n if \"data\" in data:\n tweet = Tweet(data[\"data\"])\n self.on_tweet(tweet)\n if \"includes\" in data:\n includes = self._process_includes(data[\"includes\"])\n self.on_includes(includes)\n if \"errors\" in data:\n errors = data[\"errors\"]\n self.on_errors(errors)\n if \"matching_rules\" in data:\n matching_rules = [\n StreamRule(id=rule[\"id\"], tag=rule[\"tag\"])\n for rule in data[\"matching_rules\"]\n ]\n self.on_matching_rules(matching_rules)\n\n self.on_response(\n StreamResponse(tweet, includes, errors, matching_rules)\n )", "def get_all_streaming_info():\n\n #get user email from 
session\n email = session.get(\"current_user\")\n\n if email:\n\n #get user_id to get access to favorites table and users table\n user = User.get_user_with_email(email)\n\n #use the backref relationship to find the titles of the user's favorite shows and save in a list\n guidebox_info = {}\n for favorite in user.favorites:\n guidebox_info[str(favorite.show.guidebox_id)] = str(favorite.show.title)\n\n streaming_info = []\n\n for guidebox_id in guidebox_info:\n show = {}\n streaming_sources = guidebox_streaming_sources_info(guidebox_id)\n #add show title to dictionary, add airings object to dictionary\n show[\"id\"] = guidebox_id\n show[\"title\"] = guidebox_info[guidebox_id]\n if streaming_sources:\n show[\"streaming\"] = streaming_sources\n else:\n show[\"streaming\"] = [\"empty\"]\n #add dictionary to the listings list\n streaming_info.append(show)\n\n streaming = jsonify(streaming_info)\n\n return streaming\n\n else:\n flash(\"Please login first!\")\n return redirect('/login')", "def sentientPlanets():\n\n url = \"https://swapi-api.hbtn.io/api/species/\"\n planets = []\n while url is not None:\n r = requests.get(url)\n results = r.json()[\"results\"]\n for specie in results:\n if (specie[\"designation\"] == \"sentient\" or\n specie[\"classification\"] == \"sentient\"):\n\n planet_url = specie[\"homeworld\"]\n if planet_url is not None:\n p = requests.get(planet_url).json()\n planets.append(p[\"name\"])\n url = r.json()[\"next\"]\n return planets", "def finish_processing(self):\n logging.debug('Processing network requests from moz log')\n # Pass the HTTP/2 stream information to the requests\n for stream_key in self.http['streams']:\n stream = self.http['streams'][stream_key]\n if 'request_id' in stream and stream['request_id'] in self.http['requests']:\n request = self.http['requests'][stream['request_id']]\n if 'stream_id' in stream:\n request['http2_stream_id'] = stream['stream_id']\n if 'parent_stream_id' in stream:\n request['http2_stream_dependency'] = stream['parent_stream_id']\n if 'weight' in stream:\n request['http2_stream_weight'] = stream['weight']\n requests = []\n # Pull out the network requests and sort them\n for request_id in self.http['requests']:\n request = self.http['requests'][request_id]\n if 'url' in request and request['url'][0:22] != 'http://127.0.0.1:8888/'\\\n and 'start' in request:\n request['id'] = request_id\n requests.append(dict(request))\n if len(requests):\n requests.sort(key=lambda x: x['start'] if 'start' in x else 0)\n # Attach the DNS lookups to the first request on each domain\n for domain in self.dns:\n if 'claimed' not in self.dns[domain]:\n for request in requests:\n host = urlsplit(request['url']).hostname\n if host == domain:\n self.dns[domain]['claimed'] = True\n if 'start' in self.dns[domain]:\n request['dns_start'] = self.dns[domain]['start']\n if 'end' in self.dns[domain]:\n request['dns_end'] = self.dns[domain]['end']\n break\n # Attach the socket connect events to the first request on each connection\n for request in requests:\n if 'connection' in request and request['connection'] in self.http['connections']:\n connection = self.http['connections'][request['connection']]\n if 'socket' in connection and connection['socket'] in self.http['sockets']:\n socket = self.http['sockets'][connection['socket']]\n if 'claimed' not in socket:\n socket['claimed'] = True\n if 'start' in socket:\n request['connect_start'] = socket['start']\n if 'end' in socket:\n request['connect_end'] = socket['end']\n if 'ssl_start' in connection and 'ssl_end' in 
connection:\n request['ssl_start'] = connection['ssl_start']\n request['ssl_end'] = connection['ssl_end']\n return requests", "def get_stations(base_url, hts, mtype):\n stns1 = ws.site_list(base_url, hts, location='LatLong') # There's a problem with Hilltop that requires running the site list without a measurement first...\n stns1 = ws.site_list(base_url, hts, location='LatLong', measurement=mtype)\n stns2 = stns1[(stns1.lat > -47.5) & (stns1.lat < -34) & (stns1.lon > 166) & (stns1.lon < 179)].dropna().copy()\n stns2.rename(columns={'SiteName': 'ref'}, inplace=True)\n\n return stns2", "def process_response_data(self, response):\n response = response.replace('false', \"'false'\")\n response = response.replace('true', \"'true'\")\n response = eval(response)\n locations = response[\"locationSearchResponse\"][\"locations\"]\n\n atms = []\n branches = []\n\n for loc in locations:\n loc_type = loc[\"apiStructType\"]\n\n if loc_type==\"atm\":\n atm_dict = loc[\"atm\"]\n atm = self.get_item_details(atm_dict, self.atm_headers)\n self.ATMS[atm[0]] = atm\n\n elif loc_type==\"brc\":\n branch_dict = loc[\"brc\"]\n brc = self.get_item_details(branch_dict, self.branch_headers)\n self.BRANCHES[brc[0]] = brc", "def stations():\n\n # Open sessions\n session = Session(bind=engine)\n\n # Query DB for StationID and Station Name\n results=session.query(Station.station,Station.name).all()\n\n # Initiating an empty dictionary\n stations={}\n\n # Going over the results and storing them in stations dict reated previously\n for id,name in results:\n station={id:name}\n stations.update(station)\n\n # Main API dict that holds an info key and a stations key with the stations ids and names\n stationsAPI={'info':'Available stations responsible for the observations',\n 'stations':stations\n }\n \n # Returing the main dictionary in a JSON format API response \n return jsonify(stationsAPI)", "def get_traces(self, traces, **kwargs):\n self.resource.clear()\n sweep = kwargs.get(\"sweep\", False)\n\n name_prefix = kwargs.get(\"name_prefix\", \"\")\n if name_prefix:\n name_prefix += \" - \"\n\n channels = OrderedDict()\n for trace in traces:\n ch = trace[\"channel\"]\n if ch not in channels.keys():\n channels[ch] = {\n \"frequency\": None,\n \"traces\": list()}\n channels[ch][\"traces\"].append(trace)\n\n if sweep is True:\n self.sweep(channels=list(channels.keys()))\n\n traces = []\n for ch, ch_data in channels.items():\n frequency = ch_data[\"frequency\"] = self.get_frequency()\n for trace in ch_data[\"traces\"]:\n self.scpi.set_selected_meas_by_number(trace[\"channel\"], trace[\"measurement number\"])\n sdata = self.scpi.query_data(trace[\"channel\"], \"SDATA\")\n s = sdata[::2] + 1j * sdata[1::2]\n ntwk = skrf.Network()\n ntwk.s = s\n ntwk.frequency = frequency\n ntwk.name = name_prefix + trace.get(\"parameter\", \"trace\")\n traces.append(ntwk)\n return traces", "def getStations(self) :\n return self._stations", "async def run_collector(url: str, session: ClientSession):\n try:\n response = await get_records_from_api(url, session)\n event_data = json.dumps(response[0], ensure_ascii=False)\n log.info(f'Record to stream: {event_data}')\n return event_data\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def _build_stations(self, stop_list):\n # stations = [] TODO: What is this for\n dists = self._euclidian_distances(stop_list)\n stations = self._calculate_y_lines(dists)\n return stations", "def run(self):\n for transis_response in self.transis_consumer.get_detector_counts():\n 
self.di_framework_client.start_job()\n response = self.push_transis_response_to_kinesis(transis_response, self.di_framework_client)\n log.info(response)\n self.di_framework_client.log_job_status(json.dumps(response))\n self.di_framework_client.end_job()", "def parse(self, response):\n json_response = loads(response.text)\n token = json_response[\"Token\"]\n # api_server = json_response[\"ApiServer\"]\n api_server = \"https://awsapieast1-prod2.schoolwires.com/REST/\"\n api_gateway = api_server + \"api/v4/\"\n api_function = \"CalendarEvents/GetEvents/1?\"\n start_date = \"2019-02-01\"\n today = datetime.today()\n\n e = today.replace(\n year=today.year + 10,\n month=1,\n day=1,\n hour=0,\n minute=0,\n second=1,\n microsecond=1,\n )\n # the end date will be ten years from the date that the script runs\n end_date = str(e.year) + \"-\" + str(e.month).zfill(2) + \"-\" + str(e.day).zfill(2)\n dates = \"StartDate={}&EndDate={}\".format(start_date, end_date)\n modules = \"&ModuleInstanceFilter=\"\n\n # this line is to filter just school board meetings.\n category_filters = (\n \"0-49-40-21-16-4-3-44-39-1-57-43-64-65-58-62-28-25-\"\n \"52-50-55-38-59-17-13-51-56-8-63-53-37-54-7-47-46-33-60-10-19-66-61-48-34-45-41-42-\"\n )\n\n category = \"&CategoryFilter={}\".format(category_filters)\n dbstream = \"&IsDBStreamAndShowAll=true\"\n url = api_gateway + api_function + dates + modules + category + dbstream\n headers = {\"Authorization\": \"Bearer \" + token, \"Accept\": \"application/json\"}\n req = Request(url, headers=headers, callback=self._parse_api)\n\n yield req", "def load_raw_data(self):\n if self.trendfile:\n self.raw_data = self.get_ap_file()\n else:\n report_params = self.format_api_request_params()\n report_params['test'] = self.testresults\n\n self.raw_data = self.get_ap_report(params=report_params)", "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dict\n result_dict = dict()\n result_dict['series'] = deepcopy(s.__dict__)\n result_dict['series']['genres'] = \\\n result_dict['series']['genres'].split(';')\n result_dict['series']['type'] = 'series'\n result_dict['episodes'] = ep_dicts\n\n # Store podcast\n self.storer.store(result_dict)\n\n # Move onto the next one\n self.i += 20\n print(\"Retrieved \" + str(s.id))", "def xmlPlatformGetTrains(platformObj, platformElement, whenCreated):\n for trainElement in platformElement.iter(\"{http://trackernet.lul.co.uk}T\"):\n lcid = trainElement.get(\"LCID\")\n setNo = trainElement.get(\"SetNo\")\n tripNo = trainElement.get(\"TripNo\")\n secondsTo = int(trainElement.get(\"SecondsTo\"))\n location = trainElement.get(\"Location\")\n destination = trainElement.get(\"Destination\")\n destCode = trainElement.get(\"DestCode\")\n trackCode = trainElement.get(\"TrackCode\")\n ln = trainElement.get(\"LN\")\n platformObj.addTrain(lcid, setNo, tripNo, secondsTo, location,\n destination, destCode, trackCode, ln, whenCreated)", "def server_streaming(self) -> global___Snippet.ServerStreaming:", "def setup_response_collector(self):\n pass", "def _stream_direct(config: Configuration, stream_uri: str, log: logging.Logger):\n # Ordered dict of URI->dict to keep track of what segments we have served\n # and which we haven't. 
We do it this way because we load an updated m3u8\n # every time we have served all known segments, but since timing isn't\n # synced we could (and want to) encountered segments that we have already\n # served.\n segments = OrderedDict()\n start_time = datetime.utcnow()\n total_secs_served = 0\n while True:\n try:\n added = 0\n removed = 0\n # Update current segments\n playlist = m3u8.load(stream_uri)\n\n # Only add new segments to our segments OrderedDict\n for m3u8_segment in playlist.segments:\n uri = m3u8_segment.absolute_uri\n if uri not in segments:\n segments[uri] = {\n \"played\": False,\n \"duration\": m3u8_segment.duration\n }\n log.debug(f\"Added {uri} to play queue\")\n added += 1\n\n # Update when we have last seen this segment. Used for cleanup\n segments[uri][\"last_seen\"] = datetime.utcnow()\n\n # Clean up list, so we're not iterating a massive list in the future\n # We transform our OrderedDict into a list, since we can't mutate\n # the dict when iterating over it.\n for uri, data in list(segments.items()):\n # Remove the segment if it has been played and hasn't been updated\n # in the last 10 seconds (i.e. it wasn't in the last updates).\n # We have to make sure the segment isn't in the m3u8 file anymore,\n # because otherwise it will be seen as a new segment.\n if data[\"played\"] and (datetime.utcnow() - data[\"last_seen\"]).total_seconds() > 10:\n log.debug(f\"Removed {uri} from play queue\")\n del segments[uri]\n removed += 1\n\n log.info(f\"Added {added} new segments, removed {removed}\")\n\n for uri, data in segments.items():\n if not data[\"played\"]:\n # Download the chunk\n start_download = datetime.utcnow()\n chunk = LocastService.get(uri).content\n end_download = datetime.utcnow()\n if config.verbose >= 1:\n download_secs = (\n end_download-start_download).total_seconds()\n log.info(\n f\"Downloaded {uri}, time spent: {download_secs:.2f}\")\n\n # Mark this chunk as played\n # segments[uri][\"played\"] = True\n data['played'] = True\n\n # Chunk might have expired, move on to the next one\n if not chunk:\n log.warn(f\"Segment {uri} not available. Skipping..\")\n continue\n\n # Since yielding a chunk happens pretty much instantly and is not\n # related to the speed the connecting client consumes the stream,\n # we preferrably wait here. If we don't wait, we will be requesting\n # the m3u8 file from locast at a high (and unnecessary) rate after\n # we're done serving the first 10 chunks.\n #\n # The duration of a chunk is caputured in the m3u8 data, but since\n # we're downloading the clip to serve it to the client as well,\n # we need some time, rather than waiting the full `duration` before\n # serving the next clip. However, if we would wait a fixed number of\n # seconds (say 8 for a 10 second clip), we would drain the queue of\n # clips, since the 2 second difference will compound over time.\n # E.g. in case there are 10 clips of 10 seconds served and we would\n # run 2 seconds ahead with every serving, we'd run out of clips\n # after 50 iterations (10*10/2).\n #\n # In order to counter this effect, we will try to stay ahead of\n # locast by a fixed amount of seconds. In order to do this we use\n # the following algorithm:\n # - We calculate the amount of seconds served to our client\n # (total_secs_served). This is the sum of all the durations taken\n # from the m3u8 playlist of previously served chunks.\n # - We calculate the time that has passed since we started to serve\n # the stream (runtime). 
Since yielding a chunk doesn't take as long\n # as the actual playback time, runtime will be less than\n # total_secs_played.\n # - We calculate the target difference between runtime and\n # total_secs_served, which is 50% of the duration of the chunk we're\n # about to serve. In case of a 10 sec chunk, this will be 5 seconds.\n # - Then we calculate the actual wait time, which is the\n # total_secs_served - target difference - runtime.\n #\n # Example:\n # - 10 second chunks\n # - Total seconds served (before serving the current chunk): 220 sec\n # - Total runtime since beginning of this stream: 204\n # - Target: 5 seconds ahead of playback in order to account for\n # downloading and processing of the next chunk\n # - Wait time: 220 - 5 - 204 = 11 sec\n\n duration = data['duration']\n runtime = (datetime.utcnow() - start_time).total_seconds()\n target_diff = 0.5 * duration\n\n if total_secs_served > 0:\n wait = total_secs_served - target_diff - runtime\n else:\n wait = 0\n\n log.info(f\"Serving {uri} ({duration}s) in, {wait:.2f}s\")\n\n # We can't wait negative time..\n if wait > 0:\n sleep(wait)\n yield chunk\n total_secs_served += duration\n except:\n break", "def _connect(self):\n if self.should_connect:\n if self.last_id:\n self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id\n self.resp = self.session.get(self.url, stream=True, **self.requests_kwargs)\n self.resp_iterator = self.resp.iter_content(decode_unicode=True)\n self.resp.raise_for_status()\n else:\n raise StopIteration()", "def on_iteration(self):\n for stream_id in list(self.send_streams.keys()):\n self.send_headers(stream_id)\n self.send_data(stream_id)", "def serialize(\n self,\n transactions: Sequence[SnubaTransaction],\n errors: Sequence[SnubaError],\n roots: Sequence[SnubaTransaction],\n warning_extra: Dict[str, str],\n event_id: Optional[str],\n detailed: bool = False,\n ) -> Sequence[LightResponse]:\n if event_id is None:\n raise ParseError(detail=\"An event_id is required for the light trace\")\n snuba_event, nodestore_event = self.get_current_transaction(transactions, errors, event_id)\n parent_map = self.construct_parent_map(transactions)\n error_map = self.construct_error_map(errors)\n trace_results: List[TraceEvent] = []\n current_generation: Optional[int] = None\n root_id: Optional[str] = None\n\n with sentry_sdk.start_span(op=\"building.trace\", description=\"light trace\"):\n # Going to nodestore is more expensive than looping twice so check if we're on the root first\n for root in roots:\n if root[\"id\"] == snuba_event[\"id\"]:\n current_generation = 0\n break\n\n if current_generation is None:\n for root in roots:\n # We might not be necessarily connected to the root if we're on an orphan event\n if root[\"id\"] != snuba_event[\"id\"]:\n # Get the root event and see if the current event's span is in the root event\n root_event = eventstore.get_event_by_id(root[\"project.id\"], root[\"id\"])\n root_spans: NodeSpans = root_event.data.get(\"spans\", [])\n root_span = find_event(\n root_spans,\n lambda item: item is not None\n and item[\"span_id\"] == snuba_event[\"trace.parent_span\"],\n )\n\n # We only know to add the root if its the direct parent\n if root_span is not None:\n # For the light response, the parent will be unknown unless it is a direct descendent of the root\n root_id = root[\"id\"]\n trace_results.append(\n TraceEvent(\n root,\n None,\n 0,\n )\n )\n current_generation = 1\n break\n\n current_event = TraceEvent(snuba_event, root_id, current_generation)\n 
trace_results.append(current_event)\n\n spans: NodeSpans = nodestore_event.data.get(\"spans\", [])\n # Need to include the transaction as a span as well\n #\n # Important that we left pad the span id with 0s because\n # the span id is stored as an UInt64 and converted into\n # a hex string when quering. However, the conversion does\n # not ensure that the final span id is 16 chars long since\n # it's a naive base 10 to base 16 conversion.\n spans.append({\"span_id\": snuba_event[\"trace.span\"].rjust(16, \"0\")})\n\n for span in spans:\n if span[\"span_id\"] in error_map:\n current_event.errors.extend(\n [self.serialize_error(error) for error in error_map.pop(span[\"span_id\"])]\n )\n if span[\"span_id\"] in parent_map:\n child_events = parent_map.pop(span[\"span_id\"])\n trace_results.extend(\n [\n TraceEvent(\n child_event,\n snuba_event[\"id\"],\n (\n current_event.generation + 1\n if current_event.generation is not None\n else None\n ),\n )\n for child_event in child_events\n ]\n )\n\n return [result.to_dict() for result in trace_results]", "def inner():\n for line in file_obj:\n logdata = tilak_haproxylog.parse_line(line)\n if logdata is not None:\n logdata[\"hits\"] = 1\n for value_key in value_keynames:\n if value_key not in logdata:\n logdata[value_key] = 0\n status_code = int(logdata[\"status_code\"])\n if 100 <= status_code <= 199:\n logdata[\"rsp_1xx\"] = 1\n elif 200 <= status_code <= 299:\n logdata[\"rsp_2xx\"] = 1\n elif 300 <= status_code <= 399:\n logdata[\"rsp_3xx\"] = 1\n elif 400 <= status_code <= 499:\n logdata[\"rsp_4xx\"] = 1\n elif 500 <= status_code <= 599:\n logdata[\"rsp_5xx\"] = 1\n else:\n logdata[\"rsp_other\"] = 1\n ret_data = dict(zip(index_keynames, (logdata[index_key] for index_key in index_keynames)))\n ret_data.update(dict(zip(value_keynames, (logdata[value_key] for value_key in value_keynames))))\n yield (logdata[\"ts\"], ret_data)", "def swarm_track(self, *args, **kwargs):\n if self.st_type == \"track\" and SHOULD_WAIT:\n if len(self.completion_messages) < len(self.neighbors):\n return\n\n # TODO: make all drones know the st_type. 
Now only master knows\n if self.st_type == \"simple-track\":\n positions = []\n # Using separate pos_dict because find_center accepts a dictionary as argument\n # Todo: make find_center applicable to lists\n pos_dict = dict()\n dBms = []\n\n for ip, data in self.all_drones_data.items():\n pos_dict[ip] = Coordinate(data['lat'], data['lon'])\n positions.append(pos_dict[ip])\n dBms.append(data['dBm'])\n\n max_dBm_index = dBms.index(max(dBms))\n center_coord = self.find_center(pos_dict)\n pos_with_max_dBm = positions[max_dBm_index]\n max_dBm_bearing = center_coord.bearing_toward(pos_with_max_dBm)\n\n move_distance = 3 # meters\n combo_hotspot = center_coord.offset_bearing(max_dBm_bearing, move_distance)\n else:\n alphas = []\n epsilons = []\n shared_data = []\n for drone_ip, drone_port in self.neighbors:\n self.log.debug('Requesting data from drone {}'.format(drone_ip))\n local_alpha, local_epsilon, samples = send(\n drone_ip=drone_ip,\n mission_id=self.mission_id,\n endpoint='/share',\n skyserve_port=drone_port,\n ).json().get('data', {})\n if local_alpha == 0.0 and local_epsilon == 0.0:\n return\n self.log.debug('Received data')\n alphas.append(local_alpha)\n epsilons.append(local_epsilon)\n samples = [[sample['lat'],\n sample['lon'],\n sample['alt'],\n sample['dBm']] for sample in json.loads(samples)]\n\n shared_data.append(samples)\n\n drone_count = len(self.neighbors)\n\n prediction = predict(dronenum=drone_count,\n maxRun=1,\n numIterations=GDParameters.NUM_ITERATIONS,\n numEpoch=GDParameters.NUM_EPOCH,\n threshold=GDParameters.THRESHOLD,\n learning_rate=GDParameters.LEARNING_RATE,\n numberBatch=1,\n data_length=SAMPLES_SWARM*drone_count)\n\n if 1 < drone_count <= 3:\n try:\n start = time.time()\n hotspot = prediction.swarm(drone_data=shared_data,\n alphas=alphas,\n epsilons=epsilons)\n end = time.time()\n self.swarmtime = end - start\n self.log.debug('Drone is using data from {a} drones'.format(a=drone_count))\n except IndexError:\n self.log.warn('Hotspot localization failed. 
Data not good enough.')\n return\n else:\n self.log.warn('Drone Number Incorrect')\n return\n\n combo_hotspot = Coordinate(hotspot[0], hotspot[1])\n\n self.log.debug('=========================================================================')\n self.log.debug('Calculated new hotspot at location: {}'.format(combo_hotspot))\n # TODO: allow this to run in all mission types\n # This would require implementing simulated cheater in all types, not just track\n # if IS_SIMULATION and (self.st_type == \"track\" or self.st_type == \"simple-track\"):\n if IS_SIMULATION:\n error = combo_hotspot.distance_to(self.current_simulated_hotspot)\n self.log.debug('Simulated error: {err}, Simulated hotspot has moved {dist} meters to: {loc}'.format(\n err=error,\n dist=self.hotspot_meters_moved,\n loc=self.current_simulated_hotspot\n ))\n self.log.debug('=========================================================================')\n\n if not self.region.contains(combo_hotspot) and not IS_SIMULATION:\n self.log.debug('New hotspot is out of region')\n return\n\n if self.st_type == \"track\" or self.st_type == \"simple-track\":\n self.completion_messages = set()\n\n if self.st_type != \"hover\" and self.st_type != \"spin\":\n for drone_idx, (drone_ip, drone_port) in enumerate(self.neighbors):\n self.log.debug('Sending drone at IP {drone_ip} to new hotspot location.'.format(\n drone_ip=drone_ip,\n ))\n\n send(\n drone_ip=drone_ip,\n mission_id=self.mission_id,\n endpoint='/swarm',\n data={\n 'lat': combo_hotspot.lat,\n 'lon': combo_hotspot.lon,\n },\n skyserve_port=drone_port,\n async=True,\n )", "def get_station_details(station_id):\n url = \"https://www.kvb.koeln/haltestellen/overview/%d/\" % station_id\n r = requests.get(url, headers=HEADERS)\n soup = BeautifulSoup(r.text)\n details = {\n \"station_id\": station_id,\n \"name\": stations[station_id],\n \"line_ids\": set()\n }\n div = soup.find(\"ul\", class_=\"info-list\")\n for a in div.find_all(\"a\"):\n href = a.get(\"href\")\n if href is None:\n continue\n result = parse(\n URL_TEMPLATES[\"line_details\"],\n href)\n if result is None:\n continue\n details[\"line_ids\"].add(result[\"line_id\"])\n details[\"line_ids\"] = sorted(list(details[\"line_ids\"]))\n return details", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data_for_day(i,t0):\n t0 = UTCDateTime(t0)\n\n # open clients\n client = FDSNClient(\"GEONET\")\n client_nrt = FDSNClient('https://service-nrt.geonet.org.nz')\n \n daysec = 24*3600\n data_streams = [[2, 5], [4.5, 8], [8,16]]\n names = ['rsam','mf','hf']\n\n # download data\n datas = []\n try:\n site = client.get_stations(starttime=t0+i*daysec, endtime=t0 + (i+1)*daysec, station='WIZ', level=\"response\", channel=\"HHZ\")\n except FDSNNoDataException:\n pass\n\n try:\n WIZ = client.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n \n # if less than 1 day of data, try different client\n if len(WIZ.traces[0].data) < 600*100:\n raise FDSNNoDataException('')\n except ObsPyMSEEDFilesizeTooSmallError:\n return\n except FDSNNoDataException:\n try:\n WIZ = client_nrt.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n except FDSNNoDataException:\n return\n\n # process frequency bands\n WIZ.remove_sensitivity(inventory=site)\n data = WIZ.traces[0].data\n ti = WIZ.traces[0].meta['starttime']\n # round start time to nearest 10 min increment\n tiday = UTCDateTime(\"{:d}-{:02d}-{:02d} 00:00:00\".format(ti.year, ti.month, ti.day))\n ti = 
tiday+int(np.round((ti-tiday)/600))*600\n N = 600*100 # 10 minute windows in seconds\n Nm = int(N*np.floor(len(data)/N))\n for data_stream, name in zip(data_streams, names):\n filtered_data = bandpass(data, data_stream[0], data_stream[1], 100)\n filtered_data = abs(filtered_data[:Nm])\n datas.append(filtered_data.reshape(-1,N).mean(axis=-1)*1.e9)\n\n # compute dsar\n data = cumtrapz(data, dx=1./100, initial=0)\n data -= np.mean(data)\n j = names.index('mf')\n mfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n mfd = abs(mfd[:Nm])\n mfd = mfd.reshape(-1,N).mean(axis=-1)\n j = names.index('hf')\n hfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n hfd = abs(hfd[:Nm])\n hfd = hfd.reshape(-1,N).mean(axis=-1)\n dsar = mfd/hfd\n datas.append(dsar)\n names.append('dsar')\n\n # write out temporary file\n datas = np.array(datas)\n time = [(ti+j*600).datetime for j in range(datas.shape[1])]\n df = pd.DataFrame(zip(*datas), columns=names, index=pd.Series(time))\n df.to_csv('_tmp/_tmp_fl_{:05d}.dat'.format(i), index=True, index_label='time')", "def get_waveforms_for_time(lat, lng, starttime, endtime, channel, maxradius, location='*', station=None) -> Stream:\n\n # For whatever reason GeoNet returns version '1' not version '1.0' and obspy\n # throws a fit when it sees this. Just ignore it\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n inv: Inventory = default_client.get_stations(latitude=lat, longitude=lng, maxradius=maxradius,\n channel=channel, level='channel', starttime=starttime, endtime=endtime, station=station)\n\n # We want to be nice to GeoNet. If we're requesting >30 stations it's likely\n # something might have gone wrong (e.g. maxradius was too big) and we'd end\n # up requesting a ton of data, which is no fun for anyone.\n station_count = sum([len(nw) for nw in inv])\n if station_count > MAX_STATIONS and not station_limit_disabled:\n raise RuntimeError(\n f'Failing because station count exceeded maximum of {MAX_STATIONS} (was {station_count}). Disable station count limit to ignore this')\n\n st = Stream()\n\n station_info = ''\n for nw in inv:\n for station in nw:\n try:\n waveforms = try_get_waveforms(nw.code, station.code,\n location, channel, starttime, endtime)\n\n # Required for section plot. Has no ill effect on other plots so\n # we just calculate it for everything. 
If we don't have a lat\n # and lng it doesn't make sense to calculate a distance so we\n # just set it to 0.\n #\n # TODO: replace this with disallowing section plot for stn selection?\n if lat is not None and lng is not None:\n for trace in waveforms:\n d = lat_lng_dist((lat, lng), (station.latitude, station.longitude))\n trace.stats['distance'] = d\n else:\n for trace in waveforms:\n trace.stats['distance'] = 0\n\n st += waveforms\n station_info += str(station) + '\\n'\n except Exception as e:\n # Do something here?\n print('Exception: ' + str(e))\n pass\n print(station_info)\n return st", "def _add_streams(network_desc: Element, results: Solution) -> Element:\n\n\tfor stream in results.streams:\n\t\tstream_element = SubElement(network_desc, \"stream\", {\n\t\t\t\"id\": str(stream.id),\n\t\t\t\"src\": stream.src.name,\n\t\t\t\"dest\": stream.dest.name,\n\t\t\t\"size\": str(stream.size),\n\t\t\t\"period\": str(stream.period),\n\t\t\t\"deadline\": str(stream.deadline),\n\t\t\t\"rl\": str(stream.rl),\n\t\t\t\"wctt\": str(stream.WCTT),\n\t\t})\n\n\t\tfor instance in stream.instances:\n\t\t\tinstance_element = SubElement(stream_element, \"instance\", {\"local_deadline\": str(instance.local_deadline)})\n\n\t\t\tfor framelet in instance.framelets:\n\t\t\t\tSubElement(instance_element, \"framelet\", {\"id\": str(framelet.id), \"size\": str(framelet.size)})\n\n\treturn network_desc", "def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations", "def get_info(self, response):\n try:\n if re.search('artist/\\d+', response.url) or \\\n re.search('i\\.xiami\\.com/[^/]+$', response.url):\n self.get_artist(response)\n elif re.search('album/\\d+', response.url):\n self.get_albums(response)\n elif re.search('song/\\d+', response.url):\n self.get_songs(response)\n elif 'count/getplaycount' in response.url:\n self.get_count(response)\n else:\n self.get_pages(response)\n except (AttributeError, TypeError):\n return\n request = self.gen_info(response)\n if not request:\n self.save(response.meta['source_id'],\n response.meta['raw_info'],\n response.meta['result'])\n else:\n yield request", "def get_trace(captured_file, stream_num, rids):\n follow_stream = \"tshark -q -r {} -z follow,tcp,raw,{}\".format(captured_file, stream_num)\n stream_content = run_command(follow_stream, True)\n stream_all_res, stream_all_req = get_req_res(stream_content, rids)\n\n return stream_all_res, stream_all_req", "def get_traceroute_output(self):\n url = self.source['url']\n if 'post_data' in self.source:\n context = self.source['post_data']\n else:\n context = None\n status_code, content = self.urlopen(url, context=context)\n content = content.strip()\n regex = r'<pre.*?>(?P<traceroute>.*?)</pre>'\n pattern = re.compile(regex, re.DOTALL | re.IGNORECASE)\n try:\n traceroute = re.findall(pattern, content)[0].strip()\n except IndexError:\n # Manually append closing </pre> for partially downloaded page\n content = \"{0}</pre>\".format(content)\n traceroute = re.findall(pattern, content)[0].strip()\n return (status_code, traceroute)", "def dev_get_all_lineups(self, contest_id):\n\n settings_module_name = os.environ['DJANGO_SETTINGS_MODULE']\n # 'mysite.settings.local' should let this method work\n if 'local' not 
in settings_module_name:\n raise Exception(\n 'json from dev_get_all_lineups not allowed unless local settings being used')\n\n lineups = []\n\n for e in self.entries:\n\n lineup_id = e.lineup.pk\n player_ids = []\n\n # pack in each player in the lineup, in order of course\n lm = LineupManager(e.user)\n for pid in lm.get_player_ids(e.lineup):\n # player_ids.append( self.starter_map[ pid ] ) # masks out no-yet-started players\n player_ids.append(pid)\n\n lineups.append({\n 'lineup_id': lineup_id,\n 'player_ids': player_ids,\n })\n\n data = {\n 'endpoint': '/contest/all-lineups/%s?json' % int(contest_id),\n 'bytes_for_condensed_response': self.get_size_in_bytes(),\n 'total_lineups': self.contest.entries,\n 'players_per_lineup': self.players_per_lineup,\n 'lineups': lineups,\n }\n return data", "def fetch_propagation_data(observer_stats):\n columns = [\n Observer.TABLE.c.start_time,\n Observer.TABLE.c.duration,\n Observer.TABLE.c.type,\n Observer.TABLE.c.status,\n Observer.TABLE.c.nameserver,\n ]\n query = select(columns).where(\n and_(Observer.TABLE.c.start_time >= observer_stats.start,\n Observer.TABLE.c.start_time <= observer_stats.end)\n )\n result = get_engine().execute(query)\n\n data = {\n 'by_type': {},\n 'by_nameserver': {},\n }\n for row in result:\n start_time, duration, type, status, nameserver = row\n if type not in data['by_type']:\n data['by_type'][type] = {\n 'error': [],\n 'success': [],\n }\n if nameserver not in data['by_nameserver']:\n data['by_nameserver'][nameserver] = {\n 'error': [],\n 'success': [],\n }\n datapoint = (start_time, duration)\n if status == Observer.STATUSES.COMPLETE:\n data['by_type'][type]['success'].append(datapoint)\n data['by_nameserver'][nameserver]['success'].append(datapoint)\n else:\n data['by_type'][type]['error'].append(datapoint)\n data['by_nameserver'][nameserver]['error'].append(datapoint)\n return data", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def reload_infos(self):\n self.networks = {}\n networks = self.client.waveform.getNetworkIds()\n # Get stations.\n for key in networks:\n if not key:\n continue\n self.networks[key] = {}\n stations = self.client.waveform.getStationIds(network_id=key)\n for station in stations:\n if not station:\n continue\n self.networks[key][station] = {}\n # Get locations.\n locations = self.client.waveform.getLocationIds(network_id=key,\n station_id=station)\n for location in locations:\n channels = self.client.waveform.getChannelIds(\\\n network_id=key , station_id=station,\n location_id=location)\n self.networks[key][station][location] = [channels]\n # Add current date to Dictionary.\n self.networks['Date'] = UTCDateTime()\n # Also add the server to it.\n self.networks['Server'] = self.client.base_url\n # Open file.\n file = open(self.pickle_file, 'wb')\n pickle.dump(self.networks, file, protocol = 2)\n file.close()", "def all(self, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n return self._stations_lst", "def stations():\n\n station_results = session.query(Stations.station, Stations.name).all()\n\n station_data = []\n for row in station_results:\n station_dict = {}\n station_dict[\"station\"] = row.station\n station_dict[\"name\"] = row.name\n station_data.append(station_dict)\n\n return jsonify(station_data)", "def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = []\n routers = []\n\n all_networks = 
super(NeutronRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n mapped_network = self._get_mapped_network_with_subnets(net)\n net_fl_ips = self._get_network_with_floatingips(mapped_network)\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n mapped_port = self._map_state_and_status(port)\n mapped_port['attachment'] = {\n 'id': port.get('device_id'),\n 'mac': port.get('mac_address'),\n }\n ports.append(mapped_port)\n net_fl_ips['ports'] = ports\n\n networks.append(net_fl_ips)\n\n all_routers = super(NeutronRestProxyV2,\n self).get_routers(admin_context) or []\n for router in all_routers:\n interfaces = []\n mapped_router = self._map_state_and_status(router)\n router_filter = {\n 'device_owner': [\"network:router_interface\"],\n 'device_id': [router.get('id')]\n }\n router_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=router_filter) or []\n for port in router_ports:\n net_id = port.get('network_id')\n subnet_id = port['fixed_ips'][0]['subnet_id']\n intf_details = self._get_router_intf_details(admin_context,\n net_id,\n subnet_id)\n interfaces.append(intf_details)\n mapped_router['interfaces'] = interfaces\n\n routers.append(mapped_router)\n\n resource = '/topology'\n data = {\n 'networks': networks,\n 'routers': routers,\n }\n errstr = _(\"Unable to update remote topology: %s\")\n return self.servers.rest_action('PUT', resource, data, errstr)", "def process_events(cat_data, n_run, cfg, sta_locs):\n import time\n import os\n import shutil\n import sys\n import logging\n from obspy import read\n from obspy.geodetics.base import gps2dist_azimuth\n import matplotlib.pyplot as plt\n\n if cfg.output.FORCE_RECALC is True:\n w = open(\"refined_events.dat\", \"w\")\n w.close()\n if cfg.plotting.DO_PLOT_1 is True or cfg.plotting.DO_PLOT_2 is True:\n fig = plt.figure(figsize=(18, 10))\n else:\n fig = []\n # Prepare directory\n if (os.path.exists(\"runs/run{:}\".format(n_run))\n and os.path.isdir(\"runs/run{:}\".format(n_run))):\n shutil.rmtree(\"runs/run{:}\".format(n_run))\n copytree(\"NLLOC_run\", \"runs/run{:}\".format(n_run))\n os.chdir(\"runs/run{:}\".format(n_run))\n for n_ev, ev in enumerate(cat_data):\n start = time.time()\n ev_id = ev.event_descriptions[0].text\n sys.stdout.flush()\n ev_dict = {}\n ev_dict[\"stations\"] = {}\n orig_lat, orig_lon = [ev.origins[0].latitude, ev.origins[0].longitude]\n logging.debug(\"startint logging\")\n st = read(\"../../{:}/{:}/MSEED/*.msd\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id), format=\"MSEED\")\n print(n_run, ev_id)\n for n_tr, tr in enumerate(st):\n if st[n_tr].stats.sampling_rate > 40.0:\n try:\n st[n_tr].resample(40)\n except ZeroDivisionError:\n continue\n st1, st2, st_mag = [st.copy(), st.copy(), st.copy()]\n # Append distance to trace\n stations_data = sorted(set([tr.stats.station for tr in st\n if tr.stats.station not in\n cfg.sta_select.STA_BLACKLIST]))\n stations_dist = {sta_code: gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0] for sta_code in stations_data\n if gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0]/1000 <= cfg.sta_select.MAX_DIST}\n path_to_figs = \"../../{:}/{:}/figs\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id)\n if not os.path.exists(path_to_figs):\n os.mkdir(path_to_figs)\n print(\"Doing first refinement\")\n 
sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_1[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_1[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_1, rms, found = refine_events(\n st1, stations_dist, cfg.picking.CMPS_REFINE_1,\n cfg.picking.MAX_PICK_DIFF_REFINE1, ev,\n cfg.ploting.DO_PLOT_1, 1, fig, \"const\", path_to_figs, ev_dict,\n ev_id, cfg, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n sys.stdout.flush()\n prev_rms = rms\n print(\"Doing second refinement\")\n sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_2[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_2[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_2, rms, found = refine_events(\n st2, stations_dist, cfg.picking.CMPS_REFINE_2,\n cfg.picking.MAX_PICK_DIFF_REFINE2, evt_refine_1,\n cfg.plotting.DO_PLOT_2, 2, fig, \"dist\", path_to_figs, ev_dict,\n ev_id, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n if rms > prev_rms * 1.25:\n print(\"RMS is significantly increasing (*25%) - skipping event\")\n continue\n prev_rms = rms\n evt_refine_2 = compute_magnitude(evt_refine_2, st_mag, cfg)\n write_evt(evt_refine_2, ev_id)\n end = time.time()\n print(\"Time taken for event: {:3.1f} mins\".format((end-start)/60))", "def connect(self):\n # Streams can be queried by name, type (xdf file format spec), and\n # other metadata.\n\n # NOTE: According to the documentation this is a blocking call that can\n # only be performed on the main thread in Linux systems. So far testing\n # seems fine when done in a separate multiprocessing.Process.\n eeg_streams = pylsl.resolve_stream('type', 'EEG')\n marker_streams = pylsl.resolve_stream('type', 'Markers')\n\n assert eeg_streams, \"One or more EEG streams must be present\"\n assert marker_streams, \"One or more Marker streams must be present\"\n self._inlet = pylsl.StreamInlet(eeg_streams[0])\n\n self._marker_inlets = [pylsl.StreamInlet(inlet)\n for inlet in marker_streams]\n\n # initialize the current_markers for each marker stream.\n for inlet in self._marker_inlets:\n self.current_markers[inlet_name(inlet)] = Marker.empty()", "def stations():\n # Query \n results = session.query(Station.station).all()\n \n list = []\n for result in results:\n list.append(result)\n return jsonify(list)", "def get_map_traces(map_data, measurements):\n traces = dict()\n traces[\"stations\"] = [dict(\n # TRACE 0: radius selection marker\n name=\"Filter radius\",\n type=\"scattermapbox\",\n fill=\"toself\",\n showlegend=False,\n fillcolor=\"rgba(135, 206, 250, 0.3)\",\n marker=dict(\n color=\"rgba(135, 206, 250, 0.0)\",\n ),\n hoverinfo=\"skip\",\n lat=[],\n lon=[],\n mode=\"lines\"\n )]\n\n for measurement in measurements:\n measurement_map_data = map_data[map_data[\"_measurement\"] == measurement]\n\n # geodataseries as return (lat, lon,...) 
can cause issues, convert to dataframe:\n measurement_map_data = pd.DataFrame(measurement_map_data)\n\n trace = dict(\n # TRACE 1...N: Datapoints\n _measurement=measurement, # custom entry\n name=helpers.measurementtitles[measurement],\n type=\"scattermapbox\",\n lat=list(measurement_map_data[\"lat\"]),\n lon=list(measurement_map_data[\"lon\"]),\n mode='markers',\n marker=dict(\n size=20,\n color=list(measurement_map_data.apply(lambda x: helpers.trend2color(x[\"trend\"]), axis=1)),\n line=dict(width=2,\n color='DarkSlateGrey'),\n ),\n text=helpers.tooltiptext(measurement_map_data, mode=\"stations\"),\n hoverinfo=\"text\",\n customdata=list(measurement_map_data[\"c_id\"])\n )\n traces[\"stations\"].append(trace)\n\n # Prepare landkreis choropleth maps\n region = \"landkreis\"\n if map_data.empty:\n traces[region] = [go.Choroplethmapbox()]\n return traces\n choropleth_df_original = map_data.copy()\n choropleth_df_original = choropleth_df_original[choropleth_df_original[\"_measurement\"].isin(measurements)]\n\n choropleth_df = choropleth_df_original.copy()\n # if region == \"bundesland\":\n # choropleth_df[\"ags\"] = choropleth_df[\"ags\"].str[:-3]\n # geojson_filename = \"states.json\"\n geojson_filename = \"counties.json\"\n choropleth_df = choropleth_df.groupby([\"ags\", region]).agg([\"mean\", \"size\"]).reset_index()\n with open(f\"utils/geofeatures-ags-germany/{geojson_filename}\", \"r\") as f:\n geojson = json.load(f)\n # noinspection PyTypeChecker\n traces[region] = [go.Choroplethmapbox(\n geojson=geojson,\n locations=choropleth_df[\"ags\"],\n z=choropleth_df[\"trend\"][\"mean\"],\n showlegend=False,\n showscale=False,\n colorscale=[helpers.trend2color(x) for x in np.linspace(-1, 2, 10)],\n hoverinfo=\"text\",\n zmin=-1,\n zmax=2,\n text=helpers.tooltiptext(choropleth_df, mode=region),\n marker_line_color=\"white\",\n marker_opacity=1,\n marker_line_width=1)]\n return traces", "def genLoopPackets(self):\n\n for p in self.get_observations():\n ts = int(time.time() + 0.5)\n packet = pywws2weewx(p, ts,\n self._last_rain_loop, self._last_rain_ts_loop,\n self.max_rain_rate)\n self._last_rain_loop = packet['rainTotal']\n self._last_rain_ts_loop = ts\n if packet['status'] != self._last_status:\n log.info('station status %s (%s)' % \n (decode_status(packet['status']), packet['status']))\n self._last_status = packet['status']\n yield packet", "def stations_dict(self):\n return self.__stations_dict", "def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)", "def stations ():\n # Query all passengers\n Stns= session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).all()\n\n allStationns = list(np.ravel(Stns))\n\n return jsonify(allStations)", "async def read(self, sensors):\n\n try:\n timeout = aiohttp.ClientTimeout(total=5)\n async with aiohttp.ClientSession(timeout=timeout,\n raise_for_status=True) as session:\n current_url = self.url_info\n async with session.get(current_url) as response:\n data = await response.text()\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n\n for row in reader:\n self.serialnumber = row.pop(0)\n else:\n xml = ET.fromstring(data)\n\n find = xml.find(\"SN\")\n if find is not None:\n self.serialnumber = find.text\n\n _LOGGER.debug(\"Inverter SN: %s\", self.serialnumber)\n\n current_url = self.url\n async with session.get(current_url) as response:\n 
data = await response.text()\n at_least_one_enabled = False\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n ncol = len(next(reader))\n csv_data.seek(0)\n\n values = []\n\n for row in reader:\n for (i, v) in enumerate(row):\n values.append(v)\n\n for sen in sensors:\n if ncol < 24:\n if sen.csv_1_key != -1:\n try:\n v = values[sen.csv_1_key]\n except IndexError:\n v = None\n else:\n v = None\n else:\n if sen.csv_2_key != -1:\n try:\n v = values[sen.csv_2_key]\n except IndexError:\n v = None\n else:\n v = None\n\n if v is not None:\n if sen.name == \"state\":\n sen.value = MAPPER_STATES[v]\n else:\n sen.value = eval(\n \"{0}{1}\".format(v, sen.factor)\n )\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n else:\n xml = ET.fromstring(data)\n\n for sen in sensors:\n find = xml.find(sen.key)\n if find is not None:\n sen.value = find.text\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n\n if not at_least_one_enabled:\n if self.wifi:\n raise csv.Error\n else:\n raise ET.ParseError\n\n if sen.enabled:\n _LOGGER.debug(\"Got new value for sensor %s: %s\",\n sen.name, sen.value)\n\n return True\n except (aiohttp.client_exceptions.ClientConnectorError,\n concurrent.futures._base.TimeoutError):\n # Connection to inverter not possible.\n # This can be \"normal\" - so warning instead of error - as SAJ\n # inverters are powered by DC and thus have no power after the sun\n # has set.\n _LOGGER.warning(\"Connection to SAJ inverter is not possible. \" +\n \"The inverter may be offline due to darkness. \" +\n \"Otherwise check host/ip address.\")\n return False\n except aiohttp.client_exceptions.ClientResponseError as err:\n # 401 Unauthorized: wrong username/password\n if err.status == 401:\n raise UnauthorizedException(err)\n else:\n raise UnexpectedResponseException(err)\n except csv.Error:\n # CSV is not valid\n raise UnexpectedResponseException(\n str.format(\"No valid CSV received from {0} at {1}\", self.host,\n current_url)\n )\n except ET.ParseError:\n # XML is not valid or even no XML at all\n raise UnexpectedResponseException(\n str.format(\"No valid XML received from {0} at {1}\", self.host,\n current_url)\n )", "def update(self):\n try:\n response = requests.get(\n self.API_URL, headers=self.API_HEADERS, timeout=15)\n except requests.exceptions.RequestException:\n self._logger.exception(\"While fetching data from server\")\n return\n\n if response.status_code != 200:\n self._logger.error(\"API call returned with status %s\",\n response.status_code)\n return\n\n content_type = response.headers.get('Content-Type', 'whatever')\n if content_type != 'text/csv':\n self._logger.error(\"Expected text/csv but got %s\", content_type)\n return\n\n response.encoding = 'UTF8'\n content = response.text\n data = (line for line in content.split('\\n'))\n reader = csv.DictReader(data, delimiter=';', quotechar='\"')\n for row in reader:\n if row.get(\"Station\", None) == self._station_id:\n self.data = {\n self.API_FIELDS.get(k)[0]:\n self.API_FIELDS.get(k)[1](v.replace(',', '.'))\n for k, v in row.items()\n if v and k in self.API_FIELDS\n }\n break", "def add_station(self, station):\n self.__stations.append(station)" ]
[ "0.6360263", "0.6021909", "0.59900856", "0.5956896", "0.5865758", "0.56059825", "0.5558574", "0.548664", "0.5460832", "0.54202706", "0.53476536", "0.5343625", "0.5334587", "0.5327473", "0.52983934", "0.52805454", "0.5267272", "0.525761", "0.52528024", "0.52523386", "0.5234326", "0.52224135", "0.5195955", "0.51932204", "0.5172995", "0.51529455", "0.5140023", "0.5137015", "0.51342463", "0.513179", "0.5048484", "0.50292164", "0.50121677", "0.5011149", "0.499143", "0.49863023", "0.49812287", "0.4974494", "0.497208", "0.49369976", "0.49340305", "0.49010858", "0.48991895", "0.48935953", "0.48828834", "0.48806062", "0.48769954", "0.48763964", "0.48689792", "0.484767", "0.4846647", "0.48456106", "0.4843333", "0.48293263", "0.48256487", "0.48130646", "0.48046413", "0.4798809", "0.47969463", "0.47936657", "0.4791942", "0.47886813", "0.4788037", "0.47839746", "0.47818056", "0.4780199", "0.47793815", "0.47704515", "0.4770092", "0.4767843", "0.4765553", "0.47650754", "0.47639844", "0.476382", "0.4762699", "0.47624516", "0.4762349", "0.47572884", "0.4757284", "0.4757022", "0.4745452", "0.47413605", "0.4740423", "0.47372806", "0.47357175", "0.47325456", "0.47249186", "0.4724756", "0.47113806", "0.47113377", "0.47108424", "0.47083157", "0.469987", "0.46976644", "0.4697205", "0.46944606", "0.469227", "0.46919876", "0.46894345", "0.46891236" ]
0.7019285
0
Builds the correct URL. Replaces "query" with "queryauth" if the client has authentication information.
def _build_url(self, service, resource_type, parameters={}): # authenticated dataselect queries have different target URL if self.user is not None: if service == "dataselect" and resource_type == "query": resource_type = "queryauth" return build_url(self.base_url, service, self.major_versions[service], resource_type, parameters, service_mappings=self._service_mappings, subpath=self.url_subpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_url(self, request, action, **query):\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def create_query_url(self):\n self.__log('Starting to create the query URL.')\n query_url = self.config['API_URI']\n for key, value in self.options.items():\n if value:\n if query_url == self.config['API_URI']:\n query_url = query_url + str(key) + \"=\" + str(value)\n else:\n query_url = query_url + \"&\" + str(key) + \"=\" + str(value)\n query_url = query_url.replace(' ', '%20')\n self.__log(f'Done creating query url. URL to query: \"{query_url}\"')\n return query_url", "def build_url(self, config, query):\n if(not os.environ['FLICKR_API_KEY']):\n raise ValueError('Environement variable \"FLICKR_API_KEY\" is empty')\n \n current_provider = [provider for provider in config['providers'] if provider['name'] == self.provider_name][0]\n current_provider['query']['text'] = str(query)\n current_provider['query']['api_key'] = os.environ['FLICKR_API_KEY']\n\n query_strings = helper.build_query_strings(current_provider['query'])\n\n return current_provider['base_url'] + query_strings", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' 
+ '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def build_url(self, query):\n\n parts = list(urlparse.urlparse(self.addon_url))\n parts[4] = urllib.urlencode(query)\n\n return urlparse.urlunparse(parts)", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def build_auth_url(additional_scopes=[], client_id=''):\n user_scopes = ['Read & modify playback.'] + additional_scopes\n scopes = []\n for scope in AUTH_SCOPES_MAPPING:\n if scope['name'] in user_scopes:\n scopes += scope['scopes']\n\n auth_url = (\n 'https://accounts.spotify.com/authorize?client_id={}'\n '&response_type=code&redirect_uri={}&scope={}&state={}'\n .format(\n client_id or CLIENT_ID,\n ul.quote_plus(REDIRECT_URI),\n ul.quote_plus(\" \".join(scopes)),\n uuid1(),\n )\n )\n return auth_url", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s%s' % (self.endpoint, self.methodname)\n else:\n return '%s%s?%s' % (self.endpoint, self.methodname, kwargs)", "def get_query_url(self):\n url = self.__config.server_url\n url = url + self.__config.paths[self.query_type] + '?'\n logging.debug('The url to query is %s' % url)\n\n params = {\n self.FILTER_KEY: {},\n self.QUERY_FILTER_KEY: {}\n }\n\n # add passed params\n self.__add_query_param(self.query_params, params)\n\n # add default params if they have been passed they will not overwrite previously passed values\n self.__add_query_param(self.__config.default_params, params, False)\n\n # add static params such as wt=json\n self.__add_query_param(self.__config.params, params)\n\n # add the items in filter query to filter params\n url = url + '&' + self.__get_filter_param(params[self.FILTER_KEY])\n url = url + '&' + self.__get_query_filter_param(params[self.QUERY_FILTER_KEY])\n\n del params[self.FILTER_KEY]\n del params[self.QUERY_FILTER_KEY]\n\n url = url + '&' + parse.urlencode(params)\n\n return url", "def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url", "def _generate_url(action, query_params=None):\r\n if query_params:\r\n query_params = urllib.parse.urlencode(query_params)\r\n action = f\"{action}?{query_params}\"\r\n \r\n\r\n url = urllib.parse.urljoin(api_url, action)\r\n\r\n return url", "def __BuildGetUrl(self, baseUrl, userName = \"\", limit = -1, since = -1, offset = -1):\n\n url = \"/\"\n if (userName == self.userName):\n if (since < 1):\n url += baseUrl\n else:\n url += baseUrl+\"/\"+str(since)+\"/since\"\n elif (userName == \"\"):\n if (since < 1):\n url += baseUrl+\"/all\"\n else:\n url += baseUrl+\"/\"+str(since)+\"/all_since\"\n else:\n if (since < 1):\n url += \"users/\"+userName+\"/\"+baseUrl\n else:\n url += \"users/\"+userName+\"/\"+baseUrl+\"/\"+str(since)+\"/since\"\n\n if (limit > 0 and offset == -1):\n url += \"?limit=\"+str(limit)\n elif (offset > 0 and limit == -1):\n url += \"?offset=\"+str(offset)\n elif (limit > 0 and offset > 0):\n url += \"?limit=\"+str(limit)+\"&offset=\"+str(offset)\n\n return url", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, 
self._model_unique_id)", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url", "def __http_build_url(self, url_path):\n\n return '{}://{}{}'.format(_GOVEE_API_PROTOCOL, _GOVEE_API_HOST, url_path)", "def composeQueryUrl(self, params):\n\t\ttextparams = urllib.urlencode(params)\n\t\treturn self.api_url + \"?\" + textparams", "def __BuildGetUrlRev(self, baseUrl, userName = \"\", limit = -1, since = -1, offset = -1):\n\n url = \"/\"\n if (userName == self.userName):\n if (since < 1):\n url += baseUrl\n else:\n url += baseUrl+\"/since\"+\"/\"+str(since)\n elif (userName == \"\"):\n if (since < 1):\n url += baseUrl+\"/all\"\n else:\n url += baseUrl+\"/\"+str(since)+\"/all_since\"\n else:\n if (since < 1):\n url += \"users/\"+userName+\"/\"+baseUrl\n else:\n url += \"users/\"+userName+\"/\"+baseUrl+\"/\"+\"since/\" + str(since)\n\n if (limit > 0 and offset == -1):\n url += \"?limit=\"+str(limit)\n elif (offset > 0 and limit == -1):\n url += \"?offset=\"+str(offset)\n elif (limit > 0 and offset > 0):\n url += \"?limit=\"+str(limit)+\"&offset=\"+str(offset)\n\n return url", "def __build_url(self, api_call, **kwargs):\n kwargs['key'] = self.api_key\n if 'language' not in kwargs:\n kwargs['language'] = self.language\n if 'format' not in kwargs:\n kwargs['format'] = self.__format\n api_query = urlencode(kwargs)\n\n return \"{0}{1}?{2}\".format(urls.BASE_URL,\n api_call,\n api_query)", "def _build_api_query(self, params, ignore_maxlag, no_assert):\n if not self._base_url or self._script_path is None:\n e = \"Tried to do an API query, but no API URL is known.\"\n raise exceptions.APIError(e)\n\n url = self.url + self._script_path + \"/api.php\"\n params[\"format\"] = \"json\" # This is the only format we understand\n if self._assert_edit and not no_assert:\n # If requested, ensure that we're logged in\n params[\"assert\"] = self._assert_edit\n if self._maxlag and not ignore_maxlag:\n # If requested, don't overload the servers:\n params[\"maxlag\"] = self._maxlag\n if \"csrf\" not in self._tokens:\n # If we don't have a CSRF token, try to fetch one:\n self._request_csrf_token(params)\n return url, params", "def _generate_query_string(self):\n \n query_items = {}\n \n for key, val in self.__dict__.iteritems():\n if not key.startswith('_'):\n query_items[key] = val.encode('utf-8')\n \n return urllib.urlencode(query_items)", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def _generate_query(self, url):\n\n query_str = \"token=%s&mid=%s&murl=%s\" % (self.token, self.merchant_id,\n url)\n\n logging.debug('_generate_query:%s', query_str)\n return query_str", "def GenerateUrl():\n params = {}\n params['client_id'] = 
Constants.USER['CLIENT_ID']\n params['redirect_uri'] = Constants.AUTH['REDIRECT']\n params['scope'] = Constants.AUTH['SCOPE']\n params['response_type'] = 'code'\n return '%s?%s' % (Constants.OAUTH, FormatUrl(params))", "def create_auth_url():\n state = secrets.token_hex(16)\n nonce = secrets.token_hex(16)\n credentials = {\n 'response_type': 'code',\n 'redirect_uri': REDIRECT_URI,\n 'client_id': CLIENT_ID,\n # Define your app scopes here. If you're building an OIDC app you'll\n # want to stick to the scopes defined in Okta's OIDC spec:\n # https://developer.okta.com/docs/reference/api/oidc/#scopes\n # If you're building an app to interact with your Okta OAuth APIs\n # You'll want to state each API scope you need here.\n 'scope': \"openid okta.groups.read okta.users.read\",\n 'state': state,\n 'nonce': nonce,\n 'code_challenge_method': 'S256',\n 'code_challenge': CODE_CHALLENGE\n }\n url = (AUTHORIZE_URL + \"?\" + urllib.parse.urlencode(credentials))\n return url", "def getUrl(self, query: QueryParamsBase):\n assert isinstance(query, QueryParamsBase), \"query parameter should be an instance of a class that has Query as a base class, such as QueryArticles or QueryEvents\"\n import urllib\n # don't modify original query params\n allParams = query._getQueryParams()\n # make the url\n url = self._host + query._getPath() + \"?\" + urllib.parse.urlencode(allParams, doseq=True)\n return url", "def _build_url(self):\n url = BASE_URL.format(self._host, self._port)\n _LOGGER.debug(\"TOON fetch URL: %s\", url)\n return url", "def rebuild_url(scheme, path, fragment, username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s?method=%s&type=%s' % (self.endpoint, self.methodname, params.get('type', 'json'))\n else:\n return '%s?%s' % (self.endpoint, kwargs)", "def _build_uri(self, uri_base, params):\n if not params:\n return uri_base\n else:\n uri_extension = \"?\"\n for param in params:\n uri_extension = uri_extension + param + \"&\"\n uri_extension = uri_extension[:-1] # clip off the final & \n uri = uri_base + uri_extension\n return uri", "def _generate_url(self, endpoint:str, params:Dict[str, str]=None) -> str:\n if params:\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}?{urlencode(params)}\"\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}\"", "def auth_url(self):\n\n return \"{}?client_id={}&redirect_uri={}&scope={}&state={}\".format(AUTH_ENDPOINT, self.client_id,\\\n self.redirect_uri, self.scope, self.state)", "def build_url(main_url, url_params):\n return main_url + \"/\" + \"/\".join(url_params)", "def build_url(self, dict_args_in_out=None):\n if dict_args_in_out is None:\n dict_args_in_out = {}\n\n url = dict_args_in_out.pop('base_url', None) or ''\n url += '/%s' % self.collection_key\n\n # do we have a specific entity?\n entity_id = dict_args_in_out.pop('%s_id' % self.key, None)\n if entity_id is not None:\n url += '/%s' % entity_id\n\n return url", "def create_guardian_search_url(api_key, query, page, from_date, to_date):\n\n # format base url\n url = '%s?page-size=%s&show-fields=%s&q=%s&page=%s&api-key=%s' % (\n GUARDIAN_SEARCH_API, PAGE_SIZE, SHOW_FIELDS, query, page, api_key\n )\n\n # add from-date query, if exists\n if (from_date):\n 
url += '&from-date=%s' % (from_date)\n\n # add to-date query, if exists\n if (to_date):\n url += '&to-date=%s' % (to_date)\n\n return url", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def _build_url(self, host, handler):\n scheme = 'https' if self.use_https else 'http'\n return '%s://%s/%s' % (scheme, host, handler)", "def _url(path, **kwargs):\n if kwargs:\n if isinstance(kwargs.get('owner'), users.User):\n kwargs['owner'] = kwargs['owner'].email()\n encoded_parameters = urllib.urlencode(kwargs)\n if path.endswith('?'):\n # Trailing ? on path. Append parameters to end.\n return '%s%s' % (path, encoded_parameters)\n elif '?' in path:\n # Append additional parameters to existing query parameters.\n return '%s&%s' % (path, encoded_parameters)\n else:\n # Add query parameters to path with no query parameters.\n return '%s?%s' % (path, encoded_parameters)\n else:\n return path", "def _prepare_url(self, paging=False):\n headers = {\"Content-Type\": \"application/json\"}\n fmt = \"%s&access_token=%s\" % (self.URL_FORMAT, self._access_token)\n if not paging:\n self.paging_url = None\n self.url = fmt.format(self.freshest - 2,\n self.current_query,\n self.limit())\n else:\n self.paging_url = \"%s&until=%d\" % (self.url, self.prev_stalest)\n\n return headers", "def _get_url_params(self):\n\n # These should have been established by _logon\n assert self.__apikey\n assert self.__token\n\n return \"api_key=%s&access_token=%s\" % (self.__apikey, self.__token)", "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "def _create_query_string(self, query):\n # Check for a result type, if none found, set it to default.\n result_type = query.result_type\n if not result_type:\n result_type = self.default_result_type\n\n # Check to if the result type is valid\n if result_type not in RESULT_TYPES:\n raise QueryParamException(self.name, \"Engine doesn't support query result type '{0}'\"\n .format(query.result_type))\n\n search_params = {'result_type': result_type,\n 'q': query.terms}\n\n query_append = \"search?q={}&type={}&access_token={}\".format\\\n (search_params['q'], search_params['result_type'], self.api_key)\n\n return API_ENDPOINT + encode_symbols(query_append)", "def _build_url_exact(self, q: str, **kwargs: Dict) -> str:\n url = f\"{self._URL}?where=\"\n if kwargs.get('doi'):\n input_doi = kwargs.get('doi')\n url += f'''{{\"doi\":\"{input_doi}\"}}'''\n return url", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. 
Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def build_api_url(project, method, base_url):\n return API_URL_TEMPLATE.format(\n api_base=base_url, api_version=API_VERSION, project=project, method=method\n )", "def build_url(**kwargs):\n base_url = 'https://sfbay.craigslist.org/search/sby/apa?'\n\n query_params = {\n 'hasPic': '1',\n 'bundleDuplicates': '1',\n 'min_price': '1100',\n 'max_price': '1800',\n 'availabilityMode': '0',\n 'sale_date': 'all+dates',\n }\n\n # more query parameters passed, add them to the dict\n if kwargs:\n query_params.update(kwargs)\n\n return base_url + urllib.parse.urlencode(query_params)", "def gen_query_url(self, url, function, format=None, method=None, get_args=None):\n function = self.namespace_map[function]\n return '%s/%s' % (url, function)", "def build_search_url(query):\n google_url = []\n # Build URL to query Google\n google_url.append('https://www.google.com/search?')\n # I'm feeling lucky: go to first result\n google_url.append('btnI=1')\n # Limit results to only this specific website\n google_url.append('&as_sitesearch=docs.aws.amazon.com')\n # Build query\n query = \"aws cloudformation \" + query\n # This line escapes spaces and the like\n query = urllib.quote_plus(query.strip())\n # Attach query to URL\n google_url.append(\"&q=\")\n google_url.append(query)\n return \"\".join(google_url)", "def generate_url(query: QueryData, verb: str, validate: bool = True,\n endpoint: Optional[yarl.URL] = None) -> yarl.URL:\n # NOTE: The yarl.URL object uses the division operator to chain URI\n # components.\n\n # Census endpoint\n default = default_endpoints()[0]\n url = yarl.URL(endpoint or default)\n # Service ID\n if url == default:\n url /= query.service_id\n if validate and endpoint == default and query.service_id == 's:example':\n warnings.warn('The default service ID is heavily rate-limited. '\n 'Consider applying for your own service ID at '\n 'https://census.daybreakgames.com/#devSignup')\n # Query verb\n url /= verb\n # Namespace\n url /= query.namespace\n # Collection\n if (collection := query.collection) is not None:\n url /= collection\n elif validate:\n if query.terms:\n warnings.warn(f'No collection specified, but {len(query.terms)} '\n 'query terms provided')\n elif query.joins:\n warnings.warn(f'No collection specified, but {len(query.joins)} '\n 'joined queries provided')\n # Top-level query terms\n url = url.with_query([t.as_tuple() for t in query.terms])\n # Process query commands\n url = url.update_query(_process_query_commands(query, validate=validate))\n return url", "def build_url(base: str, *segments, **query) -> str:\n\n parsed_base = urlparse(base).geturl()\n\n if not segments:\n path = ''\n else:\n path_segments = []\n for segment in segments:\n # Do not strip leading or trailing `/` from segments\n path_segments.append(quote(segment, safe=''))\n path = '/'.join(path_segments)\n\n if not query:\n queries = ''\n else:\n query_pairs = []\n for key, value in query.items():\n key_value_pair = [quote(key, safe=''), quote(value, safe='')]\n query_pairs.append('='.join(key_value_pair))\n queries = '?' 
+ '&'.join(query_pairs)\n\n path = '/' + path if path else ''\n\n return ''.join([parsed_base, path, queries])", "def build_url(host, port, api_version=None, path=None,\n params=None, use_ssl=False):\n\n pattern = 'v\\d\\.\\d'\n if re.match(pattern, path):\n message = 'Version should not be included in path.'\n raise exceptions.InvalidConfiguration(message=message)\n\n if use_ssl:\n url = \"https://\" + host\n else:\n url = \"http://\" + host\n\n if port is not None:\n url += \":\" + port\n url += \"/\"\n\n if api_version is not None:\n url += api_version + \"/\"\n\n if path is not None:\n url += path\n\n if params is not None:\n url += \"?\"\n url += urllib.urlencode(params)\n\n return url", "def make_url(realm_url, endpoint):\n return \"{}/protocol/openid-connect/{}\".format(realm_url, endpoint)", "def _clean_authorization_request_url(request_url):\n parsed_url = urlparse(request_url)\n query_params = dict(parse_qsl(parsed_url.query, keep_blank_values=True))\n for param in [\"code\", \"state\"]:\n if param in query_params:\n query_params[param] = \"redacted\"\n url_parts = list(parsed_url) # cast to list to override query params\n url_parts[4] = urlencode(query=query_params)\n request_url = urlunparse(url_parts)\n return request_url", "def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url", "def make_url(api_key, url, args=None):\n if args is None:\n args = []\n argsep = '&'\n if '?' 
not in url:\n argsep = '?'\n if '?apiKey=' not in url and '&apiKey=' not in url:\n args.insert(0, ('apiKey', api_key))\n return url + argsep + '&'.join(['='.join(t) for t in args])", "def __build_url(path, api_site_parameter, **params):\n \n query = [\"%s=%s\" % (key, params[key]) for key in params if (params[key] or key == 'pagesize') ]\n query_string = \"&\".join(query)\n url = \"%s/%s/%s?\" % (__api_endpoint, __api_version, path)\n url += query_string\n return url", "def build_url(self, endpoint_url: str) -> str:\n return self.base_url + endpoint_url % self.instance_id", "def test_client_build_url():\n eq_(\"{0}/{1}\".format(client.BASE_URL, \"v1/charges/\"), client.build_url(\"v1/charges/\"))", "def _format_api_url(self, url):\n user_name = self._get_user_name()\n # format and return url\n return url.format(\n user_name = user_name,\n element = urllib.quote(self.qnet_element.encode('utf-8'), safe=''),\n token = self._md5(\"%s:%s:%s\" % (user_name, self.iteration_id, self._secret_key))\n )", "def build_url(self, host, target, params=None):\n return \"https://%s%s\" % (host, self.build_path(target, params))", "def __get_url(cls, url):\n url = url + AdvertCoordinationAdaptor.BASE_URL_QUERY_STRING\n return url", "def get_url(self, **kwargs):\n\n return build(\n self._request.path,\n self._request.GET,\n self._meta.prefix,\n **kwargs )", "def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' + qs\n return scheme + host + script_name + path_info + qs", "def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request", "def _build_endpoint(self, api_path, params=None, query_params=None):\n\n\t\tnew_api_path = api_path\n\n\t\t# Replace all parameters in the new_api_path path, if required\n\t\ttry:\n\t\t\t# Make the parameters values safe for adding to URLs\n\t\t\turl_params = {k: compat.quote(str(v)) if isinstance(v, str)\\\n\t\t\t else v for k, v in iteritems(params)}\n\n\t\t\tlog_with_debug_info(logging.DEBUG, u'URL parameters are: [{0}]'.format(url_params))\n\n\t\t\tqparams = u''\n\t\t\tif query_params:\n\t\t\t\t# Process the URL query parameters\n\t\t\t\tqparams = u'?{0}'.format(compat.urlencode(query_params))\n\t\t\t\tlog_with_debug_info(logging.DEBUG,\n\t\t\t\t u'URL query parameters are: [{0}]'.format(qparams))\n\n\t\t\tnew_api_path = api_path.format(**url_params) + qparams\n\t\texcept KeyError as e:\n\t\t\tmsg = (u'Expecting a value for keyword argument [{0}] for format field '\n\t\t\t u'specification [{1!r}]')\n\t\t\tmsg = msg.format(e, api_path)\n\t\t\tlog_with_debug_info(logging.ERROR, msg)\n\t\t\traise ValueError(msg)\n\t\texcept ValueError as e:\n\t\t\tmsg = (u'One or more values do not match the format field specification '\n\t\t\t u'[{0!r}]; Supplied values: {1!r} ')\n\t\t\tmsg = msg.format(api_path, params)\n\t\t\tlog_with_debug_info(logging.ERROR, msg)\n\t\t\traise ValueError(msg)\n\n\t\tretval = compat.urljoin(self.api_base_url, 
new_api_path)\n\n\t\tlog_with_debug_info(logging.DEBUG, u'Built end-point to return: {0}'.format(retval))\n\n\t\treturn retval", "def _build_endpoint(self, api_path, params=None, query_params=None):\n\n new_api_path = api_path\n\n # Replace all parameters in the new_api_path path, if required\n try:\n # Make the parameters values safe for adding to URLs\n url_params = {k: compat.quote(str(v)) if isinstance(v, str) else v for k, v in iteritems(params)}\n\n utils.log_with_debug_info(logging.DEBUG, u'URL parameters are: [{0}]'.format(url_params))\n\n qparams = u''\n if query_params:\n # Process the URL query parameters\n qparams = u'?{0}'.format(compat.urlencode(query_params))\n utils.log_with_debug_info(logging.DEBUG, u'URL query parameters are: [{0}]'.format(qparams))\n\n new_api_path = api_path.format(**url_params) + qparams\n except KeyError as e:\n msg = (u'Expecting a value for keyword argument [{0}] for format field '\n u'specification [{1!r}]')\n msg = msg.format(e, api_path)\n utils.log_with_debug_info(logging.ERROR, msg)\n raise ValueError(msg)\n except ValueError as e:\n msg = (u'One or more values do not match the format field specification '\n u'[{0!r}]; Supplied values: {1!r} ')\n msg = msg.format(api_path, params)\n utils.log_with_debug_info(logging.ERROR, msg)\n raise ValueError(msg)\n\n retval = compat.urljoin(self.api_base_url, new_api_path)\n\n utils.log_with_debug_info(logging.DEBUG, u'Built end-point to return: {0}'.format(retval))\n\n return retval", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def _build_request(self, endpoint='', **parameters):\n\n request = {\n 'method': parameters.pop('method', 'GET'),\n 'data': parameters.pop('data', None),\n 'json': parameters.pop('json', None)\n }\n\n # url = {base_url}[/{endpoint}]\n url = '/'.join(filter(None, (self.__class__.base_url, endpoint)))\n\n for index, (key, value) in enumerate(parameters.items()):\n url += '{symbol}{key}={value}'.format(\n symbol='&' if index else '?', key=key, value=value\n )\n\n request['url'] = url\n\n return request", "def genauthurl(redirect=False, scope=False):\n if not scope:\n scope = c.oauth_scope\n\n return (c.sandbox_host if c.sandbox else c.production_host) \\\n + 'oauth/v2/authenticate?client_id=' \\\n + c.client_id \\\n + '&response_type=code&scope=' \\\n + scope \\\n + (('&redirect_uri=' + redirect) if redirect else '')", "def query_options_to_url(self):\n return '&'.join(['$%s=%s' % (key, value) for (key, value) in self.query_options.items()])", "def construct_url(self, local_json: Dict) -> str:\n url_str = \"\"\n\n for arg in self.get_url_args():\n if arg == \"merchantId\":\n url_str = url_str + str(self.merchant_id) + \"/\"\n elif arg == \"signature\":\n url_str = url_str + str(self.get_url_signature(local_json)) + \"/\"\n else:\n url_str = url_str + str(local_json[arg]) + \"/\"\n\n return urljoin(self.get_url(), url_str[:-1])", "def _make_combined_url(base_url, parameters, state):\n url = base_url.rstrip('?')\n url_parts = [url]\n sep_with_ampersand = ('?' 
in url)\n if parameters:\n query_string = urllib.urlencode(parameters)\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n query_string])\n sep_with_ampersand = True\n\n if state:\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n 'state=',\n state])\n\n return ''.join(url_parts)", "def build_url(self, endpoint):\n if hasattr(self, \"port\"):\n return \"{}://{}:{}/{}\".format(\n self.scheme, self.root_url, self.port, endpoint)\n else:\n return \"{}://{}/{}\".format(\n self.scheme, self.root_url, endpoint)", "def construct_base_string(method, uri, params, host=None):\n\n # Create base string URI per Section 3.4.1.2\n base_string_uri = normalize_base_string_uri(uri, host)\n\n # Cleanup parameter sources per Section 3.4.1.3.1\n unescaped_params = []\n for k, v in params:\n # The \"oauth_signature\" parameter MUST be excluded from the signature\n if k in ('oauth_signature', 'realm'):\n continue\n\n # ensure oauth params are unescaped\n if k.startswith('oauth_'):\n v = unescape(v)\n unescaped_params.append((k, v))\n\n # Normalize parameters per Section 3.4.1.3.2\n normalized_params = normalize_parameters(unescaped_params)\n\n # construct base string\n return '&'.join([\n escape(method.upper()),\n escape(base_string_uri),\n escape(normalized_params),\n ])", "def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")", "def format_url(endpoint, cmd):\n url = base_url + endpoint + cmd + '&key=' + bart_api_key + json\n return url", "def createAuthRequestURL(self, state=None, **kwargs):\n # Fill arguments for preperation URL\n kwargs['response_type'] = 'code'\n kwargs['state'] = state or self.createState()\n kwargs['client_id'] = self.parameters['client_id']\n kwargs['redirect_uri'] = kwargs.get('redirect_uri') or self.parameters['redirect_uri']\n kwargs['scope'] = kwargs.get('scope') or self.parameters['scope'] or self.parameters['scopes_supported']\n if self.parameters['prompt']:\n kwargs['prompt'] = self.parameters['prompt']\n \n # Add IdP authorization endpoint\n self.log.info(kwargs['state'], 'session, generate URL for authetication.')\n url = (kwargs.get('authorization_endpoint') or self.parameters['authorization_endpoint']) + '?access_type=offline'\n if not url:\n return S_ERROR('No found authorization endpoint.')\n\n # Add arguments\n for key, value in kwargs.items():\n url += '&%s=%s' % (key, '+'.join(list(set(v.strip() for v in value))) if isinstance(value, list) else value)\n return S_OK({'URL': url, 'Session': kwargs['state']})", "def build_url(base_url, service, major_version, resource_type,\n parameters=None, service_mappings=None, subpath='fdsnws'):\n # Avoid mutable kwargs.\n if parameters is None:\n parameters = {}\n if service_mappings is None:\n service_mappings = {}\n\n # Only allow certain resource types.\n if service not in [\"dataselect\", \"station\"]:\n msg = \"Resource type '%s' not allowed. 
Allowed types: \\n%s\" % \\\n (service, \",\".join((\"dataselect\", \"station\")))\n raise ValueError(msg)\n\n # Special location handling.\n if \"location\" in parameters:\n loc = parameters[\"location\"].replace(\" \", \"\")\n # Empty location.\n if not loc:\n loc = \"--\"\n # Empty location at start of list.\n if loc.startswith(','):\n loc = \"--\" + loc\n # Empty location at end of list.\n if loc.endswith(','):\n loc += \"--\"\n # Empty location in middle of list.\n loc = loc.replace(\",,\", \",--,\")\n parameters[\"location\"] = loc\n\n # Apply per-service mappings if any.\n if service in service_mappings:\n url = \"/\".join((service_mappings[service], resource_type))\n else:\n if subpath is None:\n parts = (base_url, service, str(major_version),\n resource_type)\n else:\n parts = (base_url, subpath.lstrip('/'), service,\n str(major_version), resource_type)\n url = \"/\".join(parts)\n\n if parameters:\n # Strip parameters.\n for key, value in parameters.items():\n try:\n parameters[key] = value.strip()\n except Exception:\n pass\n url = \"?\".join((url, urlencode(parameters, safe=':,*')))\n \n return url", "def _endpoint(\n self,\n *,\n path: str = \"\",\n query: dict[str, str | None] | None = None,\n ) -> str:\n endpoint = f\"https://api.cloudflare.com/client/v4/zones/{path}\"\n if query is None:\n return endpoint\n return f\"{endpoint}?{'&'.join(f'{k}={v}' for k, v in query.items() if v is not None)}\"", "def generate_auth_url(self, OAUTH_SETTINGS, consumer_key, consumer_secret, domain, callback_url):\n\n\t\tconsumer = oauth2.Consumer(consumer_key, consumer_secret)\n\t\tclient = oauth2.Client(consumer)\n\n\t\treq = oauth2.Request(method=\"GET\", url=OAUTH_SETTINGS['request_token_url'], \\\n\t\t\tparameters=dict({\"oauth_callback\": \"http://localhost:8080\"}, **OAUTH_SETTINGS['auth_params']))\n\t\tsignature_method = oauth2.SignatureMethod_HMAC_SHA1()\n\t\t#req.sign_request(signature_method, consumer, None)\n\t\tresp, content = client.request(req.to_url(), \"GET\")\n\t\tif resp['status'] != '200':\n\t\t\traise Exception(\"Invalid response %s.\" % resp['status'])\n\n\t\tquery = urlparse.parse_qs(content)\n\t\tauth_url = \"%s?oauth_token=%s&&domain=%s\" % (OAUTH_SETTINGS['authorize_url'],\n\t\t\tquery['oauth_token'][0],\n\t\t\tdomain)\n\t\treturn auth_url, query['oauth_token'][0], query['oauth_token_secret'][0]", "def create_url(self):\n\n # Format the template strings with the user credentials and host\n # information provided upon instantiation.\n url = self.sql_url_template\n url = url.format(\n username=self.sql_username,\n password=self.sql_password,\n host=self.sql_host,\n port=self.sql_port,\n db=self.sql_db\n )\n\n return url", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query", "def 
get_query_string(self):\r\n pass", "def auth_url(self,code_challenge=None):\n\t\tself.unique_state = base64.urlsafe_b64encode(secrets.token_bytes(8)).decode().replace(\"=\", \"\")\n\t\tparams = {\n\t\t\t\"response_type\": \"code\",\n\t\t\t\"redirect_uri\": self.settings['client_callback_url'],\n\t\t\t\"client_id\": self.settings['client_id'],\n\t\t\t\"scope\": self.settings['scopes'],\n\t\t\t\"state\": self.unique_state\n\t\t}\n\n\t\tif code_challenge:\n\t\t\tparams.update({\n\t\t\t\t\"code_challenge\": code_challenge,\n\t\t\t\t\"code_challenge_method\": \"S256\"\n\t\t\t})\n\n\t\tstring_params = urllib.parse.urlencode(params)\n\t\tfull_auth_url = \"{}?{}\".format(self.settings['base_auth_url'], string_params)\n\t\tself.full_auth_url = full_auth_url\n\t\treturn full_auth_url", "def construct_api_url(self, method_name, params):\n since = params.get('since', None)\n if since:\n since = datetime.date(*time.strptime(since, \"%Y-%m-%d\")[0:3])\n today = datetime.date.today()\n delta = today - since\n if delta.days > 35:\n diff = datetime.timedelta(days=36)\n params['since'] = today - diff\n api_method = \"/\".join(method_name.split('__'))\n params['access_token'] = self.access_token.token\n url = \"%s/%s/%s?%s\" % (self.api_url,\n self.page.page_id,\n api_method,\n urllib.urlencode(params))\n return url", "def base_url(self):\n return 'http://%s/api.php?token=%s&path_info=' % \\\n (self.ac_url, self.api_key)", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def __redirect_uri(self):\n uri = '%s://%s%s' % (request.scheme, request.hostname,\n request.path_info)\n if request.get_vars:\n uri += '?' 
+ urlencode(request.get_vars)\n return uri", "def url(self, suffix: str, *, version: Optional[str] = None,\n query: Union[None, dict, MultiValueDict] = None,\n **kwargs: Any) -> str:\n name = self.url_name_format.format(\n version=version,\n app_label=self.app_label,\n basename=self.basename,\n suffix=suffix,\n )\n url = reverse(name, kwargs=kwargs)\n\n # support multiple values for same param: \"id=1&id=2&id=3\"\n if isinstance(query, MultiValueDict):\n params = []\n for key, value in query.lists():\n if isinstance(value, list):\n params.append('&'.join([f'{key}={x}' for x in value]))\n else:\n params.append(f'{key}={value}')\n\n url += f\"?{'&'.join(params)}\"\n elif query:\n # simple dict query params\n url += '?%s' % urlencode(query)\n return url", "def _get_url(self, category):\n query = []\n for key,value in self._params.iteritems():\n query.append(\"{key}={value}\".format(key=key,value=value))\n return \"{base}/{category}?{query}\".format(base = self._base_url, category = category, query = \"&\".join(query))", "def command_url(self):\n url = self.base_url + self.command\n\n if self.item_id:\n # A particular project/person/comany etc id\n url += '/' + str(self.item_id)\n if self.subcommand:\n # This is used to get tickets or milestones for example\n url += '/' + self.subcommand\n if self.sub_id:\n url += '/' + self.sub_id\n\n if self.params:\n # Extra parameters via a dict which may be passed\n # outside of our base command url\n return '%s&%s' % (url, self.params)\n else:\n return url", "def _create_query_str(data):\n params = []\n for name, value in data.items():\n params.append(name + '=' + str(value))\n\n return '?' + '&'.join(params)", "def get_upstream_url_with_query(self, *, scope: Scope) -> str:\n # The default implementation simply appends the original URL's query string to the\n # upstream URL generated by `get_upstream_url`.\n url = self.get_upstream_url(scope=scope)\n query_string = scope.get(\"query_string\")\n if query_string:\n sep = \"&\" if \"?\" in url else \"?\"\n url += \"{}{}\".format(sep, query_string.decode(\"utf-8\"))\n return url", "def query_url(target):\n query = \"info:\"+target\n params = urllib.urlencode({\n \"client\": \"navclient-auto\",\n \"ch\": \"6%s\" % checksum(query),\n \"ie\": \"UTF-8\",\n \"oe\": \"UTF-8\",\n \"features\": \"Rank\",\n \"q\": query,\n })\n return \"http://%s/search?%s\" % (HOST, params)" ]
[ "0.7369034", "0.7334827", "0.72362584", "0.7071104", "0.7013484", "0.6760965", "0.65050304", "0.6447025", "0.6425153", "0.63879895", "0.6378408", "0.6330067", "0.63281715", "0.62623614", "0.62623614", "0.62423044", "0.62401664", "0.62292403", "0.6221821", "0.61912405", "0.6155979", "0.61519265", "0.6121074", "0.6120364", "0.611641", "0.6085016", "0.60798705", "0.6053425", "0.60519254", "0.6051128", "0.60322094", "0.6007801", "0.6004259", "0.59998196", "0.5981944", "0.59583837", "0.5947006", "0.5941505", "0.5939452", "0.5907101", "0.59019876", "0.5894293", "0.5891262", "0.5870856", "0.58706206", "0.5869617", "0.5862674", "0.5841952", "0.5841337", "0.5835177", "0.5825585", "0.5821332", "0.5813479", "0.5813378", "0.5797278", "0.57673836", "0.57673836", "0.57621574", "0.5755053", "0.5752493", "0.5742954", "0.5741471", "0.5738859", "0.57314044", "0.5689742", "0.56868255", "0.5686546", "0.5685573", "0.56771505", "0.5669918", "0.56685334", "0.56631005", "0.5639741", "0.56205034", "0.56197494", "0.5613445", "0.5611131", "0.56023353", "0.55719876", "0.5569237", "0.5567541", "0.5565315", "0.5563018", "0.5548043", "0.55425143", "0.5532621", "0.5532621", "0.55291384", "0.5521103", "0.5511801", "0.55092204", "0.55082303", "0.55004525", "0.5497138", "0.54813784", "0.54784006", "0.547681", "0.5470749", "0.54608303", "0.54572475" ]
0.660301
6
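The truncated record that ends above collects negative snippets (auth_url, construct_api_url, query-string builders) that all revolve around appending URL-encoded query parameters to a base URL. As a point of reference only, here is a minimal standard-library sketch of that pattern; the function and parameter names (build_url, base, params) are illustrative assumptions, not taken from the dataset.

# Illustrative only: builds "<base>?<key>=<value>&..." the way the snippets above do,
# using urllib.parse.urlencode from the standard library.
from urllib.parse import urlencode


def build_url(base, params):
    # doseq=True expands list values into repeated keys, e.g. id=1&id=2&id=3.
    query = urlencode(params, doseq=True)
    return f"{base}?{query}" if query else base


print(build_url("https://api.example.com/search", {"q": "version", "page": 2}))
# -> https://api.example.com/search?q=version&page=2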
Get full version information of webservice (as a tuple of ints). This method is cached and will only be called once for each service per client object.
def get_webservice_version(self, service):
    if service is not None and service not in self.services:
        msg = "Service '%s' not available for current client." % service
        raise ValueError(msg)
    if service not in FDSNWS:
        msg = "Service '%s' is not a valid FDSN web service." % service
        raise ValueError(msg)
    # Access cache.
    if service in self.__version_cache:
        return self.__version_cache[service]
    url = self._build_url(service, "version")
    version = self._download(url, return_string=True)
    version = list(map(int, version.split(b".")))
    # Store in cache.
    self.__version_cache[service] = version
    return version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def get_version(self):\r\n if not self.endpoint_checker(self.endpointurl):\r\n raise Exception(\"Please use a valid ESRI REST url\")\r\n\r\n parsedurl = urlparse(self.endpointurl)\r\n print(f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\")\r\n req = requests.get(\r\n f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\"\r\n )\r\n\r\n if req.status_code == 200:\r\n try:\r\n return req.json()[\"currentVersion\"]\r\n except KeyError:\r\n try:\r\n req = requests.get(\r\n self.endpointurl.split(\"services/\")[0] + \"services/?f=pjson\"\r\n )\r\n return req.json()[\"currentVersion\"]\r\n except Exception as e:\r\n raise e\r\n raise Exception(\r\n f\"An Error occurred retrieving vital information, the response status {str(req.status_code)} associate with {req.json()['error']['message']}\"\r\n )", "def get_version(client):\n version = client.info()['version']['number']\n version = version.split('-')[0]\n if len(version.split('.')) > 3:\n version = version.split('.')[:-1]\n else:\n version = version.split('.')\n return tuple(map(int, version))", "def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']", "def get_version(self):\n return self.__make_api_call('get/version')", "def rpc_version(self):\n\t\tvinfo = {'version': version.version, 'version_info': version.version_info._asdict()}\n\t\tvinfo['rpc_api_version'] = version.rpc_api_version\n\t\treturn vinfo", "def info(self):\n version_str = self.version\n return Utils.version_str2tuple(version_str)", "def ClientVersion(self):\n return (self.VERSION_MAJOR, self.VERSION_MINOR)", "def _get_webservice_versionstring(self, service):\n version = self.get_webservice_version(service)\n return \".\".join(map(str, version))", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def _get_version(self):", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_versions(self, async = False):\n\n\t\tself._send_message(\"VERSION\", \"\\x00\")\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"VERSION\").get_data()", "def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()", "def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def get_version(self):\n pass", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')", "def get_version_info() -> Tuple[Text, Text]:", "def get_version(self):\n return self.api_version", "def version(self):\n return self.rpc.call(MsfRpcMethod.CoreVersion)", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = 
send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def version(self):\n return self._client.getVersion()", "def version(self) -> Dict[str, str]:\n return self.get_version()", "async def version(self) -> dict:\n if not self.http_session:\n raise RuntimeError('Client has been disconnected')\n\n version_url = f'http://{self.host}:{self.port:d}/json/version'\n\n logger.debug('GET %s', version_url)\n resp = await self.http_session.get(version_url)\n resp.raise_for_status()\n\n return await resp.json()", "def version_info(self):\n\n return __version_info__", "def get(self):\n return {'version': get_version()}", "def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))", "def version(self):\n\n data = {\"action\" : \"version\"}\n return rpc_request(self.uri, data)", "def get_versions(self):\n raise NotImplementedError", "def get_client_version(self):\n return self.__aceQLHttpApi.get_client_version()", "def get_product_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetProductVersion', self.handle)", "def _get_service_version(service):\n\n return int(service.split(':')[4])", "def get_version_number():\n return [0, 1, 0]", "def _get_client_info():\n client = get_distribution('reportportal-client')\n return client.project_name, client.version", "def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version", "def version(self):\n r = requests.get(\"http://%s/api/version\" %(self.url), headers=self.headers)\n if r.status_code == 200:\n return True, r.content\n else:\n return False, {}", "def version(self):\n return tuple(int(x) for x in self.tag.split('.'))", "def get_server_version(self):\n return self.client.getServerVersion().decode('utf-8')\n return self.client.getServerVersion().decode('utf-8')", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_server_version():\n url_address = 'https://raw.githubusercontent.com/muhammadfredo/FrMaya/master/FrMaya/version.py'\n url_data = urllib2.urlopen(url_address).read(200)\n result = re.search(r'(\\d+), (\\d+), (\\d+)', url_data, re.MULTILINE)\n if result:\n version_list = [int(v) for v in result.groups()]\n return version_list\n else:\n raise ValueError('Cannot get server version!!!')", "def api(self):\n res = self.client.call('/', 'GET', data='')\n self.logger.debug('Get openstack identity api versions: %s' % truncate(res))\n return res[0]", "def get_server_info(self):\n raise NotImplementedError('Database.get_version()')", "def getVersionInfo(cls):\n\n return __version__ + \"\\n\"", "def get_version(self):\n return 0", "def 
get_version_info():\n from docplex.cp.model import CpoModel\n try:\n with CpoSolver(CpoModel()) as slvr:\n return slvr.agent.version_info\n except:\n if config.context.log_exceptions:\n traceback.print_exc()\n pass\n return {}", "def get_version(self):\n return self.version", "def get_versions():\n version_py = os.path.join(os.path.split(__file__)[0], \"src/osmium/version.py\")\n v = {}\n with open(version_py) as version_file:\n # Execute the code in version.py.\n exec(compile(version_file.read(), version_py, 'exec'), v)\n\n return v['pyosmium_release'], v['libosmium_version'], v['protozero_version']", "def get(self):\n return self._version", "def version(self):\n return self._get(\"version\")", "def getVersion(self):\n return _libsbml.SBase_getVersion(self)", "def test_GetVersion(self):\n ret = wrap_xmlrpc_call(\n self.am_client.GetVersion, [], {}, settings.TIMEOUT)\n self.assertEqual(ret['geni_api'], 1)", "def getversion(self):\n return self.__version", "def client_version(self) -> str:\n return pulumi.get(self, \"client_version\")", "def get_version():\n return about.get_version()", "def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)", "def fastlyversion(args):\n pprint(api.version(service_id, args[0]).attrs)", "def get_server_version(self):\n return self.__aceQLHttpApi.get_server_version()", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def version(self):\n info = json.loads(self.get_info())\n return FapiInfo(info).version", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def get_component_versions(session):\n # type: (Session) -> Dict[str, Any]\n return _get_dict(session, \"/version\")", "def version(self):\n response = self._request_call('/version')\n return response.version_etcdserver", "def version(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBattery\")\n return self.proxy.version()", "def get_version(self):\n return self._version", "def get_version(self):\n return self._version", "def version(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALExpressiveListening\")\n return self.proxy.version()", "def get_version(self, params):\n return self.version", "def version(self):\n return self._call_txtrader_api('version', {})", "def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")", "def get_version():\n return '%d.%d.%d' % version_info", "def get_version():\n return 1", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def getVersion(self, *args):\n return _libsbml.MultiExtension_getVersion(self, *args)", "def api_version(self):\n\n\t\treturn self._api_version", "def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()", "def get_product_version(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetProductVersion', self.handle)", "def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version", "def getVersion(self):\n return _libsbml.SBasePlugin_getVersion(self)", "def _version_info(module):\n try:\n version = module.__version__\n except AttributeError:\n version = str(module)\n\n def cast_as_int(value):\n try:\n return int(value)\n except ValueError:\n return value\n\n return tuple(cast_as_int(x) for x in re.split('[.+]', 
version))", "def get_version(self):\n for retry in range(0, self.__RETRY_MAX_NUM):\n self._send_command(self.__COMMAND['get version'])\n\n size = int.from_bytes(self._port_handle.read(1), 'little')\n\n version = self._port_handle.read(1)\n commands = self._port_handle.read(size)\n if self._is_acknowledged():\n return version, commands\n else:\n raise DfuException('Get version failed after {} '\n 'retries.'.format(retry + 1))", "def get_version(self):\n return self.bot_data_file[\"version\"]", "def list_versions(self, service_id):\n return [self.fastly_cache[service_id]['service_details']]", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]", "def version (self, type='other'):\n return self._response.version (type)", "def version(self):", "def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def list_versions(self):\n if not USE_GCLOUD:\n return self.run_appcfg(['list_versions'])\n data = self.run_gcloud(['app', 'versions', 'list'])\n per_module = collections.defaultdict(list)\n for deployment in data:\n service = deployment['service'].encode('utf-8')\n version_id = deployment['id'].encode('utf-8')\n per_module[service].append(version_id)\n return dict(per_module)" ]
[ "0.6870342", "0.6858191", "0.67632425", "0.6721903", "0.66590106", "0.66541", "0.6623657", "0.65979606", "0.6570758", "0.65674907", "0.6525932", "0.6500156", "0.6479931", "0.6434038", "0.64299476", "0.64263636", "0.6411924", "0.64047104", "0.6403968", "0.6381451", "0.6369189", "0.6368622", "0.63448393", "0.6330122", "0.6307429", "0.62868714", "0.62491566", "0.6249098", "0.6234915", "0.62322116", "0.6232206", "0.62319773", "0.62237", "0.62230474", "0.62210727", "0.6212873", "0.61993456", "0.61712164", "0.6169144", "0.6155161", "0.6149212", "0.6121497", "0.60918313", "0.6083672", "0.6073378", "0.60379326", "0.6025878", "0.5999101", "0.5985154", "0.5976694", "0.59747195", "0.5970533", "0.595738", "0.59556854", "0.5955274", "0.5952793", "0.5946575", "0.5944886", "0.59423715", "0.59390104", "0.59382373", "0.59328365", "0.5930319", "0.59296656", "0.59254825", "0.59181195", "0.5916518", "0.5908288", "0.58922875", "0.5881458", "0.5879044", "0.58789366", "0.58789366", "0.587835", "0.58733106", "0.58701694", "0.58653986", "0.5858756", "0.58545023", "0.58507586", "0.58484566", "0.5841752", "0.5835161", "0.581125", "0.58070093", "0.5806798", "0.5785821", "0.5776696", "0.5772993", "0.5769088", "0.5766193", "0.5755611", "0.57551825", "0.5750117", "0.57459646", "0.57402533", "0.57402533", "0.57402533", "0.57402533", "0.57297087" ]
0.67635494
2
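The record above pairs a docstring about caching webservice version lookups with the code that implements it. A minimal, self-contained sketch of that per-client cache pattern follows; the DemoClient class, the fake _download() method, the hard-coded b"1.1.2" response, and the download_calls counter are hypothetical illustrations, not part of the dataset or of any real FDSN client.

# Hypothetical stand-in client demonstrating the cached version lookup shown in the
# record above. The simulated _download() replaces the real HTTP request.
FDSNWS = {"dataselect", "event", "station"}


class DemoClient:
    def __init__(self, available_services):
        self.services = set(available_services)
        self._version_cache = {}
        self.download_calls = 0  # counts simulated network requests

    def _download(self, service):
        # Stand-in for fetching "<service>/version" over HTTP.
        self.download_calls += 1
        return b"1.1.2"

    def get_webservice_version(self, service):
        if service not in FDSNWS:
            raise ValueError("Service '%s' is not a valid FDSN web service." % service)
        if service not in self.services:
            raise ValueError("Service '%s' not available for current client." % service)
        if service in self._version_cache:        # cache hit: no network call
            return self._version_cache[service]
        version = list(map(int, self._download(service).split(b".")))
        self._version_cache[service] = version    # store per client instance
        return version


client = DemoClient({"station", "event"})
print(client.get_webservice_version("station"))  # [1, 1, 2]
print(client.get_webservice_version("station"))  # same list, served from the cache
print(client.download_calls)                     # 1 -> only one simulated download

Because the cache lives on the instance, two different client objects each perform their own version request, which matches the "only be called once for each service per client object" wording in the query field.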