blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7ae4fc95910cd188fb054270ab173da3971e5b01 | 3d36b8608ad38c0c913d8fcff55b8715fbe83cbd | /rsd_lib/resources/v2_3/storage_service/storage_pool.py | 4412ac37b740c9fc63284296eff8624e985f1403 | [
"Apache-2.0"
] | permissive | linericyang/rsd-lib | b6d728a68ff203b261e8da98a46b4c21c0059abd | 898d2be08a1efcbdf72b0ba267f283f1386779bf | refs/heads/master | 2020-04-09T15:57:37.596369 | 2018-12-04T09:11:33 | 2018-12-04T09:11:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,913 | py | # Copyright 2018 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib.resources.v2_3.storage_service import volume
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class StatusField(base.CompositeField):
    # Maps the resource's 'Status' JSON sub-object (Redfish common Status).
    state = base.Field('State')
    health = base.Field('Health')
    health_rollup = base.Field('HealthRollup')
class CapacityField(base.CompositeField):
    # Maps the 'Capacity' JSON sub-object; all byte counters live under the
    # nested 'Data' object and are coerced to int (or None) by the adapter.
    allocated_bytes = base.Field(['Data', 'AllocatedBytes'],
                                 adapter=rsd_lib_utils.int_or_none)
    consumed_bytes = base.Field(['Data', 'ConsumedBytes'],
                                adapter=rsd_lib_utils.int_or_none)
    guaranteed_bytes = base.Field(['Data', 'GuaranteedBytes'],
                                  adapter=rsd_lib_utils.int_or_none)
    provisioned_bytes = base.Field(['Data', 'ProvisionedBytes'],
                                   adapter=rsd_lib_utils.int_or_none)
class CapacitySourcesField(base.ListField):
    # One entry per element of the 'CapacitySources' JSON array.
    # ProvidingDrives entries are resolved to their @odata.id identities;
    # defaults to an empty tuple when the property is absent.
    providing_drives = base.Field('ProvidingDrives', default=(),
                                  adapter=utils.get_members_identities)
    provided_capacity = CapacityField('ProvidedCapacity')
class IdentifierField(base.CompositeField):
    # Maps the 'Identifier' JSON sub-object (durable name, e.g. a UUID,
    # plus the format describing how to interpret it).
    durable_name = base.Field('DurableName')
    durable_name_format = base.Field('DurableNameFormat')
class StoragePool(base.ResourceBase):
    """A StoragePool resource of the RSD v2.3 storage service."""
    identity = base.Field('Id', required=True)
    """The storage pool identity string"""
    description = base.Field('Description')
    """The storage pool description string"""
    name = base.Field('Name')
    """The storage pool name string"""
    status = StatusField('Status')
    """The storage pool status"""
    capacity = CapacityField('Capacity')
    """The storage pool capacity info"""
    capacity_sources = CapacitySourcesField('CapacitySources')
    """The storage pool capacity source info"""
    identifier = IdentifierField('Identifier')
    """The durable name identifier of this storage pool"""
    def __init__(self, connector, identity, redfish_version=None):
        """A class representing a StoragePool

        :param connector: A Connector instance
        :param identity: The identity of the StoragePool resource
        :param redfish_version: The version of RedFish. Used to construct
            the object according to schema of the given version.
        """
        super(StoragePool, self).__init__(connector, identity, redfish_version)
    def _get_allocated_volumes_path(self):
        """Helper function to find the AllocatedVolumes path"""
        return utils.get_sub_resource_path_by(self, 'AllocatedVolumes')
    @property
    @utils.cache_it
    def allocated_volumes(self):
        """Property to provide reference to `AllocatedVolumes` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return volume.VolumeCollection(
            self._conn, self._get_allocated_volumes_path(),
            redfish_version=self.redfish_version)
    def _get_allocated_pools_path(self):
        """Helper function to find the AllocatedPools path"""
        return utils.get_sub_resource_path_by(self, 'AllocatedPools')
    @property
    @utils.cache_it
    def allocated_pools(self):
        """Property to provide reference to `AllocatedPools` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return StoragePoolCollection(
            self._conn, self._get_allocated_pools_path(),
            redfish_version=self.redfish_version)
class StoragePoolCollection(base.ResourceCollectionBase):
    @property
    def _resource_type(self):
        # Members of this collection are materialized as StoragePool objects.
        return StoragePool
    def __init__(self, connector, path, redfish_version=None):
        """A class representing a StoragePoolCollection

        :param connector: A Connector instance
        :param path: The canonical path to the StoragePool collection resource
        :param redfish_version: The version of RedFish. Used to construct
            the object according to schema of the given version.
        """
        super(StoragePoolCollection, self).__init__(connector, path,
                                                    redfish_version)
| [
"lin.a.yang@intel.com"
] | lin.a.yang@intel.com |
9d32ed2f9dd6961c4e4fca230d3d5833e4ae41b2 | b50bb3f3094adc3e08cfa6193d11668f921d9057 | /cinema/models/__init__.py | 5a5e004aa7ec9f16fa6345b3edd66ba467eaf23b | [
"BSD-3-Clause"
] | permissive | juliotrigo/restfulwebapi | d5b35956dea067b54185f836ff3e0208117d9387 | 1d97c1f23320e158f264dc265f8fb554debbc9c4 | refs/heads/master | 2016-09-06T08:07:38.552442 | 2014-02-16T15:39:43 | 2014-02-16T15:39:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
"""cinema models."""
from __future__ import unicode_literals
from cinema.models.film import Film
from cinema.models.film import OrderedFilm
from cinema.models.director import Director
from cinema.models.director import FilmDirector
__all__ = ['Film', 'OrderedFilm', 'Director', 'FilmDirector']
| [
"juliotrigocom@gmail.com"
] | juliotrigocom@gmail.com |
321d2502cf3b66216f826901a60edb84c2149516 | b1a1f2fac4160c108f3311638bf5bec1455c9411 | /barco.py | 73fca0953d181f9d4384351cc5a1b5c4a1dceab9 | [] | no_license | grajales100/ejercicio-medios-de-transporte | f4540374db072529d67f8bded73c081698f03043 | 7dce09e8e619f7e7323242ca3347945d6e988671 | refs/heads/main | 2023-07-10T14:07:25.733524 | 2021-08-19T15:22:39 | 2021-08-19T15:22:39 | 397,044,933 | 0 | 0 | null | 2021-08-19T15:22:41 | 2021-08-17T01:29:57 | Python | UTF-8 | Python | false | false | 468 | py | from medioTransporteAcuatico import MediosTransporteAcuatico
class Barco(MediosTransporteAcuatico):
    # A boat: an aquatic means of transport that can additionally anchor.
    def __init__(self, nombre, capacidad, medioDesplazamiento, marca):
        # Delegate attribute initialization (name, capacity, displacement
        # medium, brand) to the aquatic-transport base class.
        super().__init__(nombre, capacidad, medioDesplazamiento, marca)
    def anclar(self):
        # Announce that the boat is being anchored (user-facing Spanish text).
        print('estamos anclando el barco')
    def listar(self):
        # Print the boat's attributes: name, capacity, medium and brand.
        print('atributos del barco: nombre',self.nombre,'capacidad',self.capacidad,'medio',self.medioDesplazamiento,'marca',self.marca)
"osorio7980@gmail.com"
] | osorio7980@gmail.com |
9f545374f3e824e1a09b563f38393ee37958d084 | 3311b2bfb870b0e1f697be885db76aa6535baeb8 | /note/basic/02_字串與變數/test_str_08.py | 521763cd4d1f7961a5e1bd1aff8488195f807715 | [] | no_license | rogers228/Learn_python | 8f06ceceb553f47f59a2c6fe1f4dd5edc10a8079 | 494ef61545bc946e2ba105483b662beba204c263 | refs/heads/master | 2023-08-31T01:56:32.607799 | 2023-08-30T01:23:49 | 2023-08-30T01:23:49 | 245,773,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | a = 5 * 6
print(str(a))
| [
"61651257+rogers228@users.noreply.github.com"
] | 61651257+rogers228@users.noreply.github.com |
187d5e4a2c3a6f4bda7055934dce787d7d1d0339 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_352/ch6_2020_03_04_10_31_08_746058.py | c84cfa947f1fae70b02bc0450ea33744f1c14660 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def celsius_para_fahrenheit(fahrenheit,celsius):
fahrenheit=(celsius*9)/5+32
return fahrenheit
| [
"you@example.com"
] | you@example.com |
3e85724e61a95926d3f6ed12b671f971c38975c6 | e3c85bc726710672a073b43ae855422fc0f8f5e4 | /intro-regresstion-data.py | 317c88c5c5e1392d3e1da8e62825b7266f70d2c4 | [] | no_license | MyLearing/ML | 45cbf8ff8a58f12d53348e72e302d6c943498a5c | 65b8b265a9e9156734c16c193e5b6dcdd0385c3a | refs/heads/master | 2020-03-27T11:04:37.212308 | 2018-10-20T06:29:42 | 2018-10-20T06:29:42 | 146,463,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | import pandas as pd
# Linear-regression stock-forecasting walkthrough: pulls GOOGL data from
# Quandl, derives volatility features, and fits a LinearRegression model to
# predict 'Adj. Close' forecast_out trading days into the future.
import quandl, math
import numpy as np
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
df = quandl.get('WIKI/GOOGL')
# print(df.head())
# Keep only the adjusted (split/dividend-corrected) OHLCV columns.
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
print('-'*100)
print(df.head())
# High-to-close spread as a percentage: a rough intraday volatility measure.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close'])/df['Adj. Close']*100.0
# Daily percent change from open to close.
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open'])/df['Adj. Open']*100.0
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
# Sentinel for missing data: treated as an outlier instead of dropping rows.
df.fillna(-99999, inplace=True)
# Forecast horizon = 10% of the dataset length, rounded up.
forecast_out = int(math.ceil(0.1*len(df)))
print('-'*100)
print(forecast_out)
# Label = the forecast column shifted forecast_out rows into the future,
# so each row's label is the price forecast_out days later.
df['label'] = df[forecast_col].shift(-forecast_out)
# print('-'*100)
# print(df['label'])
print('-'*100)
print(df.head())
# Drop the trailing rows whose shifted label is NaN.
df.dropna(inplace=True)
print('-'*100)
print(df.head())
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
# Standardize features to zero mean / unit variance.
X = preprocessing.scale(X)
y = np.array(df['label'])  # NOTE(review): redundant; y was already assigned above.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
clf = LinearRegression(n_jobs=-1)
clf.fit(X_train, y_train)
# R^2 (coefficient of determination) on the held-out 20% test split.
accuracy = clf.score(X_test, y_test)
print(accuracy)
| [
"teerapat12345678@gmail.com"
] | teerapat12345678@gmail.com |
4c3ae1e23e39da011a0e61b665f565854ae2ca1c | 21bd2c7eabec557e6f1581cdaed4b05e1a753a91 | /monkeymemorytest/testmonkey/learning/subpackage2/__init__.py | 937df6fba34220fbe421c89a132c42808bde67df | [] | no_license | flyingfishercn/tools | f7e80770a375347b3a1f57fdbc49eab1e6c6fd08 | 0c4edd815dcc324fd1058432bb3f030b4d29fb46 | refs/heads/master | 2021-01-15T18:03:47.128479 | 2014-11-05T03:21:56 | 2014-11-05T03:21:56 | 11,115,854 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#created by zhiquan.huang on 13-12-28 at 下午10:11
#Pls contact flyingfishercn@gmail.com or huangzq@oppo.com
#Software Engineer
#Product Software Department
#Guangdong OPPO Mobile Telecommunications Corp.,Ltd
#Gaoxin park South District 1st Road shenzhen, China
print("path is",__path__) | [
"flyingfishercn@gmail.com"
] | flyingfishercn@gmail.com |
b2f36b58e3dcdcbf5a7174f13e87d5c8152975a6 | 6700ac0bc34860af8f7bb762466f0eb8fa1c4e53 | /GenericAPIView_mixins/2/project/settings.py | ba6ec070a6937d8706f3d41ad8f7a5a7082a3c78 | [] | no_license | Durgavasu58/DjangoRestFramework | 8d825459e8f99ec1ba48e97618bf10aaa26dc537 | 89195e89e1b40c8c14cb1bc2572fc036ea1b1452 | refs/heads/master | 2023-08-14T01:44:27.368313 | 2021-10-11T13:27:04 | 2021-10-11T13:27:04 | 413,710,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,401 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-^qk1r5tnc8b0^+8zkm0%f%li6q!s2-!pr6l#qicw!-x)-(yfhr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"durgavasu58@gmail.com"
] | durgavasu58@gmail.com |
2cc09c81e60862cf246d6b8773beb89d120f3c54 | 532e337751c44b89e68f0022966d6295116928a9 | /client/tests/filesystem_test.py | 9c7e531de3f22e663c3dd4952f1d1023c288e5c0 | [
"MIT"
] | permissive | laashub-soa/pyre-check | 8f1a2717888a22c15a7f6608e0d732e62fa060f9 | cc1a1b5c1007bf3e0e52e7f8b04c8e8fc365db44 | refs/heads/master | 2022-04-13T12:12:46.317095 | 2020-04-11T03:39:21 | 2020-04-11T03:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,467 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import errno
import fcntl
import os
import pathlib # noqa
import subprocess
import tempfile
import unittest
from contextlib import contextmanager
from unittest.mock import MagicMock, Mock, call, patch
from .. import __name__ as client_name, buck, commands, filesystem
from ..analysis_directory import SharedAnalysisDirectory
from ..commands.command import __name__ as command_name
from ..exceptions import EnvironmentException
from ..filesystem import (
Filesystem,
MercurialBackedFilesystem,
__name__ as filesystem_name,
_delete_symbolic_link,
acquire_lock,
acquire_lock_if_needed,
add_symbolic_link,
find_python_paths,
find_root,
remove_if_exists,
)
class FilesystemTest(unittest.TestCase):
def test_find_python_paths(self) -> None:
root = tempfile.mkdtemp()
# When there are no paths, returns empty list.
self.assertListEqual(find_python_paths(root), [])
def create_file(name: str) -> None:
with open(os.path.join(root, name), "w+"):
pass
def create_symlink(target: str, source: str) -> None:
os.symlink(os.path.join(root, target), os.path.join(root, source))
create_file("a.py")
create_file("b.pyi")
create_file("c.cpp")
create_symlink("a.py", "link1.py")
create_symlink("dangling.py", "link2.py")
create_symlink("c.cpp", "link3.py")
create_symlink("a.py", "link4.cpp")
os.mkdir(os.path.join(root, "mypy"))
os.mkdir(os.path.join(root, "scipyi"))
os.mkdir(os.path.join(root, "spy.py"))
create_symlink("spy.py", "directory_symlink.py")
create_file("mypy/my.py")
create_file("scipyi/sci.pyi")
create_symlink("mypy/my.py", "mypy/another.pyi")
create_symlink("scipyi/sci.pyi", "scipyi/another.py")
actual_paths = sorted(
os.path.relpath(path, root) for path in find_python_paths(root)
)
self.assertEqual(
actual_paths,
[
"a.py",
"b.pyi",
"directory_symlink.py",
"link1.py",
"link2.py",
"link3.py",
"mypy/another.pyi",
"mypy/my.py",
"scipyi/another.py",
"scipyi/sci.pyi",
],
)
def test_remove_if_exists(self) -> None:
# File removal.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
os_remove.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
# Directory removal.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
shutil_rmtree.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
# Both throw.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
os_remove.side_effect = FileNotFoundError()
shutil_rmtree.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
@patch("fcntl.lockf")
def test_acquire_lock(self, lock_file: Mock) -> None:
(_, path) = tempfile.mkstemp()
lockfile_file_descriptor = None
with acquire_lock(path, blocking=False) as file_descriptor:
lockfile_file_descriptor = file_descriptor
with acquire_lock(path, blocking=True):
pass
lock_file.assert_has_calls(
[
call(lockfile_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB),
call(lockfile_file_descriptor, fcntl.LOCK_UN),
call(lockfile_file_descriptor, fcntl.LOCK_EX),
call(lockfile_file_descriptor, fcntl.LOCK_UN),
]
)
def fail_on_exclusive(_, lock_kind):
if lock_kind == fcntl.LOCK_EX | fcntl.LOCK_NB:
raise OSError()
return None
lock_file.side_effect = fail_on_exclusive
with self.assertRaises(OSError):
with acquire_lock(path, blocking=False):
pass
@patch.object(filesystem, "acquire_lock")
def test_acquire_lock_if_needed(self, acquire_lock: MagicMock) -> None:
acquire_lock_if_needed("/some/path", blocking=True, needed=True)
acquire_lock.assert_called_once()
@patch.object(filesystem, "acquire_lock")
def test_acquire_lock_if_needed__not_needed(self, acquire_lock: MagicMock) -> None:
acquire_lock_if_needed("/some/path", blocking=True, needed=False)
acquire_lock.assert_not_called()
@patch("shutil.rmtree")
def test_cleanup(self, rmtree) -> None:
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
shared_analysis_directory.cleanup()
rmtree.assert_not_called()
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"], [], isolate=True
)
shared_analysis_directory.cleanup()
rmtree.assert_called_with(shared_analysis_directory.get_root())
def test_filesystem_list_bare(self):
filesystem = Filesystem()
with patch.object(subprocess, "run") as run:
filesystem.list(".", [".pyre_configuration.local"])
run.assert_has_calls(
[
call(
["find", ".", "(", "-path", "./.pyre_configuration.local", ")"],
stdout=subprocess.PIPE,
cwd=".",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
with patch.object(subprocess, "run") as run:
filesystem.list("/root", ["**/*.py", "foo.cpp"], exclude=["bar/*.py"])
run.assert_has_calls(
[
call(
[
"find",
".",
"(",
"-path",
"./**/*.py",
"-or",
"-path",
"./foo.cpp",
")",
"-and",
"!",
"(",
"-path",
"./bar/*.py",
")",
],
stdout=subprocess.PIPE,
cwd="/root",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
def fail_command(arguments, **kwargs):
return subprocess.CompletedProcess(
args=[], returncode=1, stdout="".encode("utf-8")
)
with patch.object(subprocess, "run") as run:
run.side_effect = fail_command
self.assertEqual([], filesystem.list(".", [".pyre_configuration.local"]))
run.assert_has_calls(
[
call(
["find", ".", "(", "-path", "./.pyre_configuration.local", ")"],
stdout=subprocess.PIPE,
cwd=".",
)
]
)
def test_filesystem_list_mercurial(self):
filesystem = MercurialBackedFilesystem()
with patch.object(subprocess, "run") as run:
filesystem.list(".", [".pyre_configuration.local"])
run.assert_has_calls(
[
call(
["hg", "files", "--include", ".pyre_configuration.local"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=".",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
with patch.object(subprocess, "run") as run:
filesystem.list("/root", ["**/*.py", "foo.cpp"], exclude=["bar/*.py"])
run.assert_has_calls(
[
call(
[
"hg",
"files",
"--include",
"**/*.py",
"--include",
"foo.cpp",
"--exclude",
"bar/*.py",
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd="/root",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
def fail_command(arguments, **kwargs):
return subprocess.CompletedProcess(
args=[], returncode=1, stdout="".encode("utf-8")
)
with patch.object(subprocess, "run") as run:
run.side_effect = fail_command
self.assertEqual([], filesystem.list(".", [".pyre_configuration.local"]))
run.assert_has_calls(
[
call(
["hg", "files", "--include", ".pyre_configuration.local"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=".",
)
]
)
@patch.object(filesystem, "_compute_symbolic_link_mapping")
@patch("os.getcwd")
@patch.object(subprocess, "check_output")
def test_get_scratch_directory(self, check_output, getcwd, compute_symbolic_links):
# No scratch, no local configuration
check_output.side_effect = FileNotFoundError
getcwd.return_value = "default"
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "default/.pyre")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "default/.pyre/shared_analysis_directory")
# Scratch, no local configuration
check_output.side_effect = None
check_output.return_value = "/scratch\n".encode("utf-8")
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "/scratch")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "/scratch/shared_analysis_directory")
# No scratch, using local configuration
check_output.side_effect = FileNotFoundError
getcwd.return_value = "default"
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"],
[],
filter_paths={"path/to/local"},
local_configuration_root="path/to/local",
)
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "default/.pyre")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "default/.pyre/path/to/local")
# Scratch, using local configuration
check_output.side_effect = None
check_output.return_value = "/scratch\n".encode("utf-8")
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"],
[],
filter_paths={"path/to/local"},
local_configuration_root="path/to/local",
)
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "/scratch")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "/scratch/path/to/local")
@patch.object(tempfile, "mkdtemp", return_value="/tmp/pyre_tmp_xyz")
@patch.object(filesystem, "find_root", return_value="/buck_root")
@patch("os.makedirs")
@patch(filesystem_name + ".acquire_lock")
@patch.object(SharedAnalysisDirectory, "get_root", return_value="/analysis_root")
def test_prepare(self, get_root, acquire_lock, makedirs, find_root, mkdtemp):
@contextmanager
def acquire(*args, **kwargs):
yield
with patch.object(SharedAnalysisDirectory, "_clear") as clear, patch.object(
SharedAnalysisDirectory, "_merge"
) as merge:
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
acquire_lock.side_effect = acquire
shared_analysis_directory.prepare()
merge.assert_has_calls([call()])
clear.assert_has_calls([call()])
@patch("{}.Path".format(command_name))
@patch("{}.Path.mkdir".format(command_name))
@patch("os.path.realpath", side_effect=lambda path: "realpath({})".format(path))
@patch("os.getcwd", return_value="/root")
@patch("os.path.exists", return_value=True)
@patch("{}.find_project_root".format(command_name), return_value="/root/local")
@patch("{}.find_local_root".format(command_name), return_value=None)
@patch("os.chdir")
def test_resolve_source_directories(
self,
chdir,
find_local_root,
find_project_root,
exists,
cwd,
realpath,
path_mkdir,
path,
) -> None:
arguments = MagicMock()
arguments.source_directories = []
arguments.command = commands.Check
arguments.use_buck_builder = False
arguments.ignore_unbuilt_dependencies = False
arguments.local_configuration = None
arguments.logger = None
configuration = MagicMock()
configuration.source_directories = []
configuration.local_configuration_root = "/root/local"
configuration.use_buck_builder = False
configuration.ignore_unbuilt_dependencies = False
with self.assertRaises(EnvironmentException):
buck_builder = buck.SimpleBuckBuilder()
analysis_directory = SharedAnalysisDirectory(
[],
[],
original_directory="/root",
filter_paths=set(),
buck_builder=buck_builder,
)
analysis_directory._resolve_source_directories()
# Arguments override configuration.
with patch.object(
buck, "generate_source_directories", return_value=[]
) as buck_source_directories:
arguments.source_directories = ["arguments_source_directory"]
configuration.source_directories = ["configuration_source_directory"]
buck_builder = buck.SimpleBuckBuilder()
analysis_directory = SharedAnalysisDirectory(
["some_source_directory"],
["configuration_source_directory"],
original_directory="/root",
filter_paths=set(),
buck_builder=buck_builder,
)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with(
{"configuration_source_directory"}
)
self.assertEqual(
analysis_directory._source_directories, {"some_source_directory"}
)
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
cwd.return_value = "/"
original_directory = "/root"
arguments.source_directories = []
arguments.targets = ["arguments_target"]
configuration.source_directories = ["configuration_source_directory"]
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/arguments_target)"},
)
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
# same test as above, but Start instead of Check; build should be False
cwd.return_value = "/"
original_directory = "/root"
command = commands.Start(
arguments,
original_directory,
terminal=False,
store_type_check_resolution=False,
use_watchman=True,
incremental_style=commands.command.IncrementalStyle.FINE_GRAINED,
configuration=configuration,
)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/arguments_target)"},
)
# Restart and start always rebuild buck targets
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
cwd.side_effect = ["/", "/", "/"]
original_directory = "/root"
command = commands.Start(
arguments,
original_directory,
terminal=False,
store_type_check_resolution=False,
use_watchman=True,
incremental_style=commands.command.IncrementalStyle.FINE_GRAINED,
configuration=configuration,
)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
command = commands.Restart(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
# Configuration is picked up when no arguments provided.
with patch.object(
buck,
"generate_source_directories",
return_value=["configuration_source_directory"],
) as buck_source_directories:
cwd.return_value = "/"
original_directory = "/root"
arguments.source_directories = []
arguments.targets = []
arguments.command = commands.Check
configuration.targets = ["configuration_target"]
configuration.source_directories = []
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"configuration_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/configuration_source_directory)"},
)
# Files are translated relative to project root
with patch.object(
buck, "generate_source_directories", return_value=["."]
) as buck_source_directories:
cwd.side_effect = ["/", "/"]
original_directory = "/root"
arguments.source_directories = []
arguments.targets = []
configuration.targets = ["."]
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
self.assertEqual(
analysis_directory._source_directories, {"realpath(root/.)"}
)
@patch("os.path.isfile")
def test_find_configuration(self, os_mock_isfile) -> None:
os_mock_isfile.side_effect = [False, False, False, True]
self.assertEqual(find_root("/a/b/c/d", "configuration"), "/a")
os_mock_isfile.side_effect = [True]
self.assertEqual(find_root("/a", "configuration"), "/a")
os_mock_isfile.side_effect = [False, False]
self.assertEqual(find_root("/a/b", "configuration"), None)
@patch("os.unlink")
def test_delete_symbolic_link(self, unlink):
# delete succeeds
unlink.return_value = None
_delete_symbolic_link("exists")
unlink.assert_called_once_with("exists")
# delete fails
unlink.reset_mock()
unlink.side_effect = OSError
self.assertRaises(OSError, _delete_symbolic_link, "exception_occurs")
unlink.assert_called_once_with("exception_occurs")
    @patch("os.unlink")
    @patch("os.symlink")
    @patch("os.makedirs")
    def test_add_symbolic_link(self, makedirs, symlink, unlink):
        """add_symbolic_link: creates parent dirs, replaces an existing link,
        and leaves other symlink failures alone (no unlink)."""
        add_symbolic_link("/a/link", "file.py")
        # standard use-cases
        makedirs.assert_called_once_with("/a")
        symlink.assert_called_once_with("file.py", "/a/link")
        symlink.reset_mock()
        makedirs.reset_mock()
        add_symbolic_link("/a/b/c/d/link", "file.py")
        makedirs.assert_called_once_with("/a/b/c/d")
        symlink.assert_called_once_with("file.py", "/a/b/c/d/link")
        # symlink exists: first call raises EEXIST, retry after unlink succeeds
        symlink.reset_mock()
        makedirs.reset_mock()
        error = OSError()
        error.errno = errno.EEXIST
        symlink.side_effect = [error, None]
        add_symbolic_link("/a/b/link", "file.py")
        makedirs.assert_called_once_with("/a/b")
        symlink.assert_called_with("file.py", "/a/b/link")
        unlink.assert_called_once_with("/a/b/link")
        # symlink fails with a non-EEXIST error: swallowed, no unlink attempted
        symlink.reset_mock()
        makedirs.reset_mock()
        unlink.reset_mock()
        symlink.side_effect = OSError()
        add_symbolic_link("/a/link", "file.py")
        makedirs.assert_called_once_with("/a")
        symlink.assert_called_once_with("file.py", "/a/link")
        unlink.assert_not_called()
    @patch.object(filesystem, "find_paths_with_extensions")
    @patch.object(
        os.path,
        "realpath",
        side_effect=lambda path: path.replace("ANALYSIS_ROOT", "LOCAL_ROOT"),
    )
    def test_compute_symbolic_link_mapping(self, realpath, find_paths_with_extensions):
        """Each discovered file maps local (realpath) path -> analysis-root path."""
        find_paths_with_extensions.return_value = [
            "ANALYSIS_ROOT/a.py",
            "ANALYSIS_ROOT/b.thrift",
            "ANALYSIS_ROOT/subX/d.pyi",
            "ANALYSIS_ROOT/subX/e.py",
            "ANALYSIS_ROOT/subY/subZ/g.pyi",
        ]
        self.assertDictEqual(
            filesystem._compute_symbolic_link_mapping(
                "ANALYSIS_ROOT", ["py", "pyi", "thrift"]
            ),
            {
                "LOCAL_ROOT/a.py": "ANALYSIS_ROOT/a.py",
                "LOCAL_ROOT/b.thrift": "ANALYSIS_ROOT/b.thrift",
                "LOCAL_ROOT/subX/d.pyi": "ANALYSIS_ROOT/subX/d.pyi",
                "LOCAL_ROOT/subX/e.py": "ANALYSIS_ROOT/subX/e.py",
                "LOCAL_ROOT/subY/subZ/g.pyi": "ANALYSIS_ROOT/subY/subZ/g.pyi",
            },
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b35629b13b4288e9f2d3b4ff7f9c2a7af9103d2b | 2d055595705582784624c6bde5abf1b3854b34a9 | /tweets/mixins.py | e35d46e2850f0171dca2e7dc62983077149c37d8 | [] | no_license | Anubhav722/twitter_clone | fc36568cb6b32ce1942923ffcf55ebcce714e53f | f76190b8f5f3ac8dfad87d35b2650c5285e5082b | refs/heads/master | 2021-05-01T08:25:42.857828 | 2017-02-07T14:32:09 | 2017-02-07T14:32:09 | 79,710,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | from django import forms
from django.forms.utils import ErrorList
class FormUserNeededMixin(object):
    """Mixin for form views: stamps the logged-in user onto form.instance."""
    def form_valid(self, form):
        # Reject anonymous submissions with a non-field error.
        if not self.request.user.is_authenticated():
            form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['User must be logged in to continue.'])
            return self.form_invalid(form)
        form.instance.user = self.request.user
        return super(FormUserNeededMixin, self).form_valid(form)
class UserOwnerMixin(object):
    """Mixin for form views: only the owning user may modify the object."""
    def form_valid(self, form):
        # Reject edits from anyone who is not the record's owner.
        if form.instance.user != self.request.user:
            form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['This user is not allowed to change the data'])
            return self.form_invalid(form)
        return super(UserOwnerMixin, self).form_valid(form)
"anubhavs286@gmail.com"
] | anubhavs286@gmail.com |
368885046e81cec366f4572840a719040d759a31 | a81c36d350be0aeb6769fe25cf6084525841874e | /pony/orm/examples/demo.py | 721fb5f3b5b1f97901b6f8f31f2ecc31c04184fd | [
"Apache-2.0"
] | permissive | ponyorm/pony | 6e9bb581fb87c121ab21a911b2a185e7044fd945 | 27593ffc74184bc334dd301a86fc5f40fdd3ad87 | refs/heads/main | 2023-08-29T09:50:10.246229 | 2022-12-15T14:28:13 | 2022-12-15T14:28:13 | 8,087,816 | 3,117 | 290 | Apache-2.0 | 2023-09-05T02:20:16 | 2013-02-08T04:56:00 | Python | UTF-8 | Python | false | false | 2,361 | py | from __future__ import absolute_import, print_function
from decimal import Decimal
from pony.orm import *
# SQLite-backed Database; create_db=True creates demo.sqlite if it is missing.
db = Database("sqlite", "demo.sqlite", create_db=True)
class Customer(db.Entity):
    id = PrimaryKey(int, auto=True)  # auto-incremented surrogate key
    name = Required(str)
    email = Required(str, unique=True)  # unique contact address
    orders = Set("Order")  # reverse side of Order.customer
class Order(db.Entity):
    id = PrimaryKey(int, auto=True)
    total_price = Required(Decimal)
    customer = Required(Customer)
    items = Set("OrderItem")  # line items belonging to this order
class Product(db.Entity):
    id = PrimaryKey(int, auto=True)
    name = Required(str)
    price = Required(Decimal)
    items = Set("OrderItem")  # order lines referencing this product
class OrderItem(db.Entity):
    quantity = Required(int, default=1)
    order = Required(Order)
    product = Required(Product)
    PrimaryKey(order, product)  # composite key: one line per (order, product)
# Echo the generated SQL to stdout, then create tables for the entities above.
sql_debug(True)
db.generate_mapping(create_tables=True)
def populate_database():
    """Seed the demo database with sample customers, products and orders."""
    smith = Customer(name='John Smith', email='john@example.com')
    reed = Customer(name='Matthew Reed', email='matthew@example.com')
    qin = Customer(name='Chuan Qin', email='chuanqin@example.com')
    lawson = Customer(name='Rebecca Lawson', email='rebecca@example.com')
    Customer(name='Oliver Blakey', email='oliver@example.com')
    kindle = Product(name='Kindle Fire HD', price=Decimal('284.00'))
    ipad = Product(name='Apple iPad with Retina Display', price=Decimal('478.50'))
    Product(name='SanDisk Cruzer 16 GB USB Flash Drive', price=Decimal('9.99'))
    kingston = Product(name='Kingston DataTraveler 16GB USB 2.0', price=Decimal('9.98'))
    Product(name='Samsung 840 Series 120GB SATA III SSD', price=Decimal('98.95'))
    crucial = Product(name='Crucial m4 256GB SSD SATA 6Gb/s', price=Decimal('188.67'))
    order_one = Order(customer=smith, total_price=Decimal('292.00'))
    OrderItem(order=order_one, product=kindle)
    OrderItem(order=order_one, product=kingston, quantity=2)
    order_two = Order(customer=smith, total_price=Decimal('478.50'))
    OrderItem(order=order_two, product=ipad)
    order_three = Order(customer=reed, total_price=Decimal('680.50'))
    OrderItem(order=order_three, product=ipad)
    OrderItem(order=order_three, product=kingston, quantity=2)
    OrderItem(order=order_three, product=crucial)
    order_four = Order(customer=qin, total_price=Decimal('99.80'))
    OrderItem(order=order_four, product=kingston, quantity=10)
    order_five = Order(customer=lawson, total_price=Decimal('722.00'))
    OrderItem(order=order_five, product=kindle)
    OrderItem(order=order_five, product=ipad)
    commit()
| [
"alexander.kozlovsky@gmail.com"
] | alexander.kozlovsky@gmail.com |
bc871b75ba4a48cd1baa270703581d5d3cbdfaaf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02646/s014246720.py | f54f26a26b36440b0006e25566aa3480de114870 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import sys
A,V = map(int, input().split())
B,W = map(int, input().split())
T = int(input())
#これO(1)で解けそう。
# スピードが同じ or 逃げる方のスピードが速いと無理
if V <= W:
print("NO")
sys.exit()
# 鬼の方がスピードが速い場合で場合訳
distance_AB = abs(A-B)
speed_AB = abs(V-W)
if speed_AB * T >= distance_AB:
print("YES")
else:
print("NO") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e6ad1f74978b89d8985e666da0a4899d971f9abe | 8d0993fd4ef455f044f05d341bd5b21b640de85b | /apps/iocBoot/iocdclv/generator/gen.py | 89787ccf23220ea48931018ce4854dcdd03cc2fe | [] | no_license | andrea-celentano/clas12-epics | b6e3fb92b571417ac52a931b69217e89b65449af | b0641fae346fc8c166f61045ec84b53fc156c120 | refs/heads/master | 2021-05-07T08:04:59.961543 | 2017-02-08T14:34:59 | 2017-02-08T14:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python
import os
templateFile='./st-template.cmd'
mappingFile='./mapping.txt'
template=open(templateFile,'r').readlines()
mapping={}
for line in open(mappingFile,'r').readlines():
[sr,host]=line.strip().split()
mapping[sr]=host
for ss in range(1,7):
for rr in range(1,4):
sr='S%dR%d'%(ss,rr)
host=mapping[sr]
outDir='../../iocdclv_'+sr
outFile=outDir+'/st.cmd'
if not os.path.exists(outDir): os.makedirs(outDir)
if os.path.exists(outFile):
print 'File already exists: '+outFile
continue
outFile=open(outFile,'w')
for xx in template:
xx=xx.replace('xxxGPIBHOSTxxx',host)
xx=xx.replace('xxxSECTORxxx',str(ss))
xx=xx.replace('xxxREGIONxxx',str(rr))
outFile.write(xx)
outFile.close()
| [
"baltzell@gmx.com"
] | baltzell@gmx.com |
10821ff07ed84bb3bcceba9260ce598a9dec6ece | dd2bff9202911f9d6b049fbdffe7ebdc702be3db | /ELTask/elcode/jointcnnrecordimplementation.py | 3aa394115a1f987d5988f903aef4e4326f318d28 | [] | no_license | IanGross/BioXrefEntityLinker | 12246c194fd3d82f151828cb240080571b6cc0b7 | 2b273c59c157d2e8d240415d72e2e8f5a60a39fc | refs/heads/master | 2020-03-11T15:42:28.189167 | 2018-08-27T14:25:19 | 2018-08-27T14:25:19 | 130,093,491 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,983 | py | from random import shuffle
import numpy as np
import tensorflow as tf
import data_constants
import pickle
# Pickled (V x d) word-embedding matrix produced upstream.
EMBEDFILE_NAME = "../dumps/word_embeddings.pkl"
# Embedding dimensionality d; must match the pickled matrix's second axis.
EMBEDDING_SIZE = 250
#Need to load the ont feat dict now
# embedding_fname is numpy matrix of embeddings (V x d) where V is vocab size and d is embedding dim
def _load_embeddings(embedding_fname):
    """Load a pickled (V x d) embedding matrix; return (shape, tf initializer)."""
    with open(embedding_fname, 'rb') as handle:
        matrix = pickle.load(handle)
    return matrix.shape, tf.constant_initializer(matrix)
def _parse_el_example(array_feats, array_feat_types, quant_feats):
    """
    Parse a single serialized example from a tfrecord file.

    Args:
        array_feats: dict of raw byte-string feature tensors keyed by name.
        array_feat_types: dict mapping the same keys to their decode dtypes.
        quant_feats: dict mapping the same keys to the decoded 1-D length.

    Returns:
        Tuple of decoded, reshaped 1-D tensors, in sorted-key order so the
        output positions are deterministic.
    """
    out_example = []
    # Removed the unused n_feat local from the original loop.
    for key in sorted(array_feats.keys()):
        decoded = tf.decode_raw(array_feats[key], array_feat_types[key])
        decoded = tf.reshape(decoded, [quant_feats[key]])
        out_example.append(decoded)
    return tuple(out_example)
def get_data_itr(**kwargs):
    """
    Build a tf.data input pipeline over tfrecord files and return
    (initializable_iterator, next_element_op).

    Keys prefixed with 'n_' in the serialized schema carry per-feature
    lengths; the remaining keys carry the raw feature bytes. The same
    function serves sentence and ontology records via the *_keys kwargs.
    """
    batch_size = kwargs.pop('batch_size', 4)
    num_threads = kwargs.pop('num_threads', 8)
    fnames = kwargs.pop('fnames', [])
    q_capacity = kwargs.pop('q_capacity', 256)
    shuff = kwargs.pop('shuffle_input', True)
    serialized_keys = kwargs.pop('serialized_keys', data_constants.READER_FEAT_KEY_TYPE_MAP)
    out_type_keys = kwargs.pop('out_type_keys', data_constants.TF_FEAT_KEY_TYPE_MAP)
    feature_shapes = kwargs.pop('feature_shapes', data_constants.DATA_SHAPES)
    #This can still be invariant
    # Split schema into length keys ('n_...') and array keys (everything else).
    quantity_keys = {key: quant_type for key, quant_type in serialized_keys.items() if key[:2] == 'n_'}
    arr_key_type = {key: quant_type for key, quant_type in out_type_keys.items() if key[:2] != 'n_'}
    with tf.name_scope('input'):
        with tf.device('/cpu:0'):
            features_dict = {k: tf.FixedLenFeature((), t) for k, t in serialized_keys.items()}
            def _parser(serialized_example):
                # Decode one serialized Example into a tuple of tensors.
                features = tf.parse_single_example(serialized_example,
                                                   features=features_dict)
                quant_features = {}
                for k, t in quantity_keys.items():
                    # Strip the 'n_' prefix so lengths line up with array keys.
                    quant_features[k[2:]] = tf.to_int32(features[k])
                arr_features = {}
                for k, t in arr_key_type.items():
                    arr_features[k] = features[k]
                exam = _parse_el_example(arr_features,
                                         arr_key_type,
                                         quant_features)
                return exam
            dataset = tf.data.TFRecordDataset(fnames)
            if num_threads > 1:
                dataset = dataset.map(_parser, num_threads=num_threads)
            else:
                dataset = dataset.map(_parser)
            if shuff:
                dataset = dataset.shuffle(buffer_size=q_capacity)
            # Pad each batched feature to its configured static shape,
            # in the same sorted-key order _parse_el_example emits.
            ordered_arr_key_type = sorted(arr_key_type.keys())
            pad_shape = tuple([feature_shapes[arr_k] for arr_k in ordered_arr_key_type])
            dataset = dataset.padded_batch(batch_size=batch_size,
                                           padded_shapes=pad_shape)
            itr = dataset.make_initializable_iterator()
            return itr, itr.get_next()
if __name__ == '__main__':
    def F(a, b):
        # Identity pair helper; appears unused in the rest of this script.
        return a, b
    #Defining helper functions for forward prop
    def conv2d(x, W):
        # Stride-1, VALID-padded 2-D convolution with explicit weights W.
        # NOTE(review): not called below — the model uses tf.layers.conv2d.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
    #Defining a template function to reuse for 2x2 pooling
    def max_pool_2x2(x):
        # 2x2 max pool, stride 2, VALID padding.
        # NOTE(review): not called below — pooling is done via tf.reduce_max.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='VALID')
'''
CNN with multiple filter sizes and convolves over same input
'''
    def mul_filtercnn(filter_sizes, input_data, name_prefix):
        """Run one 2-filter conv per size in *filter_sizes* over *input_data*
        (batch, seq, EMBEDDING_SIZE, 1 — assumed; TODO confirm), max-pool each
        over the sequence axis, concatenate, and return the result reshaped to
        (batch, len(filter_sizes) * num_filters, 1)."""
        #Need the maximum sequence length amongst the batch
        #sequence_length = 0
        #sequence_length = input_data.get_shape().as_list()
        #print("sequence_length ", input_data.shape)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        num_filters = 2
        print("Shape of input data ", input_data.shape)
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("%s_conv-maxpool-%s" % (name_prefix, filter_size)):
                # Convolution Layer
                conv = tf.layers.conv2d(\
                    inputs=input_data,\
                    filters=num_filters,\
                    kernel_size=[filter_size,EMBEDDING_SIZE],\
                    padding="VALID",\
                    activation=tf.nn.sigmoid)
                '''
                Applying 6 filters, so these should give two filter values for each filter size?
                Batch, sen_len, dim, 2
                Pooling needs to choose the maximal value from each of these two filters
                '''
                print(" Shape of conv ",conv.shape)
                # Apply nonlinearity
                # Maxpooling over the outputs
                #pooled = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)
                # Max-pooling over the outputs
                # pooled = tf.nn.max_pool(
                #     conv,
                #     ksize=[1, 14 - filter_size + 1, 1, 1],
                #     strides=[1, 1, 1, 1],
                #     padding='VALID')
                # Global max over the time axis (axis 1).
                pooled = tf.reduce_max(conv, axis=[1])
                print("Pooled shape ",pooled.shape)
                pooled_outputs.append(pooled)
        print(" Pooled ",pooled_outputs)
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        h_pool = tf.concat(pooled_outputs,axis=2)
        h_pool_flat = tf.expand_dims(tf.reshape(h_pool, [-1, num_filters_total]),-1)
        print("Shape of h_pool final ",h_pool_flat)
        return h_pool_flat
#Sentence and ont need to share same weights, at some point function needs to be consolidated
    def sentmodel(sent_data):
        """
        Sentence tower: multi-size CNN over the embedded sentence followed by
        a 1x1 conv, producing the sentence representation tensor "fc_sent".
        """
        # with tf.variable_scope("sent", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("sent"):
            # Add a trailing channel axis for conv2d.
            sent_data = tf.expand_dims(sent_data, -1)
            filter_sizes = [2, 3, 5]
            filter_bitsent = mul_filtercnn(filter_sizes, sent_data, 'sent')
            # 1x1 conv projects the pooled features to a single channel.
            fc_sent = tf.identity(tf.layers.conv1d(\
                inputs=filter_bitsent,\
                filters=1,\
                kernel_size=1,\
                padding="same",\
                activation=tf.nn.sigmoid),name="fc_sent")
            return fc_sent
    def ontmodel(ont_data):
        '''
        Ontology tower: same structure as sentmodel but with its own variable
        scope ("ont") and larger filter sizes; returns the tensor "fc_ont".
        '''
        # with tf.variable_scope("ont", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("ont"):
            # Add a trailing channel axis for conv2d.
            ont_data = tf.expand_dims(ont_data, -1)
            filter_sizesont = [3, 5, 7]
            filter_bitont = mul_filtercnn(filter_sizesont, ont_data, 'ont')
            # 1x1 conv projects the pooled features to a single channel.
            fc_ont = tf.identity(tf.layers.conv1d(\
                inputs=filter_bitont,\
                filters=1,\
                kernel_size=1,\
                padding="same",\
                activation=tf.nn.sigmoid),name="fc_ont")
            return fc_ont
'''
For sentence network - 2 Conv[12 (3 x 3), 12 (3 x 3)], 1 Pool , Fully dense 5112
For ont network - 3 Conv [50 (7x7), 50 (5x5), 75 (3 x 3)], 2 Pool, Fully dense 5112
Both will share logit
loss = max(cos(fc1,fc2))
Generate the output value given the weight of the last ouput layer
and data(either training or testing)
The softmax method will be applied on this matrix
'''
def model(sent_data,ont_data):
fc_sent = sentmodel(sent_data)
fc_ont = ontmodel(ont_data)
return (fc_sent, fc_ont)
'''
Calculate the cosine similarity between ont rep and sent rep
Calc loss for every data point, and take the mean
Find cosine sim between a single point and its correct prediction
How do we find the actual prediction?
'''
    def calc_loss(sent_rep,ont_rep):
        """Mean cosine distance between L2-normalized sentence and ontology
        representations (squeezing the trailing channel axis first)."""
        normalised_sent = tf.nn.l2_normalize(tf.squeeze(sent_rep, [-1]), axis=1)
        normalised_ont = tf.nn.l2_normalize(tf.squeeze(ont_rep, [-1]), axis=1)
        #Averaging out the loss
        #return tf.losses.cosine_distance(normalised_sent,normalised_ont,dim=None,weights=1.0,scope=None,axis=1,loss_collection=tf.GraphKeys.LOSSES,reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
        return tf.losses.cosine_distance(normalised_sent, normalised_ont, dim=None, scope=None, axis=1)
        #This is going completely wrong
        #return 1 - tf.reduce_mean(tf.matmul(normalised_sent,normalised_ont,transpose_b=True))
'''
Accuracy is computed by comparing the predicted and actual labels
argmax is used on the 1-K encoded arrays for the same
Then the mean value of the number of matches is returned
Since the softmax is not applied during the training phase a softmax needs to be applied before checking for accuracy
Take the output pred, and check cosine sim between the y and all other terms; run argmax and find the highest
'''
def accuracy(sent_reps, ont_reps, expected):
sent_repsnorm = tf.nn.l2_normalize(tf.squeeze(sent_rep, [-1]), axis=1)
ont_reps = tf.nn.l2_normalize(tf.squeeze(ont_rep, [-1]), axis=1)
# sent_reps = tf.Print(sent_reps, [tf.shape(sent_reps)], message="sent_reps", summarize=1000)
# ont_reps = tf.Print(ont_reps, [tf.shape(ont_reps)], message="ont_reps", summarize=1000)
products = tf.matmul(sent_repsnorm, tf.transpose(ont_reps))
# products = tf.Print(products, [tf.shape(products)], message="products", summarize=1000)
correct_prediction = tf.equal(tf.argmax(products, 1), tf.argmax(expected, 1))
accuracy_val = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
return accuracy_val*100
'''
Find per class Classification Error
Find the argmax and compare the indices of the argmax at a per class level
Returns a dictionary of the losses against digits
This will later be leveraged to find Classification errors for each class
'''
def calcerror_perclass(predictions,labels):
digits = {i:{"val":0,"size":0} for i in range(0,1827)}
#Need to apply a softmax during the testing step
preds_arg = predictions
labels_np = labels
#Converting both of them to np.array, so that it is easy loop and store values
preds_labels = np.argmax(preds_arg,1)
labels_indices = np.argmax(labels_np,1)
preds_size = len(preds_labels)
print(" Pred size ", preds_size)
print("Shape of preds labels",preds_labels.shape,"Shape of labels",labels_indices.shape,"Size of actual array",preds_size)
for pred_index in range(0,preds_size):
#If the actual label and the predicted label don't match, then add one against the actual label
if labels_indices[pred_index] != preds_labels[pred_index]:
#print("Val is equal",pred_index)
digits[labels_indices[pred_index]]["val"] += 1
#Divide by all mentions
digits[labels_indices[pred_index]]["size"] += 1
for key in digits:
#No data for a particular class?
if digits[key]["size"] != 0:
digits[key]["val"] = (digits[key]["val"]/digits[key]["size"])*100
else:
digits[key]["val"] = 150
return digits
    # --- hyper-parameters and input file lists ---
    #Reading 40 records at a time?
    batch_n = 40
    #Make this dynamic later
    batch_ont_n = 1827
    n_threads = 1
    #Need to include ont list as well here
    fname_list = ['../dumps/train.data0.tfrecord', '../dumps/train.data1.tfrecord']
    fname_testlist = ['../dumps/test.data0.tfrecord']
    fname_ontlist = ['../dumps/ontologyall.data0.tfrecord']
    #fname_ontlist = []
    # Filenames are fed at initializer time so one iterator serves many files.
    fname_holder = tf.placeholder(tf.string, shape=[None])
    #Google what this means?
    buff_size = 200
    shuff_data = True
    #Need to introduce a set for ont list as well
    serial_keys = data_constants.READER_FEAT_KEY_TYPE_MAP
    output_type_keys = data_constants.TF_FEAT_KEY_TYPE_MAP
    feat_dims = data_constants.DATA_SHAPES
    ont_serial_keys = data_constants.READER_ONT_KEY_TYPE_MAP
    ont_output_type_keys = data_constants.TF_ONT_KEY_TYPE_MAP
    ont_feat_dims = data_constants.ONT_DATA_SHAPES
    n_epochs = 4
    # --- input iterators: train, test, and full-ontology ---
    with tf.variable_scope("Train_iterator"):
        #Batch size should be size of test set in case of ont reader stuff?
        itr, next_elem = get_data_itr(batch_size=batch_n,
                                      num_threads=n_threads,
                                      fnames=fname_holder,
                                      q_capacity=buff_size,
                                      shuffle_input=shuff_data,
                                      serialized_keys=serial_keys,
                                      out_type_keys=output_type_keys,
                                      feature_shapes=feat_dims
                                      )
    with tf.variable_scope("Test_iterator"):
        #Batch size should be size of test set in case of ont reader stuff?
        test_itr, next_elem_test = get_data_itr(batch_size=batch_n,
                                                num_threads=n_threads,
                                                fnames=fname_holder,
                                                q_capacity=buff_size,
                                                shuffle_input=shuff_data,
                                                serialized_keys=serial_keys,
                                                out_type_keys=output_type_keys,
                                                feature_shapes=feat_dims
                                                )
    with tf.variable_scope("Entire_Ontology_iterator"):
        #Ont keys should not be shuffled, need to be loaded in order
        ontall_itr, next_elem_ont = get_data_itr(batch_size=batch_ont_n,
                                                 num_threads=n_threads,
                                                 fnames=fname_holder,
                                                 q_capacity=buff_size,
                                                 shuffle_input=False,
                                                 serialized_keys=ont_serial_keys,
                                                 out_type_keys=ont_output_type_keys,
                                                 feature_shapes=ont_feat_dims
                                                 )
    # --- shared embedding table, initialized from the pretrained pickle ---
    embed_shape, embed_init = _load_embeddings(EMBEDFILE_NAME)
    E = tf.get_variable('embedding_layer', shape=embed_shape, initializer=embed_init)
    with tf.variable_scope("Train_lookup"):
        #or can it be done per record as well? 1 x |W| x 1800; choose the maximal here
        embed_tf_worddataset = tf.nn.embedding_lookup(E, next_elem[0])
        #1 - GO:0000 , GO:000 - [12,24]
        embed_tf_ontdataset = tf.nn.embedding_lookup(E, next_elem[1])
    with tf.variable_scope("Test_lookup"):
        #or can it be done per record as well? 1 x |W| x 1800; choose the maximal here
        embed_tf_worddataset_test = tf.nn.embedding_lookup(E, next_elem_test[0])
        #1 - GO:0000 , GO:000 - [12,24]
        # embed_tf_ontdataset_test = tf.nn.embedding_lookup(E, next_elem_test[1])
    with tf.variable_scope("Entire_Ontology_lookup"):
        #inefficient does this need to be done?
        embed_tf_entireontdataset = tf.nn.embedding_lookup(E, next_elem_ont[0])
    #Need to add code for tf.reshape here
    # --- twin towers: train graph plus reused-variable test graph ---
    with tf.variable_scope("sent_model") as sent_scope:
        sent_rep = sentmodel(embed_tf_worddataset)
        # ont_rep = ontmodel(embed_tf_ontdataset)
        # #Loss per batch is calculated as the cosine distance between the sentence and ontology representation
        # loss = calc_loss(sent_rep, ont_rep)
        sent_scope.reuse_variables()
        sent_rep_test = sentmodel(embed_tf_worddataset_test)
        # ont_rep_test = ontmodel(embed_tf_ontdataset_test)
        #accuracy_test = accuracy(sent_rep_test, entire_ontrep, next_elem_test[2])
    with tf.variable_scope("ont_model") as ont_scope:
        ont_rep = ontmodel(embed_tf_ontdataset)
        ont_scope.reuse_variables()
        #Obtain a matrix rep of all ont terms
        entire_ontrep = ontmodel(embed_tf_entireontdataset)
    with tf.variable_scope("loss"):
        #Loss per batch is calculated as the cosine distance between the sentence and ontology representation
        loss = calc_loss(sent_rep, ont_rep)
        #Run the Adam Optimiser(AdaGrad + Momentum) with an initial eta of 0.0001
        train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    with tf.variable_scope("accuracy"):
        acc_test = accuracy(sent_rep_test, entire_ontrep, next_elem_test[2])
    #with tf.control_dependencies("entire_ontrep"):
    # acc_test = accuracy(sent_rep_test, entire_ontrep, next_elem_test[2])
    #tensorboard portion
    with open('MY_GRAPH.txt', 'w') as f:
        f.write(str(tf.get_default_graph().as_graph_def()))
    # --- training session: epochs over shuffled files, periodic evaluation ---
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        seen_feats = []
        count = 0
        #For getting tensor board to work
        file_writer = tf.summary.FileWriter('logs/')
        file_writer.add_graph(sess.graph)
        for _ in range(n_epochs):
            print(" Starting an epoch ")
            shuffle(fname_list)
            '''
            Initialize the train, test and ont readers
            Access can be determined at different points of time
            '''
            sess.run(itr.initializer, feed_dict={fname_holder: fname_list})
            ctr = 0
            while True:
                try:
                    #Initialise the embeddings
                    # sess.run(E)
                    #merge = tf.summary.merge_all()
                    x, y, cos_dist, _, truth_elem = sess.run([ sent_rep, ont_rep, loss, train_step, next_elem[2]])
                    #print(" Sent embedding ", s)
                    print(" Description ",x.shape)
                    print("Sent ", y.shape)
                    print(" Cosine distance is ", cos_dist)
                    print("Truth ", truth_elem.shape)
                    ctr+=1
                    print(" per batch ctr consumed ", ctr*40, " of data")
                    #Run the test portion per 400 iterations
                    if (ctr*40)%400 == 0:
                        print("Running test portion at ",(ctr*40), "step")
                        # sess.run(ontall_itr.initializer, feed_dict={fname_holder: fname_ontlist})
                        #Obtain the ont representation
                        # while True:
                        #     try:
                        #         ontrep_out = sess.run(entire_ontrep)
                        #         print(" Ontrep ", ontrep_out.shape)
                        #     except tf.errors.OutOfRangeError:
                        #         print(' Finished consuming the ont data ')
                        #         print(" Ontrep ", ontrep_out.shape)
                        #         break
                        #Initialize the test data to compute accuracy for each of the preds
                        sess.run(test_itr.initializer, feed_dict={fname_holder: fname_testlist})
                        epoch_acc = []
                        #Obtain the ont representation
                        while True:
                            try:
                                sess.run(ontall_itr.initializer, feed_dict={fname_holder: fname_ontlist})
                                # sent_rep_t, acc_tval = sess.run([sent_rep_test, acc_test])
                                # acc_test = accuracy(sent_rep_test, ontrep_out, next_elem_test[2])
                                acc_tval = sess.run([acc_test])
                                epoch_acc.append(acc_tval)
                                # sent_rep_t, pred_test = sess.run([sent_rep_test, next_elem_test[2]])
                                # acc_test = accuracy(sent_rep_t, ontrep_out, pred_test)
                                # acc_tval = acc_test.eval()
                                # epoch_acc.append(acc_tval)
                            except tf.errors.OutOfRangeError:
                                print(' Finished consuming the test data ')
                                print(' Accuracy at epoch is ', np.mean(epoch_acc))
                                break
                except tf.errors.OutOfRangeError:
                    print('Completed epoch')
                    break
        print(" Finished consuming an entire batch ")
    sess.close()
| [
"charis@rpi.edu"
] | charis@rpi.edu |
db3779c711392ab68d99c733bcb2d858c18aee3a | f24c35bb0919f9ad75f45e7906691c3189536b33 | /xcb_ws/file/quarotor-master/cv_vision/devel/lib/python2.7/dist-packages/april_pro/msg/__init__.py | 242397c93cfa4f9a9cc60d5c7d837d15d027577f | [] | no_license | mfkiwl/supreme-xcb | 9b941f49bab5a811d23a0cd75790d1e5722aa9f0 | d1287657607bf86d4b1393acf285951760670925 | refs/heads/main | 2023-03-07T12:10:28.288282 | 2021-03-02T11:46:00 | 2021-03-02T11:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from ._camera_pos import *
| [
"xiechengbinin@gmail.com"
] | xiechengbinin@gmail.com |
0c33e1adf91e821cc7a08e50d950cf95850b1690 | 0bfa5e1771cc0d52689571bf523ca410813d138b | /News/venv/Scripts/pip3.7-script.py | c1d053c902a9ceead2d980164bf6e23c23a0e639 | [] | no_license | dndgus482/Sorticle | a00179255ab9f89ac868c47fb39daec3b6f32faa | 8041cd8b67ce53b02a7cfc1f862a43090e69382c | refs/heads/master | 2022-11-06T22:37:15.796927 | 2020-06-18T08:10:33 | 2020-06-18T08:10:33 | 254,592,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #!C:\Users\hablo\PycharmProjects\News\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"chengeying1025@gmail.com"
] | chengeying1025@gmail.com |
2819c32c29eda8a92e1713eee6bd9346600a6d7c | 12d86bfab3bd6ec239e9126231a469684f06649a | /ipynotebooks/Python2.7/CUSTOM FILES/2015-12-07 Matplotlib GridSpec Tight_layout fiddling.py | a0cf0d8098b04ccb76c73b2aa96a0fc54f6022c4 | [] | no_license | dchug2/pynotebook | 2ea76b6dc4039be0a9d207d65bbf7cac0e3c49d6 | 36e324705b743b5648e7f9d9533b42ae7c85192d | refs/heads/master | 2021-05-27T02:30:02.887975 | 2019-02-13T10:46:12 | 2019-02-13T10:46:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py |
# coding: utf-8
# In[3]:
import matplotlib.pyplot as plt
import warnings
# Jupyter magic: render figures inline (this file is a notebook export).
get_ipython().magic(u'matplotlib inline')
import random
# Candidate font sizes; example_plot picks one at random per label/title.
fontsizes = [8, 16, 24, 32]
def example_plot(ax):
    """Draw a short line on *ax* and label it with randomly sized text."""
    ax.plot([1, 2])
    # Same call order as before, so random.choice is consumed identically.
    labellers = [
        (ax.set_xlabel, 'x-label'),
        (ax.set_ylabel, 'y-label'),
        (ax.set_title, 'Title'),
    ]
    for set_text, text in labellers:
        set_text(text, fontsize=random.choice(fontsizes))
fig = plt.figure()
import matplotlib.gridspec as gridspec
# Left column: three stacked axes confined to the left 45% of the figure.
gs1 = gridspec.GridSpec(3, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
ax3 = fig.add_subplot(gs1[2])
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    # This raises warnings since tight layout cannot
    # handle gridspec automatically. We are going to
    # do that manually so we can filter the warning.
    gs1.tight_layout(fig, rect=[None, None, 0.45, None])
# Right column: two stacked axes in the remaining width.
gs2 = gridspec.GridSpec(2, 1)
ax4 = fig.add_subplot(gs2[0])
ax5 = fig.add_subplot(gs2[1])
example_plot(ax4)
example_plot(ax5)
with warnings.catch_warnings():
    # This raises warnings since tight layout cannot
    # handle gridspec automatically. We are going to
    # do that manually so we can filter the warning.
    warnings.simplefilter("ignore", UserWarning)
    gs2.tight_layout(fig, rect=[0.45, None, None, None])
# now match the top and bottom of two gridspecs.
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.update(top=top, bottom=bottom)
gs2.update(top=top, bottom=bottom)
plt.show()
"mattijn@gmail.com"
] | mattijn@gmail.com |
1f0e54f6b21b16e128b63ac74d0a580e0d573725 | f1967abc06b3a163b335c2c4ac6965019117ae62 | /Source/Shiroten/Engine Prototype/pubyc_renderer_temp/simulation.py | e5b670664e6410833420826b2f00c7e4f0563fbb | [] | no_license | Shiroten/Planetary-Python-Project | e2255fa3f464d4ad4a267c39076025f874a9202f | 5b2c4b4ae0317b4d1b855701879e53d8f4862184 | refs/heads/master | 2021-09-07T14:29:42.012041 | 2017-12-21T15:54:15 | 2017-12-21T15:54:15 | 106,595,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py |
import sys
from loop import Loop
from simulation_constants import END_MESSAGE
import numpy as np
import math
import time
def startup(sim_pipe):
    """Run the n-body solar-system simulation, streaming normalized body
    positions (plus display radii) through *sim_pipe* until END_MESSAGE
    arrives."""
    # Solar system initial state (SI units: metres, m/s, kilograms).
    python_position = [
        #"Sun"
        [0, 0, 0],
        #"Mercury"
        [57_909_175_000, 0, 0],
        #"Venus"
        [108_208_930_000, 0, 0],
        #"Earth"
        [149_597_890_000, 0, 0],
        #"Moon"
        [149_597_890_000, 384_400_000, 0],
        #"Mars"
        [227_936_640_000, 0, 0],
        #"Jupiter"
        [778_412_020_000, 0, 0],
        #"Saturn"
        [1_426_725_400_000, 0, 0],
        #"Uranus"
        [2_870_972_200_000, 0, 0],
        #"Neptune"
        [4_498_252_900_000, 0, 0]
    ]
    # Orbital speeds, perpendicular to the radial placement above.
    python_speed = [
        #"Sun"
        [0, 0, 0],
        #"Mercury"
        [0, 47_872, 0],
        #"Venus"
        [0, 35_021, 0],
        #"Earth"
        [0, 29_786, 0],
        #"Moon"
        [-1_022, 0, 0],
        #"Mars"
        [0, 24_131, 0],
        #"Jupiter"
        [0, 13_069, 0],
        #"Saturn"
        [0, 9_672, 0],
        #"Uranus"
        [0, 6_835, 0],
        #"Neptune"
        [0, 5_477, 0]
    ]
    python_masse = [
        #"Sun"
        1.9889 * 10 ** 30,
        #"Mercury"
        3.3022 * 10 ** 23,
        #"Venus"
        4.8685 * 10 ** 24,
        #"Earth"
        5.97219 * 10 ** 24,
        #"Moon"
        7.34767309 * 10 ** 22,
        #"Mars"
        6.4185 * 10 ** 23,
        #"Jupiter"
        1.8987 * 10 ** 27,
        #"Saturn"
        5.6851 * 10 ** 26,
        #"Uranus"
        8.6849 * 10 ** 25,
        #"Neptune"
        1.0244 * 10 ** 26
    ]
    # Display-only scale for the renderer's sphere radii (not physical sizes).
    FACTOR = 0.1
    radius = [
        0.25 * FACTOR,
        0.02 * FACTOR,
        0.06 * FACTOR,
        0.06 * FACTOR,
        0.01 * FACTOR,
        0.03 * FACTOR,
        0.18 * FACTOR,
        0.15 * FACTOR,
        0.1 * FACTOR,
        0.1 * FACTOR
    ]
    position = np.array(python_position, dtype=np.float64)
    speed = np.array(python_speed,dtype=np.float64)
    masse = np.array(python_masse,dtype=np.float64)
    # Integration step: a quarter of an hour, in seconds.
    dt = 60 * 60 / 4
    while True:
        # The renderer may ask us to shut down between frames.
        if sim_pipe.poll():
            message = sim_pipe.recv()
            if isinstance(message, str) and message == END_MESSAGE:
                print('simulation exiting ...')
                sys.exit(0)
        # Advance one simulated week (24*7 quarter-hour... TODO confirm
        # intended real-time ratio) per rendered frame.
        for i in range (24 * 7):
            Loop(dt, position, speed, masse)
        # Rows: x, y, z normalized by Neptune's orbital radius, plus radius.
        body_array = np.zeros((len(position), 4), dtype=np.float64)
        normalization = -11
        # NOTE(review): `normalization` above is never used.
        for body_index in range(len(python_position)):
            body_array[body_index][0] = position[body_index][0] / 4_498_252_900_000
            body_array[body_index][1] = position[body_index][1] / 4_498_252_900_000
            body_array[body_index][2] = position[body_index][2] / 4_498_252_900_000
            body_array[body_index][3] = radius[body_index]
        #print(body_array)
        # Cap the frame rate at roughly 60 FPS.
        time.sleep(1/60)
        sim_pipe.send(body_array)
"shiroten.xisatu@gmail.com"
] | shiroten.xisatu@gmail.com |
0994bc73ae3dfc6dddd9e06649e1fdd52925adf8 | 03e67ed4e375659906a8df76ae5a3c8a6ca7d875 | /app/auth/views.py | 7d4db133a91a7d17ee7d4a373b29ae30bf886fab | [] | no_license | zhuangdaAlx/flask-blog | 45d55ed146ae6af4f458892201b834e5e3df2a85 | b6b29806d995907fe4992e9745232dfc9d6e1913 | refs/heads/master | 2023-04-08T23:22:21.252255 | 2018-05-16T04:36:47 | 2018-05-16T04:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, login_required, logout_user, current_user
from . import auth
from ..models import User
from .forms import LoginForm, RegistrationForm
from app import db
from ..email import send_email
# 每次请求前运行,更新已登录用户的访问时间
@auth.before_app_request
def before_request():
    """Refresh the user's last-seen timestamp and gate unconfirmed accounts."""
    if current_user.is_authenticated:
        current_user.ping()
        # Filter unconfirmed accounts: redirect them away from everything
        # except the auth blueprint and static files.
        if not current_user.confirmed \
                and request.endpoint \
                and request.blueprint != 'auth' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))
# 登录路由
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the user (POST)."""
    form = LoginForm()
    # POST with valid data: look the user up and verify the password.
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            # Mark the user as logged in; optionally set a remember-me cookie.
            login_user(user, form.remember_me.data)
            # Redirect back to the originally requested page, or the index.
            # Only relative paths are allowed, preventing open redirects.
            next = request.args.get('next')
            # Bug fix: was `next.startwith('/')`, which raised AttributeError
            # whenever a ?next= parameter was present.
            if next is None or not next.startswith('/'):
                next = url_for('main.index')
            return redirect(next)
        flash('用户名或密码错误')
    # GET (or failed validation): render the login template.
    return render_template('auth/login.html', form=form)
# Logout route.
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    flash('您已退出登录')
    return redirect(url_for('main.index'))
# Registration route.
@auth.route('/register', methods=['POST', 'GET'])
def register():
    """Create a new account and email a confirmation token."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, username=form.username.data, password=form.password.data)
        db.session.add(user)
        # The confirmation token embeds the user id, so the row must be
        # committed (id assigned) before the token can be generated.
        db.session.commit()
        token = user.generate_confirmation_token()
        send_email(user.email, '确认您的账户', 'auth/email/confirm', user=user, token=token)
        flash('一封确认邮件已发到您的邮箱。')
        return redirect(url_for('main.index'))
    return render_template('auth/register.html', form=form)
# Account confirmation route.
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an emailed confirmation token for the logged-in user."""
    # Already confirmed: nothing to do.
    if current_user.confirmed:
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        flash('您已成功确认账户!')
    else:
        flash('确认链接非法或已过期。')
    return redirect(url_for('main.index'))
# Page asking the user to confirm their account.
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page to unconfirmed users."""
    if current_user.is_anonymous or current_user.confirmed:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')
# Resend the confirmation email.
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Email a fresh confirmation token to the current user."""
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, '确认您的账户', 'auth/email/confirm', user=current_user, token=token)
    flash('一封新的确认邮件已发到您的邮箱。')
    return redirect(url_for('main.index'))
| [
"605079324@qq.com"
] | 605079324@qq.com |
e69c46f3cee7f01ec37608f025ae05edccdfa011 | 4cf7b2c6f63a8eae3b1150af0ea57c991d55d45d | /mod3/listDays.py | 61be58d7bc23fb0a157af44c8c414c8b0bb7a073 | [] | no_license | bpthoms/CGU-IST303-Examples | 7b77cba8094a5b84d1185e113dc0a07e8062d580 | f759b1245de2b70ada9c48b90ce0501d77e7bb5f | refs/heads/master | 2020-12-29T23:23:11.902880 | 2020-02-06T20:23:58 | 2020-02-06T20:23:58 | 238,774,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | #Let's assign our list
# Build the list of weekday names. Note: list indexing is 0-based, so
# days[1] is actually the *second* element ('Tuesday') — which is the
# point of the demo below.
days=['Monday','Tuesday','Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
print("The first day of the week is " + days[1] + "? That can't be right.")
# Lists are mutable: reassign index 1 in place.
days[1]='Sunday'
print("The first day of the week is " + days[1] + ". Now that's more like it.")
| [
"bpthoms@gmail.com"
] | bpthoms@gmail.com |
26f8eda5f0600071a244910d55c4b31448265a78 | 91693e434f5971b6f07d04f0aa289cebd396518b | /scripts/end_detection.py | 9c6e2388699ded9b52816632de7e15e1463c066a | [] | no_license | alexoshri/ardrone_project_work | aef2b8979ac098351fb53ae8b213831d0bd4da56 | ddf4d1f3b368337cad77196f14abfbff983d6498 | refs/heads/master | 2021-01-12T12:45:37.068997 | 2016-12-29T14:14:35 | 2016-12-29T14:14:35 | 69,293,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | #!/usr/bin/env python
import roslib
roslib.load_manifest('ardrone_project')
import sys
import rospy
import cv2
from std_msgs.msg import String, Bool
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
# ASSUMES BOTTOM CAMERA IS TOGGLED!
class end_detection:
    """ROS node that flags the end of the course by detecting a large
    yellow marker in the drone's bottom-camera image stream."""

    def __init__(self):
        rospy.init_node('end_detection', anonymous=True)
        self.bridge = CvBridge()
        # Subscribe to the (frame-dropped) bottom camera stream.
        self.image_sub = rospy.Subscriber("/ardrone/bottom/image_raw_drop", Image, self.callback) # subscribe to drop topic
        # Publishes True once enough yellow pixels are visible.
        self.is_end_pub = rospy.Publisher("end_detection/is_end", Bool, queue_size=10) # queue?

    def callback(self, data):
        """Per-frame handler: threshold yellow in HSV and publish the verdict."""
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        h , w = cv_image.shape[:2]
        # Filtering sensor noise before colour segmentation.
        blur = cv2.blur(cv_image, (10, 10))
        # Yellow mask in HSV space.
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([15, 100, 50])
        upper_yellow = np.array([45, 255, 255])
        mask_y = cv2.inRange(hsv, lower_yellow, upper_yellow)
        # Morphology: erode away small speckles, then dilate to restore blob size.
        closing_y = cv2.erode(mask_y, np.ones((30, 30), np.uint8), iterations=1)
        dilation_y = cv2.dilate(closing_y, np.ones((10, 10), np.uint8), iterations=1)
        num_yellow = np.count_nonzero(dilation_y)
        # More than 3000 surviving yellow pixels marks the end of the track.
        is_end = num_yellow > 3000
        self.is_end_pub.publish(is_end)
        # Visualization overlays for debugging.
        cv2.putText(dilation_y, 'num yellow: {}'.format(num_yellow), (w / 2, 100), cv2.FONT_ITALIC, 1, (255, 255, 255),2)
        cv2.putText(dilation_y, 'End Detection = {}'.format(is_end), (int(w / 4), 200), cv2.FONT_ITALIC, 1, (255, 255, 255),2)
        cv2.imshow('frame',dilation_y)
        cv2.imshow('fre',cv_image)
        cv2.waitKey(1)
def main(args):
    """Start the end_detection node and spin until shutdown."""
    ed = end_detection()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    except rospy.ROSInterruptException:
        print("take_off_unit: ROSInterruptException")
    finally:
        # Always close the OpenCV debug windows on exit.
        cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| [
"alexoshri@gmail.com"
] | alexoshri@gmail.com |
3122a3d22cc75b8072b1a84cd51d2b3b92d108c8 | 7837c578a83c34a6c2c1dc9d9e4d163f64660914 | /src/backend/expungeservice/models/charge_types/subsection_6.py | 8f2010b71cc29b5385149d674ecdd4acb4d7e1a3 | [] | no_license | aemrich/recordexpungPDX | 03eade611806b264b2d5674f14c961587fb678d2 | 054dfab763a6f5b033c7e74d9043bd82326bbc58 | refs/heads/master | 2020-07-03T06:00:50.965896 | 2020-03-01T23:09:39 | 2020-03-01T23:09:39 | 201,811,565 | 0 | 0 | null | 2019-08-11T20:25:36 | 2019-08-11T20:25:36 | null | UTF-8 | Python | false | false | 1,794 | py | from dataclasses import dataclass
from expungeservice.models.charge import Charge
from expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus
@dataclass(eq=False)
class Subsection6(Charge):
type_name: str = "Subsection 6"
expungement_rules: str = (
"""Subsection (6) names five felony statutes that have specific circumstances of the case under which they are ineligible.
However, two of the statutes named -- 163.165(1)(h) (Assault in the third degree)
This subsection also specifies conditions under which [SexCrimes](#SexCrime) are eligible or ineligible.
The three remaining specifications in the statute are:
* 163.200 (Criminal mistreatment II) is ineligible if the victim at the time of the crime was 65 years of age or older; otherwise eligible as a (Class A) [Misdemeanor](#Misdemeanor).
* 163.205 (Criminal mistreatment I) is ineligible if the victim at the time of the crime was 65 years of age or older or a minor; otherwise eligible as a [Class C Felony](#FelonyClassC).
* 163.575 (Endangering the welfare of a minor) (1)(a) is ineligible when the offense constitutes child abuse as defined in ORS 419B.005 (Definitions); otherwise eligible as a (Class A) [Misdemeanor](#Misdemeanor).
* 163.145 (Criminally negligent homicide) is ineligible when that offense was punishable as a Class C felony.
Dismissals are eligible under 137.225(1)(b)."""
)
    def _type_eligibility(self):
        """Decide type eligibility for a subsection (6) charge.

        Dismissals are always eligible; convictions depend on case facts
        (victim age, offense circumstances) that are not present in the
        record, so they are flagged for manual analysis.
        """
        if self.dismissed():
            return TypeEligibility(EligibilityStatus.ELIGIBLE, reason="Dismissals are eligible under 137.225(1)(b)")
        elif self.convicted():
            return TypeEligibility(
                EligibilityStatus.NEEDS_MORE_ANALYSIS, reason="Ineligible under 137.225(6) in certain circumstances.",
            )
| [
"noreply@github.com"
] | noreply@github.com |
b910048a22fd1306dd1bc8186944343933382755 | 25cad8d5a238e421c649695e089a2d77cb511562 | /copy_files_from_cosmos.py | dd657817a0a60bb1d531ac2dbf058fe491a6da64 | [] | no_license | strack-cineuse-sunding/strack-api-tutorial | 902d0efabd2f0bfc62b8312f1cd6b329ea68b03c | c3b14d02fd315aa1376ce52a3ebb277e8c8a42a3 | refs/heads/master | 2022-04-29T03:32:05.747365 | 2017-05-15T06:00:09 | 2017-05-15T06:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # coding: utf-8
# author: NJB
# email: mincau@163.com
# Copyright (c) 2017 Strack
import os
# Source tree to mirror, and destination for the placeholder tree.
root_dir = r"D:\BaiduNetdiskDownload\Cosmos Laundromat\Disc2\scenes"
dst_dir = r"D:\work\resource\cosmos"
# Walk the source tree, recreating its directory structure under dst_dir.
# Note: no file contents are copied — matching destination files are
# truncated to empty placeholders.
for root, dirs, files in os.walk(root_dir):
    # Path of the current directory relative to the source root.
    rel_path = root[len(root_dir)+1:]
    if not rel_path:
        rel_path = dst_dir
    else:
        rel_path = os.path.join(dst_dir, rel_path)
    for d in dirs:
        sub_dir = os.path.join(rel_path, d)
        if not os.path.exists(sub_dir):
            os.mkdir(sub_dir)
    for f in files:
        f_path = os.path.join(rel_path, f)
        # NOTE(review): this only truncates destination files that already
        # exist; files missing from dst are skipped, not created — confirm
        # that is intended.
        if os.path.exists(f_path):
            f = open(f_path, "w")
            f.write("")
            f.close()
print "copy files and dirs from 'cosmos laundromat' done!"
| [
"mincau@sina.com"
] | mincau@sina.com |
e91ab6f89a44ae34d606e7f048dcab345ebe5b91 | f49a4da9605020b09b38d342c305732002cbcdcd | /core/clean.py | 0343c05b756f07e75fcb06770d84c9d648089354 | [
"MIT"
] | permissive | hbhammaddyar0/TG-Watermark-bot | 334fd245910eb1e329d61b2bdffcb7686f83b373 | c98a249f6789131777cfb76d0123f1744dc354ad | refs/heads/main | 2023-06-07T15:38:37.907890 | 2021-06-17T06:29:46 | 2021-06-17T06:29:46 | 377,711,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # (c) @hbhammaddyar
import os
import shutil
from configs import Config
async def delete_trash(file):
try:
os.remove(file)
except Exception as e:
print(e)
async def delete_all():
try:
root = Config.DOWN_PATH + "/WatermarkAdder/"
shutil.rmtree(root)
except Exception as e:
print(e)
| [
"noreply@github.com"
] | noreply@github.com |
ddc010c02efb31bb0c7a69d79709aae033540d48 | d70b1c1282390bcac743cd2a1e66bd4d83fe9b67 | /pychess/ic/managers/ICCAutoLogOutManager.py | 98beb0d4d24dec5acab2374dde57e2baa9f4e279 | [
"MIT"
] | permissive | jacobchrismarsh/chess_senior_project | d3e9a8c6c5bd5f8db67bf3dce0751303130549f2 | 7797b1f96fda5d4d268224a21e54a744d17e7b81 | refs/heads/master | 2022-12-12T18:38:07.913996 | 2019-06-13T20:14:04 | 2019-06-13T20:14:04 | 165,728,158 | 0 | 0 | MIT | 2022-12-10T17:19:06 | 2019-01-14T20:14:25 | Python | UTF-8 | Python | false | false | 264 | py | from gi.repository import GObject
from pychess.ic.managers.AutoLogOutManager import AutoLogOutManager
class ICCAutoLogOutManager(AutoLogOutManager):
    """ICC-server variant of the auto-logout manager: reuses the generic
    AutoLogOutManager behaviour and only stores the ICC connection."""

    def __init__(self, connection):
        GObject.GObject.__init__(self)
        self.connection = connection
| [
"gaston.aganza@gmail.com"
] | gaston.aganza@gmail.com |
0198b50f604c108e997aac5fca5c852086cf7472 | 8c55db1d12b14c9a7e66722a103d4d3da621a634 | /exampleCSV/TrialSensorFiles/TestWrite.py | b2e9e2a011e014cd696079c95f8cddd1fb9dec40 | [] | no_license | MCLusardi/BIPSpHProject | 3fd3fd5461abfe31457315de896fadbe781f22e4 | be901e2e55f09f5f378ec2b505faf735b004d2b1 | refs/heads/main | 2023-07-14T20:20:53.753949 | 2021-08-14T15:34:01 | 2021-08-14T15:34:01 | 335,359,818 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | file = open("TestWrite.txt", "a") #opening file for writing, will create if it doesn't exsit
# open() raises OSError on failure rather than returning None, so the
# original ``if file == None`` branch was dead code and could never print
# "File could not be opened". Just write the header and make sure the
# handle is always released, even if the write itself fails.
try:
    file.write("pH, DateTime")
finally:
    file.close()
print("All Done!"); | [
"mclntg@mail.missouri.edu"
] | mclntg@mail.missouri.edu |
e6122300ca48004cff845eb4a3a431535292fd2b | 94b373756ab6047165248cc5d4fc59e72a1342da | /exercise04/solution/montecarlo/bench_py2.py | 9284b330625d3f3706e12300b199d43e0291030e | [] | no_license | raju1107/uibk_ps_parsys | 75b87d000cc06b430c66e92c77817b54d722b061 | 314c0f168e0ec3aff26f3f69c68d9e10d4545c94 | refs/heads/master | 2022-01-14T13:37:54.681728 | 2019-01-23T11:51:13 | 2019-01-23T11:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,534 | py | import time
import os
import subprocess
import numpy
import sys
PROBLEM_SIZE = 0
NUMBER_OF_ITERATIONS = 10
COMMAND = "./montecarlo %s"
cycles_list = []
instructions_list = []
cache_misses_list = []
seconds_list = []
time_list = []
def parse_filename(filename):
    """Split a byte-string filename of the form b"<name>_<size>.<ext>"
    into its (name, size) parts, both returned as bytes."""
    stem = filename[:filename.find(b".")]
    sep = stem.rfind(b"_")
    return stem[:sep], stem[sep + 1:]
def strip_string(line):
    """Return the leading token of *line*: the bytes before the first
    space of the whitespace-trimmed line."""
    trimmed = line.strip()
    cut = trimmed.find(b" ")
    return trimmed[:cut]
def parse_content(output):
    """Parse `perf stat` output (bytes) into numeric counters.

    Returns (cycles, instructions, cache_misses, seconds) where the first
    three are ints and the last is a float. Scanning stops at the
    "seconds time elapsed" line.
    """
    cycles = instructions = cache_misses = seconds = None
    for raw in output.split(b'\n'):
        # Leading token of the line (the counter value).
        trimmed = raw.strip()
        token = trimmed[:trimmed.find(b" ")]
        # "bus-cycles" also contains "cycles"; the `not cycles` guard keeps
        # only the first match.
        if b"cycles" in raw and not cycles:
            cycles = token
        elif b"instructions" in raw:
            instructions = token
        elif b"cache-misses" in raw:
            cache_misses = token
        elif b"seconds time elapsed" in raw:
            seconds = token
            break
    # perf prints thousands separators; strip them before converting.
    return (int(cycles.replace(b',', b"")),
            int(instructions.replace(b',', b"")),
            int(cache_misses.replace(b',', b"")),
            float(seconds.replace(b',', b"")))
def perf_bench():
    """Run the benchmark under `perf stat` NUMBER_OF_ITERATIONS times and
    append cycles / instructions / cache-misses / seconds samples to the
    module-level lists."""
    # NOTE(review): these names are declared global but never assigned in
    # this function; the function actually appends to the *_list lists.
    global cycles_avg, instructions_avg, cache_misses_avg, seconds_avg
    for i in range(0, NUMBER_OF_ITERATIONS):
        proc = subprocess.Popen(["perf stat -e cycles,instructions,cache-references,cache-misses,bus-cycles %s" % (COMMAND % PROBLEM_SIZE)], stderr=subprocess.PIPE, shell=True)
        (out, err) = proc.communicate()
        # perf writes its statistics to stderr, not stdout.
        cycles, instructions, cache_misses, seconds = parse_content(err)
        cycles_list.append(cycles)
        instructions_list.append(instructions)
        cache_misses_list.append(cache_misses)
        seconds_list.append(seconds)
def time_bench():
    """Run COMMAND NUMBER_OF_ITERATIONS times, recording wall-clock time
    of each run in the module-level time_list."""
    global time_list
    for i in range(0, NUMBER_OF_ITERATIONS):
        start_time = time.time()
        os.system(COMMAND % PROBLEM_SIZE)
        end_time = time.time() - start_time
        time_list.append(end_time)
        #print("Round %i took %s seconds" % (i+1, end_time))
def print_results():
    """Print mean / median / minimum of the collected wall-clock timings."""
    # NOTE(review): only time_list is actually read here; the *_avg names
    # in the global statement are never used.
    global time_list, cycles_avg, instructions_avg, cache_misses_avg, seconds_avg
    print("\nProblemsize: %s\nIterations: %s" % (PROBLEM_SIZE, NUMBER_OF_ITERATIONS))
    print("Average time: %s" % (numpy.mean(time_list)))
    print("Median time: %s" % (numpy.median(time_list)))
    print("Minimum time: %s" % (min(time_list)))
    #print("Average Values\n\tCycles: %i \n\tinstr: %i \n\tCache Misses: %i \n\tSeconds: %s" % (statistics.mean(cycles_list), statistics.mean(instructions_list), statistics.mean(cache_misses_list), statistics.mean(seconds_list)))
    #print("\nMedian Values\n\tCycles: %i \n\tinstr: %i \n\tCache Misses: %i \n\tSeconds: %s" % (statistics.median(cycles_list), statistics.median(instructions_list), statistics.median(cache_misses_list), statistics.median(seconds_list)))
def main():
    """Build the montecarlo binary, benchmark it, print results, clean up."""
    print("Building")
    os.system("g++ Main.cpp Montecarlo.h gmp.h Montecarlo.cpp -fopenmp -Wall -O3 -std=c++11 -o montecarlo")
    time_bench()
    #perf_bench()
    print_results()
    # Remove the temporary binary again.
    os.system("rm montecarlo")
def print_help(exe):
    """Print command-line usage for this benchmark script."""
    usage = ("Usage:\n\tpython3 %s [problemsize] [version]"
             "\n\t\tproblemsize: Size of the array which should be merged"
             "\n\t\tversion: Either parallel or sequential") % exe
    print(usage)
if __name__ == "__main__":
if len(sys.argv) != 3:
print_help(sys.argv[0])
else:
PROBLEM_SIZE = int(sys.argv[1])
if (sys.argv[2] == "parallel"):
COMMAND = "./montecarlo %s par"
main()
| [
"curvasudfan@gmail.com"
] | curvasudfan@gmail.com |
0e37dca86994a1da13600b508212b627be03ffc0 | 704efad091d0c1fd9c846d32e4904a8c5c69f03e | /mock_server.py | e371b0b7ebe18499a239fe3215bf44f01eb84af7 | [] | no_license | wangyayx/api_mock | 4a54756b248eab84745d309855df50e30e4d6f3e | eeeb7ff075b6122f50b89919c26febfebe93eb2b | refs/heads/master | 2020-04-10T00:32:35.908022 | 2018-12-06T14:16:53 | 2018-12-06T14:16:53 | 160,688,433 | 0 | 0 | null | 2018-12-06T14:45:00 | 2018-12-06T14:45:00 | null | UTF-8 | Python | false | false | 5,881 | py | # -*- coding: utf-8 -*-
from flask import jsonify, Flask,make_response,request
import sys,ConfigParser,requests,json
from flask_sqlalchemy import SQLAlchemy
reload(sys)
sys.setdefaultencoding('utf-8')
#如果在mock_server中没有数据是否进行转发 0为是,非0为否
relay = 0
#转发服务器地址
host1 = 'http://127.0.0.1:5202'
def getconfig():
    """Read the database URI from the [database] section of db.config."""
    cf = ConfigParser.ConfigParser()
    path = 'db.config'
    cf.read(path)
    _dburi = cf.get("database","dbhost")
    # Echo the URI for debugging at startup.
    print(_dburi)
    return _dburi
app = Flask(__name__)
# app.config['SQLALCHEMY_DATABASE_URI'] = getconfig()
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:123456@127.0.0.1:3306/mock_api'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=True
db = SQLAlchemy(app)
class mock_config(db.Model):
    """ORM model for one mocked API endpoint (table mock_config)."""
    __tablename__ = 'mock_config'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50))
    # Expected request parameters ("k=v&..." or a raw JSON body).
    reqparams = db.Column(db.String(500))
    # HTTP method this mock answers ('get' / 'post').
    methods = db.Column(db.String(50))
    # Request path this mock answers, e.g. '/api/foo'.
    domain = db.Column(db.String(50))
    description = db.Column(db.String(50))
    # Canned response body returned on a match.
    resparams = db.Column(db.String(500))
    update_time = db.Column(db.TIMESTAMP)
    # 0 = enabled.
    status = db.Column(db.Integer)
    # 1 = skip parameter checking and always return resparams.
    ischeck = db.Column(db.Integer)
    project_name = db.Column(db.String(20))
def checksize(domain,method):
    """Return a jsonify'd failure if *domain*/*method* has no configured
    mock; return None when the combination exists."""
    # Is any mock configured for this path at all?
    mock = mock_config.query.filter_by(domain=domain).first()# 校验domain是否存在
    # Is a mock configured for this path *and* this HTTP method?
    mock1 = mock_config.query.filter(mock_config.domain == domain,mock_config.methods == method).first() # 校验method是否存在
    if not mock :
        return jsonify({"status":"fail","msg": u"请求方法不存在"})
    elif not mock1 :
        return jsonify({"status":"fail","msg": u"请求方法对应的请求模式不存在"})
def checkpath(domain,varsvalue,method):
    """Look up the mock for *domain*/*method* and match *varsvalue* against it.

    varsvalue -- list of (key, value) pairs from the request (args or form).
    Returns the canned response (bytes/str) on success, otherwise a
    jsonify'd failure message.
    """
    method=method.lower()
    varsvalue.sort()
    # Make sure the method/path combination is configured at all.
    re=checksize(domain,method)#判断请求方法和模式是否匹配
    if re != None:
        return re
    if len(varsvalue) == 0:
        # Parameterless request: return the configured response directly.
        mock_data = mock_config.query.filter(mock_config.methods == method,mock_config.status ==0,mock_config.domain ==domain).first()
        resparams=mock_data.resparams
        if resparams== '':
            return jsonify({"status":"fail","msg": u"对应请求没有配置预期返回值"})
        else:
            return resparams.encode("utf-8")
    else:
        # Canonicalize the request parameters into a "k=v&..." string.
        varsvalue1=getvar(varsvalue)
        mock_data = mock_config.query.filter(mock_config.methods == method, mock_config.status == 0,
                                             mock_config.domain == domain).first()
        if not varsvalue1:
            return jsonify({"status":"fail","msg": u"请求方法和参数不匹配"})
        elif mock_data.ischeck==1:
            # ischeck == 1: skip parameter validation entirely.
            return mock_data.resparams
        else:
            rdata=checkparams(mock_data,varsvalue1)
            return rdata
def checkparams(mock_data, varsvalue1):
    """Compare an incoming request's parameters against the configured mock.

    mock_data  -- mock_config row holding the expected request parameters
                  (``reqparams``) and the canned response (``resparams``).
    varsvalue1 -- actual request parameters, already flattened into a
                  "k=v&k=v" string (GET / form POST) or a raw JSON body
                  followed by '=' (JSON POST; see getvar()).

    Returns the configured response body (bytes) on a match, otherwise a
    jsonify'd failure message.
    """
    # Expected request parameters as configured in the database.
    varsvalue2 = mock_data.reqparams
    # GET requests -- and form-encoded POSTs whose body does not look like
    # a JSON object -- are compared as canonically ordered "k=v&..." strings.
    # BUG FIX: the original tested ``mock_data.reqparams.lower() == 'post'``,
    # comparing the *parameter string* to the literal 'post', so form-encoded
    # POSTs always fell through to the JSON branch and never matched.
    if mock_data.methods.lower() == 'get' or (
            mock_data.methods.lower() == 'post'
            and varsvalue1[0] != '}' and varsvalue1[-2] != '}'):
        arr = varsvalue2.split('&')
        for i in range(len(arr)):
            arr[i] = arr[i] + '&'
        # Same reverse ordering that getvar() produces for the request.
        arr.sort(reverse=True)
        expected = ''.join(arr)[0:-1]
        if expected == varsvalue1 and mock_data.resparams != '':
            return mock_data.resparams.encode("utf-8")
        elif mock_data.resparams == '':
            return jsonify({"status": "fail", "msg": u"对应请求没有配置预期返回值"})
        else:
            return jsonify({"status": "fail", "msg": u"请求方法和参数不匹配"})
    elif mock_data.methods.lower() == 'post':
        # JSON POST: strip whitespace noise, drop the trailing '=' added by
        # getvar(), and compare the raw bodies.
        varsvalue1 = varsvalue1.replace("\t", "").replace("\r", "").strip()[:-1]
        varsvalue2 = varsvalue2.replace("\t", "").replace("\r", "").strip()
        if varsvalue1 == varsvalue2:
            return mock_data.resparams.encode("utf-8")
        else:
            return jsonify({"status": "fail", "msg": u"暂不支持该类型请求方法"})
def getvar(value):
    """Flatten (key, value) pairs into a "k=v&k=v" query string.

    Pairs are emitted in *reverse* of their input order -- the canonical
    ordering that checkparams() reproduces on the expected side.
    Returns '' for an empty input.
    """
    pieces = []
    for pair in value[::-1]:
        first, second = pair
        pieces.append(first + '=' + second)
    return '&'.join(pieces)
def getres(request,npath,host):
    """Forward the incoming request to the fallback server *host* and
    return (response, flattened-params)."""
    varsvalue = request.args.items()
    params = getvar(varsvalue)
    url1 = host + npath + '?'
    # NOTE(review): `params` here is a pre-joined string while requests
    # normally takes a dict/sequence for its params argument -- verify the
    # forwarded URL is what the fallback server expects.
    if request.method == 'GET':
        re = requests.get(url1, params=params)
    else:
        re = requests.post(url1, params=params)
    return re,params
# Catch-all route for two-segment paths, e.g. /api/foo.
@app.route('/<path:path>/<path:path1>', methods=['GET','POST'])
def get_all_task(path,path1):
    """Serve a mocked response for /<path>/<path1>; when no mock matches
    and relay == 0, forward the request to the fallback server host1."""
    npath='/' + path + '/' + path1
    if request.method=='GET':
        varsvalue = request.args.items()
    else:
        varsvalue = request.form.items()
    r = checkpath(npath, varsvalue, request.method)
    # NOTE(review): checkpath() can return a plain str/bytes mock body,
    # which has no `.data` attribute -- json.loads(r.data) would raise in
    # that case. Confirm all return paths yield Response objects.
    if json.loads(r.data)['status']=='fail' and relay==0:
        re1 = getres(request, npath, host1)
        return re1[0].content
    else:
        return r
# Catch-all route for single-segment paths, e.g. /foo.
@app.route('/<path:path>', methods=['GET','POST'])
def get_all_task1(path):
    """Serve a mocked response for /<path>; when no mock matches and
    relay == 0, forward the request to the fallback server host1."""
    path='/'+path
    if request.method=='GET':
        varsvalue = request.args.items()
    else:
        varsvalue = request.form.items()
    r = checkpath(path, varsvalue, request.method)
    # NOTE(review): same caveat as get_all_task -- checkpath() may return a
    # plain string without a `.data` attribute.
    if json.loads(r.data)['status'] == 'fail' and relay == 0:
        re1 = getres(request, path, host1)
        return re1[0].content
    else:
        return r
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 payload for unknown routes."""
    return make_response(jsonify({'msg':'fail','error': '404 Not found'}), 404)
@app.errorhandler(500)
# Renamed from `not_found`: the second definition shadowed the 404
# handler's function name at module level (the original only worked
# because Flask had already registered the first function object).
def internal_error(error):
    """Return a playful plain-text 500 message for unhandled errors."""
    return make_response(u"程序报错,可能是因为颜值不够", 500)
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=False, port=5201,threaded=True)
| [
"1107694999@qq.com"
] | 1107694999@qq.com |
1992606aa6af81e11b1db38e833188764ac03b4e | e3d71990fc810c06c01b3a388b6beb507232ccfb | /augmentations/horizontal_flip.py | f0ab903cdde02425329bcdc6133017aeece3df50 | [] | no_license | Kevinkald/asset_tracking | f9902a6f5abd3c517ab7d4b816c921460e9b5716 | e2199544f2f32f73cbe68bc9fe1d125228ac2c88 | refs/heads/master | 2023-03-28T05:19:32.201295 | 2021-03-31T09:37:55 | 2021-03-31T09:37:55 | 299,911,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
import cv2
import os
import xml.etree.ElementTree as ET
# Directory of training images to augment.
folder="../images/train_copy/"
# Augmentation pipeline: horizontal flip with probability 0.5.
seq = iaa.Sequential([
    iaa.Fliplr(0.5)
])
for filename in os.listdir(folder):
    img = cv2.imread(os.path.join(folder,filename))
    # Accessing each jpg image (cv2.imread returns None for non-images).
    if img is not None:
        # Derive the augmented image / annotation file names.
        filename_split = filename.split(".jpg")
        xml_filename_old = filename_split[0] + ".xml"
        new_filename = filename_split[0] + "_cutout.jpg"
        xml_filename = filename_split[0] + "_cutout.xml"
        # Copy and edit the old Pascal-VOC xml annotation: only the
        # filename/path fields are rewritten.
        # NOTE(review): the bounding-box coordinates are NOT mirrored when
        # the image is flipped -- confirm annotations stay valid.
        tree = ET.parse(folder + xml_filename_old)
        root = tree.getroot()
        k = root.find('filename')
        k.text = new_filename
        l = root.find('path')
        l.text = new_filename
        # Augment the image.
        image_aug = seq(image=img)
        #cv2.imshow("image", image_aug)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        # Save the augmented image + edited xml file.
        # NOTE(review): outputs are written to the current working
        # directory, not to `folder` -- verify that is intended.
        cv2.imwrite(new_filename, image_aug)
tree.write(xml_filename) | [
"kevin.k.kaldvansvik@gmail.com"
] | kevin.k.kaldvansvik@gmail.com |
aa31e701a8d8fe716135273f55b29f14ffd69e2f | ad73697ede53f200a79479d3c4e561c057b74391 | /cfg/fusion_ssd_comap.py | 6d2ed081787a1f1209ef5d3272d6b13d15440a3a | [] | no_license | CV-IP/FPV_RCNN | e8e84f37651f07b4590cb00d68fd50d769e14fde | fbdee427b30ecde45c027d841fb94ada858af808 | refs/heads/main | 2023-08-04T16:33:29.886975 | 2021-08-26T13:45:53 | 2021-08-26T13:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,928 | py | import numpy as np
from vlib.visulization import draw_points_boxes_bev_3d as visualization
from cfg import LABEL_COLORS
def update(dcfg_obj):
    """Recompute the derived fields of a dataset-config object in place.

    Refreshes ``n_classes``, ``grid_size``, ``feature_map_size`` and the
    target assigner's ``feature_map_size`` after ``classes``, ``pc_range``
    or ``voxel_size`` has been modified.
    """
    dcfg_obj.n_classes = len(dcfg_obj.classes)
    # Number of voxels along each axis of the point-cloud range.
    extent = dcfg_obj.pc_range[3:6] - dcfg_obj.pc_range[:3]
    cells = extent / np.array(dcfg_obj.voxel_size)
    dcfg_obj.grid_size = np.round(cells).astype(np.int64)
    # Backbone downsamples by 8; the feature map keeps only x/y extents.
    downsampled = (dcfg_obj.grid_size / 8).astype(np.int64).tolist()
    dcfg_obj.feature_map_size = [1] + downsampled[:-1]
    dcfg_obj.TARGET_ASSIGNER['feature_map_size'] = dcfg_obj.feature_map_size
class Dataset(object):
    """Configuration for the 'comap' cooperative-perception dataset:
    paths, train/test splits, point-cloud range, voxelization and
    target-assigner settings. Derived fields are recomputed in update()."""

    LABEL_COLORS = LABEL_COLORS
    # AUGMENTOR = {
    #     'random_world_flip': ['x'],
    #     'random_world_rotation': [-45, 45], # rotation range in degree
    #     'random_world_scaling': [0.95, 1.05] # scale range
    # }
    # Box-encoding configuration shared with the model head.
    BOX_CODER = {
        'type': 'GroundBox3dCoderTorch',
        'linear_dim': False,
        'n_dim': 7,
        'angle_vec_encode': False, # encode_angle_vector
        'angle_residual_encode': True # encode_angle_with_residual
    }
    def __init__(self):
        super(Dataset, self).__init__()
        self.name = 'comap'
        self.root = 'path/to/comap'
        # [x_min, y_min, z_min, x_max, y_max, z_max] in metres.
        self.pc_range = np.array([-57.6, -57.6, -0.1, 57.6, 57.6, 3.9])
        self.range_clip_mode = 'circle'
        self.test_split = ['1148', '753', '599', '53',
                           '905', '245', '421', '509']
        self.train_val_split = ['829', '965', '224', '685', '924', '334', '1175', '139',
                                '1070', '1050', '1162']
        self.train_split_ratio = 0.8
        self.ego_cloud_name = 'cloud_ego' # 'noisy_cloud_ego'
        self.coop_cloud_name = 'cloud_coop' # 'noisy_cloud_coop'
        self.node_selection_mode = 'random_selection_40' # 'kmeans_selection_40'
        self.fuse_raw_data = False
        self.classes = {1: ['Vehicles'], 2: ['Roads', 'RoadLines']} # 0 is reserved for not-defined class
        self.voxel_size = [0.1, 0.1, 0.1]
        self.max_points_per_voxel = 5
        self.max_num_voxels = 100000
        self.cal_voxel_mean_std = False
        self.n_point_features = 3 # x,y,z
        self.label_downsample = 4
        # This part induct info from the info provided above
        self.n_classes = len(self.classes)
        self.grid_size = np.round((self.pc_range[3:6] - self.pc_range[:3]) /
                                  np.array(self.voxel_size)).astype(np.int64)
        self.feature_map_size = [1, *(self.grid_size / 8).astype(np.int64).tolist()[:-1]]
        self.add_gps_noise = False
        self.gps_noise_std = [0.5, 0.5, 0.0, 0.0, 0.0, 2.0] # [x, y, z, roll, pitch, yaw]
        # Anchor generation / target assignment settings for training.
        self.TARGET_ASSIGNER ={
            'anchor_generator': {
                'type': 'AnchorGeneratorRange',
                'sizes': [4.41, 1.98, 1.64],
                'rotations': [0, 1.57], # remember to change the num_dirs in HEAD cfg
                'match_threshold': 0.6,
                'unmatch_threshold': 0.45,
                'class_name': 'Car'
            },
            'sample_positive_fraction': None,
            'sample_size': 512,
            'pos_area_threshold': -1,
            'box_coder': self.BOX_CODER,
            'out_size_factor': 8,
            'enable_similar_type': True,
            'feature_map_size': self.feature_map_size
        }
        # Preprocessing pipeline per phase.
        self.process_fn = {
            'train': ['mask_points_in_range','rm_empty_gt_boxes', 'points_to_voxel', 'assign_target'],
            'test': ['mask_points_in_range', 'points_to_voxel', 'assign_target']
        }
    def update(self):
        """Recompute derived fields after changing classes/pc_range/voxel_size."""
        self.n_classes = len(self.classes)
        self.grid_size = np.round((self.pc_range[3:6] - self.pc_range[:3]) /
                                  np.array(self.voxel_size)).astype(np.int64)
        self.feature_map_size = [1, *(self.grid_size / 8).astype(np.int64).tolist()[:-1]]
        self.TARGET_ASSIGNER['feature_map_size'] = self.feature_map_size
class Model:
    """Network configuration for the fusion_ssd detector: per-module
    hyper-parameters (voxel encoder, backbone, detection head, CPM encoder
    and fusion-detection head)."""

    def __init__(self):
        super(Model, self).__init__()
        self.name = 'fusion_ssd'
        # Only these sub-modules are trained; the rest stay frozen.
        self.params_train = ['CPMEnc', 'FUDET']
        self.VFE = None
        self.SPCONV = {
            'num_out_features': 64
        }
        self. MAP2BEV = {
            'num_features': 128
        }
        # Spatial-semantic feature aggregation backbone.
        self.SSFA = {
            'layer_nums': [5],
            'ds_layer_strides': [1],
            'ds_num_filters': [128],
            'us_layer_strides': [1],
            'us_num_filters': [128],
            'num_input_features': 128,
            'norm_cfg': None
        }
        # Detection head: losses, box coder and NMS settings.
        self.HEAD = {
            'type': 'MultiGroupHead',
            'mode': '3d',
            'in_channels': sum([128,]),
            'norm_cfg': None,
            'num_class': 1,
            'num_dirs': 2,
            'class_names': ['Car'],
            'weights': [1, ],
            'with_cls' : True,
            'with_reg' : True,
            'reg_class_agnostic' : False,
            'pred_var': False,
            'box_coder': Dataset.BOX_CODER,
            'encode_background_as_zeros': True,
            'loss_norm': {'type': 'NormByNumPositives',
                          'pos_cls_weight': 50.0,
                          'neg_cls_weight': 1.0},
            'loss_cls': {'type': 'SigmoidFocalLoss',
                         'alpha': 0.25,
                         'gamma': 2.0,
                         'loss_weight': 1.0},
            'use_sigmoid_score': True,
            'loss_bbox': {'type': 'WeightedSmoothL1Loss',
                          'sigma': 3.0,
                          'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                          'codewise': True,
                          'loss_weight': 2.0},
            'loss_iou': {'type': 'WeightedSmoothL1Loss',
                         'sigma': 3.0,
                         'code_weights': None,
                         'codewise': True,
                         'loss_weight': 1.0},
            'encode_rad_error_by_sin': True,
            'use_dir_classifier': True,
            'loss_aux': {'type': 'WeightedSoftmaxClassificationLoss',
                         'name': 'direction_classifier',
                         'loss_weight': 0.2},
            'direction_offset': 0.5,
            'nms': {
                'name': 'normal', # 'iou_weighted',
                'score_threshold': 0.3,
                'cnt_threshold': 1,
                'nms_pre_max_size': 1000,
                'nms_post_max_size': 100,
                'nms_iou_threshold': 0.01,
            },
            'logger': None
        }
        # Cooperative-perception-message encoder.
        self.CPMEnc = {
            'in_channel': 128,
            'out_channel': 128,
            'n_layers': 2,
            'encode_feature': 'ssfa'
        }
        # Fusion detection head.
        self.FUDET = {
            'fusion_resolution': 0.1,
            'fusion_score': 0.3,
            'upsample_channels': [64, 32, 16],
            'downsample_channels': [16, 32, 64],
            'conv_head_channels': [128]
        }
class Optimization:
    """Training / evaluation hyper-parameters and logging paths."""

    def __init__(self):
        # Optimizer, scheduler and checkpointing settings.
        self.TRAIN = {
            'project_name': None,
            'visualization_func': visualization,
            'batch_size': 1,
            'lr': 0.0001,
            'weight_decay': 0.0001,
            'betas': [0.95, 0.999],
            'scheduler_step': 150,
            'scheduler_gamma': 0.5,
            'resume': True,
            'epoch': 0,
            'total_epochs': 10,
            'log_every': 20,
            'save_ckpt_every': 10
        }
        # Inference / evaluation settings (NMS thresholds, AP IoUs).
        self.TEST = {
            'save_img': False,
            'bev': False,
            'n_coop': 0,
            'com_range': 40,
            'score_threshold': 0.3,
            'cnt_threshold': 2,
            'nms_pre_max_size': 1000,
            'nms_post_max_size': 100,
            'nms_iou_threshold': 0.01,
            'ap_ious': [0.3, 0.4, 0.5, 0.6, 0.7]
        }
        self.PATHS = {
            'run': '/logging/path'
        }
| [
"yunshuang.yuan@ikg.uni-hannover.de"
] | yunshuang.yuan@ikg.uni-hannover.de |
5a15158e5dcab14ba761c04310d97d07c955a3a3 | 534de189d87fca35352799d3fb0cefad6fdd3c72 | /crc.py | f3080f6b14f30b408b89528cedbc3969c11ce7f7 | [] | no_license | h4m3d92/AFE_IOT_Server | 903f0e018d7f09e40f415e7a487e555b9ab8e7da | b6dd23899252974a46ee5d538d2fbc5bfb2c4159 | refs/heads/master | 2023-03-15T02:00:44.509206 | 2021-03-17T10:45:20 | 2021-03-17T10:45:20 | 344,603,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | def calCrc(inStr):
dnpCrcTable = [
0x0000, 0x3D65, 0x7ACA, 0x47AF, 0xF594, 0xC8F1, 0x8F5E, 0xB23B, 0xD64D, 0xEB28, 0xAC87, 0x91E2, 0x23D9,
0x1EBC, 0x5913, 0x6476, 0x91FF, 0xAC9A, 0xEB35, 0xD650, 0x646B, 0x590E, 0x1EA1, 0x23C4, 0x47B2, 0x7AD7,
0x3D78, 0x001D, 0xB226, 0x8F43, 0xC8EC, 0xF589, 0x1E9B, 0x23FE, 0x6451, 0x5934, 0xEB0F, 0xD66A, 0x91C5,
0xACA0, 0xC8D6, 0xF5B3, 0xB21C, 0x8F79, 0x3D42, 0x0027, 0x4788, 0x7AED, 0x8F64, 0xB201, 0xF5AE, 0xC8CB,
0x7AF0, 0x4795, 0x003A, 0x3D5F, 0x5929, 0x644C, 0x23E3, 0x1E86, 0xACBD, 0x91D8, 0xD677, 0xEB12, 0x3D36,
0x0053, 0x47FC, 0x7A99, 0xC8A2, 0xF5C7, 0xB268, 0x8F0D, 0xEB7B, 0xD61E, 0x91B1, 0xACD4, 0x1EEF, 0x238A,
0x6425, 0x5940, 0xACC9, 0x91AC, 0xD603, 0xEB66, 0x595D, 0x6438, 0x2397, 0x1EF2, 0x7A84, 0x47E1, 0x004E,
0x3D2B, 0x8F10, 0xB275, 0xF5DA, 0xC8BF, 0x23AD, 0x1EC8, 0x5967, 0x6402, 0xD639, 0xEB5C, 0xACF3, 0x9196,
0xF5E0, 0xC885, 0x8F2A, 0xB24F, 0x0074, 0x3D11, 0x7ABE, 0x47DB, 0xB252, 0x8F37, 0xC898, 0xF5FD, 0x47C6,
0x7AA3, 0x3D0C, 0x0069, 0x641F, 0x597A, 0x1ED5, 0x23B0, 0x918B, 0xACEE, 0xEB41, 0xD624, 0x7A6C, 0x4709,
0x00A6, 0x3DC3, 0x8FF8, 0xB29D, 0xF532, 0xC857, 0xAC21, 0x9144, 0xD6EB, 0xEB8E, 0x59B5, 0x64D0, 0x237F,
0x1E1A, 0xEB93, 0xD6F6, 0x9159, 0xAC3C, 0x1E07, 0x2362, 0x64CD, 0x59A8, 0x3DDE, 0x00BB, 0x4714, 0x7A71,
0xC84A, 0xF52F, 0xB280, 0x8FE5, 0x64F7, 0x5992, 0x1E3D, 0x2358, 0x9163, 0xAC06, 0xEBA9, 0xD6CC, 0xB2BA,
0x8FDF, 0xC870, 0xF515, 0x472E, 0x7A4B, 0x3DE4, 0x0081, 0xF508, 0xC86D, 0x8FC2, 0xB2A7, 0x009C, 0x3DF9,
0x7A56, 0x4733, 0x2345, 0x1E20, 0x598F, 0x64EA, 0xD6D1, 0xEBB4, 0xAC1B, 0x917E, 0x475A, 0x7A3F, 0x3D90,
0x00F5, 0xB2CE, 0x8FAB, 0xC804, 0xF561, 0x9117, 0xAC72, 0xEBDD, 0xD6B8, 0x6483, 0x59E6, 0x1E49, 0x232C,
0xD6A5, 0xEBC0, 0xAC6F, 0x910A, 0x2331, 0x1E54, 0x59FB, 0x649E, 0x00E8, 0x3D8D, 0x7A22, 0x4747, 0xF57C,
0xC819, 0x8FB6, 0xB2D3, 0x59C1, 0x64A4, 0x230B, 0x1E6E, 0xAC55, 0x9130, 0xD69F, 0xEBFA, 0x8F8C, 0xB2E9,
0xF546, 0xC823, 0x7A18, 0x477D, 0x00D2, 0x3DB7, 0xC83E, 0xF55B, 0xB2F4, 0x8F91, 0x3DAA, 0x00CF, 0x4760,
0x7A05, 0x1E73, 0x2316, 0x64B9, 0x59DC, 0xEBE7, 0xD682, 0x912D, 0xAC48]
inLen = int(int(len(inStr)/16)*16)
crc = 0
for i in range(inLen):
crc = ((crc << 8) ^ dnpCrcTable[((crc >> 8) ^ inStr[i]) & 0x00FF]) & 0xFFFF
crc ^= 0xFFFF
return bytes([int(crc / 256) & 0xFF, crc & 0xFF]) | [
"62805548+h4m3d92@users.noreply.github.com"
] | 62805548+h4m3d92@users.noreply.github.com |
5ff1fb385203591c762b667a0517c7c061db70aa | 8c94b3b5998ac171c8942940805fbf55d10df215 | /train_dqn.py | af615415a0fa7f19a8d198f88f5fe601c49e77dc | [] | no_license | wenxingliu/p1_navigation | 049869d2fe321f83ae54f77a664436a5555e99ed | eec82f78729937ea93fadaba7f68095f088f5c36 | refs/heads/master | 2020-03-30T08:21:24.908801 | 2018-09-30T21:37:26 | 2018-09-30T21:37:26 | 151,006,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | from collections import deque
import numpy as np
import torch
def train_dqn(epochs, agent, env, gamma, tau, eps_decay, eps_min, train=True):
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=train)[brain_name]
# number of actions
action_size = brain.vector_action_space_size
# examine the state space
state = env_info.vector_observations[0]
state_size = len(state)
eps = 1.0 if train else 0.0
scores_window = deque(maxlen=100)
scores = []
for epoch in np.arange(1, epochs + 1):
env_info = env.reset(train_mode=True)[brain_name]
state = env_info.vector_observations[0]
score = 0.
done = False
while not done:
action = agent.act(state, eps)
env_info = env.step(action)[brain_name]
next_state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
if train:
agent.step(state, action, reward, next_state, done, gamma, tau)
state = next_state
score += reward
if train:
agent.scheduler.step()
eps = max(eps * eps_decay, eps_min)
elif epoch % 20 == 0:
print("Epoch %d ended... Final score %.2f" % (epoch, float(score)))
scores_window.append(score)
scores.append(score)
if epoch % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(epoch, np.mean(scores_window)))
if (np.mean(scores_window)>=13) and train:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(epoch-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
return scores | [
"wenxing.liu.09@gmail.com"
] | wenxing.liu.09@gmail.com |
15f4732d2029dec5ca58338f4f003779f1c6ac44 | 5c26364dc74899f06746da2df43c5438b62dcc5c | /gTest1.py | 9fb758d49945050b089d3a4229db5fce666752bd | [] | no_license | Exochos/ITC110 | 8014bcd7a9149529fce39112c75397755db6efc2 | bfc1cec21f5b2257f0a4fa9dac07952cec01087b | refs/heads/master | 2020-12-07T14:54:14.435866 | 2020-03-26T01:20:11 | 2020-03-26T01:20:11 | 232,740,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py |
from graphics import *
def main():
    """Open a default graphics window, draw one point, and echo its x coordinate."""
    dot = Point(59, 60)
    print(dot.getX())
    window = GraphWin()
    dot.draw(window)
main() | [
"exochos@gmail.com"
] | exochos@gmail.com |
e38518833b226dc97b94e5f89340ddc6917cca57 | c0216b5e8a21f668dfc0980f5a5913eb8084759a | /SearchApp/migrations/0006_profile_name.py | f2c062d2744652dd83ac0a8fec643945783eb391 | [] | no_license | tushdhingra/SearchPage | cc18265317907caf413043bca2f9f54f335dd56c | 86bce55db9475e82ea8f5de64f939beb2cb076bd | refs/heads/main | 2023-01-31T16:21:02.220230 | 2020-12-17T16:41:40 | 2020-12-17T16:41:40 | 302,873,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # Generated by Django 3.0.6 on 2020-12-17 10:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header): adds a ``Name`` CharField to the
    # ``profile`` model with a default so existing rows remain valid.
    dependencies = [
        ('SearchApp', '0005_remove_profile_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='Name',
            field=models.CharField(default='Prof', max_length=200),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
369b6deda986697894141aaed5d4ee114c8ff2fc | c98ec703bce8129d5589736f41b415988af3399f | /xt18.py | 64e399362c2c097c5376007106a6af5f17c33dd6 | [] | no_license | WuZhiT/xtpython | 5e5bbcccedf573546ccc3cf03bc5aa10ef257a21 | 5c3a09d56391b01971ab9de73a0da118b99dc6c7 | refs/heads/master | 2022-11-21T13:18:41.336101 | 2020-07-19T10:13:08 | 2020-07-19T10:13:08 | 265,479,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #this oneis like oyur script with argv
def print_two(*args):
    """Unpack exactly two positional arguments and echo them."""
    first, second = args
    print("arg1: {}, arg2: {}.".format(first, second))
#ok,that *args is actually pointless, we can just do it
def print_two_again(arg1, arg2):
    """Echo two named parameters directly — no *args unpacking needed."""
    message = f"arg1: {arg1}, arg2: {arg2}."
    print(message)
#this just takes one argument
def print_one(arg1):
    """Echo a single argument."""
    print("arg1: {}".format(arg1))
#this one takes no arguments
def print_none():
    """Print a fixed message; takes no arguments at all."""
    message = "I got none."
    print(message)
# Exercise each variant once with sample arguments.
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none() | [
"t618718@outlook.com"
] | t618718@outlook.com |
582f0dd888efa3c0892391fb055df2e821e1ccb1 | ac9ed4e3366e265368cac64322ada9df55e9439f | /examples/lis2mdl_simpletest.py | 7fa303c8196bbf3f18bb1e61e14a2be847fe16c7 | [
"MIT"
] | permissive | FoamyGuy/Adafruit_CircuitPython_LIS2MDL | 666c81355dafdbde77ad84b2303faca840764a4f | d23fc6ecefee4479d844e13203c13f48ff7c56ad | refs/heads/master | 2021-01-01T07:45:00.957372 | 2020-04-19T14:48:18 | 2020-04-19T14:48:18 | 239,178,496 | 0 | 0 | MIT | 2020-02-08T18:01:09 | 2020-02-08T18:01:08 | null | UTF-8 | Python | false | false | 363 | py | """ Display magnetometer data once per second """
import time
import board
import busio
import adafruit_lis2mdl
# I2C bus on the board's default SCL/SDA pins.
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_lis2mdl.LIS2MDL(i2c)
# Poll the magnetometer forever, printing the X/Y/Z field in microtesla
# once per second.
while True:
    mag_x, mag_y, mag_z = sensor.magnetic
    print('X:{0:10.2f}, Y:{1:10.2f}, Z:{2:10.2f} uT'.format(mag_x, mag_y, mag_z))
    print('')
    time.sleep(1.0)
| [
"nospam187+github@gmail.com"
] | nospam187+github@gmail.com |
9cc93006fd395e9f46274e4068d914f4239c7798 | 6d6715ec3c5044da1ec80403da5f2f8592670be1 | /programs/program.py | 1493ad8eb9b762aeec65dedd2d1fa3157d7644c6 | [] | no_license | TheWorldofProgramms/Pygame_progect | 08d357b1a8412d30e7e565a05f2bf3529620545d | 1b1872afc4cd80888ecb10655d677e6c89c7e369 | refs/heads/master | 2021-02-06T13:00:28.006451 | 2020-02-29T08:27:25 | 2020-02-29T08:27:25 | 243,916,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | import pygame, PyQt5, sys
from main_snake import GameSnake
from main_window import Ui_MainWindow
from arrow_game import ArrowGame
from help import Ui_HelpWindow
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow
pygame.init()
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main launcher window: lets the user pick a game or open the help window."""

    def __init__(self):  # Main window and game selection
        super().__init__()
        self.setupUi(self)
        # Wire launcher buttons (widget names come from the Qt Designer .ui file).
        self.pushButton_2.clicked.connect(self.snake)
        self.pushButton_3.clicked.connect(self.arrow)
        self.pushButton_4.clicked.connect(self.help)

    def snake(self):
        """Launch the snake game (blocking pygame loop)."""
        snake = GameSnake()
        snake.main()

    def arrow(self):
        """Launch the arrow game (blocking pygame loop)."""
        arrow = ArrowGame()
        arrow.main()

    def help(self):
        """Open the help window, keeping a reference so Qt doesn't GC it."""
        # Bug fix: the original assigned to ``self.help``, shadowing this bound
        # method on the instance; store the window under a distinct name.
        self.help_window = HelpWindow()
        self.help_window.show()
class HelpWindow(QWidget, Ui_HelpWindow):
    # Standalone help window built from the Ui_HelpWindow designer layout.
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.a = 0  # NOTE(review): unused placeholder? purpose unclear — confirm before removing
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = MainWindow()
    ex.show()
    # sys.exit raises SystemExit with the Qt event loop's exit code, so any
    # statement after this line (e.g. the trailing pygame.quit()) never runs.
    sys.exit(app.exec_())
pygame.quit() | [
"theprogrammworld@gmail.com"
] | theprogrammworld@gmail.com |
21c932c0ef6ee45ed6cad45b7e274656112a77ab | 0dbc1eb9a6279644c7288a171219afeb8101306e | /weekend_9am_batch/p1.py | abaf990078f3414535a59e11fc4fe5e93432dbc1 | [] | no_license | amitsaxena9225/XPATH | 505aa34cf8ca5352dd171e434348016166ce5f4b | 35110f5054aa5b64243bea4fc55518c63fc324e9 | refs/heads/master | 2020-07-11T05:53:15.478851 | 2019-07-24T09:21:40 | 2019-07-24T09:21:40 | 204,460,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py |
'''def prime(*args):
for a in args:
print(type(a))
for i in a:
print(type(i))
if i > 2:
for j in range(2, i):
if i % j == 0:
print(i, "number is not prime")
break
else:
print(i, "number is prime")
prime([7, 2, 3, 4, 5, 6, 7, 8, 8])
def prime(a):
for i in a:
print(type(i))
if i > 2:
for j in range(2, i):
if i % j == 0:
print(i, "number is not prime")
break
else:
print(i, "number is prime")
prime([7, 2, 3, 4, 5, 6, 7, 8, 8])
'''
# Sieve: every multiple of 2..7 below 50 is composite (7*7 = 49 covers the range).
noprme = [j for i in range(2, 8) for j in range(i * 2, 50, i)]
# Use a set for O(1) membership tests instead of scanning the list per candidate.
_composites = set(noprme)
prime = [x for x in range(2, 50) if x not in _composites]
print(prime)
print(noprme)
| [
"amitsaxena9225@gmail.com"
] | amitsaxena9225@gmail.com |
a3e29ce1c00840ff011f687bbfc23e5a7f91fb49 | c2bdab9bed8f2a13eafe62083d55ecbd1a546b12 | /examples/apitest.py | 2f40ec821bcb71f09db36547a51918eac1b0a143 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | iaroslav-ai/rllab | 3938a2d479b7aadae34a2257b85435415628a44d | 786da7acbd657089d3b3466d7db3b2a265effe9b | refs/heads/master | 2021-01-09T20:27:58.019770 | 2016-09-26T22:57:29 | 2016-09-26T22:57:29 | 69,281,153 | 0 | 1 | null | 2016-09-26T18:41:54 | 2016-09-26T18:41:54 | null | UTF-8 | Python | false | false | 487 | py | from rllab.envs.gym_env import GymEnv
import os
import apimock as spx
import shutil
# Start each run with a clean log directory.
dat_folda = "test"
if os.path.exists(dat_folda):
    shutil.rmtree(dat_folda)
env = GymEnv("LunarLander-v2", log_dir=dat_folda)
# Initialize the mock agent API with observation dimension and action count.
spx.init(env.observation_space.low.shape[0], env.action_space.n)
# Run up to 100000 episodes; each inner loop plays one episode to completion.
for i in range(100000):
    o = env.reset()
    spx.reset()
    while True:
        a = spx.get_action(o)
        o, r, d, env_info = env.step(a)
        spx.reward(r)
        if d:
            break
| [
"iaroslogos@gmail.com"
] | iaroslogos@gmail.com |
b8132a25e5f3f4a91477a8ecfc2fc7fd18560cab | 0cf1e928715c8683926961c6c2e543fe28a9be3f | /train_dialog.py | aa11539e550067372f4db9b67851f32049b68742 | [] | no_license | HossamEmam95/rasa_trial | 912ff24b697cd183029d69d3d55bdc7e7db6d3ab | faea95582ef9647a4ca83a88247fe98af476249f | refs/heads/master | 2022-09-13T15:03:05.984036 | 2018-10-18T13:33:26 | 2018-10-18T13:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # Imports
#-----------
# rasa core
import logging
from rasa_core import training
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.domain import Domain
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.featurizers import MaxHistoryTrackerFeaturizer, BinarySingleStateFeaturizer
# from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RegexInterpreter
from rasa_core.interpreter import RasaNLUInterpreter
# Function
#------------
def train_dialog(dialog_training_data_file, domain_file, path_to_model='models/dialogue'):
    """Train a Rasa Core dialogue model from a stories file and persist it.

    Uses a memoization policy with a history window of 1 and saves the
    trained agent under ``path_to_model``.
    """
    logging.basicConfig(level='INFO')
    dialogue_agent = Agent(
        domain_file,
        policies=[MemoizationPolicy(max_history=1)],
    )
    stories = dialogue_agent.load_data(dialog_training_data_file)
    dialogue_agent.train(
        stories,
        augmentation_factor=50,
        epochs=200,
        batch_size=10,
        validation_split=0.2,
    )
    dialogue_agent.persist(path_to_model)
# Train
#--------
# One-shot training run against the bundled stories/domain files.
train_dialog('data/stories.md', 'domain.yml')
print("done")
| [
"hossamsalahemam@gmail.com"
] | hossamsalahemam@gmail.com |
93a79d4b7a6e850940f4273dd7acf56f1f9eeb0b | d859dea94d9cc3f72331bc3fe76274743330b10e | /Mod02Tutorial.py | 517f80a0358c37a4bbbc2b22d6dbe76358a3de44 | [] | no_license | mattalhamilton-zz/Python-and-Bash-Scripts | 9c992f33b2f8a2c942c038babcf86b238ae3e1e3 | db55db2429f58aa914999dbc1f8a85719e9aba58 | refs/heads/main | 2023-03-20T19:20:31.032613 | 2021-03-25T21:28:51 | 2021-03-25T21:28:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py | ##Matthew Hamilton
##Mod 02 Tutorial
import random
def rando_insert(thing_being_inserted):
    """Insert the given value at a random position (0-9) in the global my_list."""
    slot = random.randint(0, 9)
    my_list.insert(slot, thing_being_inserted)
# Collect exactly 10 user-supplied entries; input() always returns strings.
counter = 0
my_list = []
while counter < 10:
    list_item = input('Please enter a word or a number: ')
    my_list.append(list_item)
    counter += 1
ints_only = []
print('\nTask 1 - Check the length of the list\n')
print('This list has 10 items. ' + str(len(my_list) == 10))
print('\nTask 2 - Print the list\n')
print(my_list)
print('\nTask 3 - Swapping first item with the last item in the list then print the list.\n')
first_thing = my_list[0]
my_list[0] = my_list[-1]
my_list[-1] = first_thing
print(my_list)
print('\nTask 4 - Print the first 3 items in the list and the last three in the list.\n')
print(my_list[0:3], my_list[-3:])
print('\nTask 5 - Loop through and print all the items in the list.\n')
for i in my_list:
    print(i)
print('\nTask 6 - Use an IF statement to check to see if the word "cat" is \nin the list and let the user know.')
if 'cat' in my_list:
    print('\nThere is a cat in my list')
else:
    print('There is no cat in my list')
print('\nTask 7 - Get the name of a Marvel character from the user and pass that \nto a function that randomly inserts the name into the list (import random).')
another_item = input('\nPlease insert the name of a Marvel character: ')
rando_insert(another_item)
print('\nTask 8 - Get the index for the Marvel character and print it out so \nthat it looks nice.')
print(another_item + ' is at index ' + str(my_list.index(another_item)))
print('\nTask 9 - Copy all the integers in the original list to a new list, then sort and print out that list.')
for item in my_list:
    # Only ValueError can occur for non-numeric strings; the original used a
    # bare except and also converted each value twice.
    try:
        ints_only.append(int(item))
    except ValueError:
        continue
ints_only.sort()
print ('\nThese are the integers from the list')
print (ints_only)
print('\nTask 10 - Convert the original list to a tuple and print the tuple.\n')
my_tuple = tuple(my_list)
print(my_tuple)
print('\nTask 11 - Try and change the first item in the tuple to "cat", but catch the error and print out Tuples are immutable!\n')
try:
    my_tuple[0] = 'cat'
except TypeError:  # tuples reject item assignment with TypeError specifically
    print('Tuples are immutable!')
print('\nTask 12 - Copy this new list in the text box into your script.\n')  # fixed typo: "Copt"
list_in_list = [[1,2,3],['a','b','c']]
for i in list_in_list:
    for j in i:
        print(j)
    print()
print('press enter to end the script')
input()
| [
"noreply@github.com"
] | noreply@github.com |
66561e109d0b7596590f1695b5ea4a41fd34299e | 93b29c9f804f6c4ec1e32d0a2b0fb2f6cebd393f | /auto_complete.py | e4fc0d3b2e019f10692f96e9117f45e6515ef415 | [] | no_license | shira-d111/Auto-complete | f04399dda8f9fe0fbfd6ce2e8b9d7757058141c7 | 248c3bc4c9e6d1ef2fa7f64afcb6a170206dec51 | refs/heads/master | 2022-12-23T19:00:08.531723 | 2020-09-14T08:32:17 | 2020-09-14T08:32:17 | 295,352,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #import re
import linecache
import json
from auto_complete_data import AutoCompleteDataClass
from json.decoder import JSONDecodeError
def change_to_auto_complete_data(c):
    """Materialize one cached completion entry into an AutoCompleteDataClass.

    ``c`` appears to be a (file_index, line_number, score) triple as written
    by the cache builder — TODO confirm against the cache writer.
    """
    with open("files.json", "r") as f:
        # Bug fix: the original swallowed JSONDecodeError with ``pass`` and
        # then crashed with a confusing NameError because ``files`` was never
        # bound.  Let a malformed files.json raise immediately instead.
        files = json.load(f)
    str_sen = linecache.getline(files[c[0]], c[1])
    res = AutoCompleteDataClass(str_sen, files[c[0]], c[1], 1, c[2])
    return res
def get_best_k_completions(prefix):
    """Return up to 5 AutoCompleteDataClass matches for ``prefix``.

    Completions are looked up in the per-letter JSON cache file named after
    the prefix's first character.  Bug fix: always return a list — the
    original fell off the end and returned None whenever fewer than 5
    completions existed (including when there were none at all).
    """
    completions = []
    file_name = prefix[0] + ".json"
    with open("json_files/" + file_name, "r") as f:
        cache_of_user = json.load(f)
    cache_of_user_dict = cache_of_user[0]
    comp = cache_of_user_dict.get(prefix)
    if comp:
        for c in comp:
            completions.append(change_to_auto_complete_data(c))
            if len(completions) == 5:
                break
    return completions
| [
"noreply@github.com"
] | noreply@github.com |
5dc5d4d353faa2567e9e99b1c5fed9cedda94495 | e0dbbbb09bbc53fa9cd78e3642d5c3a7c7475084 | /1-Django/kolt_django_tutorial/blog/views.py | aa21033a972eddac2729c7e8876da7a12ab1d5a0 | [
"MIT"
] | permissive | koltpython/python-workshops | 6c8fe30655b3de56f849c4f6cb0db70aa533aecf | 8b7f5ce1bad0380d90a65a0e033d0acefd7ddcde | refs/heads/master | 2020-09-19T20:14:03.732655 | 2020-04-20T19:26:03 | 2020-04-20T19:26:03 | 224,287,601 | 3 | 0 | MIT | 2020-06-06T00:48:03 | 2019-11-26T21:16:32 | Jupyter Notebook | UTF-8 | Python | false | false | 585 | py | from django.shortcuts import render
from django.http import HttpResponse
from blog.models import Post
# Create your views here.
# index view
def index(request):
    """Render the blog index page listing every post."""
    posts_data = {
        # templates read the full queryset under the 'posts' key
        'posts': Post.objects.all(),
    }
    return render(request, 'blog/index.html', context=posts_data)
def post_detail(request, pk):
    """Render the detail page for the post whose primary key is ``pk``."""
    # The template receives the selected post as 'post_in_detail'.
    posts_data = {'post_in_detail': Post.objects.get(pk=pk)}
return render(request, 'blog/post_detail.html', context=posts_data) | [
"ftamur16@ku.edu.tr"
] | ftamur16@ku.edu.tr |
24503b611dd166a71acc9c0b32a5c5692a0498f8 | 983e11432b479655c77c7acb6974c7b58562b34b | /Lab/Lab2/Lab2.py | bec4ddf8d6ff509aafc9031a4b4c3b76ae62530f | [] | no_license | baum1982/tensorflow | bba76ab2ddb64ea96f8a6c21e45783669a28e8d8 | ce14411c8b70ab6c0c49115f853511d15171e1e0 | refs/heads/master | 2021-08-23T14:50:01.477738 | 2017-12-05T09:02:26 | 2017-12-05T09:02:26 | 113,057,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | '''
Created on 2017. 12. 5.
@author: baum-work
'''
import tensorflow as tf
# Toy data for linear regression; the true relationship is y = x.
x_train = [1,2,3]
y_train = [1,2,3]
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
# Linear hypothesis: h(x) = W*x + b
hypothesis = x_train * W + b
# reduce_mean --> computes the mean
# square --> element-wise square
cost = tf.reduce_mean(tf.square(hypothesis - y_train));
# Minimize the cost with plain gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost);
sess = tf.Session()
# Variables must be initialized once via global_variables_initializer() before running them
sess.run(tf.global_variables_initializer())
for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b)) | [
"jaehwan.kim@tongyang.co.kr"
] | jaehwan.kim@tongyang.co.kr |
15eaba73b9c93281886b7e8ea28132e31d49bb80 | a546508d9bad6c65af9d8f8d502ff7d7c9726724 | /train/word.py | 9f88dfd9a0ab2d98228421731b0563bfb5175969 | [] | no_license | leezqcst/360 | e53f50705f258e9dd4ab1ae65b422edae8d17bf0 | 5319759336fd6557046b12a04ae40bf87b59dbe1 | refs/heads/master | 2021-05-09T21:18:02.714784 | 2018-01-23T11:52:48 | 2018-01-23T11:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | import os
# Pin to the first GPU and let TensorFlow grow its memory allocation on demand.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
import sys
sys.path.append("..")
from utils.preprocess import *
import keras
import keras.backend as K
from utils.data import *
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.callbacks import *
from utils.others import *
from models.deepmodel import *
print("Load Train && Val")
train = pd.read_csv(Config.cache_dir+"/train_.csv",sep="\t")
val = pd.read_csv(Config.cache_dir+"/val.csv",sep="\t")
val_label = to_categorical(val.label)
batch_size = 64
model_name = "word_cnn"
# Layer(s) kept frozen until epoch 12 (see the schedule below).
trainable_layer = ["word_embedding"]
train_batch_generator = word_cnn_train_batch_generator
print("Load Val Data")
val_word_seq = pickle.load(open(Config.cache_dir+"/g_val_word_seq_%s.pkl"%Config.word_seq_maxlen, "rb"))
val_seq = val_word_seq
print("Load Word")
word_embed_weight = np.load(Config.word_embed_weight_path)
model = get_textcnn(Config.word_seq_maxlen, word_embed_weight)
# Manual 15-epoch schedule: drop the learning rate at epoch 8, unfreeze the
# embedding layer at epoch 12, evaluate and checkpoint after every epoch.
# NOTE(review): toggling .trainable after compile usually requires a
# re-compile in Keras for the change to take effect — confirm.
for i in range(15):
    if i==8:
        K.set_value(model.optimizer.lr, 0.0001)
    if i==12:
        for l in trainable_layer:
            model.get_layer(l).trainable = True
    model.fit_generator(
        train_batch_generator(train.content.values, train.label.values, batch_size=batch_size),
        epochs = 1,
        steps_per_epoch = int(train.shape[0]/batch_size),
        validation_data = (val_seq, val_label)
    )
    pred = np.squeeze(model.predict(val_seq))
    pre,rec,f1 = score(pred, val_label)
    print(pre,rec,f1)
    model.save(Config.cache_dir + "/dp_embed_%s_epoch_%s_%s.h5"%(model_name, i, f1))
| [
"fuliucansheng@gmail.com"
] | fuliucansheng@gmail.com |
710b22b41fb3f58cf8b3eeb6f608e97d65099cbe | 07c05aace39963a0a93ff517f993e4371b3ef96f | /test.py | 80bae811acfe0148bc49a15a78d85eceef022aee | [] | no_license | JUKE89/PythonLearning | d7131a36f1b76c53bb6d450b23a0ad2ee689d2b2 | 6507dbc98323e9fc17623acd95a3d0b2a8b431e3 | refs/heads/master | 2021-01-25T09:38:15.074731 | 2017-06-09T14:32:32 | 2017-06-09T14:32:32 | 93,865,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | print ("you can print texperiment ")
# A trivially-true comparison, just demonstrating the if statement.
if 43 > 42:
    print("print this stmt")
# Python sequences like this are lists, not fixed-size arrays.
movies = ["list 1", "asdf", "asdf"]
print (movies[1]) | [
"U6018537@ten.thomsonreuters.com"
] | U6018537@ten.thomsonreuters.com |
443d92d28e04c623852969ca387762c99d411436 | a869ad00fc6811b63bab6c0bb8c227b5e2f82091 | /conftest.py | c71b9bba45ae88fcb52f5bfa52564afa1b130001 | [] | no_license | samkeen/python-lambda-import | 1dd9aba35e868d509051d6242b3361365d847265 | 69d1636b88f56fe55e66fda8aea44f8ffc9fc652 | refs/heads/master | 2020-09-30T01:50:52.921648 | 2019-12-10T18:49:56 | 2019-12-10T18:49:56 | 227,171,066 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | import os, sys
from os.path import join
# Absolute path to the Lambda handler package (resolved relative to this
# conftest) — appended to sys.path below so tests can import the handler.
lambda_dir = join(os.path.dirname(os.path.abspath(__file__)), 'hello_world')
sys.path.append(lambda_dir) | [
"samkeen@amazon.com"
] | samkeen@amazon.com |
13654caad9e66cad471769e6f2a66591fd621c25 | 272f9dcc61b8bf721313eb6c1cf7d34acd6293f3 | /exercises/PyExercises/NameSurnameEx13.py | 3c608cc1a27a9280e35bc06c519947d1da0cfa0c | [
"MIT"
] | permissive | Mathisxy123/PythonCrashCourse | cc7e52a8dac7f7a20425d9f44bebcc075ae96e51 | bb1afaeecfa195d3771ff96fea0b9f907125264e | refs/heads/main | 2023-02-12T09:57:06.944401 | 2021-01-05T09:06:57 | 2021-01-05T09:06:57 | 327,745,771 | 1 | 0 | MIT | 2021-01-07T23:10:40 | 2021-01-07T23:10:40 | null | UTF-8 | Python | false | false | 122 | py | # constantly ask the user for new names and phone numbers
# to add to the phone book, then save them
# Maps a person's name to their phone number (both strings); starts empty.
phone_book = {}
| [
"noreply@github.com"
] | noreply@github.com |
f29241cd5d7f9127aa55a2411375645fc606e2a5 | 46f358b954d2d0067a2093ee9006e222f831a8f8 | /great_expectations/expectations/core/expect_column_max_to_be_between.py | 386953ecb814ea47f4440e4be4ed4e92580474d9 | [
"Apache-2.0"
] | permissive | dhruvvyas90/great_expectations | b963aa99c683a0da3a9e2b5a1046d2a32f622c7b | fddf5336065c644558c528301e601b9f02be87e2 | refs/heads/main | 2023-01-28T15:26:55.331282 | 2020-12-03T18:52:14 | 2020-12-03T18:52:14 | 319,719,900 | 1 | 0 | Apache-2.0 | 2020-12-08T18:02:33 | 2020-12-08T18:02:32 | null | UTF-8 | Python | false | false | 8,969 | py | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from ...execution_engine.sqlalchemy_execution_engine import SqlAlchemyExecutionEngine
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
handle_strict_min_max,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
try:
import sqlalchemy as sa
except ImportError:
pass
from ...render.renderer.renderer import renderer
from ..expectation import ColumnExpectation, InvalidExpectationConfigurationError
from ..registry import extract_metrics
class ExpectColumnMaxToBeBetween(ColumnExpectation):
    """Expect the column max to be between a min and max value
    expect_column_max_to_be_between is a \
    :func:`column_aggregate_expectation
    <great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
    Args:
        column (str): \
            The column name
        min_value (comparable type or None): \
            The minimum number of unique values allowed.
        max_value (comparable type or None): \
            The maximum number of unique values allowed.
    Keyword Args:
        parse_strings_as_datetimes (Boolean or None): \
            If True, parse min_value, max_values, and all non-null column values to datetimes before making \
            comparisons.
        output_strftime_format (str or None): \
            A valid strfime format for datetime output. Only used if parse_strings_as_datetimes=True.
        strict_min (boolean):
            If True, the minimal column minimum must be strictly larger than min_value, default=False
        strict_max (boolean):
            If True, the maximal column minimum must be strictly smaller than max_value, default=False
    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.
    Returns:
        An ExpectationSuiteValidationResult
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    Notes:
        These fields in the result object are customized for this expectation:
        ::
            {
                "observed_value": (list) The actual column max
            }
        * min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
        * If min_value is None, then max_value is treated as an upper bound
        * If max_value is None, then min_value is treated as a lower bound
    """
    # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\
    metric_dependencies = ("column.max",)
    success_keys = ("min_value", "strict_min", "max_value", "strict_max")
    # Default values
    default_kwarg_values = {
        "min_value": None,
        "max_value": None,
        "strict_min": None,
        "strict_max": None,
        "result_format": "BASIC",
        "include_config": True,
        "catch_exceptions": False,
    }
    # NOTE(review): the stray string statement below is not a docstring (it
    # follows other statements); kept verbatim, but it likely belongs in a comment.
    """ A Column Map MetricProvider Decorator for the Maximum"""
    def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            True if the configuration has been validated successfully. Otherwise, raises an exception
        """
        super().validate_configuration(configuration)
        # Min/max/strict-bounds validation is shared by all metric-value
        # expectations, so delegate to the common helper.
        self.validate_metric_value_between_configuration(configuration=configuration)
    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs,
    ):
        """Build the human-readable ("prescriptive") description of this
        expectation for Data Docs as a list of string-template content blocks.
        """
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name", True)
        include_column_name = (
            include_column_name if include_column_name is not None else True
        )
        styling = runtime_configuration.get("styling")
        # Normalize kwargs so every referenced key exists (missing -> None).
        params = substitute_none_for_missing(
            configuration.kwargs,
            [
                "column",
                "min_value",
                "max_value",
                "parse_strings_as_datetimes",
                "row_condition",
                "condition_parser",
                "strict_min",
                "strict_max",
            ],
        )
        # Choose the sentence shape based on which bounds are present.
        if (params["min_value"] is None) and (params["max_value"] is None):
            template_str = "maximum value may have any numerical value."
        else:
            at_least_str, at_most_str = handle_strict_min_max(params)
            if params["min_value"] is not None and params["max_value"] is not None:
                template_str = f"maximum value must be {at_least_str} $min_value and {at_most_str} $max_value."
            elif params["min_value"] is None:
                template_str = f"maximum value must be {at_most_str} $max_value."
            elif params["max_value"] is None:
                template_str = f"maximum value must be {at_least_str} $min_value."
        if params.get("parse_strings_as_datetimes"):
            template_str += " Values should be parsed as datetimes."
        if include_column_name:
            template_str = "$column " + template_str
        # Prefix any row condition ("if <condition>, then ...").
        if params["row_condition"] is not None:
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = conditional_template_str + ", then " + template_str
            params.update(conditional_params)
        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]
    @classmethod
    @renderer(renderer_type="renderer.descriptive.stats_table.max_row")
    def _descriptive_stats_table_max_row_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs,
    ):
        """Render the "Maximum" row of the descriptive stats table: a label
        cell plus the observed value formatted to two decimal places.
        """
        assert result, "Must pass in result."
        return [
            {
                "content_block_type": "string_template",
                "string_template": {
                    "template": "Maximum",
                    "tooltip": {"content": "expect_column_max_to_be_between"},
                },
            },
            "{:.2f}".format(result.result["observed_value"]),
        ]
    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        """Validate by checking the computed "column.max" metric against the
        configured min/max bounds via the shared metric-value-between logic."""
        return self._validate_metric_value_between(
            metric_name="column.max",
            configuration=configuration,
            metrics=metrics,
            runtime_configuration=runtime_configuration,
            execution_engine=execution_engine,
        )
| [
"noreply@github.com"
] | noreply@github.com |
85eda1a4341c7f50fcb5c659e3fdcadd75cbd85f | 01e17d9f8c78c39b2012fbd969bab9728c5105c6 | /30_practice/30_practice_urllib.py | 8f96048eb6157061382e7e002b97b35fca2f4067 | [] | no_license | m09902004/Python-Learning | 36fbbf21a4054e0f0269bed9249bb649274f05e4 | 2a8052238b897b336f7873afa0476aae644d45cf | refs/heads/master | 2020-07-11T09:11:19.726444 | 2019-09-20T02:48:11 | 2019-09-20T02:48:11 | 202,258,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 13:35:05 2019
@author: ASUS
"""
import urllib.request as ur

url = "https://data.gov.tw"
res = ur.urlopen(url)
print('1.網址:',res.geturl(),type(res.geturl()))
print()
print('2.讀取狀態:',res.status,type(res.status))
print()
print('3.表頭資訊:',res.getheaders(),type(res.getheaders()))
print()
# Bug fix: HTTPResponse.read() consumes the stream, so the original's
# repeated read() calls returned b'' from the second call onward (making
# items 4 and 5 print empty data).  Read the body exactly once.
content = res.read()
print('4.網頁資料(Byte):',content,type(content))
print()
print('5.網頁資料(string):',content.decode(),type(content.decode()))
| [
"noreply@github.com"
] | noreply@github.com |
9f16b156d823de007a292cb20a7c907682d2a4ba | 415d76a24690dabd223b047c86a52076565512a1 | /kodistubs/docs/conf.py | 7c9b491367a3f587d95c0a4d5a3088f0028685a3 | [
"Apache-2.0"
] | permissive | thatalex/comforttv | e2698a4212502f374f3c9ee661e55b38b98678ea | 41bcb8c1688d8ebe35e6d8282cb1f8ea9cf35248 | refs/heads/master | 2020-03-31T05:03:43.062193 | 2019-02-12T21:43:45 | 2019-02-12T21:43:45 | 151,931,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,831 | py | # -*- coding: utf-8 -*-
#
# Kodistubs documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 08 14:30:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
#'sphinx.ext.ifconfig',
#'sphinx.ext.viewcode',
]
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance']
autosummary_generate = True
intersphinx_mapping = {'https://docs.python.org/2.7': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kodistubs'
copyright = u'2015, Roman V.M.'
author = u'Roman V.M.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.0.0'
# The full version, including alpha/beta/rc tags.
release = u'2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
'github_button': True,
'github_type': 'star&v=2',
'github_user': 'romanvm',
'github_repo': 'Kodistubs',
'github_banner': True,
'font_family': 'Georgia',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kodistubsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Kodistubs.tex', u'Kodistubs Documentation',
u'Roman V.M.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kodistubs', u'Kodistubs Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Kodistubs', u'Kodistubs Documentation',
author, 'Kodistubs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"vialikanau.aliaksei@gmail.com"
] | vialikanau.aliaksei@gmail.com |
0678a3938b8fbf93a95cbe043c10b2e3afda8349 | fd286c7af76c14b0b2797065d7cba176c4ae44d0 | /tests/test_app.py | d88f01951525a2da7f45083f312f045f951ade43 | [] | no_license | ATTX-project/uv-provenance-service | 71da74c3f47c5cdf98c7d4f935c032a69dce9dd2 | 943067ce1b7858fbd478f79d7159b25c6ab0c4fa | refs/heads/master | 2021-08-28T04:50:57.405020 | 2017-12-11T08:07:29 | 2017-12-11T08:07:29 | 103,507,769 | 0 | 0 | null | 2017-12-01T13:26:05 | 2017-09-14T08:35:16 | Python | UTF-8 | Python | false | false | 1,439 | py | # import falcon
import unittest
import httpretty
# import requests
from falcon import testing
from uvprov_api.app import init_api
# from wf_api.app import api_version
# from datetime import datetime
# from wf_api.utils.db import connect_DB
class appTest(testing.TestCase):
    """Base test case that wires up a fresh uv-provenance API app."""

    def setUp(self):
        """Create a new API instance before every test."""
        # The database connection is intentionally left out of this fixture.
        # self.conn = connect_DB()
        self.app = init_api()

    def tearDown(self):
        """No per-test cleanup is needed."""
class TestApp(appTest):
    """Smoke tests for the application root endpoint."""

    @httpretty.activate
    def test_main(self):
        """The root path should answer with HTTP 404."""
        httpretty.register_uri(httpretty.GET, "http://localhost:4301/", status=404)
        result = self.simulate_get('/')
        assert result.status_code == 404
        httpretty.disable()
        httpretty.reset()
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"stefan@blankdots.com"
] | stefan@blankdots.com |
a553cff8e4f00a8e58b43880196a4c4acfecab9c | aabcbe9640f3e6ebcf062592dcb6ca2569ebc4ec | /tests/integration/fit/test_fit_mlp.py | 469e8c1f16050e3f96de255ad5f912b3fd1eaa29 | [
"Apache-2.0"
] | permissive | AliSheheryar/finetuner | 89ea21674faa1dba383146ad1c46811ce30cada6 | b92df7de90cf07c345971dc1af354e70bdee9708 | refs/heads/main | 2023-09-02T19:14:48.866437 | 2021-10-21T07:21:17 | 2021-10-21T07:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | import json
import paddle
import tensorflow as tf
import torch
import finetuner
from finetuner.toydata import generate_fashion_match
all_test_losses = [
'CosineSiameseLoss',
'CosineTripletLoss',
'EuclideanSiameseLoss',
'EuclideanTripletLoss',
]
def test_fit_all(tmpdir):
    """Run finetuner.fit for every backend/loss combination and save each result.

    Builds an identical 28x28 -> 128 -> 32 MLP in Keras, PyTorch and Paddle,
    trains it for two epochs against every loss in ``all_test_losses`` and
    writes the outcome to ``tmpdir``.
    """

    def _keras():
        return tf.keras.Sequential(
            [
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dense(32),
            ]
        )

    def _pytorch():
        return torch.nn.Sequential(
            torch.nn.Flatten(),
            torch.nn.Linear(in_features=28 * 28, out_features=128),
            torch.nn.ReLU(),
            torch.nn.Linear(in_features=128, out_features=32),
        )

    def _paddle():
        return paddle.nn.Sequential(
            paddle.nn.Flatten(),
            paddle.nn.Linear(in_features=28 * 28, out_features=128),
            paddle.nn.ReLU(),
            paddle.nn.Linear(in_features=128, out_features=32),
        )

    embed_models = {'keras': _keras, 'pytorch': _pytorch, 'paddle': _paddle}

    for kb, build in embed_models.items():
        for h in all_test_losses:
            result = finetuner.fit(
                build(),
                loss=h,
                train_data=lambda: generate_fashion_match(
                    num_neg=10, num_pos=10, num_total=300
                ),
                eval_data=lambda: generate_fashion_match(
                    num_neg=10, num_pos=10, num_total=300, is_testset=True
                ),
                epochs=2,
            )
            result.save(tmpdir / f'result-{kb}-{h}.json')
| [
"noreply@github.com"
] | noreply@github.com |
dfe9f6f53344041305df796dd55d0db632243b1d | 959276633ae1e7d00b7c061b6fe1e7b8a86771bc | /src/Maestro Box/Lecteur Python/afficheur.py | 4b5367f84876b15ee08bd001e35d5c36de7f63f8 | [] | no_license | dhokas/eMaestro | b06d32dbd64eacd52c8552bacf2ef9fd292204eb | 8ee76cc53c1e3659532a5e48ff350b30a9737177 | refs/heads/master | 2021-01-20T02:00:05.925465 | 2017-04-25T13:19:16 | 2017-04-25T13:19:16 | 89,353,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,343 | py | import Image
import ImageDraw
import sched
import time
import gc
from rgbmatrix import Adafruit_RGBmatrix
chemin_images = "drawable/"
temps_affichage_logo = 2
pos_temps = (8,0)
pos_intensite = (16, 8)
pos_passage = (0, 24)
pos_armature_symb = (56, 16)
pos_armature_chiffre = (48, 16)
pos_centaine = (40, 0)
pos_dizaine = (48, 0)
pos_unite = (56, 0)
pos_partie = (40, 8)
pos_alerte = (40, 24)
class afficheur :
def __init__(self,mat):
self.matrix = mat
def lire(self, m, mesure_debut_lecture, passage_debut_lecture, mesure_fin_lecture, passage_fin_lecture):
self.afficher_logo()
gc.disable()
map_mesures_modif = m.copy()
descripteur = map_mesures_modif["1.1"]
descripteur["mesure_courante"] = 1
descripteur["passage_reprise_courant"] = 1
descripteur["intensite_courante"] = -1
while(descripteur["mesure_courante"], descripteur["passage_reprise_courant"]) != (mesure_debut_lecture, passage_debut_lecture):
if (str(descripteur["mesure_courante"]) + '.' + str(descripteur["passage_reprise_courant"])) in map_mesures_modif :
descripteur.update(map_mesures_modif[str(descripteur["mesure_courante"]) + '.' + str(descripteur["passage_reprise_courant"])])
if "prochaine_mesure" in descripteur:
descripteur["mesure_courante"] = descripteur["prochaine_mesure"]
else:
descripteur["mesure_courante"] += 1
if "prochain_passage" in descripteur:
descripteur["passage_reprise_courant"] = descripteur["prochain_passage"]
scheduler = sched.scheduler(time.time, time.sleep)
scheduler.enter(temps_affichage_logo, 1, self.wait, ())
scheduler.run()
self.afficher_decompte(descripteur["temps_par_mesure"], descripteur["tempo"], scheduler)
while(descripteur["mesure_courante"], descripteur["passage_reprise_courant"]) != (mesure_fin_lecture + 1, passage_fin_lecture):
if (str(descripteur["mesure_courante"]) + '.' + str(descripteur["passage_reprise_courant"])) in map_mesures_modif :
descripteur.update(map_mesures_modif[str(descripteur["mesure_courante"]) + '.' + str(descripteur["passage_reprise_courant"])])
if "mesure_non_lue" not in descripteur :
self.afficher(descripteur, scheduler)
if "prochaine_mesure" in descripteur:
descripteur["mesure_courante"] = descripteur["prochaine_mesure"]
else:
descripteur["mesure_courante"] += 1
if "prochain_passage" in descripteur:
descripteur["passage_reprise_courant"] = descripteur["prochain_passage"]
self.afficher_fin()
gc.enable()
def afficher(self, descripteur, scheduler):
tempo_en_seconde = 60 / float(descripteur["tempo"])
scheduler.enter(0,3,self.afficher_mesure,(descripteur["mesure_courante"],))
scheduler.enter(0,3,self.afficher_passage,(descripteur["passage_reprise_courant"],))
if ("label" in descripteur):
scheduler.enter((t - 1) * tempo_en_seconde, 2, self.afficher_partie, (descripteur["label"],))
del descripteur["label"]
for t in range(1, descripteur["temps_par_mesure"] + 1):
scheduler.enter((t - 1) * tempo_en_seconde, 1, self.afficher_temps, (descripteur["temps_par_mesure"], t))
if ("temps_debut_intensite_"+str(t) in descripteur):
if (descripteur["nb_temps_intensite_"+str(t)] > 0):
if (descripteur["intensite_courante"] == -1):
intensite_base = descripteur["intensite_"+str(t)] - descripteur["nb_temps_intensite_"+str(t)]
if intensite_base < 0: intensite_base = 0
descripteur["intensite_courante"] = intensite_base
else:
descripteur["intensite_courante"] += (descripteur["intensite_"+str(t)] - descripteur["intensite_courante"]) / descripteur["nb_temps_intensite_"+str(t)]
prochain_temps = (t % descripteur["temps_par_mesure"]) + 1
descripteur["intensite_"+str(prochain_temps)] = descripteur["intensite_"+str(t)]
descripteur["temps_debut_intensite_"+str(prochain_temps)] = True
descripteur["nb_temps_intensite_"+str(prochain_temps)] = descripteur["nb_temps_intensite_"+str(t)] - 1
else:
descripteur["intensite_courante"] = descripteur["intensite_"+str(t)]
del descripteur["intensite_"+str(t)]
del descripteur["temps_debut_intensite_"+str(t)]
del descripteur["nb_temps_intensite_"+str(t)]
scheduler.enter((t - 1) * tempo_en_seconde, 1, self.afficher_intensite, (descripteur["intensite_courante"],))
if ("temps_alerte_" + str(t) in descripteur):
scheduler.enter((t - 1) * tempo_en_seconde, 1, self.afficher_alerte, (descripteur["couleur_alerte_" + str(t)],))
if (descripteur["couleur_alerte_" + str(t)] != -1) and ("temps_alerte_" + str(t+1) not in descripteur):
descripteur["temps_alerte_" + str(t+1)] = True
descripteur["couleur_alerte_" + str(t+1)] = -1
del descripteur["temps_alerte_" + str(t)]
del descripteur["couleur_alerte_" + str(t)]
if ("temps_debut_armature_" + str(t) in descripteur):
scheduler.enter((t - 1) * tempo_en_seconde, 2, self.afficher_armature, (descripteur["alteration_" + str(t)],))
del descripteur["temps_debut_armature_" + str(t)]
del descripteur["alteration_" + str(t)]
scheduler.enter(descripteur["temps_par_mesure"] * tempo_en_seconde, 1, self.wait, ())
scheduler.run()
def afficher_logo(self):
image = Image.open(chemin_images + 'ema logo.png')
image.load()
self.matrix.SetImage(image.im.id, 0, 0)
def afficher_decompte(self, temps_par_mesure, tempo, scheduler):
tempo_en_seconde = 60 / float(tempo)
for t in range(temps_par_mesure):
scheduler.enter(t * tempo_en_seconde, 1, self.afficher_decompte_aux, (temps_par_mesure - t,))
scheduler.enter(temps_par_mesure * tempo_en_seconde, 1, self.wait, ())
scheduler.run()
image = Image.open(chemin_images + '64_32_black.png')
image.load()
scheduler.enter(0, 1, self.matrix.SetImage, (image.im.id, 0, 0))
scheduler.run()
def afficher_decompte_aux(self, numero):
image = Image.open(chemin_images + 'd' + str(numero) + '.png')
image.load()
self.matrix.SetImage(image.im.id, 0, 0)
def wait(self):
return 0
def afficher_d_helper(self, d):
print time.time(), "decompte", d
def afficher_fin(self):
image = Image.open(chemin_images + 'fin.png')
image.load()
self.matrix.SetImage(image.im.id, 0, 0)
def afficher_temps(self, temps_par_mesure, temps_courant):
image = Image.open(chemin_images + 'a' + str(temps_par_mesure)+'_'+str(temps_courant)+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_temps[0], pos_temps[1])
def afficher_mesure(self, mesure_courante):
centaine = -1
if mesure_courante >= 100:
centaine = mesure_courante % 1000 / 100
dizaine = -1
if mesure_courante >= 10:
dizaine = mesure_courante % 100 / 10
unite = mesure_courante % 10
if centaine == -1:
image = Image.open(chemin_images + '8_8_black.png')
else:
image = Image.open(chemin_images + 'mesure' + str(centaine)+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_centaine[0], pos_centaine[1])
if dizaine == -1:
image = Image.open(chemin_images + '8_8_black.png')
else:
image = Image.open(chemin_images + 'mesure' + str(dizaine)+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_dizaine[0], pos_dizaine[1])
image = Image.open(chemin_images + 'mesure' + str(unite)+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_unite[0], pos_unite[1])
def afficher_passage(self, passage):
image = Image.open(chemin_images + 'passage' + str(passage)+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_passage[0], pos_passage[1])
def afficher_intensite(self, intensite):
if int(intensite) == -1:
image = Image.open(chemin_images + '16_16_black.png')
else:
image = Image.open(chemin_images + 'intensite' + str(int(round(intensite)))+'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_intensite[0], pos_intensite[1])
def afficher_partie(self, partie):
if alerte == '-1':
image = Image.open(chemin_images + '8_8_black.png')
else:
image = Image.open(chemin_images + 'partie' + partie +'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_partie[0], pos_partie[1])
def afficher_alerte(self, alerte):
if alerte == -1:
image = Image.open(chemin_images + '8_8_black.png')
else:
image = Image.open(chemin_images + 'alerte' + str(alerte) +'.png')
image.load()
self.matrix.SetImage(image.im.id, pos_alerte[0], pos_alerte[1])
def afficher_armature(self, armature):
if armature < 0:
image1 = Image.open(chemin_images + 'armature' + (-armature) + '.png')
image2 = Image.open(chemin_images + 'bemol.png')
elif armature > 0:
image1 = Image.open(chemin_images + 'armature' + armature + '.png')
image2 = Image.open(chemin_images + 'diese.png')
else:
image1 = Image.open(chemin_images + '8_8_black.png')
image2 = Image.open(chemin_images + '8_8_black.png')
self.matrix.SetImage(image1.im.id, pos_armature_chiffre[0], pos_armature_chiffre[1])
self.matrix.SetImage(image2.im.id, pos_armature_symb[0], pos_armature_symb[1])
| [
"doux.boris@laposte.net"
] | doux.boris@laposte.net |
9549e6df04fc49f3e5f8cea90329b3ee092c8a45 | ff786c44aca8be20946e730b218b5956277ccdcf | /E_Coli_model.py | 64eff99d386d2a5208ecf144667bd17505263a5a | [] | no_license | Alistair-Hickman/Project | 54f7abeb2dcba4dceac27ac71e71834f3b50e5dd | e3a5a9096811d7a8814e3e8d6a3f2a28348c95c4 | refs/heads/main | 2023-08-10T23:36:45.464287 | 2021-09-16T10:37:43 | 2021-09-16T10:37:43 | 383,538,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,129 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 13:32:35 2020o
@author: Alist
"""
import numpy as np
from numpy.random import seed
import random
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import math
from numpy import savetxt
#from Matmul_fnc import tumble
def MSD(pos_arr):
    """Compute the mean squared displacement of a 3D trajectory for lag
    times 20..999, plot MSD vs lag, save the figure and print an effective
    diffusion coefficient obtained from a linear fit.

    `pos_arr` is an (N, 3) sequence of x/y/z positions sampled at equal
    time steps.
    """
    tau = []
    MSD_values = []
    for lag in range(20, 1000):
        # Squared displacement over every pair of samples `lag` steps apart.
        squared = [
            (pos_arr[i][0] - pos_arr[i - lag][0]) ** 2
            + (pos_arr[i][1] - pos_arr[i - lag][1]) ** 2
            + (pos_arr[i][2] - pos_arr[i - lag][2]) ** 2
            for i in range(1, len(pos_arr))
            if (i - lag) > 0
        ]
        MSD_values.append(np.mean(squared))
        tau.append(lag)

    fig, gr = plt.subplots()
    gr.plot(tau, MSD_values)
    gr.set_title('E.Coli MSD Graph')
    gr.set(xlabel='tau', ylabel='MSD (E-6 m^2)')
    fig.savefig('EColi_MSD.png', dpi=80)

    # Keep tau and MSD side by side (one row per lag) for optional export.
    MSD_data = np.array([tau, MSD_values]).T

    # Effective diffusion coefficient from the slope of MSD(tau).
    slope, intercept = np.polyfit(tau, MSD_values, 1)
    D = slope * 10 / 6
    print("D_eff =", D)

    # Lovely and Dahlquist analytical value for comparison.
    D_ld = (10 * 10 * 1) / (3 * (1 - 0.5))
    print("Test Lovely and Dahlquist =", D_ld)
def rotate_y(alpha):
    """Return the 3x3 rotation matrix for angle `alpha` (radians) about y."""
    c = np.cos(alpha)
    s = np.sin(alpha)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
def rotate_z(psi):
    """Return the 3x3 rotation matrix for angle `psi` (radians) about z."""
    c = np.cos(psi)
    s = np.sin(psi)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def tumble(vector, alpha):
    """Return `vector` deflected by polar angle `alpha` (radians), with the
    azimuth of the deflection drawn uniformly at random."""
    vx, vy, vz = float(vector[0]), float(vector[1]), float(vector[2])
    r = np.sqrt(vx ** 2 + vy ** 2 + vz ** 2)
    theta = np.arccos(vz / r)
    # atan2 handles every quadrant; phi is defined as 0 when x is 0.
    phi = 0 if vx == 0 else math.atan2(vy, vx)
    # Deflect the z unit vector by alpha and spin it by a random azimuth...
    deflected = rotate_y(alpha) @ [0, 0, 1]
    psi = random.uniform(0, 2 * np.pi)
    deflected = rotate_z(psi) @ deflected
    # ...then rotate the result back into the frame of the input vector.
    return rotate_z(phi) @ rotate_y(theta) @ deflected
def rotate(vector):
    """Apply one rotational-diffusion step to `vector`: a small Gaussian
    deflection (std-dev 4 * 0.062 * 0.1, as in the original) about a
    uniformly random azimuth."""
    vx, vy, vz = float(vector[0]), float(vector[1]), float(vector[2])
    r = np.sqrt(vx ** 2 + vy ** 2 + vz ** 2)
    theta = np.arccos(vz / r)
    phi = 0 if vx == 0 else math.atan2(vy, vx)
    # Random draw order (gauss, then uniform) matches the original exactly.
    alpha = random.gauss(0, 4 * 0.062 * 0.1)
    step = rotate_y(alpha) @ [0, 0, 1]
    psi = random.uniform(0, 2 * np.pi)
    step = rotate_z(psi) @ step
    return rotate_z(phi) @ rotate_y(theta) @ step
# ---- Simulation parameters -------------------------------------------------
trajectories, steps = (1, 20000)   # walkers per step / number of time steps
x_pos, y_pos, z_pos = (0, 0, 0)    # start at the origin
alpha = (np.pi / 3)                # tumble deflection angle (60 degrees)

# Coordinate history, one list per axis.
xpos_arr = []
ypos_arr = []
zpos_arr = []

# Initial swimming direction: the unit z vector.
x = 0
y = 0
z = 1
swim = np.array([x, y, z])
old_vector = np.array([x, y, z])
vectors = []
angles = []

# ---- Main run-and-tumble loop ----------------------------------------------
for i in range(steps):
    for j in range(trajectories):
        # Record the current position (the very first sample is the origin).
        xpos_arr.append(x_pos)
        ypos_arr.append(y_pos)
        zpos_arr.append(z_pos)
        # Rotational diffusion, then advance one unit along the new heading.
        swim = rotate(swim)
        x_pos = x_pos + swim[0]
        y_pos = y_pos + swim[1]
        z_pos = z_pos + swim[2]
        vectors.append(swim)
        old_vector = swim
        # With probability 0.1 per step, tumble by `alpha` about a random
        # azimuth (random draw order matches the original).
        tumb_test = random.random()
        if (tumb_test < 0.1):
            swim = tumble(swim, alpha)

# ---- Plot the trajectory in 3D ---------------------------------------------
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot3D(xpos_arr, ypos_arr, zpos_arr)
ax.set_xlabel('X axis (um)')
ax.set_ylabel('Y axis(um)')
ax.set_zlabel('Z axis(um)')
ax.set_title("E.Coli Trajectory")
plt.savefig('EColi_Trajectory.png', dpi=80)

# ---- Persist the trajectory and compute its MSD ----------------------------
pos_arr = np.array([xpos_arr, ypos_arr, zpos_arr]).T
np.savetxt('trajectory_file.csv', pos_arr)
Mean_Square_Disp = MSD(pos_arr)
| [
"alistair.hickman63@gmail.com"
] | alistair.hickman63@gmail.com |
80ef7835253fcaef8f8fae0b12584c0a61a61921 | 5cd0946e3675c66d80273fa3e1a406c8d4343c12 | /map-filter-reduce/multiproc_test.py | e119f21dd72fef86856999c93578407664e02e83 | [] | no_license | Mansouroopi/python-advance-concept | fa06ac0b3273662e73fd6de37775fec25f51894d | 016341b988b5a64307b1e96d06fb4491ff053e77 | refs/heads/master | 2023-03-07T23:11:32.033633 | 2021-02-23T16:50:12 | 2021-02-23T16:50:12 | 336,753,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # multiproc_test.py
import random
import multiprocessing
def list_append(count, id, out_list):
    """Append `count` pseudo-random floats in [0, 1) to `out_list`.

    A deliberately CPU-heavy workload for the multiprocessing demo below.
    The `id` parameter is unused but kept for signature compatibility with
    the Process(..., args=(size, i, out_list)) call sites.
    """
    for _ in range(count):
        out_list.append(random.random())
if __name__ == "__main__":
    size = 10000000  # how many random numbers each worker appends
    procs = 2        # number of worker processes

    # Build one Process per worker. NOTE(review): each child receives its
    # own copy of `out_list`, so appends made in the child are NOT visible
    # here in the parent. That is fine for a pure CPU benchmark, but use
    # multiprocessing.Manager().list() if the results are actually needed.
    jobs = []
    for i in range(0, procs):
        out_list = list()
        jobs.append(
            multiprocessing.Process(target=list_append, args=(size, i, out_list))
        )

    # Run every worker to completion.
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()

    print("List processing complete.")
| [
"mansourabdalla22@gmailcom"
] | mansourabdalla22@gmailcom |
a6d6ea3b3be28c17d178c6810e34cf2b263d01b2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /PTh7tBusAZRgjAWEZ_11.py | 60ec4e9a9fb3660fa0aac2d124fbbae29d276436 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py |
def calc_diff(obj, limit):
    """Return the absolute gap between the sum of `obj`'s values and `limit`."""
    total = sum(obj.values())
    return abs(total - limit)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5fe273e4a86567dc6bbb239374d1aeab29760fdc | 9eb9a7c5c18e4847f52ae098882c51c46f6398e7 | /src/GUICtl.py | 67ec858aa92f5e090cc910faa528a158fca10f51 | [] | no_license | AryamanReddi99/Hack-The-Midlands | 2017fb3df455208a4f99f030ebe8561fa20df7c2 | 72a73cfa77749fbace8f1f99e87edd905458c2e9 | refs/heads/master | 2020-08-28T12:52:00.171422 | 2019-11-06T10:18:10 | 2019-11-06T10:18:10 | 217,705,212 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | import pyautogui as pg
import platform
from time import sleep
def lock_screen():
    """Lock the desktop session via the Super+L hotkey (Linux only)."""
    if platform.system() != "Linux":
        print("Not implemented for this platform")
        return
    pg.hotkey('winleft', 'l')
def workspace_up():
    """Switch one virtual workspace up (Super, PageUp, Super; Linux only)."""
    if platform.system() != "Linux":
        print("Not implemented for this platform")
        return
    # Same key sequence and 0.3 s pacing as the original.
    for key in ('winleft', 'pageup', 'winleft'):
        pg.press(key)
        sleep(0.3)
def workspace_down():
    """Switch one virtual workspace down (Super, PageDown, Super; Linux only)."""
    if platform.system() != "Linux":
        print("Not implemented for this platform")
        return
    # Same key sequence and 0.3 s pacing as the original.
    for key in ('winleft', 'pagedown', 'winleft'):
        pg.press(key)
        sleep(0.3)
| [
"david.misc1@aspect135.co.uk"
] | david.misc1@aspect135.co.uk |
03dd33c5872c44c363516af41041b942fc4b82c7 | a6ed990fa4326c625a2a02f0c02eedf758ad8c7b | /meraki/sdk/python/removeNetworkSwitchSwitchStack.py | ea22d146e97d6bbedda21ccbaa78bfaab2c71d73 | [] | no_license | StevenKitavi/Meraki-Dashboard-API-v1-Documentation | cf2352976c6b6c00c17a5f6442cedf0aeed46c22 | 5ed02a7def29a2ce455a3f2cfa185f76f44789f5 | refs/heads/main | 2023-03-02T08:49:34.846055 | 2021-02-05T10:31:25 | 2021-02-05T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
network_id = 'L_646829496481105433'
# NOTE(review): switch_stack_id is empty -- the API call below will fail
# until a real stack id is supplied.
switch_stack_id = ''
serial = 'QBZY-XWVU-TSRQ'
# Remove the switch identified by `serial` from the given stack (this is a
# destructive call against the Meraki dashboard).
response = dashboard.switch.removeNetworkSwitchSwitchStack(
    network_id, switch_stack_id, serial
)
print(response)
"shiychen@cisco.com"
] | shiychen@cisco.com |
cfec69c429cf20a1391c0b96aa76a0d3dcc92545 | fd7332717369499f73e8172cd9fca5b41bc32013 | /appJar/lib/png.py | e040792ced2a2944a9637d389207bd7caeded491 | [
"Apache-2.0",
"MIT"
] | permissive | colewebb/green-pixel-analysis | a45e8038e19cde93ba6feeb518b78be09cc91e76 | b3be49dc4f1318056c57c550294f16d99ca5a394 | refs/heads/master | 2023-04-07T11:18:18.557466 | 2021-04-16T14:38:07 | 2021-04-16T14:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,095 | py | #!/usr/bin/env python
from __future__ import print_function
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
reverse conversion from PNG to PNM. The interface is similar to that
of the ``pnmtopng`` program from Netpbm. Type ``python png.py --help``
at the shell prompt for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience. PyPNG regards
itself as at liberty to replace any sequence type with any sufficiently
compatible other sequence type; in practice each row is an array (from
the array module), and the outer list is sometimes an iterator rather
than an explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
# Version of this PyPNG module.
__version__ = "0.0.18"
import itertools
import math
import re
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
from functools import reduce
try:
# `cpngfilters` is a Cython module: it must be compiled by
# Cython for this import to work.
# If this import does work, then it overrides pure-python
# filtering functions defined later in this file (see `class
# pngfilters`).
import cpngfilters as pngfilters
except ImportError:
pass
# Public API of this module.
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']

# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)

# One (xstart, ystart, xstep, ystep) tuple per Adam7 interlace pass,
# in pass order.  Used when writing/reading interlaced images.
_adam7 = ((0, 0, 8, 8),
          (4, 0, 8, 8),
          (0, 4, 4, 8),
          (2, 0, 4, 4),
          (0, 2, 2, 4),
          (1, 0, 2, 2),
          (0, 1, 1, 2))
def group(s, n):
    """Chunk the elements of *s* into a list of *n*-tuples.

    Any trailing elements that do not fill a complete tuple are
    silently discarded (standard ``zip`` truncation behaviour).
    """
    # A single iterator repeated n times makes zip() pull n
    # consecutive elements for each output tuple.
    chunks = zip(*(iter(s),) * n)
    return list(chunks)
def isarray(x):
    """Return ``True`` exactly when *x* is an ``array.array`` instance."""
    return isinstance(x, array)
def tostring(row):
    """Convert a row (an ``array.array``) into a byte string.

    Bug fix: ``array.tostring`` was deprecated since Python 3.2 and
    removed in Python 3.9, so the original ``row.tostring()`` raises
    ``AttributeError`` on modern interpreters.  Prefer ``tobytes``
    (the exact replacement) and fall back to ``tostring`` only for
    very old Pythons that lack it.
    """
    try:
        return row.tobytes()
    except AttributeError:
        # Ancient Python: only the old spelling exists.
        return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """
    Interleave (colour) planes, e.g. RGB + A = RGBA.

    Return an array of pixels consisting of the `ipsize` elements of
    data from each pixel in `ipixels` followed by the `apsize` elements
    of data from each pixel in `apixels`.  Conventionally `ipixels`
    and `apixels` are byte arrays so the sizes are bytes, but it
    actually works with any arrays of the same type.  The returned
    array is the same type as the input arrays which should be the
    same type as each other.
    """
    ilen = len(ipixels)
    alen = len(apixels)
    total = ilen + alen
    stride = ipsize + apsize

    # Allocate the output by extending with both inputs; this is just
    # a cheap way to create an array of the right type and length
    # (the values are overwritten by the slice assignments below).
    out = array(ipixels.typecode)
    out.extend(ipixels)
    out.extend(apixels)

    # Scatter each image plane, then each alpha plane, into place
    # using extended-slice assignment.
    for j in range(ipsize):
        out[j:total:stride] = ipixels[j:ilen:ipsize]
    for j in range(apsize):
        out[j + ipsize:total:stride] = apixels[j:alen:apsize]
    return out
def check_palette(palette):
    """Check a palette argument (to the :class:`Writer` class) for
    validity.  Return the palette as a list if okay; raise
    ``ValueError`` otherwise.
    """
    # None is the default and is allowed.
    if palette is None:
        return None

    entries = list(palette)
    if not (0 < len(entries) <= 256):
        raise ValueError("a palette must have between 1 and 256 entries")

    # PNG requires all RGBA (4-tuple) entries to come before any
    # RGB (3-tuple) entries, since the tRNS chunk is a prefix.
    seen_triple = False
    for index, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ValueError(
                "palette entry %d: entries must be 3- or 4-tuples." % index)
        if len(entry) == 3:
            seen_triple = True
        if seen_triple and len(entry) == 4:
            raise ValueError(
                "palette entry %d: all 4-tuples must precede all 3-tuples" % index)
        for channel in entry:
            if int(channel) != channel or not(0 <= channel <= 255):
                raise ValueError(
                    "palette entry %d: values must be integer: 0 <= x <= 255" % index)
    return entries
def check_sizes(size, width, height):
    """Check that the size arguments, if supplied, are consistent.
    Return a (width, height) pair.
    """
    if not size:
        # No combined size given: pass width/height through unchanged.
        return width, height

    if len(size) != 2:
        raise ValueError(
            "size argument should be a pair (width, height)")
    w, h = size
    if width is not None and width != w:
        raise ValueError(
            "size[0] (%r) and width (%r) should match when both are used."
            % (w, width))
    if height is not None and height != h:
        raise ValueError(
            "size[1] (%r) and height (%r) should match when both are used."
            % (h, height))
    return size
def check_color(c, greyscale, which):
    """Check that a colour argument for transparent or background
    options is the right form.  Return the colour (which, if it's a
    bare integer, is "corrected" to a 1-tuple).
    """
    if c is None:
        return c
    if greyscale:
        # A bare number is promoted to a singleton tuple.
        try:
            len(c)
        except TypeError:
            c = (c,)
        if len(c) != 1:
            raise ValueError("%s for greyscale must be 1-tuple" %
              which)
        if not isinteger(c[0]):
            raise ValueError(
                "%s colour for greyscale must be integer" % which)
    else:
        # Colour images need an integer RGB triple.
        if len(c) != 3 or not all(isinteger(v) for v in c):
            raise ValueError(
                "%s colour must be a triple of integers" % which)
    return c
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    def __str__(self):
        # Render as "ClassName: arg1 arg2 ...".
        message = ' '.join(self.args)
        return self.__class__.__name__ + ': ' + message
class FormatError(Error):
    """Problem with input file format.  In other words, PNG file does
    not conform to the specification in some way and is invalid.
    """
class ChunkError(FormatError):
    """Error within a single PNG chunk (malformed or misplaced)."""
    pass
class Writer:
    """
    PNG encoder in pure Python.

    Construct a :class:`Writer` with the image parameters, then call
    one of the ``write*`` methods with a file object and the pixel
    data.
    """

    def __init__(self, width=None, height=None,
                 size=None,
                 greyscale=False,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 bytes_per_sample=None, # deprecated
                 planes=None,
                 colormap=None,
                 maxval=None,
                 chunk_limit=2**20,
                 x_pixels_per_unit = None,
                 y_pixels_per_unit = None,
                 unit_is_meter = False):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Input data is greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.
        x_pixels_per_unit
          Number of pixels a unit along the x axis (write a
          `pHYs` chunk).
        y_pixels_per_unit
          Number of pixels a unit along the y axis (write a
          `pHYs` chunk). Along with `x_pixel_unit`, this gives
          the pixel size ratio.
        unit_is_meter
          `True` to indicate that the unit (for the `pHYs`
          chunk) is metre.

        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument.  If `size` is used it should be a pair (*width*,
        *height*).

        `greyscale` and `alpha` are booleans that specify whether
        an image is greyscale (or colour), and whether it has an
        alpha channel (or not).

        `bitdepth` specifies the bit depth of the source pixel values.
        Each source pixel value must be an integer between 0 and
        ``2**bitdepth-1``.  For example, 8-bit images have values
        between 0 and 255.  PNG only stores images with bit depths of
        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
        the next highest valid bit depth is selected, and an ``sBIT``
        (significant bits) chunk is generated that specifies the
        original precision of the source image.  In this case the
        supplied pixel values will be rescaled to fit the range of
        the selected bit depth.

        The details of which bit depth / colour model combinations the
        PNG file format supports directly, are somewhat arcane
        (refer to the PNG specification for full details).  Briefly:
        "small" bit depths (1,2,4) are only allowed with greyscale and
        colour mapped images; colour mapped images cannot have bit
        depth 16.

        For colour mapped images (in other words, when the `palette`
        argument is specified) the `bitdepth` argument must match one
        of the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to
        have a PNG image with a palette and an ``sBIT`` chunk, but the
        meaning is slightly different; it would be awkward to press
        the `bitdepth` argument into service for this.)

        The `palette` option, when specified, causes a colour
        mapped image to be created: the PNG colour type is set to 3;
        `greyscale` must not be set; `alpha` must not be set;
        `transparent` must not be set; the bit depth must be 1,2,4,
        or 8.  When a colour mapped image is created, the pixel values
        are palette indexes and the `bitdepth` argument specifies the
        size of these indexes (not the size of the colour values in
        the palette).

        The palette argument value should be a sequence of 3- or
        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
        specify RGBA palette entries.  If both 4-tuples and 3-tuples
        appear in the sequence then all the 4-tuples must come
        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
        are 4-tuples then a ``tRNS`` chunk is created as well.  The
        ``PLTE`` chunk will contain all the RGB triples in the same
        sequence; the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence.  Palette entries
        are always 8-bit.

        If specified, the `transparent` and `background` parameters
        must be a tuple with three integer values for red, green,
        blue, or a simple integer (or singleton tuple) for a greyscale
        image.

        If specified, the `gamma` parameter must be a positive number
        (generally, a `float`).  A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file, they are assumed to have already
        been converted appropriately for the gamma specified.

        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module.  Values from 1 to 9 specify
        compression, with 9 being "more compressed" (usually smaller
        and slower, but it doesn't always work out that way).  0 means
        no compression.  -1 and ``None`` both mean that the default
        level of compression will be picked by the ``zlib`` module
        (which is generally acceptable).

        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).  This
        does not affect how the pixels should be presented to the
        encoder, rather it changes how they are arranged into the PNG
        file.  On slow connexions interlaced images can be partially
        decoded by the browser to give a rough view of the image that
        is successively refined as more image data appears.

        .. note ::

          Enabling the `interlace` option requires the entire image
          to be processed in working memory.

        `chunk_limit` is used to limit the amount of memory used
        whilst compressing the image.  In order to avoid using large
        amounts of memory, multiple ``IDAT`` chunks may be created.

        :raises ValueError: for any inconsistent combination of
            arguments (see the individual checks below).
        """

        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.

        width, height = check_sizes(size, width, height)
        del size

        if width <= 0 or height <= 0:
            raise ValueError("width and height must be greater than zero")
        if not isinteger(width) or not isinteger(height):
            raise ValueError("width and height must be integers")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2**32-1 or height > 2**32-1:
            raise ValueError("width and height cannot exceed 2**32-1")

        if alpha and transparent is not None:
            raise ValueError(
                "transparent colour not allowed with alpha channel")

        # Legacy spelling of the bitdepth argument; converted and then
        # discarded.
        if bytes_per_sample is not None:
            warnings.warn('please use bitdepth instead of bytes_per_sample',
                          DeprecationWarning)
            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
                raise ValueError(
                    "bytes per sample must be .125, .25, .5, 1, or 2")
            bitdepth = int(8*bytes_per_sample)
        del bytes_per_sample
        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
            raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
              bitdepth)

        # self.rescale, when not None, is a (sourcedepth, targetdepth)
        # pair used to scale pixel values up to a PNG-legal bit depth
        # (and to emit an sBIT chunk recording the original depth).
        self.rescale = None
        palette = check_palette(palette)
        if palette:
            if bitdepth not in (1,2,4,8):
                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
            if transparent is not None:
                raise ValueError("transparent and palette not compatible")
            if alpha:
                raise ValueError("alpha and palette not compatible")
            if greyscale:
                raise ValueError("greyscale and palette not compatible")
        else:
            # No palette, check for sBIT chunk generation.
            if alpha or not greyscale:
                if bitdepth not in (8,16):
                    targetbitdepth = (8,16)[bitdepth > 8]
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth
            else:
                assert greyscale
                assert not alpha
                if bitdepth not in (1,2,4,8,16):
                    if bitdepth > 8:
                        targetbitdepth = 16
                    elif bitdepth == 3:
                        targetbitdepth = 4
                    else:
                        assert bitdepth in (5,6,7)
                        targetbitdepth = 8
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth

        if bitdepth < 8 and (alpha or not greyscale and not palette):
            raise ValueError(
              "bitdepth < 8 only permitted with greyscale or palette")
        if bitdepth > 8 and palette:
            raise ValueError(
                "bit depth must be 8 or less for images with palette")

        transparent = check_color(transparent, greyscale, 'transparent')
        background = check_color(background, greyscale, 'background')

        # It's important that the true boolean values (greyscale, alpha,
        # colormap, interlace) are converted to bool because Iverson's
        # convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = bool(greyscale)
        self.alpha = bool(alpha)
        self.colormap = bool(palette)
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = palette
        self.x_pixels_per_unit = x_pixels_per_unit
        self.y_pixels_per_unit = y_pixels_per_unit
        self.unit_is_meter = bool(unit_is_meter)

        # PNG colour type byte: bit 2 = alpha, bit 1 = colour,
        # bit 0 = palette.  http://www.w3.org/TR/PNG/#11IHDR
        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
        assert self.color_type in (0,2,3,4,6)

        # Number of colour planes (3 for RGB, 1 for grey/palette);
        # self.planes additionally counts the alpha plane.
        self.color_planes = (3,1)[self.greyscale or self.colormap]
        self.planes = self.color_planes + self.alpha
        # :todo: fix for bitdepth < 8
        # NOTE(review): in Python 3 this division yields a float —
        # confirm downstream users of psize tolerate that.
        self.psize = (self.bitdepth/8) * self.planes

    def make_palette(self):
        """Create the byte sequences for a ``PLTE`` and if necessary a
        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
        ``None`` if no ``tRNS`` chunk is necessary.
        """
        p = array(str('B'))
        t = array(str('B'))

        # PLTE gets every RGB triple; tRNS gets the alpha byte of
        # each 4-tuple entry (check_palette guarantees 4-tuples come
        # first, so tRNS is a prefix).
        for x in self.palette:
            p.extend(x[0:3])
            if len(x) > 3:
                t.append(x[3])
        p = tostring(p)
        t = tostring(t)
        if t:
            return p,t
        return p,None

    def write(self, outfile, rows):
        """Write a PNG image to the output file.  `rows` should be
        an iterable that yields each row in boxed row flat pixel
        format.  The rows should be the rows of the original image,
        so there should be ``self.height`` rows of ``self.width *
        self.planes`` values.  If `interlace` is specified (when
        creating the instance), then an interlaced PNG file will
        be written.  Supply the rows in the normal image order;
        the interlacing is carried out internally.

        .. note ::

          Interlacing will require the entire image to be in working
          memory.
        """
        if self.interlace:
            # Interlacing needs random access to pixels, so the whole
            # image is materialised into one flat array first.
            fmt = 'BH'[self.bitdepth > 8]
            a = array(str(fmt), itertools.chain(*rows))
            return self.write_array(outfile, a)

        nrows = self.write_passes(outfile, rows)
        if nrows != self.height:
            raise ValueError(
              "rows supplied (%d) does not match height (%d)" %
              (nrows, self.height))

    def write_passes(self, outfile, rows, packed=False):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file.  For straightlaced images,
        this is the usual top to bottom ordering, but for interlaced
        images the rows should have already been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row.  When
        `packed` is ``False`` the rows should be in boxed row flat
        pixel format; when `packed` is ``True`` each row should be a
        packed sequence of bytes.

        Returns the number of rows written.
        """
        # http://www.w3.org/TR/PNG/#5PNG-file-signature
        outfile.write(_signature)

        # http://www.w3.org/TR/PNG/#11IHDR
        write_chunk(outfile, b'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, b'gAMA',
                        struct.pack("!L", int(round(self.gamma*1e5))))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        if self.rescale:
            # Record the original (pre-rescale) bit depth of every plane.
            write_chunk(outfile, b'sBIT',
                struct.pack('%dB' % self.planes,
                            *[self.rescale[0]]*self.planes))

        # :chunk:order: Without a palette (PLTE chunk), ordering is
        # relatively relaxed.  With one, gAMA chunk must precede PLTE
        # chunk which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p,t = self.make_palette()
            write_chunk(outfile, b'PLTE', p)
            if t:
                # tRNS chunk is optional.  Only needed if palette entries
                # have alpha.
                write_chunk(outfile, b'tRNS', t)

        # http://www.w3.org/TR/PNG/#11tRNS
        if self.transparent is not None:
            if self.greyscale:
                write_chunk(outfile, b'tRNS',
                            struct.pack("!1H", *self.transparent))
            else:
                write_chunk(outfile, b'tRNS',
                            struct.pack("!3H", *self.transparent))

        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                write_chunk(outfile, b'bKGD',
                            struct.pack("!1H", *self.background))
            else:
                write_chunk(outfile, b'bKGD',
                            struct.pack("!3H", *self.background))

        # http://www.w3.org/TR/PNG/#11pHYs
        if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
            tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
            write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))

        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()

        # Choose an extend function based on the bitdepth.  The extend
        # function packs/decomposes the pixel values into bytes and
        # stuffs them onto the data array.
        data = array(str('B'))
        if self.bitdepth == 8 or packed:
            extend = data.extend
        elif self.bitdepth == 16:
            # Decompose into bytes
            def extend(sl):
                fmt = '!%dH' % len(sl)
                data.extend(array(str('B'), struct.pack(fmt, *sl)))
        else:
            # Pack into bytes
            assert self.bitdepth < 8
            # samples per byte
            spb = int(8/self.bitdepth)
            def extend(sl):
                a = array(str('B'), sl)
                # Adding padding bytes so we can group into a whole
                # number of spb-tuples.
                l = float(len(a))
                extra = math.ceil(l / float(spb))*spb - l
                a.extend([0]*int(extra))
                # Pack into bytes
                l = group(a, spb)
                l = [reduce(lambda x,y:
                            (x << self.bitdepth) + y, e) for e in l]
                data.extend(l)
        if self.rescale:
            # Wrap extend so that source values are scaled up to the
            # target bit depth before being emitted.
            oldextend = extend
            factor = \
              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
            def extend(sl):
                oldextend([int(round(factor*x)) for x in sl])

        # Build the first row, testing mostly to see if we need to
        # change the extend function to cope with NumPy integer types
        # (they cause our ordinary definition of extend to fail, so we
        # wrap it).  See
        # http://code.google.com/p/pypng/issues/detail?id=44
        enumrows = enumerate(rows)
        del rows

        # First row's filter type.
        data.append(0)
        # :todo: Certain exceptions in the call to ``.next()`` or the
        # following try would indicate no row data supplied.
        # Should catch.
        i,row = next(enumrows)
        try:
            # If this fails...
            extend(row)
        except:
            # ... try a version that converts the values to int first.
            # Not only does this work for the (slightly broken) NumPy
            # types, there are probably lots of other, unknown, "nearly"
            # int types it works for.
            def wrapmapint(f):
                return lambda sl: f([int(x) for x in sl])
            extend = wrapmapint(extend)
            del wrapmapint
            extend(row)

        for i,row in enumrows:
            # Add "None" filter type.  Currently, it's essential that
            # this filter type be used for every scanline as we do not
            # mark the first row of a reduced pass image; that means we
            # could accidentally compute the wrong filtered scanline if
            # we used "up", "average", or "paeth" on such a line.
            data.append(0)
            extend(row)
            if len(data) > self.chunk_limit:
                compressed = compressor.compress(tostring(data))
                if len(compressed):
                    write_chunk(outfile, b'IDAT', compressed)
                # Because of our very witty definition of ``extend``,
                # above, we must re-use the same ``data`` object.  Hence
                # we use ``del`` to empty this one, rather than create a
                # fresh one (which would be my natural FP instinct).
                del data[:]

        if len(data):
            compressed = compressor.compress(tostring(data))
        else:
            compressed = b''
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            write_chunk(outfile, b'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, b'IEND')
        return i+1

    def write_array(self, outfile, pixels):
        """
        Write an array in flat row flat pixel format as a PNG file on
        the output file.  See also :meth:`write` method.
        """
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))

    def write_packed(self, outfile, rows):
        """
        Write PNG file to `outfile`.  The pixel data comes from `rows`
        which should be in boxed row packed format.  Each row should
        be a sequence of packed bytes.

        Technically, this method does work for interlaced images but
        it is best avoided.  For interlaced images, the rows should be
        presented in the order that they appear in the file.

        This method should not be used when the source image bit depth
        is not one naturally supported by PNG; the bit depth should be
        1, 2, 4, 8, or 16.
        """
        if self.rescale:
            raise Error("write_packed method not suitable for bit depth %d" %
              self.rescale[0])
        return self.write_passes(outfile, rows, packed=True)

    def convert_pnm(self, infile, outfile):
        """
        Convert a PNM file containing raw pixel data into a PNG file
        with the parameters set in the writer object.  Works for
        (binary) PGM, PPM, and PAM formats.
        """
        if self.interlace:
            # NOTE(review): (self.bitdepth/8) is a float under
            # Python 3, but array.fromfile requires an integer count —
            # confirm this path on Python 3.
            pixels = array(str('B'))
            pixels.fromfile(infile,
                            (self.bitdepth/8) * self.color_planes *
                            self.width * self.height)
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.file_scanlines(infile))

    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
        """
        Convert a PPM and PGM file containing raw pixel data into a
        PNG outfile with the parameters set in the writer object.

        The PPM supplies the colour planes; the PGM supplies the
        alpha plane.
        """
        # NOTE(review): same float-count caveat as convert_pnm for
        # Python 3.
        pixels = array(str('B'))
        pixels.fromfile(ppmfile,
                        (self.bitdepth/8) * self.color_planes *
                        self.width * self.height)
        apixels = array(str('B'))
        apixels.fromfile(pgmfile,
                         (self.bitdepth/8) *
                         self.width * self.height)
        pixels = interleave_planes(pixels, apixels,
                                   (self.bitdepth/8) * self.color_planes,
                                   (self.bitdepth/8))
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))

    def file_scanlines(self, infile):
        """
        Generates boxed rows in flat pixel format, from the input file
        `infile`.  It assumes that the input file is in a "Netpbm-like"
        binary format, and is positioned at the beginning of the first
        pixel.  The number of pixels to read is taken from the image
        dimensions (`width`, `height`, `planes`) and the number of
        bytes per value is implied by the image `bitdepth`.
        """
        # Values per row
        vpr = self.width * self.planes
        row_bytes = vpr
        if self.bitdepth > 8:
            assert self.bitdepth == 16
            row_bytes *= 2
            fmt = '>%dH' % vpr
            def line():
                return array(str('H'), struct.unpack(fmt, infile.read(row_bytes)))
        else:
            def line():
                scanline = array(str('B'), infile.read(row_bytes))
                return scanline
        for y in range(self.height):
            yield line()

    def array_scanlines(self, pixels):
        """
        Generates boxed rows (flat pixels) from flat rows (flat
        pixels) in an array.
        """
        # Values per row
        vpr = self.width * self.planes
        stop = 0
        for y in range(self.height):
            start = stop
            stop = start + vpr
            yield pixels[start:stop]

    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.  `pixels` is
        the full source image in flat row flat pixel format.  The
        generator yields each scanline of the reduced passes in turn,
        in boxed row flat pixel format.
        """
        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes
        for xstart, ystart, xstep, ystep in _adam7:
            if xstart >= self.width:
                # This pass is empty for narrow images.
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # number of values in reduced image row.
            row_len = ppr*self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    # Pass covers whole rows: yield them directly.
                    offset = y * vpr
                    yield pixels[offset:offset+vpr]
                else:
                    row = array(str(fmt))
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    # Gather every xstep-th pixel of this row, one
                    # plane at a time, via extended slicing.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset+i:end_offset:skip]
                    yield row
def write_chunk(outfile, tag, data=b''):
    """Write one complete PNG chunk to `outfile`: 4-byte big-endian
    data length, then the tag, the data, and the CRC.

    See http://www.w3.org/TR/PNG/#5Chunk-layout for the layout.
    """
    # The length field counts only the data, not the tag or CRC.
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # The CRC covers the tag and the data (not the length), masked
    # to an unsigned 32-bit value.
    crc = zlib.crc32(data, zlib.crc32(tag)) & 2**32-1
    outfile.write(struct.pack("!I", crc))
def write_chunks(out, chunks):
    """Create a PNG file on `out` by writing the PNG signature
    followed by every chunk in `chunks` (each an argument tuple
    for :func:`write_chunk`, i.e. (tag,) or (tag, data)).
    """
    out.write(_signature)
    for args in chunks:
        write_chunk(out, *args)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes.  `fo` specifies the
    filter offset; normally this is size of a pixel in bytes (the
    number of bytes per sample times the number of channels), but when
    this is < 1 (for bit depths < 8) then the filter offset is 1.

    Returns the filtered scanline as an ``array('B')`` whose first
    byte is the filter type.
    """
    assert 0 <= type < 5

    # The output array.  Which, pathetically, we extend one-byte at a
    # time (fortunately this is linear).
    out = array(str('B'), [type])

    # Each helper implements one filter from
    # http://www.w3.org/TR/PNG/#9Filters; `ai` indexes the byte one
    # pixel to the left (negative until `fo` bytes have been emitted,
    # in which case the "left" byte is treated as zero).
    def sub():
        ai = -fo
        for x in line:
            if ai >= 0:
                x = (x - line[ai]) & 0xff
            out.append(x)
            ai += 1
    def up():
        for i,x in enumerate(line):
            x = (x - prev[i]) & 0xff
            out.append(x)
    def average():
        ai = -fo
        for i,x in enumerate(line):
            if ai >= 0:
                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
            else:
                x = (x - (prev[i] >> 1)) & 0xff
            out.append(x)
            ai += 1
    def paeth():
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        ai = -fo # also used for ci
        for i,x in enumerate(line):
            a = 0
            b = prev[i]
            c = 0
            if ai >= 0:
                a = line[ai]
                c = prev[ai]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                Pr = a
            elif pb <= pc:
                Pr = b
            else:
                Pr = c
            x = (x - Pr) & 0xff
            out.append(x)
            ai += 1

    if not prev:
        # We're on the first line.  Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler.  "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true). "average" needs to be handled
        # specially.
        if type == 2: # "up"
            type = 0
        elif type == 3:
            # "average": pretend the previous line was all zeros.
            prev = [0]*len(line)
        elif type == 4: # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        sub()
    elif type == 2:
        up()
    elif type == 3:
        average()
    else: # type == 4
        paeth()
    return out
# Regex for decoding mode string.
# Group 1 is the colour mode ('L', 'LA', 'RGB', 'RGBA',
# case-insensitive); group 2 is an optional bit depth given after an
# optional ';' (e.g. 'RGB;16').
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
"""Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3 = 24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:`png.Writer` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
match = RegexModeDecode.match(mode)
if not match:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode, bitdepth = match.groups()
alpha = 'A' in mode
if bitdepth:
bitdepth = int(bitdepth)
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise Error("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
if 'alpha' in info:
if bool(info['alpha']) != alpha:
raise Error("info['alpha'] should match mode.")
info['alpha'] = alpha
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get("bitdepth") and bitdepth != info['bitdepth']:
raise Error("bitdepth (%d) should match bitdepth of info (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
if 'size' in info:
assert len(info["size"]) == 2
# Check width, height, size all match where used.
for dimension,axis in [('width', 0), ('height', 1)]:
if dimension in info:
if info[dimension] != info['size'][axis]:
raise Error(
"info[%r] should match info['size'][%r]." %
(dimension, axis))
info['width'],info['height'] = info['size']
if 'height' not in info:
try:
info['height'] = len(a)
except TypeError:
raise Error("len(a) does not work, supply info['height'] instead.")
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
# In order to work out whether we the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a,t = itertools.tee(a)
row = next(t)
del t
try:
row[0][0]
threed = True
testelement = row[0]
except (IndexError, TypeError):
threed = False
testelement = row
if 'width' not in info:
if threed:
width = len(row)
else:
width = len(row) // planes
info['width'] = width
if threed:
# Flatten the threed rows
a = (itertools.chain.from_iterable(x) for x in a)
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's
# datatype, use a default of 8.
bitdepth = 8
else:
# If we got here without exception, we now assume that
# the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
assert thing in info
return Image(a, info)
# Convenience alias so that refugees from PIL feel more at home.
# Not documented.
fromarray = from_array
class Image:
    """A PNG image.  You can create an :class:`Image` object from
    an array of pixels by calling :meth:`png.from_array`.  It can be
    saved to disk with the :meth:`save` method.
    """

    def __init__(self, rows, info):
        """
        .. note ::

          The constructor is not public.  Please do not call it.
        """
        # Row iterator and the keyword arguments later passed to Writer.
        self.rows = rows
        self.info = info

    def save(self, file):
        """Save the image to *file*.  If *file* looks like an open file
        descriptor then it is used, otherwise it is treated as a
        filename and a fresh file is opened.

        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved, the
        source data will have been streamed, and cannot be streamed
        again.
        """
        w = Writer(**self.info)

        if hasattr(file, 'write'):
            # Looks like an open file object: write to it and leave
            # the caller responsible for closing it.
            w.write(file, self.rows)
        else:
            # Treat as a filename: open, write, and always close.
            out = open(file, 'wb')
            try:
                w.write(out, self.rows)
            finally:
                out.close()
class _readable:
"""
A simple file-like interface for strings and arrays.
"""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
r = self.buf[self.offset:self.offset+n]
if isarray(r):
r = r.tostring()
self.offset += n
return r
# Python 2/3 compatibility shim: ``as_str`` converts an ASCII byte
# string to the native ``str`` type.  On Python 2 the two-argument
# form ``str(b'...', 'ascii')`` raises TypeError, so plain ``str``
# is used; on Python 3 the two-argument form decodes the bytes.
try:
    str(b'dummy', 'ascii')
except TypeError:
    as_str = str
else:
    def as_str(x):
        return str(x, 'ascii')
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
    """
    Create a PNG decoder object.

    The constructor expects exactly one keyword argument. If you
    supply a positional argument instead, it will guess the input
    type. You can choose among the following keyword arguments:

    filename
      Name of input file (a PNG file).
    file
      A file-like object (object with a read() method).
    bytes
      ``array`` or ``string`` with PNG data.

    """
    if ((_guess is not None and len(kw) != 0) or
        (_guess is None and len(kw) != 1)):
        raise TypeError("Reader() takes exactly 1 argument")

    # Will be the first 8 bytes, later on.  See validate_signature.
    self.signature = None
    self.transparent = None
    # A pair of (len,type) if a chunk has been read but its data and
    # checksum have not (in other words the file position is just
    # past the 4 bytes that specify the chunk type). See preamble
    # method for how this is used.
    self.atchunk = None

    if _guess is not None:
        # Guess the argument kind: array -> bytes, string -> filename,
        # object with read() -> file.
        if isarray(_guess):
            kw["bytes"] = _guess
        # NOTE(review): ``unicode`` is a Python 2 name; if this elif is
        # reached with a non-str value on Python 3 it would raise
        # NameError unless a compatibility alias is defined earlier in
        # the file -- confirm.
        elif isinstance(_guess, str) or isinstance(_guess, unicode):
            kw["filename"] = str(_guess)
        elif hasattr(_guess, 'read'):
            kw["file"] = _guess

    if "filename" in kw:
        self.file = open(kw["filename"], "rb")
    elif "file" in kw:
        self.file = kw["file"]
    elif "bytes" in kw:
        self.file = _readable(kw["bytes"])
    else:
        raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
    """
    Read the next PNG chunk from the input file; returns a
    (*type*, *data*) tuple.  *type* is the chunk's type as a
    byte string (all PNG chunk types are 4 bytes long).
    *data* is the chunk's data content, as a byte string.

    If the optional `seek` argument is
    specified then it will keep reading chunks until it either runs
    out of file or finds the type specified by the argument.  Note
    that in general the order of chunks in PNGs is unspecified, so
    using `seek` can cause you to miss chunks.

    If the optional `lenient` argument evaluates to `True`,
    checksum failures will raise warnings rather than exceptions.
    """
    self.validate_signature()

    while True:
        # http://www.w3.org/TR/PNG/#5Chunk-layout
        if not self.atchunk:
            self.atchunk = self.chunklentype()
        length, type = self.atchunk
        self.atchunk = None
        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError('Chunk %s too short for required %i octets.'
              % (type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            raise ChunkError('Chunk %s too short for checksum.' % type)
        # When seeking, chunks that are skipped over do not have
        # their checksum verified.
        if seek and type != seek:
            continue
        # CRC covers the chunk type bytes followed by the data.
        verify = zlib.crc32(type)
        verify = zlib.crc32(data, verify)
        # Whether the output from zlib.crc32 is signed or not varies
        # according to hideous implementation details, see
        # http://bugs.python.org/issue1202 .
        # We coerce it to be positive here (in a way which works on
        # Python 2.3 and older).
        verify &= 2**32 - 1
        verify = struct.pack('!I', verify)
        if checksum != verify:
            (a, ) = struct.unpack('!I', checksum)
            (b, ) = struct.unpack('!I', verify)
            message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
            if lenient:
                warnings.warn(message, RuntimeWarning)
            else:
                raise ChunkError(message)
        return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
    """Undo the filter for a scanline.  `scanline` is a sequence of
    bytes that does not include the initial filter type byte.
    `previous` is decoded previous scanline (for straightlaced
    images this is the previous pixel row, but for interlaced
    images, it is the previous scanline in the reduced image, which
    in general is not the previous pixel row in the final image).
    When there is no previous scanline (the first row of a
    straightlaced image, or the first row in one of the passes in an
    interlaced image), then this argument should be ``None``.

    The scanline will have the effects of filtering removed, and the
    result will be returned as a fresh sequence of bytes.
    """
    # :todo: Would it be better to update scanline in place?
    # Yes, with the Cython extension making the undo_filter fast,
    # updating scanline inplace makes the code 3 times faster
    # (reading 50 images of 800x800 went from 40s to 16s)
    result = scanline

    if filter_type == 0:
        return result

    if filter_type not in (1,2,3,4):
        raise FormatError('Invalid PNG Filter Type.'
          ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')

    # Filter unit.  The stride from one pixel to the corresponding
    # byte from the previous pixel.  Normally this is the pixel
    # size in bytes, but when this is smaller than 1, the previous
    # byte is used instead.
    fu = max(1, self.psize)

    # For the first line of a pass, synthesize a dummy previous
    # line.  An alternative approach would be to observe that on the
    # first line 'up' is the same as 'null', 'paeth' is the same
    # as 'sub', with only 'average' requiring any special case.
    if not previous:
        previous = array(str('B'), [0]*len(scanline))

    # Call appropriate filter algorithm.  Note that 0 has already
    # been dealt with.  (The local helper functions that previously
    # duplicated the pngfilters implementations were dead code --
    # dispatch always goes through pngfilters -- and have been
    # removed.)
    (None,
     pngfilters.undo_filter_sub,
     pngfilters.undo_filter_up,
     pngfilters.undo_filter_average,
     pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
    return result
def deinterlace(self, raw):
    """
    Read raw pixel data, undo filters, deinterlace, and flatten.
    Return in flat row flat pixel format.
    """
    # Values per row (of the target image)
    vpr = self.width * self.planes

    # Make a result array, and make it big enough.  Interleaving
    # writes to the output array randomly (well, not quite), so the
    # entire output array must be in memory.
    # 'B' for bitdepth <= 8 samples, 'H' for 16-bit samples.
    fmt = 'BH'[self.bitdepth > 8]
    a = array(str(fmt), [0]*vpr*self.height)
    source_offset = 0

    # _adam7 supplies (xstart, ystart, xstep, ystep) for each of the
    # seven Adam7 interlace passes.
    for xstart, ystart, xstep, ystep in _adam7:
        if xstart >= self.width:
            continue
        # The previous (reconstructed) scanline.  None at the
        # beginning of a pass to indicate that there is no previous
        # line.
        recon = None
        # Pixels per row (reduced pass image)
        ppr = int(math.ceil((self.width-xstart)/float(xstep)))
        # Row size in bytes for this pass.
        row_size = int(math.ceil(self.psize * ppr))
        for y in range(ystart, self.height, ystep):
            # First byte of each (serialised) scanline is the
            # filter type.
            filter_type = raw[source_offset]
            source_offset += 1
            scanline = raw[source_offset:source_offset+row_size]
            source_offset += row_size
            recon = self.undo_filter(filter_type, scanline, recon)
            # Convert so that there is one element per pixel value
            flat = self.serialtoflat(recon, ppr)
            if xstep == 1:
                assert xstart == 0
                offset = y * vpr
                a[offset:offset+vpr] = flat
            else:
                # Scatter this pass's pixels into the full image
                # using extended slices, one plane at a time.
                offset = y * vpr + xstart * self.planes
                end_offset = (y+1) * vpr
                skip = self.planes * xstep
                for i in range(self.planes):
                    a[offset+i:end_offset:skip] = \
                        flat[i::self.planes]
    return a
def iterboxed(self, rows):
    """Iterator that yields each scanline in boxed row flat pixel
    format.  `rows` should be an iterator that yields the bytes of
    each row in turn.
    """

    def asvalues(raw):
        """Convert a row of raw bytes into a flat row.  Result will
        be a freshly allocated object, not shared with
        argument.
        """
        if self.bitdepth == 8:
            return array(str('B'), raw)
        if self.bitdepth == 16:
            raw = tostring(raw)
            return array(str('H'), struct.unpack('!%dH' % (len(raw)//2), raw))
        assert self.bitdepth < 8
        width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array(str('B'))
        # Mask extracts one sample; shifts give each sample's
        # position within its byte (big-endian bit order).
        mask = 2**self.bitdepth - 1
        shifts = [self.bitdepth * i
            for i in reversed(list(range(spb)))]
        for o in raw:
            out.extend([mask&(o>>i) for i in shifts])
        # Truncate any padding samples from the final byte.
        return out[:width]

    # ``map`` is lazy on Python 3; use itertools.imap for laziness
    # on Python 2.
    if sys.version_info[0] == 3:
        return map(asvalues, rows)
    else:
        return itertools.imap(asvalues, rows)
def serialtoflat(self, bytes, width=None):
    """Convert serial format (byte stream) pixel data to flat row
    flat pixel.

    Note: the parameter ``bytes`` shadows the builtin of the same
    name; it is kept for interface compatibility.
    """
    if self.bitdepth == 8:
        return bytes
    if self.bitdepth == 16:
        bytes = tostring(bytes)
        return array(str('H'),
          struct.unpack('!%dH' % (len(bytes)//2), bytes))
    assert self.bitdepth < 8
    if width is None:
        width = self.width
    # Samples per byte
    spb = 8//self.bitdepth
    out = array(str('B'))
    # Mask extracts one sample; shifts give each sample's position
    # within its byte (big-endian bit order).
    mask = 2**self.bitdepth - 1
    shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
    # ``l`` counts down the samples remaining in the current row so
    # that padding bits at the end of each row are discarded.
    l = width
    for o in bytes:
        out.extend([(mask&(o>>s)) for s in shifts][:l])
        l -= spb
        if l <= 0:
            l = width
    return out
def iterstraight(self, raw):
    """Iterator that undoes the effect of filtering, and yields
    each row in serialised format (as a sequence of bytes).
    Assumes input is straightlaced.  `raw` should be an iterable
    that yields the raw bytes in chunks of arbitrary size.
    """
    # length of row, in bytes
    rb = self.row_bytes
    a = array(str('B'))
    # The previous (reconstructed) scanline.  None indicates first
    # line of image.
    recon = None
    for some in raw:
        a.extend(some)
        # Consume as many complete rows (1 filter-type byte plus rb
        # data bytes each) as are currently buffered.
        while len(a) >= rb + 1:
            filter_type = a[0]
            scanline = a[1:rb+1]
            del a[:rb+1]
            recon = self.undo_filter(filter_type, scanline, recon)
            yield recon
    if len(a) != 0:
        # :file:format We get here with a file format error:
        # when the available bytes (after decompressing) do not
        # pack into exact rows.
        raise FormatError(
          'Wrong size for decompressed IDAT chunk.')
    assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
    """
    Extract the image metadata by reading the initial part of
    the PNG file up to the start of the ``IDAT`` chunk.  All the
    chunks that precede the ``IDAT`` chunk are read and either
    processed for metadata or discarded.

    If the optional `lenient` argument evaluates to `True`, checksum
    failures will raise warnings rather than exceptions.
    """
    self.validate_signature()

    while True:
        if not self.atchunk:
            self.atchunk = self.chunklentype()
            # chunklentype() returns None at end of file.
            if self.atchunk is None:
                raise FormatError(
                  'This PNG file has no IDAT chunks.')
        if self.atchunk[1] == b'IDAT':
            # Stop just before the IDAT data so that chunk() can
            # pick it up; see the atchunk comment in __init__.
            return
        self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
    """Read the next chunk and dispatch it to the matching
    ``_process_*`` handler.  Only ``IHDR``, ``PLTE``, ``bKGD``,
    ``tRNS``, ``gAMA``, ``sBIT``, and ``pHYs`` have handlers; all
    other chunk types are ignored.

    If the optional `lenient` argument evaluates to `True`,
    checksum failures will raise warnings rather than exceptions.
    """
    chunk_type, data = self.chunk(lenient=lenient)
    handler = getattr(self, '_process_' + as_str(chunk_type), None)
    if handler is not None:
        handler(data)
def _process_IHDR(self, data):
    # http://www.w3.org/TR/PNG/#11IHDR
    if len(data) != 13:
        raise FormatError('IHDR chunk has incorrect length.')
    (self.width, self.height, self.bitdepth, self.color_type,
     self.compression, self.filter,
     self.interlace) = struct.unpack("!2I5B", data)

    check_bitdepth_colortype(self.bitdepth, self.color_type)

    # Only compression method 0 and filter method 0 are defined by
    # the PNG specification.
    if self.compression != 0:
        raise Error("unknown compression method %d" % self.compression)
    if self.filter != 0:
        raise FormatError("Unknown filter method %d,"
          " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
          % self.filter)
    if self.interlace not in (0,1):
        raise FormatError("Unknown interlace method %d,"
          " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
          % self.interlace)

    # Derived values
    # http://www.w3.org/TR/PNG/#6Colour-values
    colormap = bool(self.color_type & 1)
    greyscale = not (self.color_type & 2)
    alpha = bool(self.color_type & 4)
    color_planes = (3,1)[greyscale or colormap]
    planes = color_planes + alpha

    self.colormap = colormap
    self.greyscale = greyscale
    self.alpha = alpha
    self.color_planes = color_planes
    self.planes = planes
    # Pixel size in bytes; may be fractional when bitdepth < 8.
    self.psize = float(self.bitdepth)/float(8) * planes
    if int(self.psize) == self.psize:
        self.psize = int(self.psize)
    self.row_bytes = int(math.ceil(self.width * self.psize))
    # Stores PLTE chunk if present, and is used to check
    # chunk ordering constraints.
    self.plte = None
    # Stores tRNS chunk if present, and is used to check chunk
    # ordering constraints.
    self.trns = None
    # Stores sbit chunk if present.
    self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
    # http://www.w3.org/TR/PNG/#11tRNS
    # Records transparency: for colormap images the chunk is kept
    # as per-palette-entry alpha; otherwise it names a single
    # transparent colour.
    self.trns = data
    if self.colormap:
        if not self.plte:
            warnings.warn("PLTE chunk is required before tRNS chunk.")
        else:
            if len(data) > len(self.plte)/3:
                # Was warning, but promoted to Error as it
                # would otherwise cause pain later on.
                raise FormatError("tRNS chunk is too long.")
    else:
        # tRNS is not allowed for colour types that already carry
        # an alpha channel.
        if self.alpha:
            raise FormatError(
              "tRNS chunk is not valid with colour type %d." %
              self.color_type)
        try:
            self.transparent = \
                struct.unpack("!%dH" % self.color_planes, data)
        except struct.error:
            raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt,data)
self.unit_is_meter = bool(unit)
def read(self, lenient=False):
    """
    Read the PNG file and decode it.  Returns (`width`, `height`,
    `pixels`, `metadata`).

    May use excessive memory.

    `pixels` are returned in boxed row flat pixel format.

    If the optional `lenient` argument evaluates to True,
    checksum failures will raise warnings rather than exceptions.
    """

    def iteridat():
        """Iterator that yields all the ``IDAT`` chunks as strings."""
        while True:
            try:
                type, data = self.chunk(lenient=lenient)
            except ValueError as e:
                raise ChunkError(e.args[0])
            if type == b'IEND':
                # http://www.w3.org/TR/PNG/#11IEND
                break
            if type != b'IDAT':
                continue
            # type == b'IDAT'
            # http://www.w3.org/TR/PNG/#11IDAT
            if self.colormap and not self.plte:
                warnings.warn("PLTE chunk is required before IDAT chunk")
            yield data

    def iterdecomp(idat):
        """Iterator that yields decompressed strings.  `idat` should
        be an iterator that yields the ``IDAT`` chunk data.
        """
        # Currently, with no max_length parameter to decompress,
        # this routine will do one yield per IDAT chunk: Not very
        # incremental.
        d = zlib.decompressobj()
        # Each IDAT chunk is passed to the decompressor, then any
        # remaining state is decompressed out.
        for data in idat:
            # :todo: add a max_length argument here to limit output
            # size.
            yield array(str('B'), d.decompress(data))
        yield array(str('B'), d.flush())

    self.preamble(lenient=lenient)
    raw = iterdecomp(iteridat())

    if self.interlace:
        # Interlaced images must be fully decompressed and
        # deinterlaced before any row can be produced.
        raw = array(str('B'), itertools.chain(*raw))
        arraycode = 'BH'[self.bitdepth>8]
        # Like :meth:`group` but producing an array.array object for
        # each row.
        pixels = map(lambda *row: array(str(arraycode), row),
                   *[iter(self.deinterlace(raw))]*self.width*self.planes)
    else:
        pixels = self.iterboxed(self.iterstraight(raw))
    meta = dict()
    for attr in 'greyscale alpha planes bitdepth interlace'.split():
        meta[attr] = getattr(self, attr)
    meta['size'] = (self.width, self.height)
    # Optional metadata, only present when the matching chunk was
    # seen.
    for attr in 'gamma transparent background'.split():
        a = getattr(self, attr, None)
        if a is not None:
            meta[attr] = a
    if self.plte:
        meta['palette'] = self.palette()
    return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(str(arraycode), itertools.chain(*pixel))
return x, y, pixel, meta
def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
    chunks should have already been processed (for example, by
    calling the :meth:`preamble` method).  All the tuples are the
    same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
    there is a ``tRNS`` chunk.  Assumes that the image is colour type
    3 and therefore a ``PLTE`` chunk is required.

    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """
    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    plte = group(array(str('B'), self.plte), 3)
    if self.trns or alpha == 'force':
        trns = array(str('B'), self.trns or [])
        # Pad the alpha list to palette length with fully opaque
        # entries, then append one alpha value to each RGB tuple.
        trns.extend([255]*(len(plte)-len(trns)))
        plte = list(map(operator.add, plte, group(trns, 1)))
    return plte
def asDirect(self):
    """Returns the image data as a direct representation of an
    ``x * y * planes`` array.  This method is intended to remove the
    need for callers to deal with palettes and transparency
    themselves.  Images with a palette (colour type 3)
    are converted to RGB or RGBA; images with transparency (a
    ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
    When returned in this format the pixel values represent the
    colour value directly without needing to refer to palettes or
    transparency information.

    Like the :meth:`read` method this method returns a 4-tuple:

    (*width*, *height*, *pixels*, *meta*)

    This method normally returns pixel values with the bit depth
    they have in the source image, but when the source PNG has an
    ``sBIT`` chunk it is inspected and can reduce the bit depth of
    the result pixels; pixel values will be reduced according to
    the bit depth specified in the ``sBIT`` chunk (PNG nerds should
    note a single result bit depth is used for all channels; the
    maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
    image will be rescaled to 6-bit RGB666).

    The *meta* dictionary that is returned reflects the `direct`
    format and not the original source image.  For example, an RGB
    source image with a ``tRNS`` chunk to represent a transparent
    colour, will have ``planes=3`` and ``alpha=False`` for the
    source image, but the *meta* dictionary returned by this method
    will have ``planes=4`` and ``alpha=True`` because an alpha
    channel is synthesized and added.

    *pixels* is the pixel data in boxed row flat pixel format (just
    like the :meth:`read` method).

    All the other aspects of the image data are not changed.
    """
    self.preamble()

    # Simple case, no conversion necessary.
    if not self.colormap and not self.trns and not self.sbit:
        return self.read()

    x,y,pixels,meta = self.read()

    if self.colormap:
        # Expand palette indexes into direct RGB(A) values.
        meta['colormap'] = False
        meta['alpha'] = bool(self.trns)
        meta['bitdepth'] = 8
        meta['planes'] = 3 + bool(self.trns)
        plte = self.palette()
        def iterpal(pixels):
            for row in pixels:
                row = [plte[x] for x in row]
                yield array(str('B'), itertools.chain(*row))
        pixels = iterpal(pixels)
    elif self.trns:
        # It would be nice if there was some reasonable way
        # of doing this without generating a whole load of
        # intermediate tuples.  But tuples does seem like the
        # easiest way, with no other way clearly much simpler or
        # much faster.  (Actually, the L to LA conversion could
        # perhaps go faster (all those 1-tuples!), but I still
        # wonder whether the code proliferation is worth it)
        it = self.transparent
        maxval = 2**meta['bitdepth']-1
        planes = meta['planes']
        meta['alpha'] = True
        meta['planes'] += 1
        typecode = 'BH'[meta['bitdepth']>8]
        def itertrns(pixels):
            for row in pixels:
                # For each row we group it into pixels, then form a
                # characterisation vector that says whether each
                # pixel is opaque or not.  Then we convert
                # True/False to 0/maxval (by multiplication),
                # and add it as the extra channel.
                row = group(row, planes)
                opa = map(it.__ne__, row)
                opa = map(maxval.__mul__, opa)
                opa = list(zip(opa)) # convert to 1-tuples
                yield array(str(typecode),
                  itertools.chain(*map(operator.add, row, opa)))
        pixels = itertrns(pixels)
    # Apply sBIT bit-depth reduction, if an sBIT chunk was seen and
    # it specifies a smaller depth than the decoded pixels.
    targetbitdepth = None
    if self.sbit:
        sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
        targetbitdepth = max(sbit)
        if targetbitdepth > meta['bitdepth']:
            raise Error('sBIT chunk %r exceeds bitdepth %d' %
                (sbit,self.bitdepth))
        if min(sbit) <= 0:
            raise Error('sBIT chunk %r has a 0-entry' % sbit)
        if targetbitdepth == meta['bitdepth']:
            targetbitdepth = None
    if targetbitdepth:
        shift = meta['bitdepth'] - targetbitdepth
        meta['bitdepth'] = targetbitdepth
        def itershift(pixels):
            for row in pixels:
                yield [p >> shift for p in row]
        pixels = itershift(pixels)
    return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield [factor * p for p in row]
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x*factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(str(typecode), [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
def asRGBA(self):
    """Return image as RGBA pixels.  Greyscales are expanded into
    RGB triplets; an alpha channel is synthesized if necessary.
    The return values are as for the :meth:`read` method
    except that the *metadata* reflect the returned pixels, not the
    source image.  In particular, for this method
    ``metadata['greyscale']`` will be ``False``, and
    ``metadata['alpha']`` will be ``True``.
    """
    width,height,pixels,meta = self.asDirect()
    if meta['alpha'] and not meta['greyscale']:
        # Already RGBA; pass through unchanged.
        return width,height,pixels,meta
    typecode = 'BH'[meta['bitdepth'] > 8]
    maxval = 2**meta['bitdepth'] - 1
    maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
    def newarray():
        # Fresh target row pre-filled with maxval in every channel,
        # so alpha defaults to fully opaque; the convert functions
        # overwrite the colour channels (and alpha where the source
        # supplies one).
        return array(str(typecode), maxbuffer)

    if meta['alpha'] and meta['greyscale']:
        # LA to RGBA
        def convert():
            for row in pixels:
                # Create a fresh target row, then copy L channel
                # into first three target channels, and A channel
                # into fourth channel.
                a = newarray()
                pngfilters.convert_la_to_rgba(row, a)
                yield a
    elif meta['greyscale']:
        # L to RGBA
        def convert():
            for row in pixels:
                a = newarray()
                pngfilters.convert_l_to_rgba(row, a)
                yield a
    else:
        assert not meta['alpha'] and not meta['greyscale']
        # RGB to RGBA
        def convert():
            for row in pixels:
                a = newarray()
                pngfilters.convert_rgb_to_rgba(row, a)
                yield a
    meta['alpha'] = True
    meta['greyscale'] = False
    return width,height,convert(),meta
def check_bitdepth_colortype(bitdepth, colortype):
    """Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination.  Returns (no value) if
    valid; raises a `FormatError` if not.
    """
    if bitdepth not in (1,2,4,8,16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0,2,3,4,6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # Bug fix: the format arguments were previously passed as
        # (bitdepth, colortype), swapping the reported values.
        raise FormatError(
          "Indexed images (colour type %d) cannot"
          " have bitdepth > 8 (bit depth %d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0,3):
        raise FormatError("Illegal combination of bit depth (%d)"
          " and colour type (%d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (bitdepth, colortype))
def isinteger(x):
    """Return True when *x* equals an integer value (e.g. 3, 3.0);
    False otherwise, including values that cannot be converted to
    an integer at all.
    """
    try:
        return x == int(x)
    except (TypeError, ValueError):
        return False
# === Support for users without Cython ===

try:
    pngfilters
except NameError:
    class pngfilters(object):
        """Pure Python fallbacks for the scanline filter and
        pixel-format helpers, used when the Cython-compiled
        versions are not available.
        """

        @staticmethod
        def undo_filter_sub(filter_unit, scanline, previous, result):
            """Undo sub filter."""
            # result[:filter_unit] is already correct: the left
            # neighbour of the first pixel is treated as 0.
            for i in range(filter_unit, len(result)):
                result[i] = (scanline[i] + result[i - filter_unit]) & 0xff

        @staticmethod
        def undo_filter_up(filter_unit, scanline, previous, result):
            """Undo up filter."""
            for i in range(len(result)):
                result[i] = (scanline[i] + previous[i]) & 0xff

        @staticmethod
        def undo_filter_average(filter_unit, scanline, previous, result):
            """Undo average filter."""
            for i in range(len(result)):
                left = result[i - filter_unit] if i >= filter_unit else 0
                above = previous[i]
                result[i] = (scanline[i] + ((left + above) >> 1)) & 0xff

        @staticmethod
        def undo_filter_paeth(filter_unit, scanline, previous, result):
            """Undo Paeth filter."""
            for i in range(len(result)):
                if i < filter_unit:
                    left = upper_left = 0
                else:
                    left = result[i - filter_unit]
                    upper_left = previous[i - filter_unit]
                above = previous[i]
                # Paeth predictor: pick the neighbour closest to
                # left + above - upper_left, preferring left, then
                # above, then upper-left on ties.
                p = left + above - upper_left
                pa = abs(p - left)
                pb = abs(p - above)
                pc = abs(p - upper_left)
                if pa <= pb and pa <= pc:
                    predictor = left
                elif pb <= pc:
                    predictor = above
                else:
                    predictor = upper_left
                result[i] = (scanline[i] + predictor) & 0xff

        @staticmethod
        def convert_la_to_rgba(row, result):
            """Convert an LA row to RGBA: L into the first three
            channels, A into the fourth.
            """
            for channel in range(3):
                result[channel::4] = row[0::2]
            result[3::4] = row[1::2]

        @staticmethod
        def convert_l_to_rgba(row, result):
            """Convert a grayscale image to RGBA.  This method assumes
            the alpha channel in result is already correctly
            initialized.
            """
            for channel in range(3):
                result[channel::4] = row

        @staticmethod
        def convert_rgb_to_rgba(row, result):
            """Convert an RGB image to RGBA.  This method assumes the
            alpha channel in result is already correctly initialized.
            """
            for channel in range(3):
                result[channel::4] = row[channel::3]
# === Command Line Support ===
def read_pam_header(infile):
"""
Read (the rest of a) PAM header. `infile` should be positioned
immediately after the initial 'P7' line (at the beginning of the
second line). Returns are as for `read_pnm_header`.
"""
# Unlike PBM, PGM, and PPM, we can read the header a line at a time.
header = dict()
while True:
l = infile.readline().strip()
if l == b'ENDHDR':
break
if not l:
raise EOFError('PAM ended prematurely')
if l[0] == b'#':
continue
l = l.split(None, 1)
if l[0] not in header:
header[l[0]] = l[1]
else:
header[l[0]] += b' ' + l[1]
required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
WIDTH,HEIGHT,DEPTH,MAXVAL = required
present = [x for x in required if x in header]
if len(present) != len(required):
raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
width = int(header[WIDTH])
height = int(header[HEIGHT])
depth = int(header[DEPTH])
maxval = int(header[MAXVAL])
if (width <= 0 or
height <= 0 or
depth <= 0 or
maxval <= 0):
raise Error(
'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=(b'P5', b'P6')):
"""
Read a PNM header, returning (format,width,height,depth,maxval).
`width` and `height` are in pixels. `depth` is the number of
channels in the image; for PBM and PGM it is synthesized as 1, for
PPM as 3; for PAM images it is read from the header. `maxval` is
synthesized (as 1) for PBM images.
"""
# Generally, see http://netpbm.sourceforge.net/doc/ppm.html
# and http://netpbm.sourceforge.net/doc/pam.html
# Technically 'P7' must be followed by a newline, so by using
# rstrip() we are being liberal in what we accept. I think this
# is acceptable.
type = infile.read(3).rstrip()
if type not in supported:
raise NotImplementedError('file format %s not supported' % type)
if type == b'P7':
# PAM header parsing is completely different.
return read_pam_header(infile)
# Expected number of tokens in header (3 for P4, 4 for P6)
expected = 4
pbm = (b'P1', b'P4')
if type in pbm:
expected = 3
header = [type]
# We have to read the rest of the header byte by byte because the
# final whitespace character (immediately following the MAXVAL in
# the case of P6) may not be a newline. Of course all PNM files in
# the wild use a newline at this point, so it's tempting to use
# readline; but it would be wrong.
def getc():
c = infile.read(1)
if not c:
raise Error('premature EOF reading PNM header')
return c
c = getc()
while True:
# Skip whitespace that precedes a token.
while c.isspace():
c = getc()
# Skip comments.
while c == '#':
while c not in b'\n\r':
c = getc()
if not c.isdigit():
raise Error('unexpected character %s found in header' % c)
# According to the specification it is legal to have comments
# that appear in the middle of a token.
# This is bonkers; I've never seen it; and it's a bit awkward to
# code good lexers in Python (no goto). So we break on such
# cases.
token = b''
while c.isdigit():
token += c
c = getc()
# Slight hack. All "tokens" are decimal integers, so convert
# them here.
header.append(int(token))
if len(header) == expected:
break
# Skip comments (again)
while c == '#':
while c not in '\n\r':
c = getc()
if not c.isspace():
raise Error('expected header to end with whitespace, not %s' % c)
if type in pbm:
# synthesize a MAXVAL
header.append(1)
depth = (1,3)[type == b'P6']
return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
"""Write a Netpbm PNM/PAM file.
"""
bitdepth = meta['bitdepth']
maxval = 2**bitdepth - 1
# Rudely, the number of image planes can be used to determine
# whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
planes = meta['planes']
# Can be an assert as long as we assume that pixels and meta came
# from a PNG file.
assert planes in (1,2,3,4)
if planes in (1,3):
if 1 == planes:
# PGM
# Could generate PBM if maxval is 1, but we don't (for one
# thing, we'd have to convert the data, not just blat it
# out).
fmt = 'P5'
else:
# PPM
fmt = 'P6'
header = '%s %d %d %d\n' % (fmt, width, height, maxval)
if planes in (2,4):
# PAM
# See http://netpbm.sourceforge.net/doc/pam.html
if 2 == planes:
tupltype = 'GRAYSCALE_ALPHA'
else:
tupltype = 'RGB_ALPHA'
header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
'TUPLTYPE %s\nENDHDR\n' %
(width, height, planes, maxval, tupltype))
file.write(header.encode('ascii'))
# Values per row
vpr = planes * width
# struct format
fmt = '>%d' % vpr
if maxval > 0xff:
fmt = fmt + 'H'
else:
fmt = fmt + 'B'
for row in pixels:
file.write(struct.pack(fmt, *row))
file.flush()
def color_triple(color):
"""
Convert a command line colour value to a RGB triple of integers.
FIXME: Somewhere we need support for greyscale backgrounds etc.
"""
if color.startswith('#') and len(color) == 4:
return (int(color[1], 16),
int(color[2], 16),
int(color[3], 16))
if color.startswith('#') and len(color) == 7:
return (int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16))
elif color.startswith('#') and len(color) == 13:
return (int(color[1:5], 16),
int(color[5:9], 16),
int(color[9:13], 16))
def _add_common_options(parser):
"""Call *parser.add_option* for each of the options that are
common between this PNG--PNM conversion tool and the gen
tool.
"""
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
"""
Run the PNG encoder with options from the command line.
"""
# Parse command line arguments
from optparse import OptionParser
version = '%prog ' + __version__
parser = OptionParser(version=version)
parser.set_usage("%prog [options] [imagefile]")
parser.add_option('-r', '--read-png', default=False,
action='store_true',
help='Read PNG, write PNM')
parser.add_option("-a", "--alpha",
action="store", type="string", metavar="pgmfile",
help="alpha channel transparency (RGBA)")
_add_common_options(parser)
(options, args) = parser.parse_args(args=argv[1:])
# Convert options
if options.transparent is not None:
options.transparent = color_triple(options.transparent)
if options.background is not None:
options.background = color_triple(options.background)
# Prepare input and output files
if len(args) == 0:
infilename = '-'
infile = sys.stdin
elif len(args) == 1:
infilename = args[0]
infile = open(infilename, 'rb')
else:
parser.error("more than one input file")
outfile = sys.stdout
if sys.platform == "win32":
import msvcrt, os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
if options.read_png:
# Encode PNG to PPM
png = Reader(file=infile)
width,height,pixels,meta = png.asDirect()
write_pnm(outfile, width, height, pixels, meta)
else:
# Encode PNM to PNG
format, width, height, depth, maxval = \
read_pnm_header(infile, (b'P5',b'P6',b'P7'))
# When it comes to the variety of input formats, we do something
# rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
# types supported by PNG and that they correspond to 1, 2, 3, 4
# channels respectively. So we use the number of channels in
# the source image to determine which one we have. We do not
# care about TUPLTYPE.
greyscale = depth <= 2
pamalpha = depth in (2,4)
supported = [2**x-1 for x in range(1,17)]
try:
mi = supported.index(maxval)
except ValueError:
raise NotImplementedError(
'your maxval (%s) not in supported list %s' %
(maxval, str(supported)))
bitdepth = mi+1
writer = Writer(width, height,
greyscale=greyscale,
bitdepth=bitdepth,
interlace=options.interlace,
transparent=options.transparent,
background=options.background,
alpha=bool(pamalpha or options.alpha),
gamma=options.gamma,
compression=options.compression)
if options.alpha:
pgmfile = open(options.alpha, 'rb')
format, awidth, aheight, adepth, amaxval = \
read_pnm_header(pgmfile, 'P5')
if amaxval != '255':
raise NotImplementedError(
'maxval %s not supported for alpha channel' % amaxval)
if (awidth, aheight) != (width, height):
raise ValueError("alpha channel image size mismatch"
" (%s has %sx%s but %s has %sx%s)"
% (infilename, width, height,
options.alpha, awidth, aheight))
writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
else:
writer.convert_pnm(infile, outfile)
if __name__ == '__main__':
try:
_main(sys.argv)
except Error as e:
print(e, file=sys.stderr)
| [
"jjohnson@Jakobs-MacBook-Pro.local"
] | jjohnson@Jakobs-MacBook-Pro.local |
a33b45b686b42f02891fe745b169f339692f91d2 | acdd393c25b32779a637a05b5a5574aaecdda9d6 | /pelican-plugins/more_categories/test_more_categories.py | 41dc4a2241313d4debe74eb3c9a78d38b9c38ad9 | [
"AGPL-3.0-only",
"MIT"
] | permissive | JN-Blog/jn-blog.com | 51f1b8f9011138b3ebf62b93c2ecaba9e2d514bf | 669bf9a9c6813f2b7980792fb137f6718077aea1 | refs/heads/master | 2020-04-02T10:07:31.569949 | 2018-12-30T14:30:49 | 2018-12-30T14:30:49 | 154,325,262 | 0 | 0 | MIT | 2018-12-30T14:30:50 | 2018-10-23T12:36:12 | Python | UTF-8 | Python | false | false | 1,587 | py | """Unit tests for the more_categories plugin"""
import os
import unittest
from . import more_categories
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_context, get_settings
class TestArticlesGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
more_categories.register()
settings = get_settings()
settings['DEFAULT_CATEGORY'] = 'default'
settings['CACHE_CONTENT'] = False
settings['PLUGINS'] = more_categories
context = get_context(settings)
base_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(base_path, 'test_data')
cls.generator = ArticlesGenerator(
context=context, settings=settings,
path=test_data_path, theme=settings['THEME'], output_path=None)
cls.generator.generate_context()
def test_generate_categories(self):
"""Test whether multiple categories are generated correctly,
including ancestor categories"""
cats_generated = [cat.name for cat, _ in self.generator.categories]
cats_expected = ['default', 'foo', 'foo/bar', 'foo/baz',]
self.assertEqual(sorted(cats_generated), sorted(cats_expected))
def test_assign_articles_to_categories(self):
"""Test whether articles are correctly assigned to categories,
including whether articles are not assigned multiple times to the same
ancestor category"""
for cat, articles in self.generator.categories:
self.assertEqual(len(articles), 1) | [
"julien.nuellas@gmail.com"
] | julien.nuellas@gmail.com |
7ae5367511c10e52af51116eab428b8303a2a31f | d385c85c8f462e6908f41fb51e5fb6386a9398a8 | /Desktop/Program/Recursion/1.example.py | ba425c3c2985382113feb9d754c8ae03fdd7a628 | [] | no_license | spiderr7/cls-python | 697b7802c5efa8eeba2d77fa5fd62a3a1e7c7045 | f2a325f858a2f073cb40e5d8e04ce91dba32d28d | refs/heads/master | 2020-09-13T10:39:42.841354 | 2019-11-19T17:00:14 | 2019-11-19T17:00:14 | 222,745,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | def sample(n):
if(n==0):
return
print("Mental Defination")
sample(n-1)
sample(5)
| [
"utkarshrana1@gmail.com"
] | utkarshrana1@gmail.com |
1bcff4b00a2f65e33e1c4164e8d3847607c95b0b | 86464e81ad89833dca7c8d91d2998e4916491516 | /models/train_classifier.py | 95530266ef5fec698c96554d586d0105312d0a42 | [] | no_license | candywendao/Disaster_Response_Pipeline_term2 | 40024151c787a2bbfebfb08e006f83214b94bb33 | 6bc14b311fff9d91a1ddac6fb19e6f99cb750628 | refs/heads/master | 2020-06-08T17:55:17.526018 | 2019-06-22T20:53:22 | 2019-06-22T20:53:22 | 193,277,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | # model training script
import sys
import pandas as pd
import re
import sqlite3
import pickle
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import warnings
warnings.filterwarnings("ignore")
def load_data(database_filepath):
'''
This function loads data from database in the provided filepath and
returns:
X - messages
Y - 36 categories
category names
'''
conn = sqlite3.connect(database_filepath)
df = pd.read_sql('SELECT * FROM messages', conn, index_col=None)
X = df.message
Y = df.drop(['index', 'id', 'message', 'genre'], axis = 1)
return X, Y, Y.columns
def tokenize(text):
'''
This function tokenizes data - change to lower case,
remove punctuations, stopwords and shortwords and returns a list of
tokens
'''
text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(w) for w in tokens if w not in stopwords.words('english')]
tokens = [token for token in tokens if len(token) > 2]
return tokens
def build_model():
pipeline = Pipeline(
[
('vect', CountVectorizer(tokenizer = tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(), n_jobs = 1))
])
parameters = {
'vect__ngram_range': ((1, 1), (1, 2)),
'tfidf__use_idf': (True, False),
'tfidf__norm': ['l1', 'l2']
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
Y_pred = pd.DataFrame(model.predict(X_test))
for i in range(len(category_names)):
print(category_names[i],classification_report(Y_test.iloc[:,i], Y_pred.iloc[:,i]))
def save_model(model, model_filepath):
pickle.dump(model, open(model_filepath, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
| [
"candywendao@gmail.com"
] | candywendao@gmail.com |
3ce5a4ffc463e262f76b00b5dba46dffbf1fdddf | dac55c6d98f7be1ad9d78e23989726d8fbd2c206 | /11_4_XBee_remote/XBee_host.py | d82a2a7d9830ae19b0331060fc944bcff2d5f307 | [] | no_license | AlvinChuang/mbed11 | 4ab35b52e235ec2dfa3424e3d570442027ac354a | 8da9f17ad970bda34bad3dc1a4f81231dea07b96 | refs/heads/master | 2022-08-19T16:03:13.691533 | 2020-05-27T09:05:01 | 2020-05-27T09:05:01 | 267,127,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import serial
# XBee setting
serdev = '/dev/ttyUSB0'
s = serial.Serial(serdev, 9600)
s.write("+++".encode())
char = s.read(2)
print("Enter AT mode.")
print(char.decode())
#s.write("ATRE\r\n".encode())
#char = s.read(3)
#print(char.decode())
s.write("ATMY 0x127\r\n".encode())
char = s.read(3)
print("Set MY 0x127.")
print(char.decode())
s.write("ATDL 0x227\r\n".encode())
char = s.read(3)
print("Set DL 0x227.")
print(char.decode())
s.write("ATID 0x1\r\n".encode())
char = s.read(3)
print("Set PAN ID 0x1.")
print(char.decode())
s.write("ATWR\r\n".encode())
char = s.read(3)
print("Write config.")
print(char.decode())
s.write("ATMY\r\n".encode())
char = s.read(4)
print("MY :")
print(char.decode())
s.write("ATDL\r\n".encode())
char = s.read(4)
print("DL : ")
print(char.decode())
s.write("ATCN\r".encode())
#s.write("abcd\r".encode()) ////// \n will cause re-read error //////
char = s.read(3)
print("Exit AT mode.")
print(char.decode())
# send to remote
s.write("abcd\r".encode())
#s.write("abcd\r\n".encode()) ////// \n will cause re-read error //////
line = s.read(5)
print('Get:', line.decode())
s.close() | [
"alvin870203@gapp.nthu.edu.tw"
] | alvin870203@gapp.nthu.edu.tw |
cc0aeef9b492a0227dcc394fa37b9cff2270fdd8 | ccbe81651c43bb028482429ff855e5690b11c62f | /prob sheet 1/find the sum of n natural numbers..py | a50a22218a56e92267d8ad926613ee8417b70448 | [] | no_license | balakrish2001/Basics | c5613d7e9be5b1836eb68b7fb12db56ee6158395 | dad834f96c2e7a507a86dd4f60ddecb42b08a432 | refs/heads/main | 2023-03-20T17:56:04.920045 | 2021-01-25T17:04:46 | 2021-01-25T17:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | n=int((input("Enter a number:")))
sum=0
i=1
while(i<=n):
sum+=i
i+=1
print(sum)
| [
"balakrish181@gmail.com"
] | balakrish181@gmail.com |
7fc4a1542ea4566dd895b7ca4353106ccf4edc09 | e42d987e9d818c2aeec9a5846b08c2cbb3754d2e | /account/forms.py | b6a90245caf7e6af2ab7d0e11138888b158820c9 | [] | no_license | rachanakafle/Insurance-Agency-Management-System | e5e51dcd6a82d9de312dda29a787ea1156f19f7d | 7d10564c9f7fe31e7a91753799ce3f84a58ea97a | refs/heads/master | 2020-04-21T08:43:03.860258 | 2019-02-06T15:17:34 | 2019-02-06T15:17:34 | 169,427,554 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from account.models import MyUser
class SigninForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
fields = ['username','password']
class SignupForm(UserCreationForm):
choice = forms.ChoiceField(choices=[('client', 'Client'), ('agent', 'Agent')])
class Meta:
model = MyUser
fields = [ "full_name","email","choice"]
| [
"rachanakafle32@gmail.com"
] | rachanakafle32@gmail.com |
049c60daccd09762f51324105b89c224b034d34f | 73272de476706ee388eccbfde5fee4ef6f751882 | /board/models.py | 9752c0a8c3845592fc6984b6b0095badb65c720f | [] | no_license | hyunsooDii/travle_web_project | fddb79d65cb20c99094d4e82e94e7ea1d78bb51f | 1b421c09e08a45945bae1562ae4207ea9002c079 | refs/heads/master | 2022-12-15T06:58:05.275705 | 2020-09-04T09:03:19 | 2020-09-04T09:03:19 | 290,417,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,218 | py | from django.db import models
from django.urls import reverse
from taggit.managers import TaggableManager
from django.contrib.auth.models import User
from country.models import Country
from django.core.validators import MinValueValidator, MaxValueValidator
from tinymce.models import HTMLField
class Board(models.Model):
country_index = models.ForeignKey(Country, on_delete=models.CASCADE, verbose_name='COUNTRY_INDEX', blank=True,
null=True)
city = models.CharField(verbose_name='CITY', max_length=50, null=True)
title = models.CharField(verbose_name='TITLE', max_length=50)
star = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)], default=0)
content = HTMLField('CONTENT')
create_dt = models.DateTimeField('CREATE DATE', auto_now_add=True)
modify_dt = models.DateTimeField('MODIFY DATE')
tags = TaggableManager(blank=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='OWNER', blank=True, null=True) # 작성자
read_cnt = models.IntegerField(default=0)
class Meta: # Content에 대해서 추가정보를 관리하는 정보를 메타정보라고 함
verbose_name = 'board' # 단수
verbose_name_plural = 'boards' # 복수
db_table = 'board' # 테이블명 재정의
ordering = ('-id',) # orderby 절, -이면 내림차순순 <- 저건 튜플 (,) 가 있긴 때문에
def __str__(self):
return self.title
def get_absolute_url(self): # 현재 데이터의 절대 경로 추출
return reverse('board:detail', args=(self.pk,))
def get_previous(self): # 이젠 데이터 추출
return self.get_previous_by_modify_dt()
def get_next(self): # 다음 데이터 추출
return self.get_next_by_modify_dt()
def first_image(self):
if self.files.all().count() > 0:
return self.files.all()[0].filename;
return ''
@property
def update_read_cnt(self):
self.read_cnt = self.read_cnt + 1
self.save()
return self.read_cnt
class BoardAttachFile(models.Model):
board = models.ForeignKey(Board, on_delete=models.CASCADE, related_name="files", verbose_name='Board', blank=True,
null=True)
upload_file = models.FileField(upload_to="%Y/%m/%d", null=True, blank=True, verbose_name='파일')
filename = models.CharField(max_length=64, null=True, verbose_name='첨부파일명')
content_type = models.CharField(max_length=128, null=True, verbose_name='MIME TYPE')
size = models.IntegerField('파일 크기')
def __str__(self):
return self.filename
class Comment(models.Model):
board = models.ForeignKey(Board, on_delete=models.CASCADE, related_name='comments')
owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='OWNER', blank=True, null=True) # 댓글 작성자
text = models.TextField()
create_dt = models.DateTimeField('CREATE DATE', auto_now_add=True)
def get_absolute_url(self): # 현재 데이터의 절대 경로 추출
return reverse('board:detail', args=(self.pk,))
def __str__(self):
return self.text
| [
"hyunsoodii@gmail.com"
] | hyunsoodii@gmail.com |
b64c5c7ef5d4612430e94eed3207217217c4c70a | 45899bc085361274946af8645b2b2c8cbd0bbd74 | /df_order/df_goods/models.py | 0f2a386f17083ef482cd4c7253391382caf2a85e | [] | no_license | seasailz/dailyfresh | 11788bcebea119c9b63aea5fa0647c4e3cb72b42 | 3fbd7c1a10fd15144aa8e61c19e57683a195f39d | refs/heads/master | 2020-09-16T20:21:32.563865 | 2017-08-24T16:00:47 | 2017-08-24T16:00:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | from django.db import models
from tinymce.models import HTMLField
# Create your models here.
class TypeInfo(models.Model):
ttitle=models.CharField(max_length=20)
isDelete=models.BooleanField(default=False)
def __str__(self):
return self.ttitle.encode('utf-8')
class GoodsInfo(models.Model):
gtitle=models.CharField(max_length=20)
gpic=models.ImageField(upload_to='goods')
gprice=models.DecimalField(max_digits=5,decimal_places=2)
isDelete=models.BooleanField(default=False)
gunit=models.CharField(max_length=20,default='500g')
gclick=models.IntegerField()
gjianjie=models.CharField(max_length=200)
gkucun=models.IntegerField()
gcontent=HTMLField()
gtype=models.ForeignKey(TypeInfo)
def __str__(self):
return self.gtitle.encode('utf-8')
| [
"cjbd1g@hotmail.com"
] | cjbd1g@hotmail.com |
2b893ab99679701df2665531c4264510e106ab7e | 97b0af66e8bbb86aa0665eab05c1930b64fa707b | /evil with decimal number.py | c67b93a5a60c297fd515a74180564b88a3d8946d | [] | no_license | Smrity01/python_codes | ed835061470d1d025856620ed7617fca79c90c03 | 77a8299ac8d8b01b5562def32eeb67626824bfce | refs/heads/master | 2021-09-13T05:34:40.277221 | 2018-04-25T14:06:51 | 2018-04-25T14:06:51 | 103,026,976 | 7 | 3 | null | 2018-03-08T08:01:33 | 2017-09-10T12:18:09 | Python | UTF-8 | Python | false | false | 1,192 | py | def check_evil(number):
'''
Objective: To check whether the input is evil or not
Input parameter:
number: The input number as list
Return value: 'evil' or 'not evil' string
'''
#approach: if frequency of '1' is even then return 'evil'
# else return 'not evil '
count = frequency(number)
if count % 2 == 0:
return 'evil'
else: return 'not evil'
def frequency(number):
'''
Objective: Calculate the frequency of '1'
Input parameter:
number: The input number as list
Return value:
'''
#approach: Increment the count for all "1's"
count = 0
while (number != 0):
if number % 2 == 1:
count += 1
number = number // 2
return count
def main():
'''
Objective: Take input from user
Input values:
number: The input number as list
Output value: Print whether the input is evil or not
'''
#approach:
number = int(input("enter the number: "))
result = check_evil(number)
print("This number is " , result,".....!!!!")
if (__name__=='__main__'):
main()
| [
"noreply@github.com"
] | noreply@github.com |
0521ef5a14db2218fe8f5cabb17ba95c0b1cb872 | d5e0d9bd803aa88f48df1357f2fc44a7621b152b | /monta_arq240/trata_campos_arq.py | 659c2b59f04d73f113e0f8d544d2627bf0f53947 | [] | no_license | monzanii/Cole | 4b3bd74dc5971e2e8f622890863666b0883d17fc | 2ad7d1c744ecc97368f31324c51250f4f98f64c5 | refs/heads/master | 2020-12-04T17:52:35.594415 | 2020-01-07T00:57:21 | 2020-01-07T00:57:21 | 231,858,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py |
class Trata_campos_arq:
def __init__(self, dic_conteudo_fixo_campo, dic_tam_real_campo, dic_nome_campo):
self.__dic_conteudo_fixo_campo = dic_conteudo_fixo_campo
self.__dic_tam_real_campo = dic_tam_real_campo
self.__dic_nome_campo = dic_nome_campo
def preenche_conteudo_campos(self, dados_cliente):
linha_head = []
cont = 1
# print( type(self.__dic_nome_campo))
for linha in self.__dic_nome_campo:
if self.__dic_conteudo_fixo_campo[linha] == "dados_clientePC":
#cont += 1
#print(dados_cliente[cont])
dados_cliente[cont] = str(dados_cliente[cont])
tamanho_conteudo = len(dados_cliente[cont])
#print("dentro do if variavel, conteudo variavel = {} , campo varivel = {} ".format(tamanho_conteudo , dados_cliente[cont]))
novo_campo_linha = dados_cliente[cont]
#print(dados_cliente)
cont += 1
else:
tamanho_conteudo = len(self.__dic_conteudo_fixo_campo[linha])
novo_campo_linha = (self.__dic_conteudo_fixo_campo[linha])
# alteracao01 feita 26/05 porque estava montando o headarq errado, nao estava contando o conteudo fixo
#cont += 1
tamanho_real_campo = int(self.__dic_tam_real_campo[linha])
# print("tamanho real= {} , tamanho do conteudo = {}".format(tamanho_real_campo, tamanho_conteudo))
if (tamanho_conteudo == tamanho_real_campo):
# linha_head.append(self.__dic_conteudo_fixo_campo[linha])
linha_head.append(novo_campo_linha)
# print(linha_head)
# elif len(dic_cont_fixo_campo[linha]) < int(dic_tam_real_campo[linha]):
elif (tamanho_conteudo < tamanho_real_campo):
diferenca_de_caracteres = abs(len(novo_campo_linha) - int(self.__dic_tam_real_campo[linha]))
if (type(self.__dic_tam_real_campo[linha]) == str):
novo_campo_linha = novo_campo_linha + (" " * diferenca_de_caracteres)
# print("else string " + linha, self.__dic_tam_real_campo[linha], len(self.__dic_conteudo_fixo_campo[linha]), self.__dic_conteudo_fixo_campo[linha],
# novo_campo_linha)
else:
novo_campo_linha = ("0" * diferenca_de_caracteres) + novo_campo_linha
# print("else numerico " + linha, self.__dic_tam_real_campo[linha], len(self.__dic_conteudo_fixo_campo[linha]), self.__dic_conteudo_fixo_campo[linha],
# novo_campo_linha)
linha_head.append(novo_campo_linha)
# print(linha_head)
return linha_head | [
"monzanii@gmail.com"
] | monzanii@gmail.com |
588590c87831c8b47f4db86aac5f3be992a9b1eb | 4d95050b3321b99196fa6daa3770d2cfac6938eb | /TestCase/suite.py | 218e8a644607f129efc3c5d2977a62bb762ed6fb | [] | no_license | Stone0408/Test1 | 73219a02d7d3798a38bd603de1486aea7c29527d | dca502db8db1bb7c8ce8099cc96af189ea4687bc | refs/heads/master | 2023-03-03T16:57:03.925208 | 2021-02-18T06:53:14 | 2021-02-18T06:53:14 | 339,963,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | #coding=utf-8
import unittest
import HTMLTestRunner
import time
from Common.function import project_path
if __name__ == '__main__':
test_dir =project_path() + 'TestCases'
tests = unittest.defaultTestLoader.discover(test_dir,pattern = 'test*.py',top_level_dir = None)
now = time.strftime('%Y-%m-%d-%H_%m_%S',time.localtime(time.time()))
filepath = project_path() + '/Reports/' + now + '.html'
fp = open(filepath,'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='test_report', description='test_description')
runner.run(tests)
fp.close() | [
"shaojiahua01@163.com"
] | shaojiahua01@163.com |
029ceebf91aa14f84668d02b80b57d0b245d7ffa | 840c35b4ddd9ebedb64a3d1bfd2bb5d79eca246a | /personal_blog/personal_blog/urls.py | aaf032c441ba499ceb3e5ff1635b6c13d5dffb7f | [] | no_license | AAM77/personal_blog_django_only | b632f31e4451ef762c2906b3cc3d4e56ccc68ae3 | 424f8b49ef2d75dd9855b8987a75ebd30501205a | refs/heads/main | 2023-06-23T08:58:34.168563 | 2021-07-24T04:02:43 | 2021-07-24T04:02:43 | 388,972,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """personal_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: the Django admin lives under /admin/;
# every other path is delegated to the blog app's own urls.py.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in admin site
    path('', include('blog.urls')),   # blog app serves the site root
]
| [
"adeel.a.mhd@gmail.com"
] | adeel.a.mhd@gmail.com |
18eb37c2ffe3434f8bcd511d3c748630d8feec5c | d6458a979207e00da6dc653c278b9bfb818ce18d | /Additional Stuff/Medium Stuff/PythonCrypto/crypto9.py | c0eb73e93ad223663400383fdecbc56cad757bcf | [] | no_license | Hackman9912/05-Python-Programming | 61ce7bb48188b4cd3cd8e585480325fdd02e579b | d03a319c952794b2f298a3ef4ddd09c253e24d36 | refs/heads/master | 2020-08-29T14:28:48.403323 | 2019-12-18T21:30:55 | 2019-12-18T21:30:55 | 218,061,276 | 0 | 0 | null | 2019-10-28T14:07:31 | 2019-10-28T14:07:31 | null | UTF-8 | Python | false | false | 454 | py | alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Caesar-cipher: shift every character of the input three places forward
# through `alphabets` (defined above).
string_input = input("Enter a string: ")
input_length = len(string_input)
string_output = ""
for i in range(input_length):
    character = string_input[i]
    location_of_character = alphabets.find(character)
    # BUG FIX: wrap around with modulo so 'X', 'Y' and 'Z' map to 'A', 'B',
    # 'C' instead of indexing past the end of the alphabet (IndexError).
    # (Characters not in `alphabets` still map via find() == -1, as before.)
    new_location = (location_of_character + 3) % len(alphabets)
    string_output = string_output + alphabets[new_location]
print("Encrypted text: ", string_output)
"charleshackett89@gmail.com"
] | charleshackett89@gmail.com |
977bc165e5b5c3d63feed928b4459302dc933bbd | 9bee8acec14f2cffaf28dde4ff11fdba41d032df | /Modul8Python_Kel30/main.py | 0e39c554a1413261f5af31a891fc352c3b458685 | [] | no_license | RifkyHernanda/PRAK_DKP_RIFKYHERNANDA_KELOMPOK30 | 74331e8600852da56c02551cf4e9f8bfdae8c096 | c90815184d2ad89337c0933643a566a418c2893b | refs/heads/main | 2023-05-09T02:00:30.792846 | 2021-05-25T14:39:06 | 2021-05-25T14:39:06 | 354,746,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,827 | py | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from ipconf import CheckIP
Ipconfig = CheckIP("",0,0,0,0,"","","","","")
def submit():
    """Validate the IP/CIDR input, classify the address (class A/B/C),
    compute the subnet mask, network/broadcast addresses and usable host
    range for the selected prefix length, then refresh the result labels.
    """
    clear()
    InputIP = stringnama.get()
    InputCIDR = stringCIDR.get()
    Ipconfig.setIP(str(InputIP))
    Ipconfig.ngecekIP()
    indeks3 = Ipconfig.getIndeksOktet3()
    if len(str(InputIP)) > indeks3 + 8 or len(str(InputIP)) > 18:
        # BUG FIX: tkinter.messagebox is a module, so calling it raised
        # TypeError; showwarning() is the intended call.
        messagebox.showwarning("Warning", "IP nya salah")
    else:
        Oktet1 = Ipconfig.getOktet1()
        Oktet2 = Ipconfig.getOktet2()
        Oktet3 = Ipconfig.getOktet3()
        Oktet4 = Ipconfig.getOktet4()
        # Classful ranges decide which octets form the network vs host id.
        if int(Oktet1) > 0 and int(Oktet1) <= 127:
            Kel = "A"
            HoID = Oktet1
            NeID = Oktet2 + "." + Oktet3 + "." + Oktet4
        elif int(Oktet1) > 127 and int(Oktet1) <= 191:
            Kel = "B"
            HoID = Oktet1 + "." + Oktet2
            NeID = Oktet3 + "." + Oktet4
        elif int(Oktet1) >= 192 and int(Oktet1) <= 254:
            Kel = "C"
            HoID = Oktet1 + "." + Oktet2 + "." + Oktet3
            NeID = Oktet4
        else:
            # BUG FIX: same module-call bug as above, plus an early return --
            # without it the code below raised NameError on the undefined
            # Kel/HoID/NeID.
            messagebox.showwarning("Warning", "IP nya salah")
            return
        Slash = 0
        Subnet = ""
        if stringCIDR.get() != 0:
            Slash = int(stringCIDR.get())
        SisaBagi = Slash % 8
        # Value of the partially-masked subnet octet for each remainder of
        # the prefix length modulo 8.
        CIDR = {
            0: "0",
            1: "128",
            2: "192",
            3: "224",
            4: "240",
            5: "248",
            6: "252",
            7: "254",
        }
        lastsub = CIDR.setdefault(SisaBagi)
        IPbroadcast = ''
        IPhost = ''
        IPrange = ''
        host = 2 ** (8 - SisaBagi)  # block size within the split octet
        # NOTE(review): berapakali is always derived from Oktet4, even for
        # /8-/23 prefixes where the split octet is Oktet2/Oktet3 -- confirm
        # this is intended.
        berapakali = int(Oktet4) // host
        if Slash >= 8 and Slash <= 15:
            Subnet = "255." + lastsub + ".0.0"
            IPhost = Oktet1 + '.' + str(host * berapakali) + '.0.0'
            IPbroadcast = Oktet1 + '.' + str((host * (berapakali + 1)) - 1) + '.255.255'
            IPrange = Oktet1 + '.' + str((host * berapakali) + 1) + '.0.0' + ' - ' + Oktet1 + '.' + str((host * (berapakali + 1)) - 1) + '.255.254'
        elif Slash >= 16 and Slash <= 23:
            Subnet = "255.255." + lastsub + ".0"
            IPhost = Oktet1 + '.' + Oktet2 + '.' + str(host * berapakali) + '.0'
            IPbroadcast = Oktet1 + '.' + Oktet2 + '.' + str((host * (berapakali + 1)) - 1) + '.255'
            IPrange = Oktet1 + '.' + Oktet2 + '.' + str((host * berapakali) + 1) + '.0' + ' - ' + Oktet1 + '.' + Oktet2 + '.' + str((host * (berapakali + 1)) - 1) + '.254'
        elif Slash >= 24 and Slash <= 31:
            Subnet = "255.255.255." + lastsub
            IPhost = Oktet1 + '.' + Oktet2 + '.' + Oktet3 + '.' + str(host * berapakali)
            IPbroadcast = Oktet1 + '.' + Oktet2 + '.' + Oktet3 + '.' + str((host * (berapakali + 1)) - 1)
            IPrange = Oktet1 + '.' + Oktet2 + '.' + Oktet3 + '.' + str((host * berapakali) + 1) + ' - ' + Oktet1 + '.' + Oktet2 + '.' + Oktet3 + '.' + str((host * (berapakali + 1)) - 2)
        print(IPhost)
        print(IPbroadcast)
        print(IPrange)
        # Refresh every result label with the computed values.
        lbIP = Label(top, text="IP\t:", background="Deep Sky Blue2").place(x=30, y=10)
        lbKelas = Label(top, text="Kelas\t: " + Kel, background="Deep Sky Blue2").place(x=30, y=40)
        lbHost = Label(text="Host ID\t: " + HoID, background="Deep Sky Blue2").place(x=30, y=70)
        lbNetwork = Label(text="Net ID\t: " + NeID, background="Deep Sky Blue2").place(x=30, y=100)
        lbOktet1 = Label(text="Oktet1\t: " + Oktet1, background="Deep Sky Blue2").place(x=30, y=130)
        lbOktet2 = Label(text="Oktet2\t: " + Oktet2, background="Deep Sky Blue2").place(x=30, y=160)
        lbOktet3 = Label(text="Oktet3\t: " + Oktet3, background="Deep Sky Blue2").place(x=175, y=130)
        lbOktet4 = Label(text="Oktet4\t: " + Oktet4, background="Deep Sky Blue2").place(x=175, y=160)
        lbSlash = Label(text="Subnet\t: " + Subnet, background="Deep Sky Blue2").place(x=30, y=190)
        lbIPhost = Label(text="IP Host\t\t: " + IPhost, background="Deep Sky Blue2").place(x=30, y=220)
        lbIPBroadcast = Label(text="IP Broadcast\t: " + IPbroadcast, background="Deep Sky Blue2").place(x=30, y=250)
        lbIPRange = Label(text="IP Range\t\t: " + IPrange, background="Deep Sky Blue2").place(x=30, y=280)
def clear():
    """Reset every result label back to its empty caption and clear the
    parsed octet state held by the shared Ipconfig object."""
    captions = (
        ("IP\t:", 30, 10),
        ("Kelas\t: ", 30, 40),
        ("Host ID\t: ", 30, 70),
        ("Net ID\t: ", 30, 100),
        ("Oktet1\t: ", 30, 130),
        ("Oktet2\t: ", 30, 160),
        ("Oktet3\t: ", 175, 130),
        ("Oktet4\t: ", 175, 160),
        ("Subnet\t: ", 30, 190),
        ("IP Host\t\t: ", 30, 220),
        ("IP Broadcast\t: ", 30, 250),
        ("IP Range\t\t: ", 30, 280),
    )
    for caption, xpos, ypos in captions:
        Label(top, text=caption, background="Deep Sky Blue2").place(x=xpos, y=ypos)
    Ipconfig.setOktet(0)
# Root window setup for the IP calculator.
top = Tk()
top.geometry("350x450")
top.configure(background='Deep Sky Blue2')
top.title("Kalkulator IP")
#creating label
# NOTE(review): .place() returns None, so every lb*/inama/btn* variable
# below is None; the widgets survive because Tk keeps its own references.
lbIP = Label(top, text = "IP\t:",background="Deep Sky Blue2").place(x = 30,y = 10)
lbCIDR = Label(top, text = '/', background="Deep Sky Blue2").place(x= 215, y = 10)
lbKelas = Label(top, text = "Kelas\t:",background="Deep Sky Blue2").place(x = 30,y = 40)
lbHost = Label(text = "Host ID\t:",background="Deep Sky Blue2").place(x = 30, y=70)
lbNetwork = Label(text = "Net ID\t:",background="Deep Sky Blue2").place(x=30, y=100)
lbOktet1 = Label(text = "Oktet1\t:",background="Deep Sky Blue2").place(x=30, y=130)
lbOktet2 = Label(text = "Oktet2\t:",background="Deep Sky Blue2").place(x=30, y=160)
lbOktet3 = Label(text = "Oktet3\t:",background="Deep Sky Blue2").place(x=175, y=130)
lbOktet4 = Label(text = "Oktet4\t:",background="Deep Sky Blue2").place(x=175, y=160)
lbSlash = Label(text = "Subnet\t: ",background="Deep Sky Blue2").place(x=30, y=190)
lbIPhost = Label(text = "IP Host\t\t: ",background="Deep Sky Blue2").place(x=30, y=220)
lbIPBroadcast = Label(text = "IP Broadcast\t: ",background="Deep Sky Blue2").place(x=30, y=250)
lbIPRange = Label(text = "IP Range\t\t: ",background="Deep Sky Blue2").place(x=30, y=280)
#create input
stringnama = StringVar()  # free-form IP address entry, read by submit()
inama = Entry(top,width = 20, textvariable=stringnama, ).place(x = 90, y = 10)
stringCIDR = StringVar(value='24')  # CIDR prefix length, /24 by default
Cb1 = ttk.Combobox(top,width=5, textvariable=stringCIDR,state="readonly")
Cb1.place(x=225, y = 10)
Cb1['values']=('8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31')
#iCIDR = Entry(top,width = 5, textvariable=stringCIDR, ).place(x = 225, y = 10)
#create button
btn1 = Button(top, command = submit, text="SUBMIT").place(x=100,y=340)
btn2 = Button(top, command = clear, text="CLEAR").place(x=170,y=340)
top.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
059ea9617b1374463ba40351351352a6757e41e7 | 2907402a9b675f4b7f3cababaddfba9604c84141 | /topic_recognition.py | b59ce77c41e0824e554552b261c20d2eded0ddf1 | [
"MIT"
] | permissive | dushesms/Writing-Aid | c9a6476d8860c7e892f1e14c8d6a95931a2da4fa | 45abc68760546d5f1db7f08b47d2f6795a381607 | refs/heads/main | 2023-08-05T10:58:28.624084 | 2021-09-28T09:14:19 | 2021-09-28T09:14:19 | 386,705,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | """
Tfidf model for topics recognition.
There are 15 topics chosen on EAQUALs standards for CEFR levels.
Dataset was created manually.
"""
import pickle
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from sklearn.multiclass import OneVsRestClassifier
topic_classifier: OneVsRestClassifier = pickle.load(open("topic_recognition_model.pkl", "rb"))
vocab = pickle.load(open("topics_vocabulary.pkl", "rb"))
def predict_topic(text):
try:
REPLACE_BY_SPACE_RE = re.compile( '[/(){}\[\]\|@,;]' )
BAD_SYMBOLS_RE = re.compile( '[^0-9a-z #+_]' )
STOPWORDS = set( stopwords.words( 'english' ) )
text = text.lower() # lowercase text
text = text.replace( "\n", " " )
text = REPLACE_BY_SPACE_RE.sub( ' ', text ) # replace REPLACE_BY_SPACE_RE symbols by space in text
text = BAD_SYMBOLS_RE.sub( '', text ) # delete symbols which are in BAD_SYMBOLS_RE from text
text = [word for word in text.split() if word not in STOPWORDS] # delete stopwors from text
tfidf_vectorizer = TfidfVectorizer(token_pattern='(\S+)', vocabulary=vocab)
r = tfidf_vectorizer.fit_transform( text )
prediction = topic_classifier.predict( r )
mlb = pickle.load(open("binarizer.pkl", "rb"))
prediction = mlb.inverse_transform( prediction )
prediction = list( set( [i[0] for i in prediction if len( i ) > 0] ) )[:3]
if len(prediction) == 0:
message = 'It is hard to define your topic'
if len(prediction) != 0:
return ', '.join( prediction )
return message
except Exception:
pass | [
"59807457+dushesms@users.noreply.github.com"
] | 59807457+dushesms@users.noreply.github.com |
b5631a6b34a3cc98324f7486850ba9eb57bafb8b | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_24037.py | 9b3850dd7618c3e9e86acac4668b6393892f6468 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # Multi-pattern regex in python
<a\s+(?=[^>]*?href="((?:(?!css).)*?)")[^>]*?>(?=[^<]*?(?:support|help))
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
bf9137475e7f0175177e48d16cb80cf89f4c4f5e | 990da93e54784b367f3b435a489a18cc3e8a0af0 | /trees/binary_search_tree_insertion.py | 617670480b739986701ff09eabf5b1d7ad8d8b5e | [] | no_license | tjcdev/leetcode | dcb15614b17afb015c5c9c9e2728f3ace2fdc022 | 441ccff35f0dbd2b7b84cfdf00f3ffd8ef0cb18c | refs/heads/master | 2023-04-21T14:12:55.015507 | 2021-04-20T19:30:14 | 2021-04-20T19:30:14 | 332,500,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | class Node:
def __init__(self, value):
self.left = None
self.right = None
self.data = value
def insert(root, node):
    """Insert *node* into the BST rooted at *root* and return the root.

    BUG FIX: the old code handled an empty tree with ``root = node; return``,
    which only rebound the local name -- inserting into an empty tree was
    silently a no-op for the caller. Returning the (possibly new) root lets
    callers write ``tree = insert(tree, node)``; existing callers that
    ignore the return value are unaffected.
    Duplicate values (data == root.data) go to the left subtree, as before.
    """
    if root is None:
        return node
    if root.data < node.data:
        if root.right is None:
            root.right = node
        else:
            insert(root.right, node)
    else:
        if root.left is None:
            root.left = node
        else:
            insert(root.left, node)
    return root
def preorder(node):
    """Print the subtree rooted at *node* in root-left-right order."""
    if node is None:
        return
    print(node.data)
    preorder(node.left)
    preorder(node.right)
# Build the sample tree:
#        5
#      /   \
#     3     7
#    / \   / \
#   2   4 6   8
tree = Node(5)
for value in (3, 2, 4, 7, 6, 8):
    insert(tree, Node(value))

# Root-first traversal prints: 5 3 2 4 7 6 8
preorder(tree)
"tjcartwright91@gmail.com"
] | tjcartwright91@gmail.com |
d22dce48ca2cb6a0918d2c520ce8921c1c480199 | 0c8c798e10696496a276632bf506e5a28a5a7a65 | /Pizza.py | 15378c6f6f8c1f28899fa867f22d7ed973ad5de5 | [] | no_license | Sale3054/CSCI_4448-Browserator | 540db669ec9563bafdd430a1bc3341b140b892ae | 0e74539beb2c58ca3e73ccf28cfe383f9c8075e2 | refs/heads/master | 2020-03-29T10:06:13.846685 | 2018-11-29T17:58:28 | 2018-11-29T17:58:28 | 149,789,426 | 0 | 0 | null | 2018-11-29T10:55:16 | 2018-09-21T16:26:18 | Python | UTF-8 | Python | false | false | 3,199 | py | #
# Base Pizza construction
#
import abc
class Pizza:
    """
    Overarching class structure for each Pizza object. Contains the abstract
    method ``prepare`` to be filled out by a factory.

    NOTE(review): ``__metaclass__`` is Python-2 syntax; on Python 3 it has no
    effect, so ``prepare`` is not enforced there. Kept as-is for backward
    compatibility -- switching to ``abc.ABC`` would make existing direct
    instantiations fail.
    """
    # metaclass for @abstractmethod (effective on Python 2 only, see above)
    __metaclass__ = abc.ABCMeta

    def __init__(self, name=None, dough=None, sauce=None,
                 veggies=None, cheese=None, meats=None, price=5):
        """
        Initialize all values and ingredients.

        BUG FIX: ``veggies``/``meats`` previously defaulted to ``[]``, a
        mutable default shared across every Pizza instance, so toppings
        appended to one pizza leaked into all others. Each instance now
        gets its own fresh list.
        """
        self.name = name
        self.dough = dough
        self.sauce = sauce
        self.veggies = [] if veggies is None else veggies
        self.cheese = cheese
        self.meats = [] if meats is None else meats
        self.price = price

    @abc.abstractmethod
    def prepare(self):
        """Abstract method to be filled in by the children classes."""
        pass

    def calculate_price(self):
        """
        Return the base price plus the price of every ingredient.

        BUG FIX: ingredients left at their ``None`` default used to raise
        AttributeError when their ``price`` was read; they are now skipped.
        Ingredients whose ``price`` is None are also skipped, as before.
        """
        total_price = self.price
        attr_list = [self.dough, self.sauce, self.cheese]
        attr_list = attr_list + self.meats
        attr_list = attr_list + self.veggies
        for ingredient in attr_list:
            if ingredient is not None and ingredient.price is not None:
                total_price = total_price + ingredient.price
        return total_price

    def bake(self): print("Cooking your {}! Just 25 more minutes!".format(self.name))

    def cut(self): print('A professional is cutting the {} now!'.format(self.name))

    def box(self): print("We're placing your {} in a secure transportation mechanism!".format(self.name))

    def __str__(self):
        """Human-readable description: name plus comma-separated ingredients."""
        final_list = [self.dough, self.sauce, self.cheese] + self.veggies + self.meats
        final_pizza = self.name + ', which consists of: \n'
        # BUG FIX: the old ``i != final_list[-1]`` equality check ended the
        # separator logic early when a duplicate of the last ingredient
        # appeared mid-list; compare positions instead of values.
        for index, item in enumerate(final_list):
            final_pizza += str(item)
            if index != len(final_list) - 1:
                final_pizza += ", "
            else:
                final_pizza += '.\n'
        return final_pizza
| [
"noreply@github.com"
] | noreply@github.com |
55ab9e48e4d12c30704c44e0ffd29d87969367d3 | 2812040977f41c4e33a5b17ffde4ff1973be8fab | /classifier.py | 75cc478155385ad0e0ee12070f5ad146e0b0d6c3 | [] | no_license | tuka04/mc906 | 2f72103c8c0dbacb0e89988b8ca564d8f5aefdc3 | 9f0fd0524ebbb59443c6c283b3cfa7505232d074 | refs/heads/master | 2020-05-15T14:18:58.491912 | 2013-07-01T20:46:14 | 2013-07-01T20:46:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | #!/usr/bin/env python
# encoding: utf-8
# classificacao das mensagens utilizando svm (support vector machines)
from sklearn import svm,metrics #suport vector machine
from sklearn.feature_extraction.text import HashingVectorizer
from numpy import *
class Classifier:
    """Wraps a support-vector-machine classification run (Python 2 file).

    d  -- feature matrix, used both for fitting and for prediction
    tr -- training labels
    te -- expected labels compared against in the confusion matrix
    """
    def __init__(self,d,tr,te):
        self.training = tr
        self.test = te
        self.data = d
        self.prediction = list([])
    def perform(self):
        # Fit on the full data set and predict the very same samples:
        # this reports training-set performance only (no train/test split).
        s = svm.SVC()#support vector classifier
        s.fit(self.data,self.training)
        self.prediction = s.predict(self.data)
        pred = vstack(self.prediction)
        # NOTE(review): set() discards both row order and duplicates, so
        # pred_h is unlikely to line up element-for-element with self.test
        # in confusion_matrix below -- looks like a bug; confirm intent.
        pred_h = set(tuple(r) for r in pred)
        print "Matriz de confusao"
        mc = metrics.confusion_matrix(self.test, pred_h)
        print mc
        # Model name: text before the first parenthesis of repr(s). Unused.
        desc = str(s).split('(')[0]
"tuka04@gmail.com"
] | tuka04@gmail.com |
c0ec840cf9f280f37d96032bbc82705abbc4bed9 | f35a28ecb2e212ae5861d4bff68012c014debd2e | /vendor/cache/ruby/1.9.1/gems/nokogiri-1.6.0/ext/nokogiri/tmp/i686-linux-gnu/ports/libxml2/2.8.0/libxml2-2.8.0/python/setup.py | 9ce0d8254baef399d10e386c52d07fffa3880e1d | [
"MIT",
"LicenseRef-scancode-x11-xconsortium-veillard"
] | permissive | klaus33/web_app | 11d882e753bf7cef85c6f95310a39ec6493abd33 | 2a219e6c53881ecc429aaa156ff53eb89c4d120b | refs/heads/master | 2021-01-01T18:29:47.229985 | 2013-07-22T15:31:21 | 2013-07-22T15:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,798 | py | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/home/jesus/Documents/WebProject/web_project/vendor/cache/ruby/1.9.1/gems/nokogiri-1.6.0/ports/i686-linux-gnu/libxml2/2.8.0'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 when *file* is not readable, 0 when it is."""
    return 0 if os.access(file, os.R_OK) else 1
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
    """Rewrite bare libxml2mod/libxsltmod imports to their package-relative
    form so the bundled-DLL layout (libxmlmods package) is used instead."""
    replacements = (
        ("import libxml2mod", "from libxmlmods import libxml2mod"),
        ("import libxsltmod", "from libxmlmods import libxsltmod"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print "failed to find headers for libxml2: update includes_dir"
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print "failed to find headers for libiconv: update includes_dir"
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print "failed to find and generate stubs for libxml2, aborting ..."
print sys.exc_type, sys.exc_value
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print "libxslt stub generator not found, libxslt not built"
else:
try:
import xsltgenerator
except:
print "failed to generate stubs for libxslt, aborting ..."
print sys.exc_type, sys.exc_value
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print "failed to find headers for libxslt: update includes_dir"
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.8.0",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| [
"baiko33@hotmail.com"
] | baiko33@hotmail.com |
1a76c6c4e51d903d2d0e7a39446a88c9235f1f34 | fa31562bf68db2ad03ab56e58cb3fdf987b08759 | /Ciancoders_prueba/Ciancoders/api/models.py | 3a3f2bb328ceb1353749a0e65a57bb00b0d88e0c | [] | no_license | 17004963/cian_coders_prueba | 6dc29b442b4ea5221137f5ee0d3607f73c6c9f61 | 94b7b6a64145f808da8c44dbac90e83845608753 | refs/heads/main | 2023-02-28T05:44:13.109190 | 2021-02-06T20:21:59 | 2021-02-06T20:21:59 | 335,724,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | from django.db import models
import string
import random
def generate_unique_code(length=6):
    """Return a random uppercase code of *length* characters that no Room
    currently uses.

    ``length`` is a new parameter defaulting to the previously hard-coded 6,
    so existing callers are unaffected (Room.code allows up to 8 chars).
    NOTE(review): loops forever once all 26**length codes are taken --
    acceptable while the table stays small.
    """
    while True:
        code = ''.join(random.choices(string.ascii_uppercase, k=length))
        # exists() is cheaper than count() == 0 for a pure membership test.
        if not Room.objects.filter(code=code).exists():
            return code
# Create your models here.
class Room(models.Model):
    """A room identified by a short unique code (see generate_unique_code;
    it generates 6 characters, within this field's max_length of 8)."""
    code = models.CharField(max_length=8, default="", unique=True)
    host = models.CharField(max_length=50, unique=True)  # one room per host
    guest_can_pause = models.BooleanField(null=False, default=False)
    votes_to_skip = models.IntegerField(null=False, default=1)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
class Cliente(models.Model):
    """Customer account; the e-mail address (correo) is the primary key."""
    correo=models.CharField(max_length=35, unique=True, primary_key=True)
    # NOTE(review): password appears to be stored in plain text -- should be hashed.
    contrasena=models.CharField(max_length=15)
    Nombre=models.CharField(max_length=60)
    tipo=models.CharField(max_length=10, default="1")  # account type/role flag
class Producto(models.Model):
    """A product listed for sale by a vendor."""
    Id_producto=models.CharField(max_length=40)  # external product id (not the DB pk)
    Nombre=models.CharField(max_length=40)
    Vendedor=models.CharField(max_length=35)  # presumably the seller's correo -- confirm
    Cantidad=models.IntegerField()  # units in stock
    Descripcion=models.TextField(max_length=350)
    # NOTE(review): string default on a FloatField; Django coerces it, but "1.00" is odd.
    Precio=models.FloatField(default="1.00")
class Ventas(models.Model):
    """A completed sale of a product from a vendor to a buyer."""
    Id_producto=models.CharField(max_length=40)
    Nombre=models.CharField(max_length=40)
    Vendedor=models.CharField(max_length=35)
    Comprador=models.CharField(max_length=40)  # buyer identifier
    Cantidad=models.IntegerField()  # units sold
    Precio=models.FloatField(default="1.00")
    Fecha=models.DateTimeField(auto_now_add=True)  # sale timestamp
| [
"17004963@gaileo.edu"
] | 17004963@gaileo.edu |
0cf3e8b1f4dccdb26bc504fc2a1e1bb08588490d | 494623968b1ea115ae61fca4e37fee763a5b64ac | /Python_Flask_Web_App/utils.py | 9072c5483ccb2f2acc08ee27b30ae600aeb51f42 | [] | no_license | ClayMav/Contemporary-Programming-Languages | 05f90c191aac2a3711df67414b895a3efdbe0ef2 | df36a032e3d113be99c0bda0ad16eef7b3e930ae | refs/heads/master | 2021-05-06T20:05:24.279247 | 2018-02-19T18:31:35 | 2018-02-19T18:31:35 | 112,271,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | """Utility functions for searchinator."""
import datetime
import functools
import json
from flask import flash, redirect, render_template, session, url_for
from condition import Condition
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
"""Expected string format for :class:`datetime.datetime`."""
def load_time(x):
    """Parse a ``TIME_FORMAT``-formatted string into a
    :class:`datetime.datetime`."""
    parsed = datetime.datetime.strptime(x, TIME_FORMAT)
    return parsed
def dump_time(x):
    """Render a :class:`datetime.datetime` as a ``TIME_FORMAT`` string."""
    rendered = x.strftime(TIME_FORMAT)
    return rendered
def as_inator(dct):
    """JSON ``object_hook``: revive an inator dict's typed fields.

    Dicts whose keys are exactly the inator schema get their 'added' value
    parsed to a datetime and 'condition' to a :class:`Condition`. Any other
    dict, or one whose values fail to parse, is returned unchanged.
    """
    expected_keys = {'ident', 'name', 'location',
                     'description', 'condition', 'added'}
    if set(dct.keys()) != expected_keys:
        return dct
    try:
        revived = dct.copy()
        revived['added'] = load_time(revived['added'])
        revived['condition'] = Condition(int(revived['condition']))
        return revived
    except ValueError:
        return dct
def from_datetime(obj):
    """``json.dumps`` ``default`` hook: serialize datetimes, reject the rest."""
    if not isinstance(obj, datetime.datetime):
        raise TypeError("{} is not JSON serializable".format(repr(obj)))
    return dump_time(obj)
def add_data_param(path):
    """Wrap a function to facilitate data storage.

    Decorator factory: the wrapped function receives, as its first argument,
    the dict loaded from the JSON file at *path* (inator values revived via
    as_inator). After the call, the -- possibly mutated -- dict is written
    back to the same file (datetimes serialized via from_datetime).
    """
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                with open(path, 'r') as f:
                    datum = json.loads(f.read(), object_hook=as_inator)
            except (OSError, json.decoder.JSONDecodeError):
                # Missing or corrupt file: start from an empty store.
                datum = {}
            retval = func(datum, *args, **kwargs)
            # Note: only reached if func returns normally -- an exception in
            # func leaves the file untouched.
            with open(path, 'w') as f:
                f.write(json.dumps(datum, default=from_datetime))
            return retval
        return wrapped
    return wrapper
def uses_template(template):
    """Decorator: render *template* with the wrapped view's dict context.

    If the view returns a dict, it is rendered with -- in order of
    preference -- its 'inators' value, its 'inator' value, or the whole
    dict passed as ``inators``. Non-dict return values (redirects, response
    strings) pass through untouched.
    """
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            ctx = func(*args, **kwargs)
            if not isinstance(ctx, dict):
                return ctx
            # BUG FIX: the old nested try/except KeyError chain also caught
            # KeyErrors raised *inside* render_template, silently rendering
            # with the wrong context; explicit key checks only branch on the
            # view's own context shape.
            if 'inators' in ctx:
                return render_template(template, inators=ctx['inators'])
            if 'inator' in ctx:
                return render_template(template, inator=ctx['inator'])
            return render_template(template, inators=ctx)
        return wrapped
    return wrapper
def login_required(func):
    """Decorator: run *func* only for logged-in users; otherwise flash a
    warning and bounce the visitor to the login page."""
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        if 'username' not in session:
            flash("You must be logged in to access that page.", 'danger')
            return redirect(url_for('login'))
        return func(*args, **kwargs)
    return guarded
| [
"mcginnis@claymav.com"
] | mcginnis@claymav.com |
a9700496be274223e96b75a2761f3fa8678de50c | 1767072029090e31f1adb88435d7da9a219d7b5c | /PyNTC/pyntc2.py | cf1c88363d43ce0e24f79c5538c4afa9406d16fb | [] | no_license | jafo2128/network-automation-book | a2d44e638ed1aeb2fddcb801450bcfe444c067a9 | bd0738f8c6e46a9d2120dee73fa98e09449ec9d4 | refs/heads/master | 2022-03-03T19:16:22.421438 | 2018-04-11T04:15:41 | 2018-04-11T04:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from pyntc import ntc_device
import json
# Connect to a Cisco IOS device over SSH (pyntc's ntc_device, imported
# above) and dump the running config of interface e0/0.
# Python 2 file: note the print statement on the last line.
ios = ntc_device(host="192.168.99.1",
                 username="user",
                 password="user123",
                 device_type="cisco_ios_ssh")
ios.open()
# Raw CLI output for the interface section of the running config.
ios_int = ios.show("show run int e0/0")
print ios_int
| [
"noreply@github.com"
] | noreply@github.com |
4957c2dfdea4282fdac9acb9014529e1c0fcf72c | d48404bd7ec460f3cfaf4a72876733f2071f35cf | /CodeChef/EVENPSUM.py | b528ca9ebe9e926917770038beacac090cbc7bc2 | [] | no_license | barvaliyavishal/DataStructure | 1c015ff6df1aeb08955dd8dd3a989ced8e08d634 | 33f2bb9cb50c15cbb3a018a0b3d20ec9d99c680e | refs/heads/main | 2023-04-02T11:33:54.272237 | 2021-04-07T03:57:54 | 2021-04-07T03:57:54 | 313,028,546 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | def pairs(a,b):
if a==1 or b==1:
return a*b
if a%2 == 0:
a1=a/2
a2=a/2
else:
a1 = int(a//2)+1
a2 = a//2
if b%2 == 0:
b1=int(b/2)
b2=int(b/2)
else:
b1 = int((b//2)+1)
b2 = int(b//2)
return int((a1*b1)+(a2*b2))
# Read T test cases; each subsequent line holds A and B separated by a space.
case_count = int(input())
for _ in range(case_count):
    fields = input().split(" ")
    first = int(fields[0])
    second = int(fields[1])
    print(pairs(first, second))
"vishalbarvaliya112@gmail.com"
] | vishalbarvaliya112@gmail.com |
6d81457f59277f24752cda040437e62c73dc9ceb | 0a186a6a772a7ba62c28e2cc87c9bfb91075c851 | /노래정리_파일이름바꾸기_linux.py | adc5f6572b847f9d3fdad0f295e9938aab7507d3 | [] | no_license | dustjrdk/- | 80fb75990e43d4db3dfa6e234fe5435e32c1a0c9 | 934a39341f6a0b340cc3ac52bba95b8032bfeeb4 | refs/heads/master | 2021-01-22T04:18:07.483951 | 2017-02-10T05:31:59 | 2017-02-10T05:31:59 | 81,530,015 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 348 | py |
import os
ddir = "\\192.168.0.2\\yeon\\HardDisk\\노래\\마이클잭슨"
old_name = "_"
new_name = ' '
os.chdir(ddir)
list_screen = os.listdir(ddir)
print(list_screen)
for file in list_screen:
new_file=file.replace(old_name,new_name)
print(new_file)
os.renames(file,new_file)
#print(list_screen)
| [
"noreply@github.com"
] | noreply@github.com |
03722a6aee3545b21f5bc9399bbd2f59dbe8837f | 8e5d01b7aadd865acf81d6f0a197a9c5fe7a36a6 | /main.py | 2cf75402fc30637367d97bae2d32669e66bc2d09 | [] | no_license | Renatkg20/GDP_of_KR | 16448ecc3b469a0dfb04ea02d93fac172b556efa | 0a3f0be4864fd782c10722dfe9d0ff1105dffd94 | refs/heads/master | 2023-04-15T09:42:25.784827 | 2021-04-26T09:05:38 | 2021-04-26T09:05:38 | 361,685,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py |
import requests
import lxml
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
l1 = []
l2 = []
url = 'http://www.stat.kg/ru/opendata/category/2315/xml'
res = requests.get(url)
soup = BeautifulSoup(res.text,'lxml-xml')
titles = soup.find('data').find_all('value')
titles1 = soup.find('data').find_all('key')
for title1 in titles1:
l1.append(int(title1.get_text()))
for title in titles:
l2.append(float(title.get_text()))
df = pd.DataFrame({"Year" : l1, 'GDP in USD' : l2})
print(df)
df.plot.hist()
| [
""
] | |
a5b1c8c29ebdcc6e81b78b01ef9623b07134fa7a | 17aa3e0f6bf81271b1e36f53e10070ba834edb3c | /AIZU_ONLINE_JUDGE/python/code/alds1_10_a_2.py | 392d460620328b7bffdf1194b30946fa1e36000c | [] | no_license | 9ryuuuuu/hobby | dd44590cf30dff341e62d293a48567e404fc5c5f | 7aad7eb1546a7f6d71e6ba9229fd412fb53d3e4e | refs/heads/master | 2020-04-27T23:36:51.168087 | 2019-04-02T08:27:29 | 2019-04-02T08:27:29 | 174,782,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | n = int(input())
MAX = 45
table = [0] * MAX
for i in range(MAX):
# print(n)
if i == 0 or i == 1:
table[i] = 1
else:
table[i] = table[i - 1] + table[i - 2]
print(table[n]) | [
"ruiakirab@gmail.com"
] | ruiakirab@gmail.com |
29441f3f228659d5914fc9740e748282cce34a6f | fc257bc24ec0a524fd10f4967adc31ec21f2effa | /Building/InitialTest.py | 22712f427637eca753731c97c4971b55439aeb48 | [] | no_license | connor-makowski/AnagramSolver | 0c014193b91e1850d63928f7371eaf43a54685b5 | 8081287d74da6aaadbfcf5d17c348bb247ca1ee0 | refs/heads/master | 2020-03-29T00:34:47.026615 | 2018-09-18T20:32:40 | 2018-09-18T20:32:40 | 149,346,184 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | dictionary={'a':["apple","atom"], 'b':["banana", "bat"], 'e':[], 'l':[], 'm':[], 'o':[], 'p':[], 't':[]}
def find(letters):
consider=[]
found=[]
for i in list(set(letters)):
for j in dictionary[i]:
consider.append(j)
letters.append("")
for i in consider:
ileft=i
for j in letters:
if len(ileft)==0:
found.append(i)
break
if j in i:
ileft=ileft.replace(j,"",1)
return found
x=find(["a","t","o","m"])
print (x)
| [
"connorman528@gmail.com"
] | connorman528@gmail.com |
ce63955b9b3d6f780694b413010a500e6dac2b1f | ed3435b41e9274dfed3505623e1e6cbc31b3f257 | /chatProgram.py | 696d0e5f4516f1a2ec41786fdb6cb64268117b8c | [
"BSD-3-Clause"
] | permissive | MohamedSeliem/Peer-to-Peer-Chatting-Program | c9d2b58e9df5ea571aa1cb9027c77cf92c7a374d | e3549905832963e0df5a144dc41a980e6f17197a | refs/heads/master | 2021-07-06T01:20:05.248103 | 2017-09-30T16:09:05 | 2017-09-30T16:09:05 | 105,381,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,598 | py | """
/*
* Copyright (c) 2017, University of Louisiana at Lafayete
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/*
* # Peer-to-Peer-Chatting-Program
* \author Mohamed Seliem <mohamed.seliem1@louisiana.edu>
*/
"""
from tkinter import *
from tkinter.ttk import *
import socket
import _thread
class ChatClient(Frame):
def __init__(self, root):
Frame.__init__(self, root)
self.root = root
self.initUI()
self.serverSoc = None
self.serverStatus = 0
self.buffsize = 1024
self.allClients = {}
self.counter = 0
def initUI(self):
self.root.title("Peer-to-Peer Chatting Program")
ScreenSizeX = self.root.winfo_screenwidth()
ScreenSizeY = self.root.winfo_screenheight()
self.FrameSizeX = 800
self.FrameSizeY = 600
FramePosX = (ScreenSizeX - self.FrameSizeX)/2
FramePosY = (ScreenSizeY - self.FrameSizeY)/2
self.root.geometry("%dx%d+%d+%d" % (self.FrameSizeX,self.FrameSizeY,FramePosX,FramePosY))
self.root.resizable(width=False, height=False)
padX = 10
padY = 10
parentFrame = Frame(self.root)
parentFrame.grid(padx=padX, pady=padY, stick=E+W+N+S)
ipGroup = Frame(parentFrame)
serverLabel = Label(ipGroup, text="Set: ")
self.nameVar = StringVar()
self.nameVar.set("SDH")
nameField = Entry(ipGroup, width=10, textvariable=self.nameVar)
self.serverIPVar = StringVar()
self.serverIPVar.set("127.0.0.1")
serverIPField = Entry(ipGroup, width=15, textvariable=self.serverIPVar)
self.serverPortVar = StringVar()
self.serverPortVar.set("12345")
serverPortField = Entry(ipGroup, width=5, textvariable=self.serverPortVar)
serverSetButton = Button(ipGroup, text="Set", width=10, command=self.handleSetServer)
addClientLabel = Label(ipGroup, text="Add friend: ")
self.clientIPVar = StringVar()
self.clientIPVar.set("127.0.0.1")
clientIPField = Entry(ipGroup, width=15, textvariable=self.clientIPVar)
self.clientPortVar = StringVar()
self.clientPortVar.set("12345")
clientPortField = Entry(ipGroup, width=5, textvariable=self.clientPortVar)
clientSetButton = Button(ipGroup, text="Add", width=10, command=self.handleAddClient)
serverLabel.grid(row=0, column=0)
nameField.grid(row=0, column=1)
serverIPField.grid(row=0, column=2)
serverPortField.grid(row=0, column=3)
serverSetButton.grid(row=0, column=4, padx=5)
addClientLabel.grid(row=0, column=5)
clientIPField.grid(row=0, column=6)
clientPortField.grid(row=0, column=7)
clientSetButton.grid(row=0, column=8, padx=5)
readChatGroup = Frame(parentFrame)
self.receivedChats = Text(readChatGroup, bg="white", width=60, height=25, state=DISABLED)
self.friends = Listbox(readChatGroup, bg="white", width=30, height=25)
self.receivedChats.grid(row=0, column=0, sticky=W+N+S, padx = (0,10))
self.friends.grid(row=0, column=1, sticky=E+N+S)
writeChatGroup = Frame(parentFrame)
self.chatVar = StringVar()
self.chatField = Entry(writeChatGroup, width=80, textvariable=self.chatVar)
sendChatButton = Button(writeChatGroup, text="Send", width=10, command=self.handleSendChat)
self.chatField.grid(row=0, column=0, sticky=W)
sendChatButton.grid(row=0, column=1, padx=5)
self.statusLabel = Label(parentFrame)
bottomLabel = Label(parentFrame, text="This code is part of assignment deliverable to CSCE-513 \nPrinciples of computer networks and communication\nDr. Khalid El gazzar, CACS,University of Louisiana at Lafayette")
ipGroup.grid(row=0, column=0)
readChatGroup.grid(row=1, column=0)
writeChatGroup.grid(row=2, column=0, pady=10)
self.statusLabel.grid(row=3, column=0)
bottomLabel.grid(row=4, column=0, pady=10)
def handleSetServer(self):
if self.serverSoc != None:
self.serverSoc.close()
self.serverSoc = None
self.serverStatus = 0
serveraddr = (self.serverIPVar.get().replace(' ',''), int(self.serverPortVar.get().replace(' ','')))
try:
self.serverSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serverSoc.bind(serveraddr)
self.serverSoc.listen(5)
self.setStatus("Server listening on %s:%s" % serveraddr)
_thread.start_new_thread(self.listenClients,())
self.serverStatus = 1
self.name = self.nameVar.get().replace(' ','')
if self.name == '':
self.name = "%s:%s" % serveraddr
except:
self.setStatus("Error setting up server")
def listenClients(self):
while 1:
clientsoc, clientaddr = self.serverSoc.accept()
self.setStatus("Client connected from %s:%s" % clientaddr)
self.addClient(clientsoc, clientaddr)
_thread.start_new_thread(self.handleClientMessages, (clientsoc, clientaddr))
self.serverSoc.close()
def handleAddClient(self):
if self.serverStatus == 0:
self.setStatus("Set server address first")
return
clientaddr = (self.clientIPVar.get().replace(' ',''), int(self.clientPortVar.get().replace(' ','')))
try:
clientsoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsoc.connect(clientaddr)
self.setStatus("Connected to client on %s:%s" % clientaddr)
self.addClient(clientsoc, clientaddr)
_thread.start_new_thread(self.handleClientMessages, (clientsoc, clientaddr))
except:
self.setStatus("Error connecting to client")
def handleClientMessages(self, clientsoc, clientaddr):
while 1:
try:
data = clientsoc.recv(self.buffsize)
if not data:
break
self.addChat("%s:%s" % clientaddr, data)
except:
break
self.removeClient(clientsoc, clientaddr)
clientsoc.close()
self.setStatus("Client disconnected from %s:%s" % clientaddr)
def handleSendChat(self):
if self.serverStatus == 0:
self.setStatus("Set server address first")
return
msg = self.chatVar.get().replace(' ','')
if msg == '':
return
self.addChat("me", msg)
for client in self.allClients.keys():
client.send(str.encode(msg))
def addChat(self, client, msg):
self.receivedChats.config(state=NORMAL)
self.receivedChats.insert("end",client+": "+msg+"\n")
self.receivedChats.config(state=DISABLED)
def addClient(self, clientsoc, clientaddr):
self.allClients[clientsoc]=self.counter
self.counter += 1
self.friends.insert(self.counter,"%s:%s" % clientaddr)
def removeClient(self, clientsoc, clientaddr):
print(self.allClients)
self.friends.delete(self.allClients[clientsoc])
del self.allClients[clientsoc]
print(self.allClients)
def setStatus(self, msg):
self.statusLabel.config(text=msg)
print(msg)
def main():
root = Tk()
app = ChatClient(root)
root.mainloop()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
2360573e0a5cb29fffd13fe988c7d6f093440ffe | b07a7ef99254fd5cb63287eb9552755e33dc4ff0 | /Project_V4/hypothesisEngine/algorithm/parameters.py | e0085ee952771c214dcf6e4154ac9a4a28f4637d | [
"Apache-2.0"
] | permissive | pavandonthireddy/Website-v1 | dc7912f59fbe50912ac63a3ec93aee9efd17c58e | ec143dcf565da23783803c56c323f0105e74d759 | refs/heads/master | 2021-02-04T02:04:49.703095 | 2020-02-27T19:39:08 | 2020-02-27T19:39:08 | 243,600,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,184 | py | from multiprocessing import cpu_count
from os import path
from socket import gethostname
import os
hostname = gethostname().split('.')
machine_name = hostname[0]
#hypothesis_params = {
# 'START DATE':'2004-01-01',
# 'END DATE': '2018-01-15',
# 'PORTFOLIO': 'US_TOP_500_LIQUID',
# 'NEUTRALIZATION': 'DOLLAR',
# 'LONG LEVERAGE' : 0.5,
# 'SHORT LEVERAGE' : 0.5,
# 'STARTING VALUE' : 20000000,
# 'COST THRESHOLD BPS' : 5,
# 'ADV THRESHOLD PERCENTAGE':10,
# 'COMMISSION BPS': 0.1
# }
hypothesis_params = dict()
"""Algorithm parameters"""
params = {
# Set default step and search loop functions
'SEARCH_LOOP': 'search_loop',
'STEP': 'step',
# Evolutionary Parameters
'POPULATION_SIZE': 500,
'GENERATIONS': 50,
'HILL_CLIMBING_HISTORY': 1000,
'SCHC_COUNT_METHOD': "count_all",
# Set optional experiment name
'EXPERIMENT_NAME': None,
# Set default number of runs to be done.
# ONLY USED WITH EXPERIMENT MANAGER.
'RUNS': 1,
# Class of problem
'FITNESS_FUNCTION': "trading_fitness.regression",
# Select problem dataset
'DATASET_TRAIN': None,
'DATASET_TEST': None,
'DATASET_DELIMITER': None,
# Set grammar file
'GRAMMAR_FILE': "grammar/current_grammar.bnf",
# Set the number of depths permutations are calculated for
# (starting from the minimum path of the grammar).
# Mainly for use with the grammar analyser script.
'PERMUTATION_RAMPS': 5,
# Select error metric
'ERROR_METRIC': None,
# Optimise constants in the supervised_learning fitness function.
'OPTIMIZE_CONSTANTS': False,
# Specify target for target problems
'TARGET': "ponyge_rocks",
# Set max sizes of individuals
'MAX_TREE_DEPTH': 90, # SET TO 90 DUE TO PYTHON EVAL() STACK LIMIT.
# INCREASE AT YOUR OWN RISK.
'MAX_TREE_NODES': None,
'CODON_SIZE': 100000,
'MAX_GENOME_LENGTH': None,
'MAX_WRAPS': 0,
# INITIALISATION
# Set initialisation operator.
'INITIALISATION': "operators.initialisation.PI_grow",
# Set the maximum geneome length for initialisation.
'INIT_GENOME_LENGTH': 200,
# Set the maximum tree depth for initialisation.
'MAX_INIT_TREE_DEPTH': 10,
# Set the minimum tree depth for initialisation.
'MIN_INIT_TREE_DEPTH': None,
# SELECTION
# Set selection operator.
'SELECTION': "operators.selection.tournament",
# For tournament selection
'TOURNAMENT_SIZE': 2,
# For truncation selection
'SELECTION_PROPORTION': 0.5,
# Allow for selection of invalid individuals during selection process.
'INVALID_SELECTION': False,
# OPERATOR OPTIONS
# Boolean flag for selecting whether or not mutation is confined to
# within the used portion of the genome. Default set to True.
'WITHIN_USED': True,
# CROSSOVER
# Set crossover operator.
'CROSSOVER': "operators.crossover.variable_onepoint",
# Set crossover probability.
'CROSSOVER_PROBABILITY': 0.75,
# Prevents crossover from generating invalids.
'NO_CROSSOVER_INVALIDS': False,
# MUTATION
# Set mutation operator.
'MUTATION': "operators.mutation.int_flip_per_codon",
# Set mutation probability (None defaults to 1 over the length of
# the genome for each codon)
'MUTATION_PROBABILITY': None,
# Set number of mutation events
'MUTATION_EVENTS': 1,
# Prevents mutation from generating invalids.
'NO_MUTATION_INVALIDS': True,
# REPLACEMENT
# Set replacement operator.
'REPLACEMENT': "operators.replacement.generational",
# Set elite size.
'ELITE_SIZE': None,
# DEBUGGING
# Use this to turn on debugging mode. This mode doesn't write any files
# and should be used when you want to test new methods.
'DEBUG': False,
# PRINTING
# Use this to print out basic statistics for each generation to the
# command line.
'VERBOSE': False,
# Use this to prevent anything being printed to the command line.
'SILENT': False,
'SAVE_STEP': True,
# SAVING
# Save the phenotype of the best individual from each generation. Can
# generate a lot of files. DEBUG must be False.
'SAVE_ALL': True,
# Save a plot of the evolution of the best fitness result for each
# generation.
'SAVE_PLOTS': True,
# MULTIPROCESSING
# Multi-core parallel processing of phenotype evaluations.
'MULTICORE': False,
# Set the number of cpus to be used for multiprocessing
'CORES': cpu_count(),
# STATE SAVING/LOADING
# Save the state of the evolutionary run every generation. You can
# specify how often you want to save the state with SAVE_STATE_STEP.
'SAVE_STATE': False,
# Specify how often the state of the current evolutionary run is
# saved (i.e. every n-th generation). Requires int value.
'SAVE_STATE_STEP': 1,
# Load an evolutionary run from a saved state. You must specify the
# full file path to the desired state file. Note that state files have
# no file type.
'LOAD_STATE': None,
# SEEDING
# Specify a list of PonyGE2 individuals with which to seed the initial
# population.
'SEED_INDIVIDUALS': [],
# Specify a target seed folder in the 'seeds' directory that contains a
# population of individuals with which to seed a run.
'TARGET_SEED_FOLDER': None,
# Set a target phenotype string for reverse mapping into a GE
# individual
'REVERSE_MAPPING_TARGET': None,
# Set Random Seed for all Random Number Generators to be used by
# PonyGE2, including the standard Python RNG and the NumPy RNG.
'RANDOM_SEED': None,
# CACHING
# The cache tracks unique individuals across evolution by saving a
# string of each phenotype in a big list of all phenotypes. Saves all
# fitness information on each individual. Gives you an idea of how much
# repetition is in standard GE/GP.
'CACHE': False,
# Uses the cache to look up the fitness of duplicate individuals. CACHE
# must be set to True if you want to use this.
'LOOKUP_FITNESS': False,
# Uses the cache to give a bad fitness to duplicate individuals. CACHE
# must be True if you want to use this (obviously)
'LOOKUP_BAD_FITNESS': False,
# Removes duplicate individuals from the population by replacing them
# with mutated versions of the original individual. Hopefully this will
# encourage diversity in the population.
'MUTATE_DUPLICATES': False,
# MULTIAGENT Parameters
# True or False for Multiagent
'MULTIAGENT': False,
# Agent Size. Number of agents having their own copy of genetic material
'AGENT_SIZE': 100,
# Interaction Probablity. How frequently the agents can interaction with each other
'INTERACTION_PROBABILITY': 0.5,
# OTHER
# Set machine name (useful for doing multiple runs)
'MACHINE': machine_name
}
#
#params.update(hypothesis_params)
params1 = {
# Set default step and search loop functions
'SEARCH_LOOP': 'search_loop',
'STEP': 'step',
# Evolutionary Parameters
'POPULATION_SIZE': 500,
'GENERATIONS': 50,
'HILL_CLIMBING_HISTORY': 1000,
'SCHC_COUNT_METHOD': "count_all",
# Set optional experiment name
'EXPERIMENT_NAME': None,
# Set default number of runs to be done.
# ONLY USED WITH EXPERIMENT MANAGER.
'RUNS': 1,
# Class of problem
'FITNESS_FUNCTION': "trading_fitness.regression",
# Select problem dataset
'DATASET_TRAIN': None,
'DATASET_TEST': None,
'DATASET_DELIMITER': None,
# Set grammar file
'GRAMMAR_FILE': "trading_grammar/Vladislavleva4.bnf",
# Set the number of depths permutations are calculated for
# (starting from the minimum path of the grammar).
# Mainly for use with the grammar analyser script.
'PERMUTATION_RAMPS': 5,
# Select error metric
'ERROR_METRIC': None,
# Optimise constants in the supervised_learning fitness function.
'OPTIMIZE_CONSTANTS': False,
# Specify target for target problems
'TARGET': "ponyge_rocks",
# Set max sizes of individuals
'MAX_TREE_DEPTH': 90, # SET TO 90 DUE TO PYTHON EVAL() STACK LIMIT.
# INCREASE AT YOUR OWN RISK.
'MAX_TREE_NODES': None,
'CODON_SIZE': 100000,
'MAX_GENOME_LENGTH': None,
'MAX_WRAPS': 0,
# INITIALISATION
# Set initialisation operator.
'INITIALISATION': "operators.initialisation.PI_grow",
# Set the maximum geneome length for initialisation.
'INIT_GENOME_LENGTH': 200,
# Set the maximum tree depth for initialisation.
'MAX_INIT_TREE_DEPTH': 10,
# Set the minimum tree depth for initialisation.
'MIN_INIT_TREE_DEPTH': None,
# SELECTION
# Set selection operator.
'SELECTION': "operators.selection.tournament",
# For tournament selection
'TOURNAMENT_SIZE': 2,
# For truncation selection
'SELECTION_PROPORTION': 0.5,
# Allow for selection of invalid individuals during selection process.
'INVALID_SELECTION': False,
# OPERATOR OPTIONS
# Boolean flag for selecting whether or not mutation is confined to
# within the used portion of the genome. Default set to True.
'WITHIN_USED': True,
# CROSSOVER
# Set crossover operator.
'CROSSOVER': "operators.crossover.variable_onepoint",
# Set crossover probability.
'CROSSOVER_PROBABILITY': 0.75,
# Prevents crossover from generating invalids.
'NO_CROSSOVER_INVALIDS': False,
# MUTATION
# Set mutation operator.
'MUTATION': "operators.mutation.int_flip_per_codon",
# Set mutation probability (None defaults to 1 over the length of
# the genome for each codon)
'MUTATION_PROBABILITY': None,
# Set number of mutation events
'MUTATION_EVENTS': 1,
# Prevents mutation from generating invalids.
'NO_MUTATION_INVALIDS': False,
# REPLACEMENT
# Set replacement operator.
'REPLACEMENT': "operators.replacement.generational",
# Set elite size.
'ELITE_SIZE': None,
# DEBUGGING
# Use this to turn on debugging mode. This mode doesn't write any files
# and should be used when you want to test new methods.
'DEBUG': False,
# PRINTING
# Use this to print out basic statistics for each generation to the
# command line.
'VERBOSE': False,
# Use this to prevent anything being printed to the command line.
'SILENT': False,
'SAVE_STEP': True,
# SAVING
# Save the phenotype of the best individual from each generation. Can
# generate a lot of files. DEBUG must be False.
'SAVE_ALL': True,
# Save a plot of the evolution of the best fitness result for each
# generation.
'SAVE_PLOTS': True,
# MULTIPROCESSING
# Multi-core parallel processing of phenotype evaluations.
'MULTICORE': False,
# Set the number of cpus to be used for multiprocessing
'CORES': cpu_count(),
# STATE SAVING/LOADING
# Save the state of the evolutionary run every generation. You can
# specify how often you want to save the state with SAVE_STATE_STEP.
'SAVE_STATE': False,
# Specify how often the state of the current evolutionary run is
# saved (i.e. every n-th generation). Requires int value.
'SAVE_STATE_STEP': 1,
# Load an evolutionary run from a saved state. You must specify the
# full file path to the desired state file. Note that state files have
# no file type.
'LOAD_STATE': None,
# SEEDING
# Specify a list of PonyGE2 individuals with which to seed the initial
# population.
'SEED_INDIVIDUALS': [],
# Specify a target seed folder in the 'seeds' directory that contains a
# population of individuals with which to seed a run.
'TARGET_SEED_FOLDER': None,
# Set a target phenotype string for reverse mapping into a GE
# individual
'REVERSE_MAPPING_TARGET': None,
# Set Random Seed for all Random Number Generators to be used by
# PonyGE2, including the standard Python RNG and the NumPy RNG.
'RANDOM_SEED': None,
# CACHING
# The cache tracks unique individuals across evolution by saving a
# string of each phenotype in a big list of all phenotypes. Saves all
# fitness information on each individual. Gives you an idea of how much
# repetition is in standard GE/GP.
'CACHE': False,
# Uses the cache to look up the fitness of duplicate individuals. CACHE
# must be set to True if you want to use this.
'LOOKUP_FITNESS': False,
# Uses the cache to give a bad fitness to duplicate individuals. CACHE
# must be True if you want to use this (obviously)
'LOOKUP_BAD_FITNESS': False,
# Removes duplicate individuals from the population by replacing them
# with mutated versions of the original individual. Hopefully this will
# encourage diversity in the population.
'MUTATE_DUPLICATES': False,
# MULTIAGENT Parameters
# True or False for Multiagent
'MULTIAGENT': False,
# Agent Size. Number of agents having their own copy of genetic material
'AGENT_SIZE': 100,
# Interaction Probablity. How frequently the agents can interaction with each other
'INTERACTION_PROBABILITY': 0.5,
# OTHER
# Set machine name (useful for doing multiple runs)
'MACHINE': machine_name
}
def load_params(file_name):
"""
Load in a params text file and set the params dictionary directly.
:param file_name: The name/location of a parameters file.
:return: Nothing.
"""
try:
open(file_name, "r")
except FileNotFoundError:
s = "algorithm.paremeters.load_params\n" \
"Error: Parameters file not found.\n" \
" Ensure file extension is specified, e.g. 'regression.txt'."
raise Exception(s)
with open(file_name, 'r') as parameters:
# Read the whole parameters file.
content = parameters.readlines()
for line in [l for l in content if not l.startswith("#")]:
# Parameters files are parsed by finding the first instance of a
# colon.
split = line.find(":")
# Everything to the left of the colon is the parameter key,
# everything to the right is the parameter value.
key, value = line[:split], line[split+1:].strip()
# Evaluate parameters.
try:
value = eval(value)
except:
# We can't evaluate, leave value as a string.
pass
# Set parameter
params[key] = value
def set_params(command_line_args, create_files=True, output_result =[] ):
"""
This function parses all command line arguments specified by the user.
If certain parameters are not set then defaults are used (e.g. random
seeds, elite size). Sets the correct imports given command line
arguments. Sets correct grammar file and fitness function. Also
initialises save folders and tracker lists in utilities.trackers.
:param command_line_args: Command line arguments specified by the user.
:return: Nothing.
"""
from hypothesisEngine.utilities.algorithm.initialise_run import initialise_run_params
from hypothesisEngine.utilities.algorithm.initialise_run import set_param_imports
from hypothesisEngine.utilities.fitness.math_functions import return_one_percent
from hypothesisEngine.utilities.algorithm.command_line_parser import parse_cmd_args
from hypothesisEngine.utilities.stats import trackers, clean_stats
from hypothesisEngine.representation import grammar
# command_line_args = ""
# cmd_args, unknown = parse_cmd_args(command_line_args)
# if unknown:
# # We currently do not parse unknown parameters. Raise error.
# s = "algorithm.parameters.set_params\nError: " \
# "unknown parameters: %s\nYou may wish to check the spelling, " \
# "add code to recognise this parameter, or use " \
# "--extra_parameters" % str(unknown)
# raise Exception(s)
# LOAD PARAMETERS FILE
# NOTE that the parameters file overwrites all previously set parameters.
# if 'PARAMETERS' in cmd_args:
# load_params(path.join("..", "parameters", cmd_args['PARAMETERS']))
# Join original params dictionary with command line specified arguments.
# NOTE that command line arguments overwrite all previously set parameters.
# params.update(cmd_args)
if params['LOAD_STATE']:
# Load run from state.
from hypothesisEngine.utilities.algorithm.state import load_state
# Load in state information.
individuals = load_state(params['LOAD_STATE'])
# Set correct search loop.
from hypothesisEngine.algorithm.search_loop import search_loop_from_state
params['SEARCH_LOOP'] = search_loop_from_state
# Set population.
setattr(trackers, "state_individuals", individuals)
else:
if params['REPLACEMENT'].split(".")[-1] == "steady_state":
# Set steady state step and replacement.
params['STEP'] = "steady_state_step"
params['GENERATION_SIZE'] = 2
else:
# Elite size is set to either 1 or 1% of the population size,
# whichever is bigger if no elite size is previously set.
if params['ELITE_SIZE'] is None:
params['ELITE_SIZE'] = return_one_percent(1, params[
'POPULATION_SIZE'])
# Set the size of a generation
params['GENERATION_SIZE'] = params['POPULATION_SIZE'] - \
params['ELITE_SIZE']
# Initialise run lists and folders before we set imports.r
initialise_run_params(create_files)
# Set correct param imports for specified function options, including
# error metrics and fitness functions.
set_param_imports()
# Clean the stats dict to remove unused stats.
clean_stats.clean_stats()
# Set GENOME_OPERATIONS automatically for faster linear operations.
if (params['CROSSOVER'].representation == "subtree" or
params['MUTATION'].representation == "subtree"):
params['GENOME_OPERATIONS'] = False
else:
params['GENOME_OPERATIONS'] = True
# Ensure correct operators are used if multiple fitness functions used.
if hasattr(params['FITNESS_FUNCTION'], 'multi_objective'):
# Check that multi-objective compatible selection is specified.
if not hasattr(params['SELECTION'], "multi_objective"):
s = "algorithm.parameters.set_params\n" \
"Error: multi-objective compatible selection " \
"operator not specified for use with multiple " \
"fitness functions."
raise Exception(s)
if not hasattr(params['REPLACEMENT'], "multi_objective"):
# Check that multi-objective compatible replacement is
# specified.
if not hasattr(params['REPLACEMENT'], "multi_objective"):
s = "algorithm.parameters.set_params\n" \
"Error: multi-objective compatible replacement " \
"operator not specified for use with multiple " \
"fitness functions."
raise Exception(s)
# Parse grammar file and set grammar class.
abs_path = os.getcwd()
datasets_address = abs_path+ "/hypothesisEngine/"
params['BNF_GRAMMAR'] = grammar.Grammar(path.join("/home/contact/hypothesis/Project_V4/hypothesisEngine/", params['GRAMMAR_FILE']))
# Population loading for seeding runs (if specified)
if params['TARGET_SEED_FOLDER']:
# Import population loading function.
from hypothesisEngine.operators.initialisation import load_population
# A target folder containing seed individuals has been given.
params['SEED_INDIVIDUALS'] = load_population(
params['TARGET_SEED_FOLDER'])
elif params['REVERSE_MAPPING_TARGET']:
# A single seed phenotype has been given. Parse and run.
# Import GE LR Parser.
from scripts import GE_LR_parser
# Parse seed individual and store in params.
params['SEED_INDIVIDUALS'] = [GE_LR_parser.main()]
| [
"35779303+pavandonthireddy@users.noreply.github.com"
] | 35779303+pavandonthireddy@users.noreply.github.com |
3f3a8cd40a0948b7d0077907e73fa4d7a1254100 | e3d9e2444f873a3e08f00b428dd291c4a995057b | /pysrc/server.py | e2b6b985314811ac92337ad15e1178440e582f6d | [] | no_license | SachinMeier/Contacts | f02905f576cbee14c96ba58f31f597b6b901e1b6 | f466cfe0906f557ac83e7dcf6a63472586d8ffcc | refs/heads/master | 2022-07-16T05:25:11.498036 | 2020-05-10T16:56:31 | 2020-05-10T16:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | #!/usr/bin/env python3
import socket
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65432 # non-privileged ports > 1023
def write_file(f, data):
f = open("pytest.in", 'a')
f.write(data)
def run_server():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# socket.AF_INET = IPv4
s.bind((HOST, PORT)) # since IPv4, pass tuple (HOST, PORT)
s.listen()
conn, addr = s.accept()
with conn:
print('New Connection:', addr)
msg = 'Hello'
f = open("pytest.in", 'a')
while msg != 'exit':
data = conn.recv(1024)
f.write((data).decode('utf-8'))
#print(data.decode('utf-8'))
if not data:
break
#conn.sendall(data) #repeat back
#msg = input("New Message: ")
#conn.sendall(msg.encode('utf-8'))
#work on sending files
f.close()
print('Connection closed.')
if __name__ == "__main__":
run_server()
#write_file("\nHello again")
| [
"Pillagr6@gmail.com"
] | Pillagr6@gmail.com |
53c98c8632974b7b1a5f574fe5fb035dda1104df | c4702d1a06640555829b367852138cc93ba4a161 | /dym_res_partner/models/dym_res_partner.py | 31e22a8a0cc57df5c41d49344807163c1b9cd62d | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 31,555 | py | import time
from datetime import datetime
import string
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import api
from openerp.osv.expression import get_unaccent_wrapper
import re
import phonenumbers
from phonenumbers import carrier
from phonenumbers.phonenumberutil import number_type
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_payment_term(self, cr, uid, context=None):
obj_payment_term = self.pool.get('account.payment.term')
id_payment_term = obj_payment_term.search(cr, uid, [('name','=','Immediate Payment')])
if id_payment_term :
return id_payment_term[0]
return False
def _get_default_branch(self,cr,uid,ids,context=None):
user_obj = self.pool.get('res.users')
user_browse = user_obj.browse(cr,uid,uid)
branch_ids = False
branch_ids = user_browse.branch_ids and len(user_browse.branch_ids) == 1 and user_browse.branch_ids[0].id or False
return branch_ids
_columns = {
'parent_name': fields.related('parent_id', 'name', type='char', readonly=True, string='Parent name'),
'default_code': fields.char('Partner Code'),
'principle': fields.boolean('Principle'),
'biro_jasa': fields.boolean('Biro Jasa'),
'kas_negara': fields.boolean('Kas Negara'),
'forwarder': fields.boolean('Forwarder'),
'supplier': fields.boolean('General Supplier', help="Check this box if this contact is a supplier. If it's not checked, purchase people will not see it when encoding a purchase order."),
'showroom': fields.boolean('Showroom'),
'ahass': fields.boolean('Ahass'),
'dealer': fields.boolean('Dealer'),
'finance_company': fields.boolean('Finance Company'),
'vat': fields.related('npwp', string="TIN", type="char", help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by the some of the legal statements.", store=True),
'ahm_code': fields.char('AHM Code'),
'dealer_code': fields.char('Dealer Code'),
'kode_pajak_id':fields.selection([('1','010'),('2','020'),('3','030'),('4','040'),('5','050'),('6','060'),('7','070'),('8','080'),('9','090')],'Kode Transaksi FP'),
'tipe_faktur_pajak' : fields.selection([('tanpa_fp','Tanpa Faktur Pajak'),('satuan','Satuan'),('gabungan','Gabungan')],'Tipe Faktur Pajak'),
'pkp' : fields.boolean('PKP'),
'npwp': fields.char('No.NPWP'),
'tgl_kukuh': fields.date('Tgl Kukuh'),
'mobile_provider': fields.char('Mobile Provider'),
#Alamat di Header
'rt':fields.char('RT', size=3),
'rw':fields.char('RW',size=3),
'zip_id':fields.many2one('dym.kelurahan', 'ZIP Code',domain="[('kecamatan_id','=',kecamatan_id),('state_id','=',state_id),('city_id','=',city_id)]"),
'kelurahan':fields.char('Kelurahan',size=100),
'kecamatan_id':fields.many2one('dym.kecamatan','Kecamatan', size=128,domain="[('state_id','=',state_id),('city_id','=',city_id)]"),
'kecamatan':fields.char('Kecamatan', size=100),
'city_id':fields.many2one('dym.city','City',domain="[('state_id','=',state_id)]"),
#Alamat di Tab Customer Info
'sama':fields.boolean(''), #diberi required True
'street_tab': fields.char('Address'),
'street2_tab': fields.char(),
'rt_tab':fields.char('RT', size=3),
'rw_tab':fields.char('RW',size=3),
'zip_tab_id':fields.many2one('dym.kelurahan', 'ZIP Code',domain="[('kecamatan_id','=',kecamatan_tab_id),('state_id','=',state_tab_id),('city_id','=',city_tab_id)]"),
'kelurahan_tab':fields.char('Kelurahan',size=100),
'kecamatan_tab_id':fields.many2one('dym.kecamatan','Kecamatan', size=128,domain="[('state_id','=',state_tab_id),('city_id','=',city_tab_id)]"),
'kecamatan_tab':fields.char('Kecamatan', size=100),
'city_tab_id':fields.many2one('dym.city','City',domain="[('state_id','=',state_tab_id)]"),
'state_tab_id':fields.many2one('res.country.state', 'Province'),
#Field yang ada di Tab Customer Info
'birthday':fields.date('Date of Birth'),
'hp_status':fields.selection([('aktif','Aktif'),('TidakAktif','Tidak Aktif')],'HP Status'),
'gender':fields.selection([('lakilaki', 'Laki-laki'),('perempuan', 'Perempuan')],'Jenis Kelamin'),
'no_kk':fields.char('No. KK',50),
'religion':fields.selection([('Islam', 'Islam'),('Kristen', 'Kristen'),('Katholik', 'Katholik'),('Hindu', 'Hindu'),('Budha', 'Budha')],'Religion'),
'no_ktp':fields.char('No.KTP',50),
'property_account_payable': fields.property(
type='many2one',
relation='account.account',
string="Account Payable",
domain="[('type', '=', 'payable')]",
help="This account will be used instead of the default one as the payable account for the current partner",
required=False),
'property_account_receivable': fields.property(
type='many2one',
relation='account.account',
string="Account Receivable",
domain="[('type', '=', 'receivable')]",
help="This account will be used instead of the default one as the receivable account for the current partner",
required=False),
'property_account_rounding': fields.property(
type='many2one',
relation='account.account',
string="Account Rounding",
required=False),
'pendidikan':fields.selection([('noSD', 'Tidak Tamat SD'),('sd', 'SD'),('sltp', 'SLTP/SMP'),('slta', 'SLTA/SMA'),('akademik', 'Akademi/Diploma'),('sarjana', 'Sarjana(S1)'),('pascasarjana', 'Pasca Sarjana')],'Pendidikan'),
'pekerjaan':fields.selection([('pNegeri', 'Pegawai Negeri'),('pSwasta', 'Pegawai Swasta'),('ojek', 'Ojek'),('pedagang', 'Pedagang/Wiraswasta'),('pelajar', 'Pelajar/Mahasiswa'),('guru', 'Guru/Dosen'),('tni', 'TNI/Polri'),('irt', 'Ibu Rumah Tangga'),('petani/nelayan', 'Petani/Nelayan'),('pro', 'Profesional(Contoh : Dokter)'),('lain', 'Lainnya')],'Pekerjaan'),
'pengeluaran':fields.selection([('<900', '< Rp.900.000,-'),('900125', 'Rp.900.001,- s/d Rp.1.250.000,-'),('125175', 'Rp.1.250.001,- s/d Rp.1.750.000,-'),('175250', 'Rp.1.750.001,- s/d Rp.2.500.000,-'),('250400', 'Rp.2.500.001,- s/d Rp.4.000.000,-'),('400600', 'Rp.4.000.001,- s/d Rp.6.000.000,-'),('600000', '> Rp.6.000.000,-')],'Pengeluaran /Bulan'),
'rel_code': fields.related('default_code', string='Partner Code', type="char", readonly="True"),
'branch_id':fields.many2one('dym.branch',string='Branch'),
'direct_customer': fields.boolean(string='Direct Customer'),
'branch': fields.boolean(string='Branch (Boolean)'),
'is_customer_depo':fields.boolean('Customer Depo'),
'is_group_customer':fields.boolean('Group Customer'),
'member':fields.char('Member Number'),
'creditur_debitur':fields.boolean('Creditur / Debitur'),
#Forwarder
'driver_lines': fields.one2many('dym.driver.line','partner_id','Driver'),
'plat_number_lines': fields.one2many('dym.plat.number.line','partner_id','Plat Number'),
}
_defaults = {
'tz': api.model(lambda self: self.env.context.get('tz', 'Asia/Jakarta')),
'sama': True,
'default_code': 'BPA/',
'branch_id':_get_default_branch,
}
_sql_constraints = [
('unique_member', 'unique(member)', 'Nomor Member sudah terdaftar!'),
]
# def _unique_no_ktp(self, cr, uid, ids, context=None):
# for l in self.browse(cr, uid, ids, context=context):
# if l.no_ktp:
# if self.search(cr,uid,[('no_ktp','=',l.no_ktp),('id','!=',l.id)]):
# return False
# return True
# _constraints = [
# (_unique_no_ktp, 'No KTP Duplicate!', ['no_ktp']),
# ]
def default_get(self, cr, uid, fields, context=None):
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
if 'property_payment_term' in fields:
res.update({'property_payment_term': self._get_payment_term(cr, uid)})
return res
def _display_address(self, cr, uid, address, without_company=False, context=None):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param address: browse record of the res.partner to format
:returns: the address formatted in a display that fit its country habits (or the default ones
if not country is specified)
:rtype: string
'''
'''
<xpath expr="//field[@name='city']" position="before">
<group>
<div>
<field name="street" placeholder="Street..." on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<div>
<field name="street2" placeholder="Street" style="width: 50%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="rt" placeholder="RT" style="width: 25%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="rw" placeholder="RW" style="width: 25%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="state_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" class="oe_no_button" placeholder="Province" style="width: 50%%" options='{"no_open": True}' />
<field name="city_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="City" style="width: 50%%" attrs="{'required': ['|','|',('direct_customer','=',True),('is_group_customer','=',True),('customer','=',True)]}" />
<field name="kecamatan_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="Kecamatan" style="width: 50%%" />
<field name="kecamatan" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="Kecamatan" style="width: 50%%" />
<field name="zip_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="ZIP" style="width: 50%%" options='{"no_open": True}' />
<field name="kelurahan" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" class="oe_no_button" placeholder="Kelurahan" style="width: 50%%" />
</div>
</div>
</group>
</xpath>
'''
# get the information that will be injected into the display format
# get the address format
# address_format = address.country_id.address_format or \
# "%(street)s\n%(street2)s\nRT: %(rt)s RW: %(rw)s Desa/Kel:%(kelurahan)s Kec:%(kecamatan)s\nKab/Kota:%(city)s Prov:%(state_code)s %(zip)s\n%(country_name)s"
address_format = "%(street)s\n%(street2)s\nRT: %(rt)s RW: %(rw)s Desa/Kel:%(kelurahan)s Kec:%(kecamatan)s\nKab/Kota:%(city_name)s Prov: %(state_name)s Kode Pos: %(kode_pos)s\n%(country_name)s"
args = {
'state_code': address.state_id.code or '',
'state_name': address.state_id.name or '',
'country_code': address.country_id.code or '',
'country_name': address.country_id.name or '',
'company_name': address.parent_name or '',
'rt': address.rt or '-',
'rw': address.rw or '-',
'kelurahan': address.kelurahan or '-',
'kecamatan': address.kecamatan or '-',
'city_name': address.city_id and address.city_id.name or '-',
'kode_pos': address.zip_id and address.zip_id.zip or '-',
}
for field in self._address_fields(cr, uid, context=context):
args[field] = getattr(address, field) or ''
if without_company:
args['company_name'] = ''
elif address.parent_id:
address_format = '%(company_name)s\n' + address_format
return address_format % args
def npwp_onchange(self,cr,uid,ids,npwp,context=None):
warning = {}
value = {}
result = {}
if npwp:
formatted_npwp = ''
npwp_normalize = npwp.replace(' ', '').upper()
splitted_npwp = re.findall(r'\d+', npwp_normalize)
if len(splitted_npwp) == 6:
if len(splitted_npwp[0]) == 2 and len(splitted_npwp[1]) == 3 and len(splitted_npwp[2]) == 3 and len(splitted_npwp[3]) == 1 and len(splitted_npwp[4]) == 3 and len(splitted_npwp[5]) == 3:
formatted_npwp = splitted_npwp[0] + '.' + splitted_npwp[1] + '.' + splitted_npwp[2] + '.' + splitted_npwp[3] + '-' + splitted_npwp[4] + '.' + splitted_npwp[5]
return {'value':{'npwp':formatted_npwp}}
elif len(splitted_npwp) == 1 and len(splitted_npwp[0]) == 15:
formatted_npwp = splitted_npwp[0][:2] + '.' + splitted_npwp[0][2:-10] + '.' + splitted_npwp[0][5:-7] + '.' + splitted_npwp[0][8:-6] + '-' + splitted_npwp[0][9:-3] + '.' + splitted_npwp[0][-3:]
return {'value':{'npwp':formatted_npwp}}
warning = {
'title': ('Perhatian !'),
'message': (('Format nomor npwp salah, mohon isi nomor npwp dengan format yang benar! (ex. 99.999.999.9-999.999)')),
}
value['npwp'] = self.browse(cr, uid, ids).npwp
result['warning'] = warning
result['value'] = value
return result
def onchange_mobile(self, cr, uid, ids, mobile, context=None):
value = {}
warning = {}
if mobile:
id_number = phonenumbers.parse(mobile,"ID")
if not carrier._is_mobile(number_type(id_number)):
warning = {
'title': ('Perhatian !'),
'message': (('Masukkan nomor handphone dengan benar, misal: 0817989800')),
}
value['mobile'] = ''
else:
formatted_mobile = phonenumbers.format_number(id_number, phonenumbers.PhoneNumberFormat.E164)
provider_mobile = eval(repr(carrier.name_for_number(id_number, "en")))
value['mobile'] = formatted_mobile
value['mobile_provider'] = provider_mobile
return {
'warning': warning,
'value': value,
}
def onchange_customer(self, cr, uid, ids, customer):
if not customer:
return {
'value':{
'no_ktp':False,
'birthday':False,
'gender':False,
'religion':False,
'no_kk':False,
'pendidikan':False,
'pekerjaan':False,
'pengeluaran':False,
'sama':'',
}
}
return True
def onchange_dealer(self, cr, uid, ids, dealer, finance_company, principle, ahm_code, dealer_code):
def_ahm_code = False
def_dealer_code = False
if dealer:
def_ahm_code = True
def_dealer_code = True
if finance_company:
def_ahm_code = True
if principle:
def_ahm_code = True
return {
'value':{
'ahm_code':ahm_code if def_ahm_code else False,
'dealer_code': dealer_code if def_dealer_code else False,
}
}
def showroom_ahass_change(self, cr, uid, ids, showroom, ahass, dealer, context=None):
value = {}
value['dealer'] = False
if showroom or ahass :
value['dealer'] = True
return {'value':value}
def onchange_pkp(self, cr, uid, ids, pkp, context=None):
if not pkp==False:
return {
'value':{
'npwp':'',
'tgl_kukuh':False,
}
}
return True
def onchange_forwarder(self, cr, uid, ids, forwarder, context=None):
if not forwarder :
return {'value' : {'plat_number_lines':False, 'driver_lines':False}}
return True
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_name, name)
if context.get('show_address_only'):
name = self._display_address(cr, uid, record, without_company=True, context=context)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
if record.default_code:
name = "[%s] %s %s" % (record.default_code, name, '(' + record.member + ')' if record.member else '')
res.append((record.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like') and len(name) >= 3:
self.check_access_rights(cr, uid, 'read')
where_query = self._where_calc(cr, uid, args, context=context)
self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
from_clause, where_clause, where_clause_params = where_query.get_sql()
where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# search on the name of the contacts and of its company
search_name = name
operator = 'like'
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
unaccent = get_unaccent_wrapper(cr)
where_str = where_str.replace('"res_partner"','p')
query = """SELECT p.id
FROM res_partner p
{where} (upper(p.{display_name}) {operator} {percent}
OR upper(p.{default_code}) {operator} {percent}
OR upper(p.{member}) {operator} {percent})
ORDER BY p.{display_name}, p.{default_code}
""".format(where=where_str, operator=operator,
display_name=unaccent('display_name'),
default_code=unaccent('default_code'),
member=unaccent('member'),
percent=unaccent('%s'))
where_clause_params += [search_name.upper(), search_name.upper(), search_name.upper()]
if limit:
query += ' limit %s'
where_clause_params.append(limit)
cr.execute(query, where_clause_params)
ids = map(lambda x: x[0], cr.fetchall())
if ids:
return self.name_get(cr, uid, ids, context)
else:
return []
return []
# def name_search(self, cr, uid, name, args=None, operator='=', context=None, limit=100):
# if not args:
# args = []
# operator = '='
# if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
# self.check_access_rights(cr, uid, 'read')
# where_query = self._where_calc(cr, uid, args, context=context)
# self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
# from_clause, where_clause, where_clause_params = where_query.get_sql()
# where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# if '*' in name or '%' in name:
# operator = 'like'
# if '*' in name:
# name = name.replace('*','%')
# search_name = name
# if operator in ('=ilike', '=like'):
# operator = operator[1:]
# unaccent = get_unaccent_wrapper(cr)
# where_str = where_str.replace('"res_partner"','p')
# query = """SELECT p.id
# FROM res_partner p
# {where} (upper(p.{display_name}) {operator} {percent}
# OR upper(p.{default_code}) {operator} {percent}
# OR upper(p.{member}) {operator} {percent})
# ORDER BY p.{display_name}, p.{default_code}
# """.format(where=where_str, operator=operator,
# display_name=unaccent('display_name'),
# default_code=unaccent('default_code'),
# member=unaccent('member'),
# percent=unaccent('%s'))
# where_clause_params += [search_name.upper(), search_name.upper(), search_name.upper()]
# if limit:
# query += ' limit %s'
# where_clause_params.append(limit)
# cr.execute(query, where_clause_params)
# ids = map(lambda x: x[0], cr.fetchall())
# if ids:
# return self.name_get(cr, uid, ids, context)
# else:
# return []
# return []
def create(self, cr, uid, vals, context=None):
if vals.get('default_code','BPA/') == 'BPA/' :
vals['default_code'] = self.pool.get('ir.sequence').get_sequence(cr, uid, 'BPA', division=False, padding=6)
partner_id = super(res_partner, self).create(cr, uid, vals, context=context)
self.write(cr, uid, partner_id, {'company_id':False})
return partner_id
def onchange_letter(self,cr,uid,ids,sama,street=None,street2=None,rt=None,rw=None,state_id=None,city_id=None,kecamatan_id=None,kecamatan=None,zip_id=None,kelurahan=None,context=None):
value ={}
if not sama :
value = {
'street_tab':False,
'street2_tab':False,
'rt_tab':False,
'rw_tab':False,
'state_tab_id':False,
'city_tab_id':False,
'kecamatan_tab_id':False,
'kecamatan_tab':False,
'zip_tab_id':False,
'kelurahan_tab':False,
}
if sama :
value = {
'street_tab':street,
'street2_tab':street2,
'rt_tab':rt,
'rw_tab':rw,
'state_tab_id':state_id,
'city_tab_id':city_id,
'kecamatan_tab_id':kecamatan_id,
'kecamatan_tab':kecamatan,
'zip_tab_id':zip_id,
'kelurahan_tab':kelurahan,
}
return {'value':value}
def _onchange_kecamatan_tab(self, cr, uid, ids, kecamatan_id):
if kecamatan_id:
kec = self.pool.get("dym.kecamatan").browse(cr, uid, kecamatan_id)
return {'value' : {'kecamatan_tab':kec.name}}
else:
return {'value' : {'kecamatan_tab':False}}
return True
def _onchange_zip_tab(self, cr, uid, ids, zip_id):
if zip_id:
kel = self.pool.get("dym.kelurahan").browse(cr, uid, zip_id)
return {'value' : {'kelurahan_tab':kel.name,}}
else:
return {'value' : {'kelurahan_tab':False,}}
return True
def onchange_address(self,cr,uid,ids,street=None,street2=None,rt=None,rw=None,state_id=None,city_id=None,kecamatan_id=None,kecamatan=None,zip_id=None,kelurahan=None,context=None):
value ={}
warning = {}
if street :
value['street_tab'] = street
if street2 :
value['street2_tab'] = street2
if rt :
if len(rt) > 3 :
warning = {
'title': ('Perhatian !'),
'message': (('RT tidak boleh lebih dari 3 digit ! ')),
}
value = {
'rt':False
}
cek = rt.isdigit()
if not cek :
warning = {
'title': ('Perhatian !'),
'message': (('RT hanya boleh angka ! ')),
}
value = {
'rt':False
}
else :
value['rt_tab'] = rt
if rw :
if len(rw) > 3 :
warning = {
'title': ('Perhatian !'),
'message': (('RW tidak boleh lebih dari 3 digit ! ')),
}
value = {
'rw':False
}
cek = rw.isdigit()
if not cek :
warning = {
'title': ('Perhatian !'),
'message': (('RW hanya boleh angka ! ')),
}
value = {
'rw':False
}
else :
value['rw_tab'] = rw
if state_id :
value['state_tab_id'] = state_id
if city_id :
value['city_tab_id'] = city_id
if kecamatan_id :
kec = self.pool.get("dym.kecamatan").browse(cr, uid, kecamatan_id)
value['kecamatan_tab_id'] = kecamatan_id
value['kecamatan_tab'] = kec.name
value['kecamatan'] = kec.name
if zip_id :
kel = self.pool.get("dym.kelurahan").browse(cr, uid, zip_id)
value['zip_tab_id'] = zip_id
value['kelurahan_tab'] = kel.name
value['kelurahan'] = kel.name
return {'value':value,'warning':warning}
def change_nomor(self,cr,uid,ids,nohp,notelp,context=None):
value = {}
warning = {}
# if nohp :
# if len(nohp) > 13 :
# warning = {
# 'title': ('Perhatian !'),
# 'message': (('No HP tidak boleh lebih dari 13 digit ! ')),
# }
# value = {
# 'no_hp':False
# }
# else :
# cek = nohp.isdigit()
# if not cek :
# warning = {
# 'title': ('Perhatian !'),
# 'message': (('No HP hanya boleh angka ! ')),
# }
# value = {
# 'no_hp':False
# }
# if notelp :
# if len(notelp) > 11 :
# warning = {
# 'title': ('Perhatian !'),
# 'message': (('No Telepon tidak boleh lebih dari 11 digit ! ')),
# }
# value = {
# 'no_telp':False
# }
# else :
# cek = notelp.isdigit()
# if not cek :
# warning = {
# 'title': ('Perhatian !'),
# 'message': (('No Telepon hanya boleh angka ! ')),
# }
# value = {
# 'no_telp':False
# }
return {'warning':warning,'value':value}
def onchange_punctuation(self,cr,uid,ids,no_ktp,context=None):
value = {}
warning = {}
if no_ktp:
if no_ktp == '0':
value = {
'no_ktp':no_ktp
}
elif no_ktp != '0' and len(no_ktp) == 16:
# if no_ktp :
ktp = self.search(cr,uid,[('no_ktp','=',no_ktp)])
if ktp :
warning = {
'title': ('Perhatian !'),
'message': (('No KTP %s sudah pernah dibuat ! ')%(no_ktp)),
}
value = {
'no_ktp':False
}
if not warning :
no_ktp = "".join(l for l in no_ktp if l not in string.punctuation)
value = {
'no_ktp':no_ktp
}
elif no_ktp != '0' and len(no_ktp) != '16':
warning = {
'title': ('Perhatian !'),
'message': (('No KTP harus 16 digit ! ')),
}
value = {
'no_ktp':False
}
return {'value':value,'warning':warning}
class dym_driver_line(osv.osv):
_name = "dym.driver.line"
_rec_name = 'driver'
_columns = {
'partner_id': fields.many2one('res.partner', 'Forwarder'),
'driver': fields.char('Driver'),
}
def driver_change(self, cr, uid, ids, driver, context=None):
value = {}
if driver :
driver = driver.upper()
value['driver'] = driver
return {'value':value}
class dym_plat_number_line(osv.osv):
_name = "dym.plat.number.line"
_rec_name = 'plat_number'
_columns = {
'partner_id': fields.many2one('res.partner', 'Forwarder'),
'plat_number': fields.char('Plat Number'),
}
def plat_number_change(self, cr, uid, ids, plat_number, context=None):
value = {}
warning = {}
if plat_number :
plat_number = plat_number.upper()
plat_number = plat_number.replace(' ','')
value['plat_number'] = plat_number
for x in plat_number :
if x in string.punctuation :
warning = {'title': 'Perhatian', 'message': 'Plat Number hanya boleh huruf dan angka !'}
value['plat_number'] = False
return {'value':value, 'warning':warning}
| [
"rizal@portcities.net"
] | rizal@portcities.net |
15f466d20b51d0e199a6bca4759d7a97d12b9d39 | e1aeede7cecf2bdb3317954e042f41810745b980 | /winston/commands/__init__.py | 879f94514a8039ff04a915527499ca075f99746c | [] | no_license | G10DRAS/winston | b0f50822af077d374e864f2eefa559275c673fef | c72c7f77a89f77d1de31cd0f401b3dc836338b36 | refs/heads/master | 2021-01-15T16:04:40.719122 | 2014-02-27T22:31:56 | 2014-02-27T22:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | import re
class Command(object):
"""
Stores a command that is executed by external events such as a voice command,
a change of state or a notification.
"""
# The name with which all commands begin. Can be a word or a regex.
# Example: jenkins, alfred, robot. "Jenkins! Turn on the lights!"
signal = "winston"
def on_event(self, event, sender):
"""
Handles events from the interpreter and other sources
"""
# Do something here.
class RegexCommand(Command):
"""
Command that matches against a regex string
Set `polite` to True if the regex should match "could you", "please" and other
command decorations.
"""
# Command prefixes and suffixes. Can be a tuple of words or a regex
prefixes = "( can you| could you)?( please)?"
suffixes = "( please)?"
def __init__(self, regex, polite=False):
super(RegexCommand, self).__init__()
if polite:
final_regex = "{signal}{prefix} {command}{suffix}".format(
signal = self.signal,
command = regex,
prefix = self.prefixes,
suffix = self.suffixes,
)
self.regex = re.compile(final_regex)
else:
self.regex = re.compile(regex)
def match(self, text):
return self.regex.match(text) | [
"contact@nicolasbouliane.com"
] | contact@nicolasbouliane.com |
4e46fc1146171e707de259a46aba0b3f3436bf38 | af0246f2f85121746ea851919cb4ae29bfbf3cf7 | /web_flask/5-number_template.py | b5d1289ccff63834cf0f84a5beff7716c1b9fcc5 | [] | no_license | andreshugueth/AirBnB_clone_v2 | a2a3728e5e351da89896a1abf27432a6c4a552e9 | 2284221834134479d30ebd0a29a5902b1ee2eef3 | refs/heads/master | 2022-12-09T16:11:57.215900 | 2020-09-03T03:32:28 | 2020-09-03T03:32:28 | 286,791,785 | 0 | 1 | null | 2020-08-14T01:49:39 | 2020-08-11T16:17:50 | Python | UTF-8 | Python | false | false | 1,263 | py | #!/usr/bin/python3
"""
script that starts a Flask web application
Your web application must be listening on 0.0.0.0, port 5000
"""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def index():
"""display Hello HBNB!"""
return "Hello HBNB!"
@app.route('/hbnb', strict_slashes=False)
def second_index():
"""display HBNB"""
return "HBNB"
@app.route('/c/<text>', strict_slashes=False)
def cisfun(text):
"""print C with a text"""
text = text.replace('_', ' ')
return "C {}".format(text)
@app.route('/python', defaults={'text': 'is cool'}, strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def python_text(text):
"""Return python and a text"""
text = text.replace('_', ' ')
return "Python {}".format(text)
@app.route('/number/<int:n>', strict_slashes=False)
def is_numeber(n):
"""display “n is a number” only if n is an integer"""
return "{:d} is a number".format(n)
@app.route('/number_template/<int:n>', strict_slashes=False)
def numbersandtemplates(n):
"""display a HTML page only if n is an integer"""
return render_template('5-number.html', n=n)
if __name__ == "__main__":
app.run(host='0.0.0.0', port='5000')
| [
"andreshugueth@gmail.com"
] | andreshugueth@gmail.com |
8acc9bc358a8f92477e4d4014cb1f0dd864c69da | 375c87462c4ed200cecce0aeab09c6161ac10dcd | /pwg_ls2/RV/dict_2_changes.py | dd80bde0aefbda7ab8b5fe3967bd41d33ad19f5b | [] | no_license | sanskrit-lexicon/PWG | 2e7ab371ec7e4da43d81d50663b06fa2e2b44806 | d32d701366cff1156b7f7bb0aea8ea27cd7fb7dd | refs/heads/master | 2023-02-07T02:49:53.179915 | 2023-02-03T19:53:25 | 2023-02-03T19:53:25 | 15,903,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | #-*- coding:utf-8 -*-
""" dict_2_changes.py
"""
import sys,re,codecs
## https:##stackoverflow.com/questions/27092833/unicodeencodeerror-charmap-codec-cant-encode-characters
## This required by git bash to avoid error
## UnicodeEncodeError: 'charmap' codec cannot encode characters
## when run in a git bash script.
sys.stdout.reconfigure(encoding='utf-8')
class Change(object):
def __init__(self,metaline,iline,old,new):
self.metaline = metaline
self.iline = iline
self.old = old
self.new = new
def init_changes(lines1,lines2):
changes = [] # array of Change objects
metaline = None
imetaline1 = None
page = None
for iline,line1 in enumerate(lines1):
line2 = lines2[iline]
if iline == 0: # %***This File is E:\\APTE.ALL, Last update 11.09.06
continue #
if line1.startswith('<L>'):
metaline = line1
imetaline1 = iline+1
if line1 == line2:
continue
# generate a change
change = Change(metaline,iline,line1,line2)
changes.append(change)
print(len(changes),'changes found')
return changes
def change_out(change,ichange):
outarr = []
case = ichange + 1
#outarr.append('; TODO Case %s: (reason = %s)' % (case,change.reason))
try:
ident = change.metaline
except:
print('ERROR:',change.iline,change.old)
exit(1)
if ident == None:
ident = 'No metaline available'
outarr.append('; ' + ident)
# change for iline
lnum = change.iline + 1
line = change.old
new = change.new
outarr.append('%s old %s' % (lnum,line))
outarr.append('%s new %s' % (lnum,new))
outarr.append(';')
return outarr
def write_changes(fileout,changes,filein1,filein2):
with codecs.open(fileout,"w","utf-8") as f:
for ichange,change in enumerate(changes):
outarr = change_out(change,ichange)
for out in outarr:
f.write(out+'\n')
print(len(changes),"changes written to",fileout)
if __name__=="__main__":
filein1 = sys.argv[1] # xxx.txt (first version)
filein2 = sys.argv[2] # xxx.txt (second version)
fileout = sys.argv[3] # possible change transactions
with codecs.open(filein1,"r","utf-8") as f:
lines1 = [x.rstrip('\r\n') for x in f]
with codecs.open(filein2,"r","utf-8") as f:
lines2 = [x.rstrip('\r\n') for x in f]
if len(lines1) != len(lines2):
print('ERROR: require same number of lines in the two input files')
exit(1)
print(len(lines1),'lines compared')
changes = init_changes(lines1,lines2)
write_changes(fileout,changes,filein1,filein2)
| [
"funderburkjim@gmail.com"
] | funderburkjim@gmail.com |
db11fda232937227e154ba2606447e655b44cbe6 | ae9b02e1fa88d9c8e28ace5953c695e2e59a8f70 | /phathom/phenotype/celltype.py | 00a7839f83578923c096cae47d86a96bd70beddc | [
"MIT"
] | permissive | chunglabmit/phathom | 32482c3ff776b86dec5c71cbf6c5a41ac40bb110 | 10a4e34e2ebbad5ca4615099cc08c2539c3c2d6b | refs/heads/master | 2022-08-18T20:29:22.950532 | 2022-07-29T19:39:24 | 2022-07-29T19:39:24 | 124,928,433 | 2 | 1 | MIT | 2018-04-16T14:56:08 | 2018-03-12T17:48:05 | Python | UTF-8 | Python | false | false | 13,065 | py | import multiprocessing
from functools import partial
import warnings
import numpy as np
import tqdm
from sklearn.neighbors import NearestNeighbors, kneighbors_graph
from sklearn.cluster import AgglomerativeClustering, MiniBatchKMeans
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial import Voronoi, voronoi_plot_2d
from scipy.interpolate import griddata
import skimage
from skimage import morphology
import scipy.ndimage as ndi
from scipy.special import expit
from skimage.filters import gaussian
from skimage.feature import peak_local_max
from phathom import utils
from phathom.preprocess import filtering
from phathom.segmentation.segmentation import (eigvals_of_weingarten,
seed_probability,
convex_seeds,
find_centroids)
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from tqdm import tqdm
import torch
#use_cuda = torch.cuda.is_available()
#if use_cuda:
# from phathom.segmentation.segmentation import cpu_eigvals_of_weingarten
warnings.simplefilter(action='ignore', category=FutureWarning)
def smooth(image, sigma):
    """Convert `image` to float32 and apply a Gaussian blur of width `sigma`.

    Parameters
    ----------
    image : ndarray
        Input image (any dtype accepted by skimage.img_as_float32)
    sigma : int or tuple
        Standard deviation(s) of the Gaussian kernel

    Returns
    -------
    ndarray
        Blurred float image (value range preserved)
    """
    img_f32 = skimage.img_as_float32(image)
    return gaussian(img_f32, sigma=sigma, preserve_range=True)
def calculate_eigvals(g):
    """Return the curvature (Weingarten map) eigenvalues of a smoothed image.

    Parameters
    ----------
    g : ndarray
        Smoothed 3D image

    Returns
    -------
    ndarray
        4D array; last axis holds the 3 eigenvalues per voxel
    """
    return eigvals_of_weingarten(g)
def negative_curvature_product(image, sigma):
    """Compute the convex-seed map (product of negative curvatures).

    The image is smoothed, its curvature eigenvalues are computed, and
    `convex_seeds` reduces them to a per-voxel seed score.

    Parameters
    ----------
    image : ndarray
        Input 3D image
    sigma : int or tuple
        Gaussian blur width applied before curvature analysis

    Returns
    -------
    ndarray
        Seed score map from `convex_seeds`
    """
    eigvals = calculate_eigvals(smooth(image, sigma))
    return convex_seeds(eigvals)
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)), delegated to scipy's `expit`."""
    return expit(x)
def curvature_probability(eigvals, steepness, offset):
    """Map 3D curvature eigenvalues to an interest-point probability.

    Each of the three eigenvalue channels is passed through a logistic
    function, and the three per-channel probabilities are multiplied.

    Parameters
    ----------
    eigvals : ndarray
        4D array of curvature eigenvalues (last axis holds the 3 values)
    steepness : float
        Slope of the logistic function. Larger gives sharper transition
        between nuclei and background
    offset : float
        Translation of the logistic function. Larger biases towards more
        negative curvatures

    Returns
    -------
    prob : ndarray
        Curvature interest point probability
    """
    # expit is the logistic function; negative eigenvalues (convex regions)
    # map to probabilities above 0.5.
    channel_probs = [expit(-steepness * (eigvals[..., axis] + offset))
                     for axis in range(3)]
    return channel_probs[0] * channel_probs[1] * channel_probs[2]
def intensity_probability(image, I0=None, stdev=None):
    """Calculate the foreground probability using an exponential model.

    Intensities are normalized by `I0`; each normalized value x is mapped
    to ``1 - exp(-x**2 / (2 * stdev**2))``, so bright voxels approach 1.

    Parameters
    ----------
    image : ndarray
        Input image
    I0 : float, optional
        Normalization value. Default, mean of image
    stdev : float, optional
        Width of the transition to foreground. Default, stdev of the
        normalized image

    Returns
    -------
    prob : ndarray
        Foreground probability map
    """
    if I0 is None:
        I0 = image.mean()
    normalized = image / I0
    if stdev is None:
        stdev = normalized.std()
    # Removed a stray debug `print(stdev)` that polluted stdout on every call.
    return 1 - np.exp(-normalized ** 2 / (2 * stdev ** 2))
def nucleus_probability(image, sigma, steepness=500, offset=0.0005, I0=None, stdev=None):
    """Calculate the nucleus probability map.

    Combines a curvature-based probability (logistic over the Weingarten
    eigenvalues) with an intensity-based foreground probability.

    Parameters
    ----------
    image : ndarray
        3D image volume of nuclei staining
    sigma : int or tuple
        Amount to blur the image before processing
    steepness : float
        Slope of the logistic function. Larger gives sharper transition
        between nuclei and background
    offset : float
        Translation of the logistic function. Larger biases towards more
        negative curvatures
    I0 : float, optional
        Intensity normalization value (default: image mean)
    stdev : float, optional
        Width of the intensity transition (default: normalized-image stdev)

    Returns
    -------
    prob : ndarray
        Nuclei probability map (elementwise product of the two terms)
    """
    blurred = smooth(image, sigma)
    p_curv = curvature_probability(calculate_eigvals(blurred), steepness, offset)
    p_fg = intensity_probability(blurred, I0, stdev)
    return p_curv * p_fg
def nuclei_centers_ncp(image, sigma, **plm_kwargs):
    """Detect nuclei centers as peaks of the negative-curvature product.

    Parameters
    ----------
    image : ndarray
        Input 3D image
    sigma : int or tuple
        Blur width used by `negative_curvature_product`
    **plm_kwargs
        Forwarded to skimage's `peak_local_max`

    Returns
    -------
    ndarray
        Coordinates of detected peaks
    """
    seed_map = negative_curvature_product(image, sigma)
    # Peaks of -seed_map correspond to the most convex (nucleus-like) voxels.
    return peak_local_max(-seed_map, **plm_kwargs)
def nuclei_centers_probability(prob, threshold, h):
    """Find nuclei centers as h-maxima of a probability map.

    Parameters
    ----------
    prob : ndarray
        Nucleus probability map
    threshold : float
        Background threshold applied before peak extraction
    h : float
        Depth of the h-maxima transform (minimum peak prominence)

    Returns
    -------
    ndarray
        (N, ndim) integer array of rounded centroid coordinates
    """
    prob = filtering.remove_background(prob, threshold)
    # Morphological reconstruction of (prob - h) under prob yields the
    # h-maxima transform; the positive residual marks peaks deeper than h.
    hmax = morphology.reconstruction(prob - h, prob, 'dilation')
    extmax = prob - hmax
    seeds = (extmax > 0)
    labels = ndi.label(seeds, morphology.cube(width=3))[0]
    # np.int was removed in NumPy 1.24 (it was an alias for the builtin int),
    # so cast with the builtin instead.
    return np.round(find_centroids(labels)).astype(int)
def nuclei_centers_probability2(prob, threshold, min_dist):
    """Find nuclei centers as local maxima of a probability map.

    Parameters
    ----------
    prob : ndarray
        Nucleus probability map
    threshold : float
        Background threshold; also used as the absolute peak threshold
    min_dist : int
        Minimum separation between detected peaks

    Returns
    -------
    ndarray
        Coordinates of detected peaks
    """
    foreground = filtering.remove_background(prob, threshold)
    return peak_local_max(foreground,
                          min_distance=min_dist,
                          threshold_abs=threshold)
def _detect_nuclei_chunk(input_tuple, overlap, sigma, min_intensity, steepness, offset, I0=None, stdev=None, prob_thresh=0.5, min_dist=1, prob_output=None):
    """Detect nuclei centers within one ghosted chunk of a larger array.

    Worker function for `detect_nuclei_parallel`. Extracts the chunk plus
    an `overlap`-wide ghost border, computes the nucleus probability map,
    optionally writes the (un-ghosted) probability into `prob_output`, and
    returns detected centers in global array coordinates.

    Parameters
    ----------
    input_tuple : tuple
        (arr, start_coord, chunks) as supplied by `utils.pmap_chunks`
    overlap : int
        Ghost-region width shared with neighboring chunks
    sigma : int or tuple
        Blur width for `nucleus_probability`
    min_intensity : float
        Chunks whose maximum intensity is below this are skipped
    steepness, offset : float
        Curvature-logistic parameters for `nucleus_probability`
    I0, stdev : float, optional
        Intensity-probability normalization and width
    prob_thresh : float
        Probability threshold for peak detection
    min_dist : int
        Minimum separation between detected peaks
    prob_output : array-like, optional
        If given, the valid (non-ghost) probability region is inserted here

    Returns
    -------
    ndarray or None
        Global coordinates of detected centers, or None if the chunk is
        too dim or contains no peaks
    """
    arr, start_coord, chunks = input_tuple
    ghosted_chunk, start_ghosted, _ = utils.extract_ghosted_chunk(arr, start_coord, chunks, overlap)
    # Skip obviously-empty chunks before the expensive probability computation
    if ghosted_chunk.max() < min_intensity:
        return None
    prob = nucleus_probability(ghosted_chunk, sigma, steepness, offset, I0, stdev)
    if prob_output is not None:
        # Crop the ghost border off before writing the probability map back,
        # so neighboring chunks don't overwrite each other's valid regions.
        start_local = start_coord - start_ghosted
        stop_local = np.minimum(start_local + np.asarray(chunks),
                                np.asarray(ghosted_chunk.shape))
        prob_valid = utils.extract_box(prob, start_local, stop_local)
        stop_coord = start_coord + np.asarray(prob_valid.shape)
        utils.insert_box(prob_output, start_coord, stop_coord, prob_valid)
    centers_local = nuclei_centers_probability2(prob, prob_thresh, min_dist)
    if centers_local.size == 0:
        return None
    # Filter out any centers detected in ghosted area (they belong to
    # neighboring chunks and would otherwise be double-counted)
    centers_interior = utils.filter_ghosted_points(start_ghosted, start_coord, centers_local, chunks, overlap)
    # change to global coordinates
    centers = centers_interior + start_ghosted
    return centers
def detect_nuclei_parallel(z_arr, sigma, min_intensity, steepness, offset, I0, stdev, prob_thresh, min_dist, chunks, overlap, nb_workers=None, prob_output=None):
    """Run chunked nuclei detection over *z_arr* with overlapping chunk borders.

    Returns the per-chunk results produced by ``utils.pmap_chunks``.
    """
    chunk_kwargs = dict(overlap=overlap,
                        sigma=sigma,
                        min_intensity=min_intensity,
                        steepness=steepness,
                        offset=offset,
                        I0=I0,
                        stdev=stdev,
                        prob_thresh=prob_thresh,
                        min_dist=min_dist,
                        prob_output=prob_output)
    worker = partial(_detect_nuclei_chunk, **chunk_kwargs)
    return utils.pmap_chunks(worker, z_arr, chunks, nb_workers, use_imap=True)
def sample_intensity_cube(center, image, radius):
    """Sample image intensities in a cube of half-width *radius* around *center*.

    Returns the flattened intensities of the clipped bounding box.
    """
    start = [max(0, int(c - radius)) for c in center]
    # NOTE(review): clipping to d - 1 appears to exclude the last voxel of
    # each axis if extract_box treats `stop` as exclusive — confirm intent.
    stop = [min(int(c + radius + 1), d - 1) for c, d in zip(center, image.shape)]
    bbox = utils.extract_box(image, start, stop)
    return bbox.flatten()
def nuclei_centered_intensities(image, centers, radius, mode='cube', nb_workers=None):
    """Sample image intensities around each nucleus center in parallel.

    Returns a list of flattened intensity arrays, one per center.
    """
    if nb_workers is None:
        nb_workers = multiprocessing.cpu_count()
    if mode != 'cube':
        raise ValueError('Only cube sampling is currently supported')
    sampler = partial(sample_intensity_cube, image=image, radius=radius)
    with multiprocessing.Pool(nb_workers) as pool:
        progress = tqdm(pool.imap(sampler, centers), total=centers.shape[0])
        return list(progress)
def calculate_mfi(input):
    """Calculate the Mean Fluorescence Intensity for input list of nucleus-centered samples

    Parameters
    ----------
    input : list
        List of ndarrays containing image intensities near nuclei

    Returns
    -------
    output : ndarray
        1D array of MFIs for each nucleus
    """
    means = [sample.mean() for sample in input]
    return np.asarray(means)
def calculate_stdev(input):
    """Calculate the standard deviation for input list of nucleus-centered samples

    Parameters
    ----------
    input : list
        List of ndarrays containing image intensities near nuclei

    Returns
    -------
    output : ndarray
        1D array of standard deviations for each nucleus
    """
    deviations = [sample.std() for sample in input]
    return np.asarray(deviations)
def threshold_mfi(mfi, threshold):
    """Binary-label nuclei whose MFI is strictly above *threshold*.

    Parameters
    ----------
    mfi : ndarray
        1D array of mean fluorescence intensities.
    threshold : float
        Positivity threshold (strict).

    Returns
    -------
    ndarray
        Integer array with 1 for positive nuclei, 0 otherwise.
    """
    positive_idx = np.where(mfi > threshold)[0]
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # dtype=int gives the platform default integer dtype.
    labels = np.zeros(mfi.shape, dtype=int)
    labels[positive_idx] = 1
    return labels
def query_neighbors(pts, n_neighbors, query_pts=None):
    """Return (distances, indices) of the k nearest neighbors of *query_pts* in *pts*.

    When *query_pts* is None, each point in *pts* is queried against *pts*.
    """
    if query_pts is None:
        query_pts = pts
    model = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree', n_jobs=-1)
    model.fit(pts)
    return model.kneighbors(query_pts)
def query_radius(pts, radius, query_pts=None):
    """Return (distances, indices) of all neighbors of *query_pts* within *radius*.

    When *query_pts* is None, each point in *pts* is queried against *pts*.
    """
    if query_pts is None:
        query_pts = pts
    model = NearestNeighbors(radius=radius, algorithm='kd_tree', n_jobs=-1)
    model.fit(pts)
    return model.radius_neighbors(query_pts)
def local_densities(distances, indices, sox2_labels, tbr1_labels, radius=None):
    """Compute per-cell local densities of SOX2+ and TBR1+ neighbors.

    ``distances``/``indices`` are per-query neighbor arrays (e.g. from
    ``query_radius``); each density assumes a spherical neighborhood of
    the given ``radius``.

    Returns an (n_cells, 2) array of [sox2_density, tbr1_density] rows.
    """
    features = []
    for dist, idx in zip(distances, indices):
        sox2_flags = sox2_labels[idx]
        tbr1_flags = tbr1_labels[idx]
        nb_cells = len(idx)
        if nb_cells == 0:
            cell_density = 0
            sox2_density = 0
            tbr1_density = 0
        else:
            nb_sox2 = sox2_flags.sum()
            nb_tbr1 = tbr1_flags.sum()
            # NOTE(review): when radius is None it is taken from the FIRST
            # non-empty neighborhood only and then reused for every later
            # cell — confirm this is intended rather than per-cell radii.
            if radius is None:
                radius = dist.mean()
            # cell_density is computed but not included in the feature vector
            cell_density = nb_cells / (4 / 3 * np.pi * radius ** 3)
            sox2_density = nb_sox2 / (4 / 3 * np.pi * radius ** 3)
            tbr1_density = nb_tbr1 / (4 / 3 * np.pi * radius ** 3)
        f = np.array([sox2_density, tbr1_density])
        features.append(f)
    return np.asarray(features)
import matplotlib.pyplot as plt
def cluster_dendrogram(features):
    """Show a Ward-linkage dendrogram of *features* (blocks on plt.show())."""
    z = linkage(features, method='ward')
    plt.figure()
    dendrogram(z)
    plt.show()
def connectivity(pts, n_neighbors=2, include_self=False):
    """Return a sparse k-nearest-neighbor connectivity graph of *pts*."""
    return kneighbors_graph(pts, n_neighbors=n_neighbors, include_self=include_self)
def cluster(features, n_clusters=2, connectivity=None):
    """Agglomeratively cluster *features*; optionally constrain merges with a
    connectivity graph. Returns the predicted region label per sample."""
    kwargs = {'n_clusters': n_clusters}
    if connectivity is not None:
        kwargs['connectivity'] = connectivity
    return AgglomerativeClustering(**kwargs).fit_predict(features)
def kmeans(features, n_clusters, random_state=0, batch_size=100, max_iter=10):
    """Fit a MiniBatchKMeans model on *features* and return the fitted model."""
    model = MiniBatchKMeans(n_clusters=n_clusters,
                            random_state=random_state,
                            batch_size=batch_size,
                            max_iter=max_iter)
    return model.fit(features)
def voronoi(pts):
    """Return the scipy Voronoi tessellation of *pts*."""
    return Voronoi(pts)
def voronoi_plot(vor, labels=None):
    """Plot a 2D Voronoi diagram, optionally filling regions colored by *labels*.

    labels are mapped through a [0, 1] normalization into the Blues_r
    colormap. Blocks on plt.show().
    """
    fig = voronoi_plot_2d(vor, show_points=True, show_vertices=False, s=1)
    if labels is not None:
        norm = mcolors.Normalize(vmin=0, vmax=1, clip=True)
        mapper = cm.ScalarMappable(norm=norm, cmap=cm.Blues_r)
        for r in range(len(vor.point_region)):
            region = vor.regions[vor.point_region[r]]
            # Skip unbounded regions (they contain the -1 sentinel vertex)
            if not -1 in region:
                polygon = [vor.vertices[i] for i in region]
                plt.fill(*zip(*polygon), color=mapper.to_rgba(labels[r]))
    plt.show()
# Module-level buffer shared with worker processes (set in rasterize_regions)
rasterized = None


def _rasterize_chunk(args):
    """Worker: nearest-neighbor interpolate labels into one chunk of `rasterized`."""
    start, shape, chunks, pts, labels = args
    global rasterized
    stop = np.minimum(shape, start + np.asarray(chunks))
    grid_z, grid_y, grid_x = np.mgrid[start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]]
    data = griddata(pts, labels, (grid_z, grid_y, grid_x), method='nearest').astype(np.uint8)
    rasterized = utils.insert_box(rasterized, start, stop, data)
def rasterize_regions(pts, labels, shape, chunks=None, nb_workers=None):
    """Rasterize point labels into a dense uint8 volume via nearest-neighbor
    interpolation.

    When `chunks` is given, the volume is filled chunk-by-chunk across
    `nb_workers` processes through the module-level `rasterized` buffer.
    """
    global rasterized
    if nb_workers is None:
        nb_workers = multiprocessing.cpu_count()
    if chunks is None:
        grid_z, grid_y, grid_x = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
        rasterized = griddata(pts, labels, (grid_z, grid_y, grid_x), method='nearest').astype(np.uint8)
    else:
        chunk_coords = utils.chunk_coordinates(shape, chunks)
        args_list = []
        for start in tqdm(chunk_coords, total=len(chunk_coords)):
            args_list.append((start, shape, chunks, pts, labels))
        # Shared-memory buffer that each worker writes its chunk into
        rasterized = utils.SharedMemory(shape=shape, dtype=np.uint8)
        with multiprocessing.Pool(processes=nb_workers) as pool:
            list(tqdm(pool.imap(_rasterize_chunk, args_list), total=len(args_list)))
    return rasterized
| [
"jswaney@mit.edu"
] | jswaney@mit.edu |
35d4289b0d5b7197676570e63cb452d1d2bfd5cb | be5b91588f198a665160a574e2eba2dd0be84783 | /database/write_nlu.py | 5a4e3c8356059d789dcec0516b42e509f4a727a7 | [] | no_license | swqsd218219/rasa_uncertain_slot | f60f9162cc629552f2deef0fb6cd6ea8cb93ae42 | ec7a0912b9058e3b19acce6ae05b8e871d720141 | refs/heads/main | 2023-03-08T18:13:26.853865 | 2021-03-01T01:23:20 | 2021-03-01T01:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,783 | py |
'''
定义模板:
query attribute:
1. what is the cpu?
- what kind of entity do you want to ask?
- server
- please tell me about the ip of the entity
- 1.2.3.4
- 4 cores
2. what is the cpu of the 1.1.1.1?
- please tell me about the entity of the ip
- server
- 4 cores
3. what is the cpu of the server 1.2.3.1
- 5 cores
query relation
1. list all the server host in ?
- what kind of entity do you ask?(datacenter, cluster)
- cluster
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
2. list all the server host in datacenter?
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
3. list all the server host in datacenter 1.1.1.1
- dataframe of servers
'''
# Load the raw CSV exports (first row of each file is the header).
with open('cluster.csv','r',encoding='utf-8') as f1:
    cluster = f1.readlines()
with open('datacenter.csv','r',encoding='utf-8') as f2:
    datacenter = f2.readlines()
with open('server.csv','r',encoding='utf-8') as f3:
    server = f3.readlines()

# entity2attribute: entity type -> list of attribute names (taken from header)
# entity2ip: entity type -> list of instance IPs (taken from data rows)
entity2attribute = {}
entity2ip = {}
entity2ip['cluster'] = []
entity2ip['datacenter'] = []
entity2ip['server'] = []

for index,line in enumerate(cluster):
    if index == 0:
        # Header row: record the cluster attribute names
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        business = line[2]
        city = line[3]
        datacenter_ip = line[4]
        entity2attribute['cluster'] = [name,business,city,datacenter_ip]
    else:
        # Data row: collect this cluster's IP
        line = line.strip()
        line = line.split(',')
        # print(line)
        ip = line[0]
        entity2ip['cluster'].append(ip)

for index,line in enumerate(datacenter):
    if index == 0:
        # Header row: record the datacenter attribute names
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        longitude = line[2]
        latitude = line[3]
        region = line[4]
        cpu = line[5]
        entity2attribute['datacenter'] = [name, longitude, latitude, region,cpu]
    else:
        # Data row: collect this datacenter's IP
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        entity2ip['datacenter'].append(ip)

for index,line in enumerate(server):
    if index == 0:
        # Header row: record the server attribute names
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        cpu = line[2]
        memory = line[3]
        disk = line[4]
        server_ip = line[5]
        datacenter_ip = line[6]
        entity2attribute['server'] = [name, cpu, memory, disk,server_ip,datacenter_ip]
    else:
        # Data row: collect this server's IP
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        entity2ip['server'].append(ip)

# relation name -> {subject entity: [object entities]}; drives the
# query_<relation> intent generation in write_query_ralation below.
relation2entity = {
    'host in':{'server':['cluster','datacenter'],'cluster':['datacenter']},
    'configuration by':{'datacenter':['cluster','server'],'cluster':['server']}
}
def write_query_attribute(f):
    """Write NLU training examples for the query_attribute intent to *f*."""
    f.write('## intent: query_attribute\n')
    for entity, attributes in entity2attribute.items():
        ips = entity2ip[entity]
        for attribute in attributes:
            for ip in ips:
                f.write(f'- what is the [{attribute}](attribute) ?\n')
                f.write(f'- what is the [{attribute}](attribute) of the [{ip}](ip) ?\n')
                f.write(f'- what is the [{attribute}](attribute) of the [{entity}](entity) [{ip}](ip) ?\n')
def write_query_ralation(f):
    """Write NLU training examples for each relation intent to *f*.

    (The historical 'ralation' spelling is kept because the name is part of
    the module's public interface.)
    """
    for relation, entities in relation2entity.items():
        intent_suffix = relation.replace(' ', '_')
        f.write(f'## intent: query_{intent_suffix}\n')
        for s_entity, o_entities in entities.items():
            for o_entity in o_entities:
                for ip in entity2ip[o_entity]:
                    f.write(f'- list all the [{s_entity}](s_entity) {relation} ?\n')
                    f.write(f'- list all the [{s_entity}](s_entity) {relation} [{o_entity}](o_entity) ?\n')
                    f.write(f'- list all the [{s_entity}](s_entity) {relation} [{o_entity}](o_entity) [{ip}](ip) ?\n')
def write_lookup(f):
    """Write Rasa lookup-table declarations for every slot type to *f*."""
    f.write('## lookup:entity' + '\n')
    f.write('    data/lookup/entity.txt' + '\n')
    f.write('## lookup:attribute' + '\n')
    f.write('    data/lookup/attribute.txt' + '\n')
    f.write('## lookup:s_entity' + '\n')
    f.write('    data/lookup/s_entity.txt' + '\n')
    f.write('## lookup:o_entity' + '\n')
    f.write('    data/lookup/o_entity.txt' + '\n')
    f.write('## lookup:ip' + '\n')
    f.write('    data/lookup/ip.txt' + '\n')
if __name__ == '__main__':
    # Fix: use a context manager so the output file is flushed and closed
    # even if one of the writers raises (the original never closed it).
    with open('./nlu.md', 'a', encoding='utf-8') as f:
        write_query_attribute(f)
        write_query_ralation(f)
        write_lookup(f)
| [
"zhangmw_play@163.com"
] | zhangmw_play@163.com |
7376a63bba0d810d9b3569b2ade1ee4fa29cff6b | ce92f733e9a79a0aa5359afae370c69fe013b3bb | /univer_db/models/models.py | fe7e54dadf16503a44112bfde687423e0a29d2c7 | [] | no_license | yessenovuniversity/python_univer | afb15fac1328f6e3b9a0d2f7dd1fce69a5be4fd2 | f4000f541bd971c2c1d0c5fab2c0ef22fdd2298b | refs/heads/master | 2023-08-21T23:26:58.545578 | 2021-09-29T13:41:37 | 2021-09-29T13:41:37 | 294,068,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,986 | py | from sqlalchemy import Column, Integer, String, Float, Boolean, ForeignKey, Date, DateTime
from sqlalchemy.orm import relationship
from univer_db.orm import get_base
Base = get_base()
class Speciality(Base):
    """Speciality (degree programme) model."""

    __tablename__ = 'univer_speciality'

    id = Column('speciality_id', Integer, primary_key=True)
    status = Column(Integer)
    faculty_id = Column(ForeignKey('univer_faculty.faculty_id'))
    faculty = relationship('Faculty')
    stage_id = Column(ForeignKey('univer_stage.stage_id'))
    stage = relationship('Stage')
    # Localized names (Kazakh / Russian / English)
    name_kz = Column('speciality_name_kz', String(200))
    name_ru = Column('speciality_name_ru', String(200))
    name_en = Column('speciality_name_en', String(200))
    code = Column('speciality_okpd', String(10))
    # Note: the underlying columns keep the historical 'discription' typo
    description_kz = Column('speciality_discription_kz', String)
    description_ru = Column('speciality_discription_ru', String)
    description_en = Column('speciality_discription_en', String)
    result_ru = Column(String)
    result_kz = Column(String)
    result_en = Column(String)
    type = Column(Integer)

    def __repr__(self):
        return "<Speciality {}>".format(self)

    def __str__(self):
        return "{} - {}".format(self.code, self.name_ru)
class SpecialityChair(Base):
    """Association model linking a Speciality to a Chair (department)."""

    __tablename__ = 'univer_speciality_chair'

    speciality_id = Column(ForeignKey('univer_speciality.speciality_id'), primary_key=True)
    speciality = relationship('Speciality')
    chair_id = Column(ForeignKey('univer_chair.chair_id'), primary_key=True)
    chair = relationship('Chair')

    def __repr__(self):
        return '<SpecialityChair {}>'.format(self)

    def __str__(self):
        return '{} - {}'.format(self.chair, self.speciality)
class Subject(Base):
    """Subject (academic discipline) model."""

    __tablename__ = 'univer_subject'

    id = Column('subject_id', Integer, primary_key=True)
    # Localized names and descriptions (Kazakh / Russian / English)
    name_kz = Column('subject_name_kz', String(500))
    name_ru = Column('subject_name_ru', String(500))
    name_en = Column('subject_name_en', String(500))
    description_kz = Column('subject_description_kz', String)
    description_ru = Column('subject_description_ru', String)
    description_en = Column('subject_description_en', String)
    status = Column(Integer)

    def __repr__(self):
        return '<Subject {}>'.format(self.name_ru)

    def __str__(self):
        return self.name_ru
class EducType(Base):
    """Education type lookup model."""

    __tablename__ = 'univer_educ_type'

    id = Column('educ_type_id', Integer, primary_key=True)
    name_ru = Column('educ_type_name_ru', String(100))

    def __repr__(self):
        return '<EducType {}>'.format(self.name_ru)

    def __str__(self):
        return self.name_ru
class LangDivision(Base):
    """Language division (language of instruction) model."""

    __tablename__ = 'univer_lang_division'

    id = Column('lang_division_id', Integer, primary_key=True)
    name_ru = Column('lang_division_name_ru', String(100))

    def __repr__(self):
        return '<LangDivision {}>'.format(self.name_ru)

    def __str__(self):
        return self.name_ru
class DocumentIdentity(Base):
    """Identity document type model.

    Status: done.
    """

    __tablename__ = 'univer_document_identity'

    id = Column('document_identity_type', Integer, primary_key=True)
    name_ru = Column('document_name_ru', String(100))
    name_kz = Column('document_name_kz', String(100))
    name_en = Column('document_name_en', String(100))

    def __repr__(self):
        return '<DocumentIdentity {} (id={})'.format(self, self.id)

    def __str__(self):
        return self.name_ru
class Institution(Base):
    """Educational institution model.

    Status: in progress.
    """

    __tablename__ = 'univer_edu_institutions'

    id = Column('edu_institution_id', Integer, primary_key=True)
    name_kz = Column('edu_institution_name_kz', String)
    name_ru = Column('edu_institution_name_ru', String)
    name_en = Column('edu_institution_name_en', String)

    def __repr__(self):
        return '<Institution {} (id={})>'.format(self, self.id)

    def __str__(self):
        return self.name_ru
class GraduateDocType(Base):
    """Type of the document issued on graduating from the previous
    educational institution (before university admission).

    Status: in progress.
    """

    __tablename__ = 'univer_graduate_doctypes'

    id = Column('graduate_doctype_id', Integer, primary_key=True)
    name_ru = Column('graduate_doctype_name_ru', String)
    name_kz = Column('graduate_doctype_name_kz', String)
    name_en = Column('graduate_doctype_name_en', String)

    def __repr__(self):
        return '<GraduateDocType {} (id={})>'.format(self, self.id)

    def __str__(self):
        return self.name_ru
class GraduateInfo(Base):
    """Details of graduation from the previous educational institution
    (before university admission).

    Status: in progress.
    """

    __tablename__ = 'univer_graduate_info'

    id = Column('graduate_info_id', Integer, primary_key=True)
    date = Column('graduate_info_date', DateTime)
    institution_name = Column('graduate_info_institution_name', String)
    # Series and number of the graduation document
    series = Column('graduate_info_series', String)
    number = Column('graduate_info_number', String)
    graduate_doctype_id = Column(ForeignKey('univer_graduate_doctypes.graduate_doctype_id'))
    graduate_doctype = relationship('GraduateDocType')
    institution_id = Column('edu_institution_id', ForeignKey('univer_edu_institutions.edu_institution_id'))
    institution = relationship('Institution')

    def __repr__(self):
        return '<Graduate {} (id={})>'.format(self, self.id)

    def __str__(self):
        return '{} {}'.format(self.series, self.number)
class Contract(Base):
    """Student contract model.

    Status: in progress.
    """

    __tablename__ = 'univer_contract'

    id = Column('contract_id', Integer, primary_key=True)
    number = Column('contract_number', String)
    # The underlying column keeps the historical 'recieved' typo
    date_received = Column('contract_date_recieved', DateTime)
    status = Column(Integer)
    student_id = Column('students_id', ForeignKey('univer_students.students_id'))
    student = relationship('Student')

    def __repr__(self):
        return '<Contract {} (id={})>'.format(self, self.id)

    def __str__(self):
        # Fix: the original had no `return`, so __str__ returned None and
        # str(contract) raised "TypeError: __str__ returned non-string".
        return '{} ({})'.format(self.student, self.number)
class AcademCalendar(Base):
    """Academic calendar position model (a dated control period of an
    education plan)."""

    __tablename__ = 'univer_academ_calendar_pos'

    id = Column('acpos_id', Integer, primary_key=True)
    educ_plan_id = Column(ForeignKey('univer_educ_plan.educ_plan_id'))
    educ_plan = relationship('EducPlan')
    acpos_semester = Column(Integer)
    acpos_module = Column(Integer)
    controll_id = Column('control_id', ForeignKey('univer_control.control_id'))
    controll = relationship('Controll')
    # Duration in weeks and the concrete date range of this period
    acpos_weeks = Column(Integer)
    acpos_date_start = Column(DateTime)
    acpos_date_end = Column(DateTime)

    def __repr__(self):
        return '<AcademCalendar {}>'.format(self)

    def __str__(self):
        return '{}'.format(self.id)
class EducPlan(Base):
    """Education plan (curriculum) model."""

    __tablename__ = 'univer_educ_plan'

    id = Column('educ_plan_id', Integer, primary_key=True)
    speciality_id = Column('speciality_id', ForeignKey('univer_speciality.speciality_id'))
    speciality = relationship('Speciality')
    education_form_id = Column(ForeignKey('univer_education_form.education_form_id'))
    education_form = relationship('EducationForm')
    edu_level_id = Column(ForeignKey('univer_edu_levels.edu_level_id'))
    edu_level = relationship('EduLevel')
    # Admission year of this plan
    year = Column('educ_plan_adm_year', Integer)
    # Status
    status = Column(Integer)

    def __repr__(self):
        return "<EducPlan {} {}>".format(self.speciality, self.year)

    def __str__(self):
        return "{} {}".format(self.speciality, self.year)
class EducPlanPos(Base):
    """Education plan position (one subject entry of a curriculum).

    Status: in progress.
    """

    __tablename__ = 'univer_educ_plan_pos'

    # Identifier
    id = Column('educ_plan_pos_id', Integer, primary_key=True)
    # Education plan
    educ_plan_id = Column('educ_plan_id', ForeignKey('univer_educ_plan.educ_plan_id'))
    educ_plan = relationship('EducPlan')
    # Subject code
    code = Column('rup_ru', String(50))
    # Subject
    subject_id = Column('subject_id', ForeignKey('univer_subject.subject_id'))
    subject = relationship('Subject')
    # Control type
    controll_type_id = Column(ForeignKey('univer_controll_type.controll_type_id'))
    controll_type = relationship('ControllType')
    # Semester
    semester = Column('educ_plan_pos_semestr', Integer)
    # Status
    status = Column(Integer)

    def __repr__(self):
        return "<EducPlanPos {}: {} ({} семестр)>".format(self.educ_plan, self.subject, self.semester)

    def __str__(self):
        return "{}: {} ({} семестр)".format(self.educ_plan, self.subject, self.semester)
class Attendance(Base):
    """Attendance journal entry model (one student, one group, one date)."""

    __tablename__ = 'univer_attendance'

    date = Column('att_date', Date, primary_key=True)
    grade = Column('ball', Float)
    # Whether the student was present
    was = Column(Boolean)
    student_id = Column(ForeignKey('univer_students.students_id'), primary_key=True)
    student = relationship('Student')
    group_id = Column(ForeignKey('univer_group.group_id'), primary_key=True)
    group = relationship('Group')

    def __repr__(self):
        return '<Attendance {}: {} балл ({})>'.format(self.student, self.grade, self.date)

    def __str__(self):
        return '{}: {} балл ({})'.format(self.student, self.grade, self.date)
class Group(Base):
    """Study group model.

    Status: in progress.
    """

    __tablename__ = 'univer_group'

    id = Column('group_id', Integer, primary_key=True)
    educ_plan_pos_id = Column('educ_plan_pos_id', ForeignKey('univer_educ_plan_pos.educ_plan_pos_id'))
    educ_plan_pos = relationship('EducPlanPos', backref='groups')
    teacher_id = Column('teacher_id', ForeignKey('univer_teacher.teacher_id'))
    teacher = relationship('Teacher')
    year = Column('group_year', Float)
    semester = Column('group_semestr', Integer)
    educ_type_id = Column('educ_type_id', ForeignKey('univer_educ_type.educ_type_id'))
    educ_type = relationship('EducType')
    lang_division_id = Column('lang_division_id', ForeignKey('univer_lang_division.lang_division_id'))
    lang_division = relationship('LangDivision')
    # Retake (repeat study) semester
    retake_semester = Column('group_retake_semestr', Integer)

    def __repr__(self):
        return '<Group {} (id={} educ_plan_pos_id={} teacher_id={})>'.format(self, self.id, self.educ_plan_pos_id, self.teacher_id)

    def __str__(self):
        return '{} ({} год)'.format(self.educ_plan_pos.educ_plan.speciality, self.educ_plan_pos.educ_plan.year)
class GroupStudent(Base):
    """Association model placing a student in a study group."""

    __tablename__ = 'univer_group_student'

    id = Column('group_student_id', Integer, primary_key=True)
    group_id = Column(ForeignKey('univer_group.group_id'))
    group = relationship('Group', backref='group_students')
    student_id = Column(ForeignKey('univer_students.students_id'))
    student = relationship('Student')
class Controll(Base):
    """Control (assessment period) lookup model."""

    __tablename__ = 'univer_control'

    # Identifier
    id = Column('control_id', Integer, primary_key=True)
    # Name
    name_ru = Column('control_name_ru', String(100))

    def __repr__(self):
        return '<Controll {} (id={})>'.format(self, self.id)

    def __str__(self):
        return self.name_ru
class ControllType(Base):
    """Control type lookup model."""

    __tablename__ = 'univer_controll_type'

    # Identifier
    id = Column('controll_type_id', Integer, primary_key=True)
    # Localized names (Kazakh / Russian / English)
    name_kz = Column('controll_type_name_kz', String(100))
    name_ru = Column('controll_type_name_ru', String(100))
    name_en = Column('controll_type_name_en', String(100))

    def __repr__(self):
        return '<ControllType {}>'.format(self)

    def __str__(self):
        return self.name_ru
class ControllTypeControllLink(Base):
    """Association model linking Controll, ControllType and SheetType.

    Status: in progress.
    """

    __tablename__ = 'univer_controll_type_control_link'

    # Control type
    controll_type_id = Column(ForeignKey('univer_controll_type.controll_type_id'), primary_key=True)
    controll_type = relationship('ControllType')
    # Control
    controll_id = Column('control_id', ForeignKey('univer_control.control_id'), primary_key=True)
    controll = relationship('Controll')
    # Sheet type
    sheet_type_id = Column(ForeignKey('univer_sheet_type.sheet_type_id'), primary_key=True)
    sheet_type = relationship('SheetType')

    def __repr__(self):
        return '<ControllTypeControllLink {} (controll_type_id={} controll_id={} sheet_type_id={}>'.format(self, self.controll_type_id, self.controll_id, self.sheet_type_id)

    def __str__(self):
        return '{}-{}'.format(self.controll_type, self.controll)
class MarkType(Base):
    """Mark (grade) type model, mapping a letter grade to a score range
    and GPA value."""

    __tablename__ = 'univer_mark_type'

    # Identifier
    id = Column('mark_type_id', Integer, primary_key=True)
    # Grade symbol (e.g. a letter grade)
    symbol = Column('mark_type_symbol', String(10))
    # Minimum value of the score range
    min_val = Column('mark_type_minval', Integer)
    # Maximum value of the score range
    max_val = Column('mark_type_maxval', Integer)
    # GPA
    gpa = Column('mark_type_gpa', Float)

    def __repr__(self):
        return '<MarkType {}>'.format(self)

    def __str__(self):
        return '{} ({})'.format(self.symbol, self.gpa)
class Progress(Base):
    """Student academic progress model (per subject, per semester)."""

    __tablename__ = 'univer_progress'

    # Identifier
    id = Column('progress_id', Integer, primary_key=True)
    # Academic year
    academ_year = Column(Integer)
    # Academic semester
    academ_semester = Column('semestr', Integer)
    # Student
    student_id = Column(ForeignKey('univer_students.students_id'))
    student = relationship('Student')
    # Subject
    subject_id = Column(ForeignKey('univer_subject.subject_id'))
    subject = relationship('Subject')
    # Subject name (localized)
    subject_name_ru = Column(String(500))
    subject_name_kz = Column(String(500))
    subject_name_en = Column(String(500))
    # Mark type
    mark_type_id = Column(ForeignKey('univer_mark_type.mark_type_id'))
    mark_type = relationship('MarkType')
    # Credits
    credit = Column('progress_credit', Integer)
    # Results (midterm 1, midterm 2, final)
    result_rk1 = Column('progress_result_rk1', Integer)
    result_rk2 = Column('progress_result_rk2', Integer)
    result = Column('progress_result', Integer)
    # Semester number
    semester = Column('n_seme', Integer)
    # Control type
    controll_type_id = Column(ForeignKey('univer_controll_type.controll_type_id'))
    controll_type = relationship('ControllType')
    # Status
    status = Column(Integer)

    def __repr__(self):
        return '<Progress {}>'.format(self)

    def __str__(self):
        return '{} - {}'.format(self.student, self.subject)
class Country(Base):
    """Country lookup model (with numeric and letter ISO-style codes)."""

    __tablename__ = 'univer_country'

    id = Column('country_id', Integer, primary_key=True)
    status = Column(Integer)
    # Localized names (Kazakh / Russian / English)
    name_ru = Column('country_name_ru', String(500))
    name_kz = Column('country_name_kz', String(500))
    name_en = Column('country_name_en', String(500))
    code = Column('country_code', Integer)
    current = Column('country_current', Integer)
    letter_code = Column('country_letter_code', String(10))
    alfa3_code = Column(String(5))

    def __repr__(self):
        return '<Country {}>'.format(self)

    def __str__(self):
        return self.name_ru
"nauryzbek.aitbayev@yu.edu.kz"
] | nauryzbek.aitbayev@yu.edu.kz |
6dbf65dea55f3575b84c21c3e7a60a933815fa0e | 87b4c1e282782ddfa22df95d8f494322bf2f2fb9 | /Flower Classification with Image Histogram/dataset.py | 1b47f756b06a5dd1afd718f35f291a0afe4c1872 | [] | no_license | janFrancoo/Python-Projects | 34e9515ae167bdca2f8e601c3ccc4bd4a6cb48cb | 875ed126e4adb7cd4c2884660f24d6515086995c | refs/heads/master | 2021-06-26T17:40:47.740967 | 2021-01-31T15:27:25 | 2021-01-31T15:27:25 | 199,189,125 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import os
import cv2
# Oxford 17-category flower class names; get_flowers below advances to the
# next label every 80 files, so order here must match the dataset's class order.
labels = ["Daffodil", "Snowdrop", "Lilly Valley", "Bluebell", "Crocus", "Iris", "Tigerlily", "Tulip", "Fritillary",
          "Sunflower", "Daisy", "Colts' Foot", "Dandelion", "Cowslip", "Buttercup", "Windflower", "Pansy"]
def get_flowers(flowers_path, masks_path):
    """Load flower images, grayscale masks and class labels.

    Image files are assumed to be named so that lexicographic order groups
    them into consecutive runs of 80 images per class (Oxford 17 flowers
    layout). Images without a matching ``<name>.png`` mask are skipped.

    Returns
    -------
    (flowers, masks, classes) : tuple of lists
        BGR images, grayscale masks and class-name strings, index-aligned.
    """
    count = -1
    masks = []
    flowers = []
    classes = []
    # Fix: os.listdir returns entries in arbitrary, OS-dependent order;
    # sort so the "new class every 80 files" labeling is deterministic.
    for i, file_name in enumerate(sorted(os.listdir(flowers_path))):
        if i % 80 == 0:
            count += 1
        raw_file_name = file_name.split(".")[0]
        file_name_for_mask = raw_file_name + ".png"
        if os.path.exists(os.path.join(masks_path, file_name_for_mask)):
            mask = cv2.imread(os.path.join(masks_path, file_name_for_mask))
            masks.append(cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY))
            flowers.append(cv2.imread(os.path.join(flowers_path, file_name)))
            classes.append(labels[count])
    return flowers, masks, classes
| [
"noreply@github.com"
] | noreply@github.com |
7f95dc0c757ee5c602eda0c84f0a8b39f5e022ba | bc181d3e95743e498a1ec0cfbdac369a01d95218 | /apps/accounts/migrations/0001_initial.py | 7daca24efb48c0d09b39887195357f9e09d5df77 | [] | no_license | roman-oxenuk/welltory_test | 09bbbd8502735adb3662318affa3df10ef47f5af | 853dff24bbf38d5c2d6dce75dd5713ab6347a00d | refs/heads/master | 2021-01-21T23:23:55.809175 | 2017-06-23T18:50:54 | 2017-06-23T18:50:54 | 95,241,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(unique=True, max_length=255, verbose_name='email')),
('first_name', models.CharField(max_length=255, null=True, verbose_name='\u0438\u043c\u044f', blank=True)),
('last_name', models.CharField(max_length=255, null=True, verbose_name='\u0444\u0430\u043c\u0438\u043b\u0438\u044f', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u0430\u043a\u0442\u0438\u0432\u043d\u044b\u0439')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0434\u0430\u0442\u0430 \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u0438')),
('is_staff', models.BooleanField(default=False, verbose_name='is staff')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
),
]
| [
"roman.oxenuk@gmail.com"
] | roman.oxenuk@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.