| blob_id (string, 40) | directory_id (string, 40) | path (string, 3-281) | content_id (string, 40) | detected_licenses (list, 0-57) | license_type (2 classes) | repo_name (string, 6-116) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (107 classes) | src_encoding (20 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4-6.02M) | extension (78 classes) | content (string, 2-6.02M) | authors (list, 1) | author (string, 0-175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5b45500bb75688f6f3ca574206f37660a15e559
|
e9ef558d04f39f0e82ad63e955dd8772e63c99c3
|
/chat_project/chat_project/settings.py
|
a728c23923310ab703ec61e0f1b5ef83ec5c8de4
|
[
"MIT"
] |
permissive
|
nahidsaikat/Chat
|
5634ff91eef394ec2b6288d1adff17f0eb867b15
|
7d314195b03d355844767f7c89cca34f0fad95c9
|
refs/heads/master
| 2022-12-14T03:44:01.105994
| 2021-06-30T15:19:21
| 2021-06-30T15:19:21
| 177,983,242
| 0
| 0
|
MIT
| 2022-12-08T07:44:46
| 2019-03-27T11:49:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,434
|
py
|
"""
Django settings for chat_project project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ti1$n@9k5ub@9r76iw*f(&m*8#wm#-oiiid2jzi)_94bjq_1y&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'chat_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chat_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
|
[
"nahidsaikatft40@gmail.com"
] |
nahidsaikatft40@gmail.com
|
c3a0d221d0881ea417f3e5b03fd1a8fe558c52c1
|
632d58b9f7ae470d9ec2b0e88af0aa8054dfa40e
|
/src/ryzom_django/management/commands/ryzom_bundle.py
|
48c255b344ea621534b03d56660dbf76563dd28f
|
[] |
no_license
|
yourlabs/ryzom
|
8d06bf829ee9d31d33fa9353fdf187241c82b6ef
|
425859e2de30c3b939756a23a064fb1affe04b02
|
refs/heads/master
| 2023-05-13T10:27:09.766272
| 2023-05-02T14:49:25
| 2023-05-02T14:49:25
| 192,992,635
| 5
| 1
| null | 2022-10-11T20:19:52
| 2019-06-20T22:03:37
|
Python
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
import os
from django.core.management.base import BaseCommand, CommandError
from ryzom_django import bundle
class Command(BaseCommand):
help = 'Write JS & CSS bundles to ryzom_django/static/bundle.*'
def handle(self, *args, **options):
static_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..',
'static',
)
)
if not os.path.exists(static_path):
os.makedirs(static_path)
with open(f'{static_path}/bundle.js', 'w+') as f:
f.write(bundle.js())
with open(f'{static_path}/bundle.css', 'w+') as f:
f.write(bundle.css())
self.stdout.write(self.style.SUCCESS(f'Successfully wrote {static_path}/bundle.*'))
self.stdout.write('Do not forget to collectstatic!')
|
[
"jpic@yourlabs.org"
] |
jpic@yourlabs.org
|
5da615bcc36f2e359d5a12aac4d401a12f9cf241
|
0a3d71624f104e2a4ca3341988b15ac4e5e40b1b
|
/Coding_Challenge/Arrays/pickFromBothSides.py
|
de0dabf44c28a9fb3b05b841c540c5a0eb6b4d7f
|
[] |
no_license
|
LD1016/Data-Structures-and-Algorithms
|
b6f72735e3280fd0ed457bde665614a1da78756c
|
9087d926f750bdea448579a56e82496c280d9242
|
refs/heads/main
| 2023-03-22T03:12:18.654731
| 2021-03-18T07:10:30
| 2021-03-18T07:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
"""
Given an integer array A of size N.
You can pick B elements in total from the left and/or right end of the array A.
Find and return the maximum possible sum of the picked elements.
NOTE: Suppose B = 4 and array A contains 10 elements. Then you can pick the
first four elements, the last four elements, or e.g. 1 from the front and 3
from the back; return the maximum possible sum over all such choices.
Problem Constraints
1 <= N <= 10^5
1 <= B <= N
-10^3 <= A[i] <= 10^3
Input Format
First argument is an integer array A.
Second argument is an integer B.
Output Format
Return an integer denoting the maximum possible sum of elements you picked.
Example Input
Input 1:
A = [5, -2, 3, 1, 2]
B = 3
Input 2:
A = [1, 2]
B = 1
Example Output
Output 1:
8
Output 2:
2
Example Explanation
Explanation 1:
Pick element 5 from the front and elements (1, 2) from the back: 5 + 1 + 2 = 8.
Explanation 2:
Pick element 2 from the end, as this is the maximum we can get.
"""
class Solution:
def solve(self, A, B):
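        # Sliding window over the picks: start with all B elements taken from
        # the front, then swap one front pick for one back pick at a time,
        # keeping the best running total.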
maxVal = 0
for i in range(B):
maxVal += A[i]
cur = maxVal
i = 0
while i < B:
cur = cur - A[B-1-i] + A[len(A)-1-i]
maxVal = max(cur, maxVal)
i += 1
return maxVal
test = Solution()
print(test.solve([5, -2, 3, 1, 2], 3))
|
[
"lancedang@Lances-MacBook-Pro.local"
] |
lancedang@Lances-MacBook-Pro.local
|
0162203b7365b76dc6cd9b9925f7f1a04b7a2f35
|
3ddfaf06a0ceba7322f64710a6d145fd27482e13
|
/BackEnd/venv/Lib/site-packages/psycopg/connection.py
|
5999f6ac06e6c22fccfa52a3a85b6f73bafcaf68
|
[] |
no_license
|
NurbakZh/GuideLion
|
7336a7ce9343b62e97db0607004a45642cb3192e
|
ee1145d372735579790aef87986f7c8d46cc214f
|
refs/heads/master
| 2023-08-13T13:34:22.111612
| 2021-10-10T19:26:53
| 2021-10-10T19:26:53
| 399,252,272
| 18
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,986
|
py
|
"""
psycopg connection objects
"""
# Copyright (C) 2020-2021 The Psycopg Team
import logging
import warnings
import threading
from types import TracebackType
from typing import Any, Callable, cast, Dict, Generic, Iterator, List
from typing import NamedTuple, Optional, Type, TypeVar, Union
from typing import overload, TYPE_CHECKING
from weakref import ref, ReferenceType
from functools import partial
from contextlib import contextmanager
from . import pq
from . import adapt
from . import errors as e
from . import waiting
from . import postgres
from . import encodings
from .pq import ConnStatus, ExecStatus, TransactionStatus, Format
from .abc import ConnectionType, Params, PQGen, PQGenConn, Query, RV
from .sql import Composable
from .rows import Row, RowFactory, tuple_row, TupleRow
from ._enums import IsolationLevel
from .cursor import Cursor
from ._cmodule import _psycopg
from .conninfo import make_conninfo, conninfo_to_dict, ConnectionInfo
from .generators import notifies
from ._preparing import PrepareManager
from .transaction import Transaction
from .server_cursor import ServerCursor
if TYPE_CHECKING:
from .pq.abc import PGconn, PGresult
from psycopg_pool.base import BasePool
logger = logging.getLogger("psycopg")
connect: Callable[[str], PQGenConn["PGconn"]]
execute: Callable[["PGconn"], PQGen[List["PGresult"]]]
# Row Type variable for Cursor (when it needs to be distinguished from the
# connection's one)
CursorRow = TypeVar("CursorRow")
if _psycopg:
connect = _psycopg.connect
execute = _psycopg.execute
else:
from . import generators
connect = generators.connect
execute = generators.execute
class Notify(NamedTuple):
"""An asynchronous notification received from the database."""
channel: str
"""The name of the channel on which the notification was received."""
payload: str
"""The message attached to the notification."""
pid: int
"""The PID of the backend process which sent the notification."""
Notify.__module__ = "psycopg"
NoticeHandler = Callable[[e.Diagnostic], None]
NotifyHandler = Callable[[Notify], None]
class BaseConnection(Generic[Row]):
"""
Base class for different types of connections.
Share common functionalities such as access to the wrapped PGconn, but
allow different interfaces (sync/async).
"""
# DBAPI2 exposed exceptions
Warning = e.Warning
Error = e.Error
InterfaceError = e.InterfaceError
DatabaseError = e.DatabaseError
DataError = e.DataError
OperationalError = e.OperationalError
IntegrityError = e.IntegrityError
InternalError = e.InternalError
ProgrammingError = e.ProgrammingError
NotSupportedError = e.NotSupportedError
# Enums useful for the connection
ConnStatus = pq.ConnStatus
TransactionStatus = pq.TransactionStatus
def __init__(self, pgconn: "PGconn"):
self.pgconn = pgconn
self._autocommit = False
self._adapters = adapt.AdaptersMap(postgres.adapters)
self._notice_handlers: List[NoticeHandler] = []
self._notify_handlers: List[NotifyHandler] = []
# Stack of savepoint names managed by current transaction blocks.
# the first item is "" in case the outermost Transaction must manage
# only a begin/commit and not a savepoint.
self._savepoints: List[str] = []
self._closed = False # closed by an explicit close()
self._prepared: PrepareManager = PrepareManager()
wself = ref(self)
pgconn.notice_handler = partial(BaseConnection._notice_handler, wself)
pgconn.notify_handler = partial(BaseConnection._notify_handler, wself)
# Attribute is only set if the connection is from a pool so we can tell
# apart a connection in the pool too (when _pool = None)
self._pool: Optional["BasePool[Any]"]
# Time after which the connection should be closed
self._expire_at: float
self._isolation_level: Optional[IsolationLevel] = None
self._read_only: Optional[bool] = None
self._deferrable: Optional[bool] = None
self._begin_statement = b""
def __del__(self) -> None:
# If fails on connection we might not have this attribute yet
if not hasattr(self, "pgconn"):
return
# Connection correctly closed
if self.closed:
return
# Connection in a pool so terminating with the program is normal
if hasattr(self, "_pool"):
return
warnings.warn(
f"connection {self} was deleted while still open."
f" Please use 'with' or '.close()' to close the connection",
ResourceWarning,
)
def __repr__(self) -> str:
cls = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
info = pq.misc.connection_summary(self.pgconn)
return f"<{cls} {info} at 0x{id(self):x}>"
@property
def closed(self) -> bool:
"""`!True` if the connection is closed."""
return self.pgconn.status == ConnStatus.BAD
@property
def broken(self) -> bool:
"""
`!True` if the connection was interrupted.
A broken connection is always `closed`, but wasn't closed in a clean
way, such as using `close()` or a ``with`` block.
"""
return self.pgconn.status == ConnStatus.BAD and not self._closed
@property
def autocommit(self) -> bool:
"""The autocommit state of the connection."""
return self._autocommit
@autocommit.setter
def autocommit(self, value: bool) -> None:
self._set_autocommit(value)
def _set_autocommit(self, value: bool) -> None:
# Base implementation, not thread safe.
# Subclasses must call it holding a lock
self._check_intrans("autocommit")
self._autocommit = bool(value)
@property
def isolation_level(self) -> Optional[IsolationLevel]:
"""
The isolation level of the new transactions started on the connection.
"""
return self._isolation_level
@isolation_level.setter
def isolation_level(self, value: Optional[IsolationLevel]) -> None:
self._set_isolation_level(value)
def _set_isolation_level(self, value: Optional[IsolationLevel]) -> None:
# Base implementation, not thread safe.
# Subclasses must call it holding a lock
self._check_intrans("isolation_level")
self._isolation_level = (
IsolationLevel(value) if value is not None else None
)
self._begin_statement = b""
@property
def read_only(self) -> Optional[bool]:
"""
The read-only state of the new transactions started on the connection.
"""
return self._read_only
@read_only.setter
def read_only(self, value: Optional[bool]) -> None:
self._set_read_only(value)
def _set_read_only(self, value: Optional[bool]) -> None:
# Base implementation, not thread safe.
# Subclasses must call it holding a lock
self._check_intrans("read_only")
self._read_only = bool(value)
self._begin_statement = b""
@property
def deferrable(self) -> Optional[bool]:
"""
The deferrable state of the new transactions started on the connection.
"""
return self._deferrable
@deferrable.setter
def deferrable(self, value: Optional[bool]) -> None:
self._set_deferrable(value)
def _set_deferrable(self, value: Optional[bool]) -> None:
# Base implementation, not thread safe.
# Subclasses must call it holding a lock
self._check_intrans("deferrable")
self._deferrable = bool(value)
self._begin_statement = b""
def _check_intrans(self, attribute: str) -> None:
# Raise an exception if we are in a transaction
status = self.pgconn.transaction_status
if status != TransactionStatus.IDLE:
if self._savepoints:
raise e.ProgrammingError(
f"can't change {attribute!r} now: "
"connection.transaction() context in progress"
)
else:
raise e.ProgrammingError(
f"can't change {attribute!r} now: "
"connection in transaction status "
f"{TransactionStatus(status).name}"
)
@property
def client_encoding(self) -> str:
"""The Python codec name of the connection's client encoding."""
pgenc = self.pgconn.parameter_status(b"client_encoding") or b"UTF8"
return encodings.pg2py(pgenc)
@client_encoding.setter
def client_encoding(self, name: str) -> None:
self._set_client_encoding(name)
def _set_client_encoding(self, name: str) -> None:
raise NotImplementedError
def _set_client_encoding_gen(self, name: str) -> PQGen[None]:
self.pgconn.send_query_params(
b"SELECT set_config('client_encoding', $1, false)",
[encodings.py2pg(name)],
)
(result,) = yield from execute(self.pgconn)
if result.status != ExecStatus.TUPLES_OK:
raise e.error_from_result(result, encoding=self.client_encoding)
@property
def info(self) -> ConnectionInfo:
"""A `ConnectionInfo` attribute to inspect connection properties."""
return ConnectionInfo(self.pgconn)
@property
def adapters(self) -> adapt.AdaptersMap:
return self._adapters
@property
def connection(self) -> "BaseConnection[Row]":
# implement the AdaptContext protocol
return self
def fileno(self) -> int:
"""Return the file descriptor of the connection.
        This function allows using the connection as a file-like object in
functions waiting for readiness, such as the ones defined in the
`selectors` module.
"""
return self.pgconn.socket
def cancel(self) -> None:
"""Cancel the current operation on the connection."""
c = self.pgconn.get_cancel()
c.cancel()
def add_notice_handler(self, callback: NoticeHandler) -> None:
"""
Register a callable to be invoked when a notice message is received.
"""
self._notice_handlers.append(callback)
def remove_notice_handler(self, callback: NoticeHandler) -> None:
"""
Unregister a notice message callable previously registered.
"""
self._notice_handlers.remove(callback)
@staticmethod
def _notice_handler(
wself: "ReferenceType[BaseConnection[Row]]", res: "PGresult"
) -> None:
self = wself()
        if not (self and self._notice_handlers):
return
diag = e.Diagnostic(res, self.client_encoding)
for cb in self._notice_handlers:
try:
cb(diag)
except Exception as ex:
logger.exception(
"error processing notice callback '%s': %s", cb, ex
)
def add_notify_handler(self, callback: NotifyHandler) -> None:
"""
Register a callable to be invoked whenever a notification is received.
"""
self._notify_handlers.append(callback)
def remove_notify_handler(self, callback: NotifyHandler) -> None:
"""
Unregister a notification callable previously registered.
"""
self._notify_handlers.remove(callback)
@staticmethod
def _notify_handler(
wself: "ReferenceType[BaseConnection[Row]]", pgn: pq.PGnotify
) -> None:
self = wself()
if not (self and self._notify_handlers):
return
enc = self.client_encoding
n = Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
for cb in self._notify_handlers:
cb(n)
@property
def prepare_threshold(self) -> Optional[int]:
"""
Number of times a query is executed before it is prepared.
        - If it is set to 0, every query is prepared the first time it is
          executed.
- If it is set to `!None`, prepared statements are disabled on the
connection.
Default value: 5
"""
return self._prepared.prepare_threshold
@prepare_threshold.setter
def prepare_threshold(self, value: Optional[int]) -> None:
self._prepared.prepare_threshold = value
@property
def prepared_max(self) -> int:
"""
Maximum number of prepared statements on the connection.
Default value: 100
"""
return self._prepared.prepared_max
@prepared_max.setter
def prepared_max(self, value: int) -> None:
self._prepared.prepared_max = value
# Generators to perform high-level operations on the connection
#
# These operations are expressed in terms of non-blocking generators
# and the task of waiting when needed (when the generators yield) is left
# to the connections subclass, which might wait either in blocking mode
# or through asyncio.
#
    # All these generators assume exclusive access to the connection: subclasses
# should have a lock and hold it before calling and consuming them.
@classmethod
def _connect_gen(
cls: Type[ConnectionType],
conninfo: str = "",
*,
autocommit: bool = False,
) -> PQGenConn[ConnectionType]:
"""Generator to connect to the database and create a new instance."""
pgconn = yield from connect(conninfo)
conn = cls(pgconn)
conn._autocommit = bool(autocommit)
return conn
def _exec_command(
self, command: Query, result_format: Format = Format.TEXT
) -> PQGen["PGresult"]:
"""
        Generator to send a command to the backend and receive its result.
        Only used to implement internal commands such as "commit", with any
        arguments bound client-side. The cursor can do more complex stuff.
"""
if self.pgconn.status != ConnStatus.OK:
if self.pgconn.status == ConnStatus.BAD:
raise e.OperationalError("the connection is closed")
raise e.InterfaceError(
f"cannot execute operations: the connection is"
f" in status {self.pgconn.status}"
)
if isinstance(command, str):
command = command.encode(self.client_encoding)
elif isinstance(command, Composable):
command = command.as_bytes(self)
if result_format == Format.TEXT:
self.pgconn.send_query(command)
else:
self.pgconn.send_query_params(
command, None, result_format=result_format
)
result = (yield from execute(self.pgconn))[-1]
if result.status not in (ExecStatus.COMMAND_OK, ExecStatus.TUPLES_OK):
if result.status == ExecStatus.FATAL_ERROR:
raise e.error_from_result(
result, encoding=self.client_encoding
)
else:
raise e.InterfaceError(
f"unexpected result {ExecStatus(result.status).name}"
f" from command {command.decode('utf8')!r}"
)
return result
def _start_query(self) -> PQGen[None]:
"""Generator to start a transaction if necessary."""
if self._autocommit:
return
if self.pgconn.transaction_status != TransactionStatus.IDLE:
return
yield from self._exec_command(self._get_tx_start_command())
def _get_tx_start_command(self) -> bytes:
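        # Build and cache the BEGIN statement matching the current session
        # characteristics, e.g. b"BEGIN ISOLATION LEVEL SERIALIZABLE READ ONLY"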
if self._begin_statement:
return self._begin_statement
parts = [b"BEGIN"]
if self.isolation_level is not None:
val = IsolationLevel(self.isolation_level)
parts.append(b"ISOLATION LEVEL")
parts.append(val.name.replace("_", " ").encode("utf8"))
if self.read_only is not None:
parts.append(b"READ ONLY" if self.read_only else b"READ WRITE")
if self.deferrable is not None:
parts.append(
b"DEFERRABLE" if self.deferrable else b"NOT DEFERRABLE"
)
self._begin_statement = b" ".join(parts)
return self._begin_statement
def _commit_gen(self) -> PQGen[None]:
"""Generator implementing `Connection.commit()`."""
if self._savepoints:
raise e.ProgrammingError(
"Explicit commit() forbidden within a Transaction "
"context. (Transaction will be automatically committed "
"on successful exit from context.)"
)
if self.pgconn.transaction_status == TransactionStatus.IDLE:
return
yield from self._exec_command(b"COMMIT")
def _rollback_gen(self) -> PQGen[None]:
"""Generator implementing `Connection.rollback()`."""
if self._savepoints:
raise e.ProgrammingError(
"Explicit rollback() forbidden within a Transaction "
"context. (Either raise Rollback() or allow "
"an exception to propagate out of the context.)"
)
if self.pgconn.transaction_status == TransactionStatus.IDLE:
return
yield from self._exec_command(b"ROLLBACK")
class Connection(BaseConnection[Row]):
"""
Wrapper for a connection to the database.
"""
__module__ = "psycopg"
cursor_factory: Type[Cursor[Row]]
server_cursor_factory: Type[ServerCursor[Row]]
row_factory: RowFactory[Row]
def __init__(
self, pgconn: "PGconn", row_factory: Optional[RowFactory[Row]] = None
):
super().__init__(pgconn)
self.row_factory = row_factory or cast(RowFactory[Row], tuple_row)
self.lock = threading.Lock()
self.cursor_factory = Cursor
self.server_cursor_factory = ServerCursor
@overload
@classmethod
def connect(
cls,
conninfo: str = "",
*,
autocommit: bool = False,
row_factory: RowFactory[Row],
**kwargs: Union[None, int, str],
) -> "Connection[Row]":
...
@overload
@classmethod
def connect(
cls,
conninfo: str = "",
*,
autocommit: bool = False,
**kwargs: Union[None, int, str],
) -> "Connection[TupleRow]":
...
@classmethod # type: ignore[misc] # https://github.com/python/mypy/issues/11004
def connect(
cls,
conninfo: str = "",
*,
autocommit: bool = False,
row_factory: Optional[RowFactory[Row]] = None,
**kwargs: Any,
) -> "Connection[Any]":
"""
Connect to a database server and return a new `Connection` instance.
"""
params = cls._get_connection_params(conninfo, **kwargs)
conninfo = make_conninfo(**params)
rv = cls._wait_conn(
cls._connect_gen(conninfo, autocommit=autocommit),
timeout=params["connect_timeout"],
)
if row_factory:
rv.row_factory = row_factory
return rv
def __enter__(self) -> "Connection[Row]":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self.closed:
return
if exc_type:
# try to rollback, but if there are problems (connection in a bad
# state) just warn without clobbering the exception bubbling up.
try:
self.rollback()
except Exception as exc2:
warnings.warn(
f"error rolling back the transaction on {self}: {exc2}",
RuntimeWarning,
)
else:
self.commit()
# Close the connection only if it doesn't belong to a pool.
if not getattr(self, "_pool", None):
self.close()
@classmethod
def _get_connection_params(
cls, conninfo: str, **kwargs: Any
) -> Dict[str, Any]:
"""Manipulate connection parameters before connecting.
:param conninfo: Connection string as received by `~Connection.connect()`.
:param kwargs: Overriding connection arguments as received by `!connect()`.
:return: Connection arguments merged and eventually modified, in a
format similar to `~conninfo.conninfo_to_dict()`.
"""
params = conninfo_to_dict(conninfo, **kwargs)
        # Make sure there is a usable connect_timeout
if "connect_timeout" in params:
params["connect_timeout"] = int(params["connect_timeout"])
else:
params["connect_timeout"] = None
# TODO: SRV lookup (RFC 2782)
# https://github.com/psycopg/psycopg/issues/70
return params
def close(self) -> None:
"""Close the database connection."""
if self.closed:
return
self._closed = True
self.pgconn.finish()
@overload
def cursor(self, *, binary: bool = False) -> Cursor[Row]:
...
@overload
def cursor(
self, *, binary: bool = False, row_factory: RowFactory[CursorRow]
) -> Cursor[CursorRow]:
...
@overload
def cursor(
self,
name: str,
*,
binary: bool = False,
scrollable: Optional[bool] = None,
withhold: bool = False,
) -> ServerCursor[Row]:
...
@overload
def cursor(
self,
name: str,
*,
binary: bool = False,
row_factory: RowFactory[CursorRow],
scrollable: Optional[bool] = None,
withhold: bool = False,
) -> ServerCursor[CursorRow]:
...
def cursor(
self,
name: str = "",
*,
binary: bool = False,
row_factory: Optional[RowFactory[Any]] = None,
scrollable: Optional[bool] = None,
withhold: bool = False,
) -> Union[Cursor[Any], ServerCursor[Any]]:
"""
Return a new cursor to send commands and queries to the connection.
"""
if not row_factory:
row_factory = self.row_factory
cur: Union[Cursor[Any], ServerCursor[Any]]
if name:
cur = self.server_cursor_factory(
self,
name=name,
row_factory=row_factory,
scrollable=scrollable,
withhold=withhold,
)
else:
cur = self.cursor_factory(self, row_factory=row_factory)
if binary:
cur.format = Format.BINARY
return cur
def execute(
self,
query: Query,
params: Optional[Params] = None,
*,
prepare: Optional[bool] = None,
binary: bool = False,
) -> Cursor[Row]:
"""Execute a query and return a cursor to read its results."""
cur = self.cursor()
if binary:
cur.format = Format.BINARY
try:
return cur.execute(query, params, prepare=prepare)
except e.Error as ex:
raise ex.with_traceback(None)
def commit(self) -> None:
"""Commit any pending transaction to the database."""
with self.lock:
self.wait(self._commit_gen())
def rollback(self) -> None:
"""Roll back to the start of any pending transaction."""
with self.lock:
self.wait(self._rollback_gen())
@contextmanager
def transaction(
self,
savepoint_name: Optional[str] = None,
force_rollback: bool = False,
) -> Iterator[Transaction]:
"""
Start a context block with a new transaction or nested transaction.
:param savepoint_name: Name of the savepoint used to manage a nested
transaction. If `!None`, one will be chosen automatically.
:param force_rollback: Roll back the transaction at the end of the
block even if there were no error (e.g. to try a no-op process).
:rtype: Transaction
"""
with Transaction(self, savepoint_name, force_rollback) as tx:
yield tx
def notifies(self) -> Iterator[Notify]:
"""
Yield `Notify` objects as soon as they are received from the database.
"""
        while True:
with self.lock:
ns = self.wait(notifies(self.pgconn))
enc = self.client_encoding
for pgn in ns:
n = Notify(
pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid
)
yield n
def wait(self, gen: PQGen[RV], timeout: Optional[float] = 0.1) -> RV:
"""
Consume a generator operating on the connection.
        The function must be used on generators that don't change the connection
fd (i.e. not on connect and reset).
"""
return waiting.wait(gen, self.pgconn.socket, timeout=timeout)
@classmethod
def _wait_conn(cls, gen: PQGenConn[RV], timeout: Optional[int]) -> RV:
"""Consume a connection generator."""
return waiting.wait_conn(gen, timeout=timeout)
def _set_autocommit(self, value: bool) -> None:
with self.lock:
super()._set_autocommit(value)
def _set_isolation_level(self, value: Optional[IsolationLevel]) -> None:
with self.lock:
super()._set_isolation_level(value)
def _set_read_only(self, value: Optional[bool]) -> None:
with self.lock:
super()._set_read_only(value)
def _set_deferrable(self, value: Optional[bool]) -> None:
with self.lock:
super()._set_deferrable(value)
def _set_client_encoding(self, name: str) -> None:
with self.lock:
self.wait(self._set_client_encoding_gen(name))
|
[
"nurbak.zh@gmail.com"
] |
nurbak.zh@gmail.com
|
2c2924c19e6a3a94b0406b64ade076b0ac8980a2
|
0b7e6eff0a50eabc9baa0d8f520fcc3eaa90ab92
|
/prilese/users/migrations/0001_initial.py
|
76994a477641671fbb753695df9f17ae686ae1de
|
[] |
no_license
|
Nicolay-kr/Prilese
|
6771e9f5fef497d82a5e065699eb888674348979
|
8e302dffbe474e0d9367db59ec4b4f86a2a0d49c
|
refs/heads/master
| 2021-03-21T22:07:48.145884
| 2020-03-30T17:19:42
| 2020-03-30T17:19:42
| 247,330,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
# Generated by Django 3.0.3 on 2020-03-29 21:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"nicolay.krischenovich@gmail.com"
] |
nicolay.krischenovich@gmail.com
|
b86f82fa64afbad4e2ec93ee5e6511bee3b549c7
|
8819b3e55756bf6e53bab13714e1e25c887e93bb
|
/pysonic.py
|
3624f1037ac25598b61741de10d628f026b0c544
|
[] |
no_license
|
saibotd/couchpytato
|
c8f049ed9936d3eda537c45642cb972d57176cf4
|
f7146d9cacdb93426e65bbe9a30cb9ec50088320
|
refs/heads/master
| 2021-01-22T04:33:42.223304
| 2013-07-17T16:47:26
| 2013-07-17T16:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import pySonic, time
class Music:
def __init__(self):
self.music = pySonic.Source()
self.playing = False
self.paused = False
def isplaying(self):
return self.music.IsPlaying()
def play(self, name=None):
ok = True
if self.paused:
self.music.Play()
self.playing = True
self.paused = False
elif name:
try:
self.music.Sound = pySonic.FileStream(name)
            except Exception:
ok = False
if ok:
self.music.Play()
self.playing = True
self.paused = False
else:
ok = False
return ok
def pause(self):
if self.isplaying():
self.music.Pause()
self.playing = False
self.paused = True
    def time(self, what=0):
        if self.isplaying():
            secs = int(self.music.CurrentTime)
            # divmod avoids time.localtime(), whose timezone offset could
            # skew the minute count
            mins, secs = divmod(secs, 60)
            return '%02d:%02d' % (mins, secs)
        else:
            return None
def stop(self):
if self.isplaying():
self.music.Stop()
            self.playing = False
|
[
"tobi@saibotd.com"
] |
tobi@saibotd.com
|
74cd5856a7f5c8210f890e8236daaece08eea2ca
|
467f4d4487e046f0e3d4594c226f51fd704b036e
|
/notes/models.py
|
f913c31ca0105a22d9b608f66872d3efd7cfdbe3
|
[] |
no_license
|
ekm79/Intro-Django
|
297835d97413f3d71200bb23db8b2c759bd6ddd5
|
1b5d59432e56a24b93e2f5074e51b348dfe6e3d7
|
refs/heads/master
| 2020-03-28T21:09:56.851925
| 2018-09-24T01:45:06
| 2018-09-24T01:45:06
| 149,134,371
| 0
| 0
| null | 2018-09-17T14:03:25
| 2018-09-17T14:03:25
| null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
from django.db import models
from uuid import uuid4
from django.contrib.auth.models import User
# Create your models here.
class Note(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class PersonalNote(Note):
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Movie(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
title = models.CharField(max_length=200)
year = models.CharField(max_length=4)
on_dvd = models.BooleanField(default=True)
imdb_link = models.URLField(default='http://www.imdb.com')
created_at = models.DateTimeField(auto_now_add=True)
class FavoriteMovies(Movie):
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Brewery(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
name = models.CharField(max_length=80)
address = models.TextField(blank=True)
    website = models.URLField(default='http://untappd.com')
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Beer(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
name = models.CharField(max_length=80)
description = models.TextField(blank=True)
    abv = models.FloatField(null=True, default=None)
    ibu = models.IntegerField(null=True, default=None)
brewery = models.ForeignKey(Brewery, on_delete=models.CASCADE, default=None)
#def __init__(self):
#self.brewery = Brewery()
|
[
"ekmeredith@att.net"
] |
ekmeredith@att.net
|
969f365246c8ceea4c1c5d7d90d7b60f7bc7eb08
|
c9f795e53185be1648748d04591cfc4bfe0e9f62
|
/Dict_Ques18.py
|
5d8eb90557b9a532f7b163c0df9d058b8774148c
|
[] |
no_license
|
pragatirahul123/Dictionary
|
26c3f6d6b0f9dc8f0dd7e4f025cc66072b207469
|
b7241021225cdf0866a8c6b53998c3530150f022
|
refs/heads/main
| 2023-04-10T09:11:07.817534
| 2021-04-14T10:54:37
| 2021-04-14T10:54:37
| 346,099,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
Student = {}
Age = {}
Details = {}
Student['name'] = "bikki"
Age['student_age'] = 14
Details['Student'] = Student
Details['Age'] = Age
print (Details["Student"])
print(len(Details["Student"]))
|
[
"zeba18@navgurukul.org"
] |
zeba18@navgurukul.org
|
faeb48b3793e096e48d2d4c9726e6dd1be41a601
|
2f732dbfcc1932b5ef043c135fb1c2c7b2fe0d02
|
/BVP.py
|
be030c9f2aeea447cc2d801b43df815a3a4dc4d4
|
[] |
no_license
|
Jafaranafi/NEWTON_AIMSCMR1920
|
b6d849e36ec5a25786a16dc37707ed7bd16b7525
|
af26595f1ca9693fd7d391712576b6a6d0688764
|
refs/heads/master
| 2022-11-17T04:37:28.555514
| 2020-06-17T23:34:23
| 2020-06-17T23:34:23
| 256,739,570
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,280
|
py
|
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, norm
def Bvp_DD(fcn, a, b, ua, ub, Nx):
"""
Solution of 1D boundary value problem
-u" = fcn(x) for a < x < b
with Dirichlet boundary conditions
u(a) = ua, u(b) = ub.
on a uniform mesh
"""
L = b-a # length of the domain
dx = float(L)/float(Nx) # length of the grid cell
x = np.linspace(a, b, Nx+1) # the grid
u = np.zeros(Nx+1) # the solution
# The sparse matrix
dia = 2*np.ones(Nx-1) / dx**2
low = -np.ones(Nx-1) / dx**2
upp = -np.ones(Nx-1) / dx**2
A = spdiags([low,dia,upp],[-1,0,1],Nx-1,Nx-1)
# print(A.todense())
# evaluate right hand side
rhs = np.zeros(Nx-1) # the right hand side
for j in range(Nx-1):
rhs[j] = fcn(x[j+1])
rhs[0] -= low[0]*ua
rhs[-1] -= upp[-1]*ub
# solve the linear system
u[0], u[-1] = ua, ub
u[1:-1] = spsolve(A, rhs)
print(u)
# return the grid and the numerical solution
return u, x
def Bvp_DN(fcn, a, b, ua, ub, Nx):
"""
Solution of 1D boundary value problem
-u" = fcn(x) for a < x < b
with mixed boundary conditions
u(a) = ua, u'(b) = 0.
on a uniform mesh
"""
L = b-a # length of the domain
dx = float(L)/float(Nx) # length of the grid cell
x = np.linspace(a, b, Nx+1) # the grid
u = np.zeros(Nx+1) # the solution
# The sparse matrix
dia = 2*np.ones(Nx) / dx**2
low = -np.ones(Nx) / dx**2
upp = -np.ones(Nx) / dx**2
    low[-2] = -2 / dx**2            # ghost-point correction for the Neumann BC at the right end
A = spdiags([low,dia,upp],[-1,0,1],Nx,Nx)
# print(A.todense())
# evaluate right hand side
rhs = np.zeros(Nx) # the right hand side
for j in range(Nx):
rhs[j] = fcn(x[j+1])
rhs[0] -= low[0]*ua # eliminate left BC
# solve the linear system
u[0] = ua # set left BC
u[1:] = spsolve(A, rhs)
# return the grid and the numerical solution
return u, x
def Bvp_ND(fcn, a, b, ua, ub, Nx):
"""
Solution of 1D boundary value problem
-u" = fcn(x) for a < x < b
with mixed boundary conditions
    u'(a) = 0, u(b) = ub.
on a uniform mesh
"""
L = b-a # length of the domain
dx = float(L)/float(Nx) # length of the grid cell
x = np.linspace(a, b, Nx+1) # the grid
u = np.zeros(Nx+1) # the solution
# The sparse matrix
dia = 2*np.ones(Nx) / dx**2
low = -np.ones(Nx) / dx**2
upp = -np.ones(Nx) / dx**2
    upp[1] = -2 / dx**2             # ghost-point correction for the Neumann BC at the left end
A = spdiags([low,dia,upp],[-1,0,1],Nx,Nx)
# print(A.todense())
# evaluate right hand side
rhs = np.zeros(Nx) # the right hand side
for j in range(Nx):
rhs[j] = fcn(x[j])
rhs[-1] -= upp[0]*ub # eliminate right BC
# solve the linear system
u[-1] = ub # set right BC
u[:-1] = spsolve(A, rhs)
# return the grid and the numerical solution
return u, x
def demo_DD():
Nx = 10 # Nx = number of grid cells,
# Nx+1 = number of grid points
a = -1. # a = left end of the domain
b = +1. # b = right end of the domain
    ua = -.5 # boundary value left side
ub = .5 # boundary value right side
def fcn(x):
return 5*x
def exact(x):
return x*(-5*x**2 + 8)/6
import time
    t0 = time.perf_counter()    # time.clock() was removed in Python 3.8
    u,x = Bvp_DD(fcn, a, b, ua, ub, Nx)
    t1 = time.perf_counter()
print('cpu time',t1-t0)
# compute the error norm
from scipy.linalg import norm
print('approximation error', abs(norm(u-exact(x))))
xx = np.linspace(a,b)
sol = exact(xx)
# generate a plot of the solution
import matplotlib.pyplot as plt
plt.plot(x,u,'bo-', linewidth=.5)
plt.plot(xx,sol,'r-')
plt.title('numerical solution')
plt.grid()
plt.show()
def demo_DN():
Nx = 20 # Nx = number of grid cells,
# Nx+1 = number of grid points
a = -1. # a = left end of the domain
b = +1. # b = right end of the domain
ua = .5 # boundary value left side
ub = .0 # boundary value right side
def fcn(x):
return 5*x
def exact(x):
return (5*x*(3 - x**2) + 13.)/6.
import time
    t0 = time.perf_counter()
    u,x = Bvp_DN(fcn, a, b, ua, ub, Nx)
    t1 = time.perf_counter()
print('cpu time',t1-t0)
# compute the error norm
from scipy.linalg import norm
print('approximation error', abs(norm(u-exact(x))))
xx = np.linspace(a,b)
sol = exact(xx)
print('abs point error at x=1:',abs(sol[-1] - u[-1]))
print(exact(-1))
print(exact(+1),23/6.)
# generate a plot of the solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6))
line1 = plt.plot(x,u,'--.')
plt.setp(line1, linewidth=.5, color='blue')
plt.plot(xx,sol,'r-')
    plt.title(r'numerical solution: $-u^{\prime\prime} = 5x, u(-1) = .5, u^{\prime}(1) = 0$.')
plt.grid()
plt.show()
def demo_ND():
Nx = 20 # Nx = number of grid cells,
# Nx+1 = number of grid points
a = -1. # a = left end of the domain
b = +1. # b = right end of the domain
ua = .0 # boundary value left side
ub = .5 # boundary value right side
def fcn(x):
return 5*x
def exact(x):
return (5*x*(3 - x**2) - 13.)/6. + 1.
import time
    t0 = time.perf_counter()
    u,x = Bvp_ND(fcn, a, b, ua, ub, Nx)
    t1 = time.perf_counter()
print('cpu time',t1-t0)
# compute the error norm
from scipy.linalg import norm
print('approximation error', abs(norm(u-exact(x))))
xx = np.linspace(a,b)
sol = exact(xx)
print('abs point error at x=-1:',abs(sol[0] - u[0]))
# generate a plot of the solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6))
line1 = plt.plot(x,u,'--.')
plt.setp(line1, linewidth=.5, color='blue')
plt.plot(xx,sol,'r-')
    plt.title(r'numerical solution: $-u^{\prime\prime} = 5x, u^{\prime}(-1) = 0, u(1) = .5$')
plt.grid()
plt.show()
if __name__ == '__main__':
demo_DD()
demo_DN()
demo_ND()
|
[
"noreply@github.com"
] |
noreply@github.com
|
32dd6e795e4e8c7656c208ec826cb9d894bcef01
|
ad03fdcabdd4ab333bdfd12f6c41c3d13353df63
|
/newsfeed/migrations/0004_configuration_total_votes.py
|
718f3e7e358c74413b36cbe896ab0ef1c9aee695
|
[] |
no_license
|
ReEnTrust/mediationtool
|
109aefa8354af07568d4b5ab6251c88eab27f925
|
77207ed30c054456e904d1a1ecd3d81baf718b36
|
refs/heads/main
| 2023-04-19T10:08:50.629107
| 2021-03-14T15:10:19
| 2021-03-14T15:10:19
| 364,901,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Generated by Django 3.0.4 on 2021-01-08 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsfeed', '0003_auto_20210108_1426'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='total_votes',
field=models.IntegerField(default=0),
),
]
|
[
"blegaste@ed.ac.uk"
] |
blegaste@ed.ac.uk
|
02f13b4e97361be88fb47c40bc0c2c77d644194f
|
d0289b1670fc11e8e5233d6ae87ebda301e313aa
|
/test_mnist.py
|
67bf603f772f47d660f79e5d5a5de77d0741b130
|
[] |
no_license
|
Jthon/wgan-gp
|
201f03d1d649756555a65c6c2d6266b5c92352b5
|
605dc6675f06a48f0161d5e1da813bdef365f3b9
|
refs/heads/master
| 2022-06-14T06:00:51.764708
| 2020-05-07T04:38:53
| 2020-05-07T04:38:53
| 261,795,496
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
import load_mnist
import config as cfg
import numpy as np
import cv2
mnist_dataset=load_mnist.MNIST(cfg.params["mnist_image"],cfg.params["mnist_label"])
for i in range(0, mnist_dataset.datanum):
    signal = False
    print("num=%d" % int(mnist_dataset.labels[i]))
    # show the image before waiting, otherwise nothing appears until a key press
    cv2.imshow("image", np.array(mnist_dataset.images[i], np.uint8))
    while True:
        key = cv2.waitKey(5)
        if key == 13:      # Enter: advance to the next image
            break
        if key == 27:      # Esc: quit
            signal = True
            break
    if signal:
        break
|
[
"henrycai0909@gmail.com"
] |
henrycai0909@gmail.com
|
4dd83c3d16e32174d0bbaec60784760de259507b
|
f969727f9bfce4984fde818b69129542b03f4bb0
|
/intro/53_validTime.py
|
e04dd241853e23a35c1c594d5e226a3d5359837b
|
[] |
no_license
|
big-ssk/CodeSignalPython
|
dc83476c2fa9c0f43a3fa22fb3507467a705cbeb
|
a9b3e8c66fd4dc6a595e058b1928153d466ecd66
|
refs/heads/master
| 2023-01-07T22:44:41.056283
| 2020-11-10T15:52:06
| 2020-11-10T15:52:06
| 297,742,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
def validTime(time):
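    # Assumes an "HH:MM"-style string; anything non-numeric around the colon
    # raises ValueError rather than returning False.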
hours, minutes = time.split(':')
return 0 <= int(hours) < 24 and 0 <= int(minutes) < 60
|
[
"noreply@github.com"
] |
noreply@github.com
|
4ff054f06535f914bf1194cc99ced72fe1853799
|
36191115c3f91a1dadb675ba4f46611423c0e9d7
|
/telegram_unvoicer_bot/telegram/const.py
|
ac79ca8b37c47a7f9b691907dc78cd0019b54933
|
[
"Apache-2.0"
] |
permissive
|
nabokihms/telegram_unvoicer_bot
|
00805b9e89135fbbc77bbd7b5e28696bb3f34cb5
|
bdc75d8d4bd25d5914523e984c22f2ac05f022e1
|
refs/heads/master
| 2023-02-22T19:25:22.964360
| 2022-10-14T10:35:22
| 2022-10-14T10:35:22
| 133,100,125
| 5
| 0
|
Apache-2.0
| 2023-02-13T02:41:49
| 2018-05-12T00:16:01
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
from os import environ
from typing import FrozenSet
TELEGRAM_BOT_API_KEY: str = environ['TELEGRAM_API_KEY']
TELEGRAM_BOT_API_URL_PREFIX: str = 'https://api.telegram.org'
TELEGRAM_BOT_API_URL: str = \
f'{TELEGRAM_BOT_API_URL_PREFIX}/bot{TELEGRAM_BOT_API_KEY}/'
TELEGRAM_BOT_FILE_PATH_API_URL: str = \
f'{TELEGRAM_BOT_API_URL_PREFIX}/file/bot{TELEGRAM_BOT_API_KEY}/'
TELEGRAM_MESSAGE_AUDIO_KEYS: FrozenSet[str] = frozenset(
('voice', 'audio', 'document')
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
602f15f787ddf6f4abf25caf955202c918bb21a8
|
0bde172fb37280fa96758144dcec767990a89565
|
/run.py
|
6b7ebbdfe73be630d7affd8741dcdd2f91792392
|
[] |
no_license
|
JonatanMariscal/P2_E1
|
a809dfaf11e2dedf008b1e0264e596d4786a4f4d
|
04d4de2c91ac83859e7cce3d745024350c82db58
|
refs/heads/master
| 2022-06-17T02:14:45.405225
| 2020-05-10T19:47:10
| 2020-05-10T19:47:10
| 262,860,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
import SimpleITK as sitk
def resample_image(image, reference):
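    # Rescale the voxel spacing so the physical extent stays fixed:
    # new_spacing = old_size * old_spacing / new_size, per axis.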
pixel_spacing = image.GetSpacing()
new_spacing = [old_sz * old_spc / new_sz for old_sz, old_spc, new_sz in
zip(image.GetSize(), pixel_spacing, reference.GetSize())]
image_resampled = sitk.Resample(image, reference.GetSize(), sitk.Transform(), sitk.sitkNearestNeighbor,
image.GetOrigin(), new_spacing,
image.GetDirection(), 0.0, image.GetPixelIDValue())
return image_resampled
# Register two images with same shape.
def register_images(image, reference):
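    # Rigid (Euler 3D) registration: start from a geometry-centered initial
    # transform, optimize Mattes mutual information by gradient descent, then
    # resample the moving image onto the reference grid.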
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(reference, image.GetPixelID()),
image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=250)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkNearestNeighbor)
registration_method.SetOptimizerAsGradientDescent(learningRate=3.0, numberOfIterations=10000,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
final_transform = registration_method.Execute(sitk.Cast(reference, sitk.sitkFloat32),
sitk.Cast(image, sitk.sitkFloat32))
register = sitk.ResampleImageFilter()
register.SetReferenceImage(reference)
register.SetInterpolator(sitk.sitkNearestNeighbor)
register.SetTransform(final_transform)
ds_register = register.Execute(image)
return ds_register
def main():
def mode_selector():
status = selector.get()
if not status:
frame_alpha.tkraise()
selector.set(True)
else:
frame.tkraise()
selector.set(False)
    def update_slice(_new_value):
pos = slice_selector.get()
alpha = alpha_selector.get()
status = selector.get()
if not status:
axs[0].imshow(ds_array[pos,:,:], cmap=plt.cm.get_cmap(colormap.get()))
axs[1].imshow(phantom_array[pos,:,:], cmap=plt.cm.get_cmap(colormap.get()))
fig.canvas.draw_idle()
else:
ax.imshow(ds_array[pos, :, :], cmap=plt.cm.get_cmap(colormap.get()))
ax.imshow(phantom_array[pos, :, :], cmap=plt.cm.get_cmap("prism"), alpha=alpha/100)
fig2.canvas.draw_idle()
slice_pos = "Nº Slice: " + str(pos)
label_slice.config(text=slice_pos)
#Reading RM_Brain_3D-SPGR DICOM
path_dcm = "data/RM_Brain_3D-SPGR"
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(path_dcm)
reader.SetFileNames(dicom_names)
ds = reader.Execute()
#Reading phantom DICOM
ds_phantom = sitk.ReadImage('data/icbm_avg_152_t1_tal_nlin_symmetric_VI.dcm')
phantom_array = sitk.GetArrayFromImage(ds_phantom) # z, y, x
#Reading atlas DICOM
ds_atlas = sitk.ReadImage('data/AAL3_1mm.dcm')
# Resample Brain DICOM and atlas DICOM to phantom shape
ds_resample = resample_image(ds, ds_phantom)
ds_atlas_resample = resample_image(ds_atlas, ds_phantom)
# Register Brain DICOM and atlas DICOM with phantom
ds_atlas_register = register_images(ds_atlas_resample, ds_phantom)
atlas_array = sitk.GetArrayFromImage(ds_atlas_register) # z, y, x
ds_register = register_images(ds_resample, ds_phantom)
ds_array = sitk.GetArrayFromImage(ds_register) # z, y, x
# Creating window and frames
root = tk.Tk()
root.title("DICOM Image Display")
top_frame = tk.Frame() # frame with buttons and sliders
    frame = tk.Frame()  # frame with the synchronized (side-by-side) viewer
    frame_alpha = tk.Frame()  # frame with the alpha-blend viewer
top_frame.grid(row = 0, column = 0, sticky = tk.W, columnspan=6)
frame.grid(row = 1,sticky="nsew", column = 0, columnspan=6)
frame_alpha.grid(row = 1,sticky="nsew", column = 0, columnspan=6)
frame.tkraise()
selector = tk.BooleanVar()
    # Display the images in the synchronized viewer
fig, axs = plt.subplots(1,2, figsize=(15, 6), dpi=100, sharex=True, sharey=True)
axs = axs.ravel()
colormap = tk.StringVar()
colormap.set("bone")
axs[0].imshow(ds_array[0,:,:], cmap=plt.cm.get_cmap(colormap.get()))
axs[1].imshow(phantom_array[0,:,:], cmap=plt.cm.get_cmap(colormap.get()))
canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, expand=1)
toolbar = NavigationToolbar2Tk(canvas, frame)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, expand=1)
    # Display the images in the alpha-blend viewer
fig2, ax = plt.subplots(1, figsize=(15, 6), dpi=100, sharex=True, sharey=True)
alpha = 0
ax.imshow(ds_array[0, :, :], cmap=plt.cm.get_cmap(colormap.get()))
ax.imshow(phantom_array[0, :, :], cmap=plt.cm.get_cmap("prism"), alpha=alpha/100)
canvas_alpha = FigureCanvasTkAgg(fig2, master=frame_alpha)
canvas_alpha.draw()
canvas_alpha.get_tk_widget().pack(side=tk.TOP, expand=1)
toolbar_alpha = NavigationToolbar2Tk(canvas_alpha, frame_alpha)
toolbar_alpha.update()
canvas_alpha.get_tk_widget().pack(side=tk.TOP, expand=1)
# Selecting slices
pos = 0
slice_selector = tk.Scale(top_frame, label="Slice selector", from_=0, to=ds_array.shape[0] - 1,
orient=tk.HORIZONTAL, length=400,
command=update_slice, tickinterval=20)
slice_selector.pack(side=tk.LEFT, anchor=tk.NW)
# Showing actual number of slice
label_slice = tk.Label(top_frame)
label_slice.pack(side=tk.TOP, anchor=tk.NW, before=slice_selector)
slice_pos = "Nº Slice: " + str(pos)
label_slice.config(text=slice_pos)
    # Switch between synchronized and alpha visualization
b = tk.Button(top_frame, text="Mode selector", command=mode_selector, width=10)
b.pack(side=tk.TOP)
    # Select the alpha percentage used for the alpha visualization
alpha_selector = tk.Scale(top_frame, label="alpha value", from_=0, to=100,
orient=tk.HORIZONTAL, length=400,
command=update_slice, tickinterval=5)
alpha_selector.pack(side=tk.TOP)
root.mainloop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
f5d2f37e9340e9e1de69ab7c207e279391fe5bf5
|
5343fd379fc858be81c8993019201fe16e44599f
|
/Assignment4/data_loader.py
|
ad3559ba867e89fbf7ac5607be7721c36c73f772
|
[] |
no_license
|
FreddieSun/CS520_IntroToAI
|
d39a7d842108c189655b500365f4a480632bd6ee
|
f53a0fbb7a4431a7667c4894d6f36d324c1b79b4
|
refs/heads/master
| 2021-10-09T02:57:17.464117
| 2018-12-18T07:19:23
| 2018-12-18T07:19:23
| 150,920,609
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
import scipy.misc
from glob import glob
import numpy as np
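# NOTE: scipy.misc.imread/imresize were removed in SciPy 1.2/1.3; this loader
# needs an older SciPy (with Pillow) or a port to imageio/PIL equivalents.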
class DataLoader():
def __init__(self, img_res=(128, 128)):
self.img_res = img_res
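        # Each training file is assumed to hold a pix2pix-style pair: images A
        # and B side by side, split at the horizontal midpoint below.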
def load_train_batch(self, batch_size=1, is_testing=False):
path = glob('./datasets/train/*')
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
half_w = int(w/2)
img_A = img[:, :half_w, :]
img_B = img[:, half_w:, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
img_B = scipy.misc.imresize(img_B, self.img_res)
if not is_testing and np.random.random() > 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
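            # scale pixel values from [0, 255] to [-1, 1], the usual GAN input range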
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
def load_test_batch(self, batch_size):
path = glob('./datasets/test/*' )
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
half_w = int(w/2)
img_A = img[:, :half_w, :]
img_B = img[:, half_w:, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
img_B = scipy.misc.imresize(img_B, self.img_res)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
def load_self_test_batch(self, batch_size):
path = glob('./self_test/*')
self.n_batches = int(len(path) / batch_size)
for i in range(self.n_batches):
batch = path[i * batch_size:(i + 1) * batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = self.imread(img)
h, w, _ = img.shape
img_A = img[:, :w, :]
img_A = scipy.misc.imresize(img_A, self.img_res)
imgs_A.append(img_A)
imgs_A = np.array(imgs_A) / 127.5 - 1.
yield imgs_A
def imread(self, path):
        return scipy.misc.imread(path, mode='RGB').astype(float)  # np.float was removed from NumPy
|
[
"xl422@scarletmail.rutgers.edu"
] |
xl422@scarletmail.rutgers.edu
|
9af0f22d01f8392e47d62ef608e0396880a6a4dc
|
9d7905e71a4abda02000fc363fb82a16fb1e3bd0
|
/control/actuator_properties.py
|
2ed9690ff6428c5352b73dabe662fe570d823403
|
[] |
no_license
|
gaffarelj/DSE-Mars-SRV
|
088a5cb111f6036be540b7e8defd8e95e9a3fd5a
|
6a70d239e4cba091e24d4423e2c10db1ffe033af
|
refs/heads/master
| 2023-01-30T08:45:16.244930
| 2020-12-16T21:33:56
| 2020-12-16T21:33:56
| 257,545,459
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,970
|
py
|
import numpy as np
from matplotlib import pyplot as plt
#vehicle constants
length_body = 14.01
length = 14.01 + 3.6
body_radius = 7.61 / 2
capsule_radius_bottom = 2.3
capsule_radius_top = 1.4
x_cg = 0.
z_cg = 0.
z_cg_full = 5.276 #m
z_cg_empty = 9.238 #m
z_cg_orbit = 9.007
z_cg_end_ascent = 6.120
x_body_side = body_radius
x_capsule_bottomside = capsule_radius_bottom
x_capsule_topside = capsule_radius_top
y_body_side = body_radius
y_capsule_bottomside = capsule_radius_bottom
y_capsule_topside = capsule_radius_top
z_body_top = length_body
z_capsule_bottom = z_body_top + 1.2
z_capsule_top = z_capsule_bottom + 2.7
Ix = 2875350.278 #kg/m^2
Iz = 306700.3372 #kg/m^2
Iy = 2875350.278 #kg/m^2
#RCS propellant properties
Isp = 140 #bi liquid, LCH4
Isp_mono = 140 #mono liquid H2O2
g = 9.80665
#engines
nx = 6
ny = 6
nz = 4
def thruster_arms(z_cg):
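    # NOTE: the z_cg argument is currently unused; every arm below is derived
    # from the hard-coded geometry and y_cg constants in this function.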
#vehicle constants
length_body = 14.01
length = 14.01 + 3.6
body_radius = 7.61 / 2
capsule_radius_bottom = 2.3
capsule_radius_top = 1.4
x_cg = 0.
y_cg = 0.
y_cg_full = 7.7085 #m
y_cg_empty = 10.0344 #m
y_cg_orbit = 6.62390
x_body_side = body_radius
x_capsule_bottomside = capsule_radius_bottom
x_capsule_topside = capsule_radius_top
z_body_side = body_radius
z_capsule_bottomside = capsule_radius_bottom
z_capsule_topside = capsule_radius_top
y_body_top = length_body
y_capsule_bottom = y_body_top + 1.2
y_capsule_top = y_capsule_bottom + 2.7
lx_bottom = x_body_side
lx_top = x_capsule_bottomside
lz_bottom = z_body_side
lz_top = z_capsule_bottomside
ly_bottom = y_cg - 1.
ly_top = y_cg_orbit * 2 - y_cg
return lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top
def RCS_torque_to_thrust(T,axis,cg,scenario):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
# n: number of thrusters to provide torque
if axis == 'x' or axis == 'z':
if scenario == 'normal':
n_bottom = 2
n_top = 2
thrust = T / (ly_bottom * n_bottom + ly_top * n_top)
elif scenario == 'error_bottom' or scenario == 'error_top':
n = 1
thrust = T / (lz_bottom * n)
elif scenario == 'failure':
n_bottom = 1
n_top = 1
thrust = T / (lz_bottom * n_bottom + lz_top * n_top)
elif axis == "y":
if scenario == 'normal':
n_bottom = 2
n_top = 2
thrust = T / (n_top * ly_top + n_bottom * ly_bottom)
elif scenario == 'error_bottom':
n = 1
thrust = T / (lx_bottom)
elif scenario == 'error_top':
n = 1
thrust = T / (lx_top)
elif scenario == 'failure':
n_bottom = 1
n_top = 1
thrust = T / (lz_bottom * n_bottom + lz_top * n_top)
return thrust
def RCS_displacement_to_thrust(F,axis,scenario):
if axis == "x" or axis == 'z':
if scenario == 'normal':
n_bottom = 1
n_top = 1
n = n_bottom + n_top
elif scenario == 'failure':
n_bottom = 1
n_top = 1
n = n_bottom + n_top
if axis == "y":
if scenario == 'normal':
n_bottom = 4
n_top = 0
n = n_bottom + n_top
if scenario == 'failure':
n_bottom = 2
n_top = 0
n = n_bottom + n_top
f = F / n
return f
def RCS_thrust_to_torque(f,axis,cg):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
if axis == 'x' or axis == 'z':
n_bottom = 2
n_top = 2
T = f * (lz_bottom * n_bottom + lz_top * n_top)
elif axis == 'y':
#bottom RCS
n_bottom = 2
n_top = 2
T = f * (n_top * ly_top + n_bottom * ly_bottom)
return T
def slew(thrust,tburn,slew_angle,I):
torque = RCS_thrust_to_torque(thrust,'z','normal')
spin_acc = torque / I
spin_rate = spin_acc * tburn
slew_time = slew_angle / spin_rate
return slew_time,torque
def thrust_error(f,cg,angle):
lx_bottom, lx_top, ly_bottom, ly_top, lz_bottom, lz_top = thruster_arms(cg)
T_error_x = np.sin(angle*np.pi/180) * ly_bottom * f
T_error_z = T_error_x
T_error_y = np.sin(angle*np.pi/180) * lx_bottom * f
return T_error_x, T_error_y, T_error_z
def RCSpropellant(f,t,Isp):
g = 9.80665
impulse = f * t
Mp = impulse / (Isp * g)
return Mp
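# --- Minimal usage sketch (illustrative only; the demanded torque, burn time
# --- and slew angle below are assumed values, not taken from the vehicle spec):
if __name__ == '__main__':
    T_demand = 5000.  # N*m, assumed disturbance torque to counteract
    f_req = RCS_torque_to_thrust(T_demand, 'x', z_cg_orbit, 'normal')
    t_slew, T_slew = slew(f_req, 2., np.pi / 2, Ix)  # 2 s burn, 90 deg slew
    Mp = RCSpropellant(f_req, t_slew, Isp)
    print(f_req, t_slew, Mp)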
|
[
"wiegerhelsdingen@gmail.com"
] |
wiegerhelsdingen@gmail.com
|
c16aac2e875043d857d88fc4d33e2dd6def2bc57
|
0a24ca351b483e769c44c1651f839fe3fbf4c3e7
|
/vurapy/config/production_env.py
|
ccb6f823e7fe5e59ffc77e485828fca246c8d7f2
|
[
"MIT"
] |
permissive
|
crisboleda/vurapy
|
f7be49f2681f8b47f634a0bc27042ed451e6839b
|
12e35fb9373131181b1b8d4d5701fbbf4231dab8
|
refs/heads/master
| 2022-12-02T20:54:53.375384
| 2020-08-22T17:17:15
| 2020-08-22T17:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from .base import *
from decouple import config, Csv
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default='*', cast=Csv())
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
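# A hedged example of the .env file that python-decouple would read for the
# settings above (key names match the config() calls; values are placeholders):
#
#   SECRET_KEY=change-me
#   DEBUG=False
#   ALLOWED_HOSTS=example.com,www.example.com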
|
[
"cristhian.2002.0@gmail.com"
] |
cristhian.2002.0@gmail.com
|
fd2e4f64b8d23dc7435ca8f180adc95a3899a98b
|
3a37b6ce2c1c481f6aded64b2d0c4421f7db1210
|
/hpc_bin/wrf_postproc
|
eb26ac737814efe9bbbab6838f79c0a64670559b
|
[] |
no_license
|
dbreusch/pythonExamples
|
99c24dc1d28b8c3be3b4fadd30a05d8ae317b0c0
|
7b8c06208fefcadf918cb9517ac313535c4df010
|
refs/heads/master
| 2022-10-13T12:21:52.182752
| 2020-06-14T23:10:50
| 2020-06-14T23:10:50
| 265,911,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,217
|
#!/usr/bin/env python
# wrf_postproc
# run wrf_postproc.ncl on a specific file
# output goes to a directory matching yy/mm/dd of input file
# 03/30/17, dbr, removed hardcoded dependence on home directory name
import pdb, sys, os
nargin = len(sys.argv)
if nargin < 6:
    print "Syntax: wrf_postproc input_fn ens expname model branch"
    print "  input_fn = name of wrfout file to be processed"
    print "  ens      = ensemble number (0 = no ensemble)"
    print "  expname  = region name (gis, ant)"
    print "  model    = model name (erai, cesmle)"
    print "  branch   = time period (e.g., historical)"
    sys.exit()
# get command line args
input_fn = sys.argv[1]
enss = sys.argv[2]
ens = int( enss )
expname = sys.argv[3]
model = sys.argv[4]
branch = sys.argv[5]
# get home directory
home = os.path.expanduser("~")
# ncl command
ncl_cmd = home+"/wrf/ncl/wrfout_postproc.ncl"
# split filename and gather yy/mm/dd info
dir_tmp = os.path.dirname( input_fn )
file_in = os.path.basename( input_fn )
file_yy = file_in[ 11:15 ]
file_mm = file_in[ 16:18 ]
file_dd = file_in[ 19:21 ]
# convert input dir_in to working dir_in
a = dir_tmp.split('/')
if len(a) < 3:
print "Input file name too short, missing parent directories"
sys.exit()
dir1 = a[-2]
dir2 = a[-1]
if ens > 0:
base = home+"/scratch/wrf/"+expname+"_"+model+"/"+branch+"/wrf/run/"+enss+"/"
else:
base = home+"/scratch/wrf/"+expname+"_"+model+"/"+branch+"/wrf/run/"
dir_in = base+dir1+"/"+dir2+"/"
# output names
if ens > 0:
dir_out = home+"/scratch/wrf/%s_%s/%s/wrf/postproc/%s/%s/%s%s%s/" % (expname, model, branch, enss, file_yy, file_yy, file_mm, file_dd)
else:
dir_out = home+"/scratch/wrf/%s_%s/%s/wrf/postproc/%s/%s%s%s/" % (expname, model, branch, file_yy, file_yy, file_mm, file_dd)
file_out = file_in+".nc"
ofn = dir_out+file_out
if os.path.exists( ofn ):
os.unlink( ofn )
if not os.path.exists( dir_out ):
try:
os.makedirs( dir_out )
except OSError:
pass
# print "Caught makedir of existing dir "+dir_out
arg1 = "'dir_in=\""+dir_in+"\"'"
arg2 = "'file_in=\""+file_in+"\"'"
arg3 = "'dir_out=\""+dir_out+"\"'"
arg4 = "'file_out=\""+file_out+"\"'"
cmd = "ncl "+arg1+" "+arg2+" "+arg3+" "+arg4+" "+ncl_cmd
#pdb.set_trace()
os.system( cmd )
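# Example invocation (illustrative paths; the wrfout filename layout is assumed
# from the string slicing above, e.g. wrfout_d01_2005-01-01_00:00:00):
#   wrf_postproc run/2005/0101/wrfout_d01_2005-01-01_00:00:00 1 gis erai historical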
|
[
"dbreusch@me.com"
] |
dbreusch@me.com
|
|
b9504f50d0a006b0e32c812812e12c7e725481ff
|
50ec136d757fbd6033748ea0d0b084e1663fb4ff
|
/train.py
|
dd7a40588b03e4b8bea191ca677398465c207191
|
[] |
no_license
|
Tyler-Shamsuddoha/python-image-classifier-keras
|
2b67cc196f8f692e22b65f6505ad779c57db53bc
|
2e734bbb08290ff70a35e566114014762ff4bd06
|
refs/heads/master
| 2023-06-12T19:55:50.369444
| 2021-07-07T20:20:21
| 2021-07-07T20:20:21
| 353,856,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,729
|
py
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import img_to_array, ImageDataGenerator
from imutils import paths
from tensorflow.keras.losses import categorical_crossentropy #cosine_proximity,
from tensorflow.keras.optimizers import Nadam, Adam
from tensorflow.keras.utils import plot_model
from sklearn.neural_network import MLPClassifier
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import cv2
import numpy as np
import tensorflow as tf
import random
import os
import matplotlib.pyplot as plt
import sklearn
# Set random seeds to ensure the reproducible results
SEED = 309
np.random.seed(SEED)
random.seed(SEED)
tf.random.set_seed(SEED)
IMG_HEIGHT = 64
IMG_WIDTH = 64
INPUT_SHAPE = (64, 64)
# conv_base = InceptionResNetV2(weights = 'imagenet', include_top = False, input_shape = (IMG_HEIGHT,IMG_WIDTH,3))
train_dir = './Train_data'
test_dir = './test_data'
validation_images_dir = './Validation_Sets'
batch_size = 32
epoch = 100
def construct_model():
"""
Construct the CNN model.
***
Please add your model implementation here, and don't forget compile the model
E.g., model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
NOTE, You must include 'accuracy' in as one of your metrics, which will be used for marking later.
***
:return: model: the initial CNN model
"""
model = Sequential([
Conv2D(16, kernel_size=3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
MaxPooling2D(2, 2),
Conv2D(32, kernel_size=3, padding='same', activation='relu'),
MaxPooling2D(2, 2),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(2, 2),
Conv2D(128, 3, padding='same', activation='relu'),
Flatten(),
Dense(64, 'relu'),
Dropout(0.5),
Dense(3, activation='softmax')
])
model.compile(loss=categorical_crossentropy, optimizer= Nadam(), metrics=['accuracy'])
return model
# *************************************
# Building Model Using Transfer Learning
# model = Sequential([
# conv_base,
# Flatten(),
# Dense(512, 'relu'),
# Dense(3, activation='softmax')
# ])
# model.compile(loss=categorical_crossentropy, optimizer= Nadam(), metrics=['accuracy'])
# return model
def train_model(model):
"""
Train the CNN model
***
Please add your training implementation here, including pre-processing and training
***
:param model: the initial CNN model
:return:model: the trained CNN model
"""
print("Loading Images...")
validation_image_generator = ImageDataGenerator(rescale=1./255, rotation_range=40)
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_images_dir,
target_size=INPUT_SHAPE,
class_mode='categorical')
print("Loaded Validation Set Images Successfully\n")
train_image_generator = ImageDataGenerator(rescale=1./255, zoom_range=0.2, rotation_range=40)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=INPUT_SHAPE,
class_mode='categorical')
print("Loaded Training Images Successfully\n")
print("Starting training....\n")
model = construct_model()
filepath = 'model/newmodel111.h5'
model_checkpoint = ModelCheckpoint(filepath, monitor='val_loss', save_best_only= True, verbose=1, mode = 'min')
# early_stop = EarlyStopping(filepath, monitor='val_acc', mode='max', patience=5)
history = model.fit_generator(
train_data_gen,
steps_per_epoch= 3748/batch_size,
epochs= epoch,
validation_data= val_data_gen,
validation_steps= 562/batch_size,
callbacks = [model_checkpoint],
)
visualise_results(history)
return model
def save_model(model):
"""
Save the keras model for later evaluation
:param model: the trained CNN model
:return:
"""
# ***
# Please remove the comment to enable model save.
# However, it will overwrite the baseline model we provided.
# ***
model.save("model/new_model.h5")
print("Model Saved Successfully.")
def visualise_results(history):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
if __name__ == '__main__':
model = train_model(train_dir)
save_model(model)
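# Hedged usage sketch for the saved model (assumes model/new_model.h5 exists and
# that 'some_image.png' is a readable image; not part of the original script):
# from tensorflow.keras.models import load_model
# trained = load_model('model/new_model.h5')
# img = cv2.resize(cv2.imread('some_image.png'), INPUT_SHAPE) / 255.0
# print(trained.predict(img[np.newaxis, ...]).argmax(axis=1))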
|
[
"tylershamsuddoha@gmail.com"
] |
tylershamsuddoha@gmail.com
|
f532fa34ab974fc95dff6d166910eebb381c45a5
|
68a155e62686542dc8e7e8069382085c88612280
|
/application/routes.py
|
eab784019f5a8d46370a9c487ff0b720756d01de
|
[] |
no_license
|
MohamedEmad1998/Movie-Recommendation-AI
|
2f4de729452734e221625effa8809a07256cb8d8
|
f6384117c6be758e4043ec6cb39d679bef627b7e
|
refs/heads/main
| 2023-02-19T11:40:48.749679
| 2021-01-20T21:04:09
| 2021-01-20T21:04:09
| 331,429,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
from application import app
import bot
from flask import render_template ,request
@app.route('/')
@app.route('/home')
@app.route('/index')
def hello_world():
return render_template("index2.html")
@app.route('/get')
def form_post():
message = request.args.get('msg')
response = bot.chat(message)
return str(response)
# if request.method == 'POST' :
# message = request.form['usermsg']
# update_html.update(message,'user')
# render_template('index2.html')
# reply=bot.chat(message)
# update_html.update(reply,'bot')
# return render_template('index2.html')
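# Hedged usage sketch (not in the original file): with the app served locally,
# the /get endpoint can be exercised like this (host and port are assumptions):
# import requests
# reply = requests.get('http://localhost:5000/get', params={'msg': 'hello'})
# print(reply.text)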
|
[
"noreply@github.com"
] |
noreply@github.com
|
db68fe190b85a166a45fd651494dca56572f4e4d
|
a2793557adc64285f9965d25cefc4cea3cff8333
|
/env.py
|
322886ce58433d697bb194cf6602a6d1c3384a75
|
[] |
no_license
|
liuzhonghaolpp/H-DQN
|
bab3b6c4ea44640b473d2ddf2a7dbcc60e56b894
|
1e70d74424fc7679982db0372cce9bd9446970bb
|
refs/heads/master
| 2023-03-18T06:58:00.697140
| 2021-02-28T13:30:51
| 2021-02-28T13:30:51
| 336,790,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,735
|
py
|
import numpy as np
import torch
from gym import spaces, core
from gym.envs.classic_control import rendering
import entity
from utils import uav_mobility
import configparser
import time
config = configparser.RawConfigParser()
config.read('./paramaters.ini')
# parameters
MAX_AoI = 100
DURATION = int(config.get('simulation parameters', 'duration'))
IoT_COMMUNICATION_RANGE = int(config.get('simulation parameters', 'iot_communication_range'))
BS_COMMUNICATION_RAGE = int(config.get('simulation parameters', 'BS_communication_rage'))
class MyEnv(core.Env):
def __init__(self):
        self.action_space = spaces.Box(low=-1, high=1, shape=(2,))
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(14,))
self.num_sensors = 6
self.sensors = []
        self.side_length = 1000  # side length of the target area
self.time = 0
self.get_reward = False
self.reward = 0
self.viewer = None
for i in range(self.num_sensors):
sensor = entity.Sensor(i)
self.sensors.append(sensor)
self.uav = entity.UAV()
self.BS = entity.BS()
def reset(self):
        # initialize the sensor positions (fixed, not random)
self.sensors[0].pos = np.array([250, 550])
self.sensors[1].pos = np.array([550, 150])
self.sensors[2].pos = np.array([750, 250])
self.sensors[3].pos = np.array([350, 850])
self.sensors[4].pos = np.array([550, 750])
self.sensors[5].pos = np.array([750, 950])
        # initialize the UAV position and the AoI of the data it carries
self.uav.pos = np.array([500, 500])
self.uav.aoi = np.array([MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI])
        # initialize the BS AoI
self.BS.pos = np.array([500, 500])
self.BS.aoi = np.array([MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI, MAX_AoI])
        # initialize the reward/bookkeeping variables
self.time = 0
self.get_reward = False
self.reward = 0
obs = self._get_observation()
return obs
def step(self, action):
self.time += 1
self.get_reward = False
self.reward = 0
self.uav.action = action
        # update the UAV position
uav_mobility.get_next_pos(self.uav)
        # if the UAV is within a sensor's communication range and has not yet collected its data, update uav_aoi
self._update_uav_aoi()
        # if the UAV is within BS communication range and carries data not yet uploaded, update bs_aoi
self._update_bs_aoi()
done = self._get_done()
reward = self._get_reward()
obs = self._get_observation()
info = {}
return obs, reward, done, info
def render(self, mode='human'):
        screen_width = 500  # scaled down at a 1:2 ratio
        screen_height = 500
        # if there is no viewer yet, create the viewer plus the uav and landmarks
if self.viewer is None:
self.viewer = rendering.Viewer(screen_height, screen_width)
self.viewer.set_bounds(0, 500, 0, 500)
self.viewer.geoms.clear()
for sensor in self.sensors:
geom = rendering.make_circle(sensor.size)
geom.set_color(1, 0, 0)
geom_form = rendering.Transform(translation=(sensor.pos[0]/2, sensor.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
geom = rendering.make_circle(self.BS.size)
geom.set_color(0, 0, 1)
geom_form = rendering.Transform(translation=(self.BS.pos[0]/2, self.BS.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
geom = rendering.make_circle(self.uav.size)
geom.set_color(0, 1, 0)
geom_form = rendering.Transform(translation=(self.uav.pos[0]/2, self.uav.pos[1]/2))
geom.add_attr(geom_form)
self.viewer.add_geom(geom)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def _get_observation(self):
obs_uav = np.concatenate((self.uav.pos, self.uav.aoi), axis=0)
obs_bs = self.BS.aoi
obs = np.concatenate((obs_uav, obs_bs), axis=0)
return obs
def _get_done(self):
done = False
        if self.uav.pos[0] < 0 or self.uav.pos[0] > 1000 or self.uav.pos[1] < 0 or self.uav.pos[1] > 1000:
done = True
return done
def _get_reward(self):
if self.get_reward:
return self.reward
else:
return 0
def _update_uav_aoi(self):
for i, sensor in enumerate(self.sensors):
distance = np.sqrt(np.sum((sensor.pos - self.uav.pos)**2))
if distance <= IoT_COMMUNICATION_RANGE:
self.uav.aoi[i] = 1
else:
self.uav.aoi[i] += 1
def _update_bs_aoi(self):
distance = np.sqrt(np.sum((self.uav.pos - self.BS.pos)**2))
if distance <= BS_COMMUNICATION_RAGE:
self.get_reward = True
for i in range(len(self.BS.aoi)):
if self.BS.aoi[i] > self.uav.aoi[i]:
self.reward += self.BS.aoi[i] - self.uav.aoi[i]
self.BS.aoi[i] = min(self.BS.aoi[i], self.uav.aoi[i])
else:
for i in range(len(self.BS.aoi)):
self.BS.aoi[i] += 1
if __name__ == '__main__':
env = MyEnv()
obs = env.reset()
env.render()
while True:
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
print(obs)
print(reward)
print('---------------')
env.render()
time.sleep(4)
# Delivery may happen before all data has been collected, or only once after everything is collected.
# Dynamic environment: the QoS-function parameters vary over time, i.e. the users' latency requirements are time-varying.
|
[
"384037404@qq.com"
] |
384037404@qq.com
|
7a5460a7b68fee36094e0189412d4ec7e108cebe
|
c11666437e45f6b771f4004e919dccfdab6c4640
|
/pet_cats.py
|
cfadb02fc56200f91fb9545b7a676ebdd094354a
|
[] |
no_license
|
mesare11/pet_market
|
30ba4b06161550e2abdd1e2fac073d9b1c906c91
|
c8e4006e578898c381be6f76cf6e05af39b94862
|
refs/heads/master
| 2021-01-02T19:02:34.933362
| 2020-07-23T12:16:01
| 2020-07-23T12:16:01
| 239,755,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
from pet_goods import pet_goods
from pet_animals import pet_animals
class pet_cats(pet_animals):
def __init__(self, name, price, kind_pet, age, cats_breed):
pet_animals.__init__(self, name, price, kind_pet, age)
self.cats_breed = cats_breed
def display_cats(self):
print(self.name, self.price, self.kind_pet, self.age, self.cats_breed)
def add_cat():
    # collect the fields interactively and build a pet_cats instance
    name = input("Please input name goods: ")
    price = int(input("Please input price goods: "))
    kind_pet = input("Please input kind of pet: ")
    age = input("Please input age pet: ")
    cats_breed = input("Please input cats breed: ")
    # return the new cat so callers can actually use it
    return pet_cats(name, price, kind_pet, age, cats_breed)
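# Minimal usage sketch (illustrative; the field values are made up):
if __name__ == '__main__':
    cat = pet_cats('Misha', 120, 'cat', '2', 'siamese')
    cat.display_cats()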
|
[
"mr.mesare@gmail.com"
] |
mr.mesare@gmail.com
|
f9f9bdbea20f756b687e0c829b73e3f40f91236b
|
92dbbd758ec9c8ce0bfa2275cb1e3f0db5938f7d
|
/scripts/delta_scan-join_five.py
|
675afb5bc3c0131a7ca5d58a6fa11be53da061b0
|
[] |
no_license
|
RoslawSzaybo/bosons_on_lattice
|
cb5bdd0ded6d08f93faf1da410bc37939904ba4d
|
3c82c5fbd2b27d806526bd88d23b603a6b26dbc4
|
refs/heads/master
| 2020-03-25T11:39:41.477564
| 2018-11-29T18:26:23
| 2018-11-29T18:26:23
| 143,741,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 12:24:12 2018
"""
import numpy as np
import pickle
import sys
def delta(pkl):
return pkl[1]['delta']
zero = 1e-14
def the_same(a,b):
delta_a = delta(a)
delta_b = delta(b)
diff = abs(delta_b-delta_a)
return (diff < zero)
def repetitions(c):
    repeated_idxes = []
    for idx, elem in enumerate(c):
        if idx == 0:
            continue
        if the_same(c[idx-1], elem):
            repeated_idxes += [idx]
    return repeated_idxes
def clean_repetitions(c):
to_del = repetitions(c)
print("Initial number of data points\t\t {dp}".format(dp=len(c)))
print("Number of elements to be removed\t {td}".format(td=len(to_del)))
for idx in reversed(to_del):
c.pop(idx)
print("Final number of data points\t\t {dp}".format(dp=len(c)))
return c
def main():
    if len(sys.argv) != 6:
        # note: despite the file name ("join_five"), this script joins four inputs
        print("This script joins four pickle files.")
        print("I need four input file names and one output name, like this:")
        print("$ python join.py A.pkl B.pkl C.pkl D.pkl output.pkl")
        sys.exit()
in_a = sys.argv[1]
in_b = sys.argv[2]
in_c = sys.argv[3]
in_d = sys.argv[4]
out = sys.argv[5]
print("="*80)
print("Join")
print("="*80)
print("Reading files in progress")
with open(in_a, 'rb') as g:
a = pickle.load(g)
with open(in_b, 'rb') as g:
b = pickle.load(g)
with open(in_c, 'rb') as g:
x = pickle.load(g)
with open(in_d, 'rb') as g:
y = pickle.load(g)
print("Files are open.")
c = sorted(a+b+x+y, key = delta)
print("Data in now united to a one, sorted file.")
d = clean_repetitions(c)
print("All repetitions are removed.")
with open(out, 'wb') as g:
pickle.dump(d,g)
print("Data is saved to {}".format(out))
return 0
if __name__ == '__main__':
main()
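# Example invocation (illustrative file names, matching the argv check in main()):
#   $ python3 delta_scan-join_five.py A.pkl B.pkl C.pkl D.pkl joined.pkl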
|
[
"pawel.wojcik5@gmail.com"
] |
pawel.wojcik5@gmail.com
|
4619d834c5879ba0fbbdd46908424b4e26a6a8fa
|
4af4f4f40e95193cf2fed870724e39263d17d22c
|
/compined_testing.py
|
6f80f28f5133e88c74309642716c3e67f854cb31
|
[] |
no_license
|
basharE/pythonFirstProject
|
1a5a138fda0230f4ef40a21c905bc23fbb9c5e7a
|
d9ec307725449096bf5feb1e507506003fbae5c2
|
refs/heads/master
| 2023-04-06T12:14:36.980051
| 2021-03-28T17:57:49
| 2021-03-28T17:57:49
| 338,761,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
import requests
from db_connector import get_user
from selenium import webdriver
user_id = 1001
requests.post('http://localhost:5000/users/' + str(user_id), json={"user_name": "user name " + str(user_id)})
res = requests.get('http://localhost:5000/users/' + str(user_id))
assert res
assert get_user(str(user_id))[0][0] == "user name " + str(user_id)
driver = webdriver.Chrome(executable_path="/Users/basharegbariya/Downloads/chromedriver")
driver.get("http://127.0.0.1:5001/users/get_user_data/"+str(user_id))
assert driver.find_element_by_id("user").text == 'user name ' + str(user_id)
driver.quit()
|
[
"bashar.egbariya@peer39.com"
] |
bashar.egbariya@peer39.com
|
1bc7d1b8360e79b34496216b5177441ca38e9763
|
22a8a20397148dd4aef2fbd529121df8b91c918f
|
/sklearn/iris_classification.py
|
c1cf8b64148764486906cd0a30d043cd5a016d06
|
[] |
no_license
|
greenchapter/playground
|
87313d6926e21ee79c45114b0ff922948e35c601
|
033f300ccb8ef55dcbebb8197d5d1a8709fb258e
|
refs/heads/main
| 2022-06-01T08:57:16.896340
| 2022-05-13T18:09:45
| 2022-05-13T18:09:45
| 96,816,180
| 0
| 0
| null | 2022-05-13T18:09:46
| 2017-07-10T19:59:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
#
# Example: classification of iris flowers
#
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
# Load the iris dataset
data_train = pd.read_csv('./iris.csv')
# The 3 classes to be recognized are mapped to the numeric values 0, 1 and 2.
data_train.loc[data_train['species']=='Iris-setosa', 'species']=0
data_train.loc[data_train['species']=='Iris-versicolor', 'species']=1
data_train.loc[data_train['species']=='Iris-virginica', 'species']=2
data_train = data_train.apply(pd.to_numeric)
# The loaded dataset is represented as a matrix
data_train_array = data_train.to_numpy()
# The dataset is split into two separate parts: test data and training data.
# 80% of the data is used to train the model and 20% to test it.
# Since the input is a vector, we use the capital letter X;
# the output, by contrast, is a single value,
# hence the lowercase letter y
X_train, X_test, y_train, y_test = train_test_split(data_train_array[:,:4],
data_train_array[:,4],
test_size=0.2)
# VERSION 1
# A neural network classifier (MultiLayerPerceptron) is built with the following properties:
# an input layer with 4 neurons, representing the features of the iris plant;
# a hidden layer with 10 neurons;
# an output layer representing the 3 classes to be recognized.
# relu is used as the activation function and adam as the optimizer.
mlp = MLPClassifier(hidden_layer_sizes=(10,),activation='relu', solver='adam', max_iter=350, batch_size=10, verbose=True)
# VERSION 2
# The second variant uses 2 hidden layers with 5 and 3 neurons respectively.
# tanh is used as the activation function and adam as the optimizer.
# mlp = MLPClassifier(hidden_layer_sizes=(5,3),activation='tanh', solver='adam', max_iter=500, batch_size=10, verbose=True)
# The neural network is trained with the training data
mlp.fit(X_train, y_train)
# The training result is printed
print("Training score: %5.3f" % mlp.score(X_train, y_train))
# The model is evaluated with the test data
predictions = mlp.predict(X_test)
# and the confusion matrix is printed
print(confusion_matrix(y_test,predictions))
# Precision, recall and f1-score are computed from the confusion matrix and printed
print(classification_report(y_test,predictions))
# The model is tested and the result is printed
print("Test score: %5.3f" % mlp.score(X_test,y_test))
# The following prints the weight values per layer
print("WEIGHTS:", mlp.coefs_)
print("BIASES:", mlp.intercepts_)
# As an example, the model is used for prediction on the following values
# from the test set, with the features [sepal-length, sepal-width,
# petal-length, petal-width]
print(mlp.predict([[5.1,3.5,1.4,0.2], [5.9,3.,5.1,1.8], [4.9,3.,1.4,0.2], [5.8,2.7,4.1,1.]]))
# The loss curve is visualized and saved to the file Plot_of_loss_values.png in PNG format.
loss_values = mlp.loss_curve_
plt.plot(loss_values)
plt.savefig("./Plot_of_loss_values.png")
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b9dac58212d011f1c76f030f0631c55f20b3f02f
|
77ab593ed55a6d46b1778f6d41bc70ced3f8cd46
|
/face_ID_net/face_1024s/face_1024_vals.py
|
2b929cc2b67254c5a37f697a6093fc0d6f3d68f1
|
[] |
no_license
|
wosxcc/bot
|
e93b92fbca79a915feb186160f3f72c99218ffcb
|
c097f5455bc6264c9f778fb72900475963836153
|
refs/heads/master
| 2021-06-12T12:43:47.314071
| 2018-12-14T08:51:43
| 2018-12-14T08:51:43
| 128,619,488
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
import os
import cv2 as cv
import numpy as np
import random
import tensorflow as tf
from face_ID_net.face_1024s.ID_pb_net1024s import face_net
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
IMG_H=64
IMG_W =64
N_CLASSES =1024
learning_rate =0.001
def face_val(image_arr,run_train):
    print('what the heck is going on')
log_dir = './face72/face_big1024/'
with tf.Graph().as_default():
graph = face_net(1, IMG_H,IMG_W, N_CLASSES,learning_rate,2,run_train)
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(log_dir)
if ckpt and ckpt.model_checkpoint_path:
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
saver.restore(sess, ckpt.model_checkpoint_path)
else:
                print('no saved model found')
if run_train ==True:
pos_d,neg_d = sess.run([graph['d_pos'],graph['d_neg']],feed_dict={graph['x']: np.reshape(image_arr, (3, 64, 64, 3))})
return pos_d, neg_d
elif run_train ==False:
                print('the part below goes wrong', len(image_arr), image_arr[0].shape)
anchor_data = sess.run(graph['anchor_out'],feed_dict={graph['x']: np.reshape(image_arr, ( 1, 64, 64, 3))})
                print('the part above went wrong')
return anchor_data
path = 'E:/faceID'
for i in range(10):
    file = random.sample(os.listdir(path),1)[0]
    while(1):
        negative_file = random.sample(os.listdir(path),1)[0]
        if negative_file != file:
            break
    print(file, negative_file)
    anchor_img = random.sample(os.listdir(path+'/'+file),1)[0]
    while(1):
        positive_img = random.sample(os.listdir(path+'/'+file),1)[0]
        if anchor_img != positive_img:
            break
    negative_img = random.sample(os.listdir(path+'/'+negative_file),1)[0]
    img_anchor = cv.imread(path+'/'+file+'/'+anchor_img)
    img_positive = cv.imread(path+'/'+file+'/'+positive_img)
    img_negative = cv.imread(path+'/'+negative_file+'/'+negative_img)
sh_anchor=cv.resize(img_anchor,(240,240),interpolation=cv.INTER_CUBIC)
sh_positive=cv.resize(img_positive,(240,240),interpolation=cv.INTER_CUBIC)
sh_negative=cv.resize(img_negative,(240,240),interpolation=cv.INTER_CUBIC)
image_data=[]
image_data.append(cv.resize(img_anchor,(64,64),interpolation=cv.INTER_CUBIC))
image_data.append(cv.resize(img_negative,(64,64),interpolation=cv.INTER_CUBIC))
image_data.append(cv.resize(img_positive,(64,64),interpolation=cv.INTER_CUBIC))
image_data =np.array(image_data,dtype='float32')
image_data =(image_data-128.0)/256.0
anchor_score = face_val(image_data[0],False)
print(anchor_score)
pos_d,neg_d =face_val(image_data,True)
print(pos_d,neg_d)
cv.imshow('anchor', sh_anchor)
cv.imshow('positive', sh_positive)
cv.imshow('negative', sh_negative)
cv.waitKey()
cv.destroyAllWindows()
|
[
"821022156@qq.com"
] |
821022156@qq.com
|
8aa9836ef60eecec8665ea91de8d724b9abc0328
|
f1c3614d6ef3874e816a2616ea0ae83704b052da
|
/tests/selenium_page.py
|
58331945adeee298ace5dec647cb39d512dfa78d
|
[] |
no_license
|
margaritaumaniuc/presto
|
a46ed88c433a68c762a28e80e413bb1b61d46705
|
f95e85c7abb6da3919ed4fbfc96827f4bba473e2
|
refs/heads/master
| 2020-08-04T20:55:34.755285
| 2019-10-11T06:06:22
| 2019-10-11T06:06:22
| 212,275,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import StaleElementReferenceException, ElementNotVisibleException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tests.settings import WDW_TIME
class BasePage(object):
def __init__(self, driver):
self.driver = driver
    def find_by_locator(self, locator, wait=WDW_TIME):
        # once the document is ready, wait for the element to be present
        if self.driver.execute_script('return document.readyState') == 'complete':
            return WebDriverWait(self.driver, wait, ignored_exceptions=[StaleElementReferenceException]). \
                until(EC.presence_of_element_located(locator))
    def find_by_locator_and_clickability(self, locator, wait=WDW_TIME):
        # once the document is ready, wait for the element to be clickable
        if self.driver.execute_script('return document.readyState') == 'complete':
            return WebDriverWait(self.driver, wait, ignored_exceptions=[StaleElementReferenceException]). \
                until(EC.element_to_be_clickable(locator))
def find_all_elements(self, locator, wait=WDW_TIME):
items = WebDriverWait(self.driver, wait,
ignored_exceptions=[StaleElementReferenceException, ElementNotVisibleException]). \
until(EC.presence_of_all_elements_located(locator))
return items
def fill_element(self, locator, value):
element = self.find_by_locator(locator)
element.send_keys(value)
def press_enter(self, locator):
element = self.find_by_locator(locator)
element.send_keys(Keys.ENTER)
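# Hedged usage sketch (not part of the original suite; the page class, locator
# and URL are invented for illustration):
# from selenium import webdriver
# from selenium.webdriver.common.by import By
#
# class SearchPage(BasePage):
#     QUERY_INPUT = (By.NAME, 'q')
#
#     def search(self, text):
#         self.fill_element(self.QUERY_INPUT, text)
#         self.press_enter(self.QUERY_INPUT)
#
# driver = webdriver.Chrome()
# driver.get('https://example.com')
# SearchPage(driver).search('presto')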
|
[
"margarita.u@sparkcentral.com"
] |
margarita.u@sparkcentral.com
|
4e7b95fde55ee2c89e339d086f06d4dbd57c294b
|
db68071bbdb5cb2cd7713b89c96d554e00470ab2
|
/MotionSensor.py
|
132223c16c76f3075f3216ec076cda53a07bad04
|
[] |
no_license
|
majorpeter/puha-manager
|
a265c9d14d845d0efab27c664420fffc3147054b
|
fd51b560a7e1dad99204ae3c9c7369d17cbcfd32
|
refs/heads/master
| 2020-03-19T04:35:44.466718
| 2018-12-21T20:24:26
| 2018-12-21T20:24:26
| 135,844,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import logging
from datetime import datetime
class MotionSensor:
def __init__(self, node):
self.node = node
self.node.PulseCount.subscribe_to_changes(self.on_pulse_count_changed)
self.last_movement_time = datetime.now() # let's assume there is movement on startup to avoid checks later
def on_pulse_count_changed(self, name, value):
logging.debug('movement detected! (%s)' % value)
self.last_movement_time = datetime.now()
def get_time_since_last_movement(self):
return datetime.now() - self.last_movement_time
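# Hedged usage sketch with a stand-in node (the real node type and its
# PulseCount API are not defined in this file, so this stub is an assumption):
# class _FakePulseCount:
#     def subscribe_to_changes(self, cb):
#         self._cb = cb
#
# class _FakeNode:
#     PulseCount = _FakePulseCount()
#
# sensor = MotionSensor(_FakeNode())
# sensor.on_pulse_count_changed('PulseCount', 1)  # simulate one pulse
# print(sensor.get_time_since_last_movement())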
|
[
"majorpeter29@gmail.com"
] |
majorpeter29@gmail.com
|
97fb86a6fc77d10f0b1704b970c7eb502694ccf4
|
693d005c1d1b1dc39d970bb9683282a5eede389f
|
/fourth_project/manage.py
|
9e85fa14b8338c2a7e0f3e9e88dc04cd538a69e3
|
[] |
no_license
|
FranklinMonro/UDEMY---Full-Stack-Django
|
db3d3646e973dec7d74dbb1b50c54d68c7f1a366
|
21ac64652342b9d7e97c34b7d4b8d10247431317
|
refs/heads/master
| 2020-04-19T03:59:26.269493
| 2019-05-14T15:06:41
| 2019-05-14T15:06:41
| 167,949,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fourth_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"noreply@github.com"
] |
noreply@github.com
|
b865443f9f33aa61a37c0e20be96d1d6fc77a124
|
3b062b3dc545e11513da709ef3f525a6d64e37e0
|
/p2.py
|
d7b462a5f6f64fcf947598373efbda37d0f221c3
|
[] |
no_license
|
saraattia412/python_projects
|
75932a8badb77aa06b7fb008594fcacdd6a6b276
|
d2b3f1ec4f7e47a6bc8dd00b5bf8c7d01f5f8ec2
|
refs/heads/master
| 2023-08-28T04:01:02.293203
| 2021-09-25T18:30:10
| 2021-09-25T18:30:10
| 410,351,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# sum and average of any count of numbers
print('welcome to sum_average_game ^-^')
count = int(input('how many numbers: '))
current_count = 0
total = 0
while current_count < count:
    number = float(input('enter number: '))
    total += number
    current_count += 1
print('sum =', total)
print('average =', total/count)
|
[
"saraattia412@gmail.com"
] |
saraattia412@gmail.com
|
3b9fd7f7cb708040b5bbad3d59b9b3d09d01e4bd
|
e960a8d11fd8bf86742a82d879ec765e0c801bf6
|
/GeneratePredictionT.py
|
84b77119a8dc9062b6a76af69cea3b825ad2a7a3
|
[] |
no_license
|
limcrong/trafficmanagement
|
91801a2bce86135325f031fff51ae575255e1118
|
c76303020300df265a0c0a05dd1d0c7e679d3a31
|
refs/heads/master
| 2020-06-05T01:30:53.748716
| 2019-06-19T02:40:57
| 2019-06-19T02:40:57
| 192,266,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,638
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import xgboost as xgb
from xgboost import XGBClassifier
from xgboost import XGBRegressor
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
from sklearn.metrics import mean_squared_error
from math import sqrt
import pickle
import ConvertScript
# In[4]:
# load xgb models
model1 = pickle.load(open("model/final1.model.dat", "rb"))
model2 = pickle.load(open("model/final2.model.dat", "rb"))
model3 = pickle.load(open("model/final3.model.dat", "rb"))
model4 = pickle.load(open("model/final4.model.dat", "rb"))
model5 = pickle.load(open("model/final5.model.dat", "rb"))
model6 = pickle.load(open("model/final6.model.dat", "rb"))
#load scaler
scaler = joblib.load('scaler.pkl')
# In[5]:
def increaseT(d,hr,m):
    # advance one 15-minute slot; roll over minutes at :45 and hours at 23
    if (hr > 22.5) and (m > 35):
        return d+1, 0, 0
    if m > 35:
        return d, hr+1, 0
    return d, hr, m+15
def getLastT(df):
lastday = df.iloc[-1,:]['day']
lasthr = df.iloc[-1,:]['hour']
lastmin = df.iloc[-1,:]['minute']
print("Last time stamp is: {} day {} hour {} min".format(lastday,lasthr,lastmin))
return (lastday,lasthr,lastmin)
def findAndReturnNextT(df):
d,hr,m = getLastT(df)
return increaseT(d,hr,m)
def applyScaling(dfx):
dff = dfx.copy(deep=True)
dff.drop('geohash6',axis=1,inplace=True)
dff = dff.astype(np.float32)
dff = dff.fillna(0)
scaledx = scaler.transform(dff)
print(scaledx.shape)
return scaledx
col2 = ['day', 'long', 'lat', 'min', 'max', 'zone',
'dist_to_high_demand5', 'dist_to_7', 'hour', 'minute', 'demand_s',
'mean', 'ma7', 'ma14', 'ma21', 'ma50', 'ma100', 'std', 'zoneAverage',
'geoEma7', 'geoEma14', 'zoneEma14', 'dayOfWeek', 'peak', 'totalDist',
'sin_hour', 'cos_hour', 'demand_s_2', 'demand_s_3', 'demand_s_4',
'demand_s_5', 'demand_s_6', 'demand_s_7', 'geoEma7_2', 'x', 'y', 'z',
'geo4ZoneEma7', 'geo5ZoneEma7', 'high_demand_perc', 'geoEma7_var',
'ma100_med', 'demand_last_week', 'demand']
# In[6]:
def generatePred(df):
#Create next timestep T
dfnextT = pd.DataFrame()
static = pd.read_hdf('staticValues.h5')
d,hr,m = findAndReturnNextT(df)
print("Next time stamp is: {} day {} hour {} min".format(d,hr,m))
dfnextT['geohash6'] = static['geohash6']
dfnextT['day'] = d
dfnextT['hour'] = hr
dfnextT['minute'] = m
dfn = pd.concat([df,dfnextT])
dfn= dfn[df.columns]
print("Created next timestep..")
#Generate Features
print("Running feature generation script..")
dfcon = ConvertScript.convertDf(dfn)
lastday,lasthr,lastmin = getLastT(dfcon)
dfcon = dfcon.loc[(dfcon.day == lastday)&(dfcon.hour == lasthr)&(dfcon.minute == lastmin)]
print("Generated features..")
#Scale features
scaled = applyScaling(dfcon)
x_test = scaled[:, :-1]
print("Scaled features..")
# Predict demand
y_pred = (model1.predict(x_test) + model2.predict(x_test)+model3.predict(x_test)+
model4.predict(x_test) + model5.predict(x_test) + model6.predict(x_test))/6
print("Predicted demand..")
print("Reconstructed original..")
#Construct original
withPred = np.concatenate([x_test,y_pred.reshape(y_pred.shape[0], 1)],axis=1)
newDf = pd.DataFrame(scaler.inverse_transform(withPred))
newDf.columns = col2
df_static = static[['geohash6','lat','long']]
df_merge = pd.merge(newDf,df_static, how='left', left_on=['lat','long'],right_on = ['lat','long'])
df_merge = df_merge[df.columns]
df_merge.head()
return df_merge
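# Hedged usage sketch (assumes a raw demand CSV with the columns this pipeline
# expects, plus staticValues.h5 and the pickled models on disk):
# df_raw = pd.read_csv('demand_history.csv')
# df_next = generatePred(df_raw)
# print(df_next.head())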
|
[
"rongronggg@gmail.com"
] |
rongronggg@gmail.com
|
0605bbf65a547de08887585ef7e0fee02b015ac9
|
0ceabf1234cc921e1cd81c4290168b2f770d1aa1
|
/home/migrations/0004_auto_20200521_1808.py
|
688eb628541e321563fd3ab8a77d25d3bc2a9478
|
[] |
no_license
|
deekhari00716/Doctor-s-webstite
|
f4cdf915fbda676a17efd8db94712dbe0456d0b4
|
78f5e671277f041d21f362f267d5b9c893caf50c
|
refs/heads/master
| 2022-08-18T16:31:28.691028
| 2020-05-23T13:02:47
| 2020-05-23T13:02:47
| 266,334,136
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 3.0.3 on 2020-05-21 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_project_date'),
]
operations = [
migrations.AlterField(
model_name='project',
name='image',
            field=models.ImageField(height_field=330, upload_to='home/images', width_field=185),  # height_field/width_field expect names of model fields, not pixel sizes
),
]
|
[
"deekhsri00716@gmail.com"
] |
deekhsri00716@gmail.com
|
2b3dde4546e2cdd37640fda3bce835cc0e437bce
|
aa39ef39ffe34dcf90c5b7246b118aa17fb382ff
|
/gcaptcha/rest/views.py
|
a6fca4bf34e9df24e25a709f5f34b54ab85d7e58
|
[] |
no_license
|
toantt28/django-bypass-gcaptcha
|
635dcd3ddfbd5fc7b0d0b4d696e4541550970bf2
|
78ae7ad7af1fd866995f0c3988b70e359f0953b2
|
refs/heads/master
| 2022-12-28T11:14:45.468781
| 2020-10-14T07:12:38
| 2020-10-14T07:12:38
| 303,744,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,373
|
py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import json
#system libraries
import os
import random
import time
#selenium libraries
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
#recaptcha libraries
import speech_recognition as sr
import urllib
import pydub
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
def delay():
print("[INFO] delay")
time.sleep(random.randint(3, 5))
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # NOTE: a working ModelViewSet also needs a serializer_class;
    # none is defined in this module.
    queryset = User.objects.all().order_by('-date_joined')
class Test(APIView):
def get(self, request, *args, **kwargs):
# create chrome driver
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
delay()
driver.get(
"https://www.google.com/search?q=hello&oq=hello&aqs=chrome..69i57j69i59j69i60.821j0j1&sourceid=chrome&ie=UTF-8"
)
# go to website
driver.get("https://www.gstatic.com/cloud-site-ux/vision/vision.min.html")
delay()
shadow_section = driver.execute_script('''return document.querySelector("vs-app").shadowRoot''')
element = shadow_section.find_element_by_tag_name('input')
driver.execute_script("document.getElementById('input').removeAttribute('hidden')")
randNum = random.randint(1, 100)
randImg = '{}.png'.format(randNum)
element.send_keys(
os.path.join(
os.getcwd(),
'gcaptcha',
'rest',
'images',
randImg
)
)
delay()
# switch to recaptcha frame
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[0])
delay()
# click on checkbox to activate recaptcha
driver.find_element_by_class_name("recaptcha-checkbox-border").click()
# switch to recaptcha audio control frame
driver.switch_to.default_content()
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[len(frames) - 1])
delay()
# click on audio challenge
driver.find_element_by_id("recaptcha-audio-button").click()
# switch to recaptcha audio challenge frame
driver.switch_to.default_content()
frames = driver.find_elements_by_tag_name("iframe")
driver.switch_to.frame(frames[-1])
delay()
flag = True
while flag:
try:
# click on the play button
button_div = driver.find_element_by_class_name('rc-audiochallenge-play-button')
button = button_div.find_element_by_class_name('rc-button-default')
button.click()
delay()
# get the mp3 audio file
src = driver.find_element_by_id("audio-source").get_attribute("src")
print("[INFO] Audio src: %s" % src)
# download the mp3 audio file from the source
urllib.request.urlretrieve(src, os.getcwd() + "\\sample.mp3")
sound = pydub.AudioSegment.from_mp3(os.getcwd() + "\\sample.mp3")
sound.export(os.getcwd() + "\\sample.wav", format="wav")
sample_audio = sr.AudioFile(os.getcwd() + "\\sample.wav")
r = sr.Recognizer()
with sample_audio as source:
audio = r.record(source)
# translate audio to text with google voice recognition
key = r.recognize_google(audio)
print("[INFO] Recaptcha Passcode: %s" % key)
time.sleep(1)
# key in results and submit
driver.find_element_by_id("audio-response").send_keys(key.lower())
time.sleep(2)
driver.find_element_by_id("audio-response").send_keys(Keys.ENTER)
delay()
except Exception as e:
# pass
print('[Exception]', e)
driver.save_screenshot(os.path.join(
os.getcwd(),
'gcaptcha',
'rest',
'screenshots',
'error.png'
))
flag = False
driver.switch_to.default_content()
delay()
# HERE IS success image
token = "Google mark as spam. Please try again later."
for request in driver.requests:
if 'https://cxl-services.appspot.com/proxy' in request.url:
key = 'token='
queryString = request.querystring
index = queryString.index(key)
token = queryString[index + len(key): len(queryString)]
print(token)
driver.close()
return Response({
"token": token
})
|
[
"toan.tt@neo-lab.vn"
] |
toan.tt@neo-lab.vn
|
f32e1d9a57e44c6badf99051fc94750d8a7f23e3
|
0c4d481936c5cde601e3b9d6c7e83b8e70ba9fae
|
/.history/train_search_param_20201009092152.py
|
12cf377b195daa81cd3800b44ababebd327c74c8
|
[] |
no_license
|
CGCL-codes/PDAS
|
7d98afaf60abe7ce3075e80125d98e51d92f42f0
|
578011f738f2418fe5be2dd38eb819566e7fc3bb
|
refs/heads/master
| 2023-05-08T03:26:56.450175
| 2021-05-27T02:50:38
| 2021-05-27T02:50:38
| 371,215,287
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,688
|
py
|
import os
import sys
import time
import glob
import math
import random
import logging
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
#from resnet_change2 import *
from resnet_new_change import *
from prune_params import ResNet20_Channel_Prune
from net_measure import measure_model, measure_param
from utils import AverageMeter, accuracy, count_parameters_in_MB, save_checkpoint
from architect import Architect
parser = argparse.ArgumentParser(description='Cifar10 Train Search')
parser.add_argument('--data', type=str, default='/home/cyh/workspace/cifar10',
help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1,
help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001,
help='min learning rate(0.0)')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=int, default=50, help='report frequency')
#parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=120, help='num of training epochs')
parser.add_argument('--save', type=str, default='./checkpoint/',
help='folder to save checkpoints and logs')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5,
help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=6e-4,
help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3,
help='weight decay for arch encoding')
parser.add_argument('--change', action='store_true', default=False,
help='change prune ratio during searching')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
args = parser.parse_args()
log = open(os.path.join(args.save, 'channel-search-resnet20-0915.txt'),'w')
prune_index = ResNet20_Channel_Prune.index
prune_ratio = ResNet20_Channel_Prune.prune_ratio
#max2_ratio = torch.zeros(len(prune_index), 3)
min_ratio = torch.zeros(len(prune_index), 3)
min_ratio[:, 2] = -1
channel16 = list(range(2, 17, 2))
channel32 = list(range(2, 33, 2))
channel64 = list(range(2, 65, 2))
def main():
if not torch.cuda.is_available():
print('no gpu device available!!!')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
cudnn.enabled = True
print_log('=> parameters: {}'.format(args), log)
best_acc = 0
best_epoch = 0
criterion = nn.CrossEntropyLoss().cuda()
count_ops, count_params, conv_list = measure_model(depth=20)
print('=> count_ops: {}, count_params: {}'.format(count_ops, count_params))
model = resnet(depth=20).cuda()
#model = torch.nn.DataParallel(model).cuda()
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
momentum=args.momentum, weight_decay=args.weight_decay)
    # needed by the warm-up branch in train(); DataParallel is disabled above, so no .module
    arch_optimizer = torch.optim.Adam(model.arch_parameters(), lr=args.arch_learning_rate,
                                      betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),(0.2023, 0.1994, 0.2010))
])
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
train_data = datasets.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=2)
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
architect = Architect(model, criterion, count_params, args)
print_log('==> arch parameters: {}'.format(model.arch_parameters()), log)
print_log('==> arch parameters ratio: {}'.format(F.softmax(model.arch_params, dim=-1)), log)
for epoch in range(args.epochs):
#scheduler.step()
lr = scheduler.get_last_lr()[0]
print_log('=> epoch {}, lr {}'.format(epoch, lr), log)
if args.change and epoch >= 15 and epoch <= args.epochs-5:
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(4, 1, True, True)
for j in range(len(prune_index)):
new_index = prune_ratio[j][index[j][3].item()]
old_index = min_ratio[j][1].item()
'''new_ratio = prune_ratio[j][index[j][4].item()]
old_ratio = min_ratio[j][1].item()'''
count = min_ratio[j][0].item()
if abs(new_index-old_index) < 1e-6:
if count >= 19:
max_ratio = prune_ratio[j][index[j][0].item()]
if j < 7:
a = random.randint(max(max_ratio-3, 0), min(max_ratio+3, len(channel16)-1))
elif j < 13:
a = random.randint(max(max_ratio-5, 0), min(max_ratio+5, len(channel32)-1))
else:
a = random.randint(max(max_ratio-11, 0), min(max_ratio+11, len(channel64)-1))
if abs(new_index - min_ratio[j][2].item()) < 1e-6:
prune_ratio[j][index[j][2].item()] = a
else:
prune_ratio[j][index[j][3].item()] = a
min_ratio[j][0] = 0
ratios = 1e-3 * torch.randn(1, 4)
with torch.no_grad():
for k in range(4):
                                model.arch_params[j][k] = ratios[0][k].item()
else:
min_ratio[j][0] += 1
                else:
                    min_ratio[j][0] = 0
                    min_ratio[j][1] = new_index
        train_acc, train_loss = train(train_queue, valid_queue, model, architect, criterion, optimizer, arch_optimizer, lr, epoch, count_params, count_ops, conv_list)
scheduler.step()
print_log('=> train acc: {}'.format(train_acc), log)
print_log('=> min ratio: {}'.format(min_ratio), log)
print_log('=> arch parameters ratio: {}'.format(F.softmax(model.arch_params, dim=-1)),log)
print_log('=> prune ratio: {}'.format(prune_ratio), log)
if args.epochs - epoch <= 1:
valid_acc, valid_loss = infer(valid_queue, model, criterion)
print_log('valid_acc: {}'.format(valid_acc), log)
arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
max_cfg = []
#mix_cfg = []
for j in range(len(prune_index)):
if j < 7:
channel = channel16[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
elif j < 13:
channel = channel32[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
else:
channel = channel64[prune_ratio[j][index[j][0].item()]]
max_cfg.append(channel)
'''channel = max(int(round(cfg[j] * (1 - prune_ratio[j][index[j][0].item()]) / 2) * 2), 2)
max_cfg.append(channel)
mix_prune_ratio = 0
for k in range(5):
mix_prune_ratio += prune_ratio[j][k] * arch_weights[j][k].item()
#mix_channel += max(int(round(cfg[j] * (1 - prune_ratio[j][k]) * arch_weights[j][k].item() / 2) * 2), 2)
mix_channel = max(int(round(cfg[j] * (1 - mix_prune_ratio) / 2) * 2), 2)
mix_cfg.append(mix_channel)'''
print_log('==> max cfg: {}'.format(max_cfg), log)
#print_log('==> mix cfg: {}'.format(mix_cfg), log)
print_log("==> arch parameters: {}".format(model.arch_parameters()), log)
#print_log('==> best acc: {}, best epoch: {}'.format(best_acc, best_epoch), log)
def train(train_queue, valid_queue, model, architect, criterion, optimizer, arch_optimizer, lr, epoch, count_params, count_ops, conv_list):
losses = AverageMeter()
basic_losses = AverageMeter()
param_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
for index, (inputs, targets) in enumerate(train_queue):
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
try:
input_search, target_search = next(valid_queue_iter)
except:
valid_queue_iter = iter(valid_queue)
input_search, target_search = next(valid_queue_iter)
#input_search, target_search = next(iter(valid_queue))
input_search, target_search = input_search.cuda(), target_search.cuda(non_blocking=True)
if epoch >= 15:
architect.step(inputs, targets, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
optimizer.zero_grad()
logits = model(inputs)
loss = criterion(logits, targets)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
#basic_losses.update(basic_loss.item(), inputs.size(0))
#param_losses.update(param_loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
else:
arch_optimizer.zero_grad()
output_search = model(input_search)
arch_loss = criterion(output_search, target_search)
arch_loss.backward()
arch_optimizer.step()
optimizer.zero_grad()
logits = model(inputs)
basic_loss = criterion(logits, targets)
total_params = count_model_params(model)
if total_params > (1 + 0.05) * (0.5 * count_params):
param_loss = 2 * math.log(total_params / (0.5 * count_params))
elif total_params < (1 - 0.05) * (0.5 * count_params):
param_loss = -2 * math.log(total_params / (0.5 * count_params))
else:
param_loss = 0
#param_loss = 0.11 * (math.log(total_params) ** 0.9)
#param_loss = 0.086 * (math.log(total_params))
#flops = count_model_flops(model, count_ops, conv_list)
#print('=> flops: {}'.format(flops))
#flop_loss = 0.083*(math.log(flops)**0.9)
#flop_loss = 0.084 * (math.log(flops) ** 0.9)
#flop_loss = 0.06 * math.log(flops)
#print('=> flop loss: {}'.format(flop_loss))
#loss = basic_loss * param_loss
loss = basic_loss + param_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
#basic_losses.update(basic_loss.item(), inputs.size(0))
#param_losses.update(param_loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
if index % args.report_freq == 0:
print_log('=> time: {}, train index: {}, loss: {}, top1: {}, top5: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), index, losses.avg, top1.avg, top5.avg), log)
return top1.avg, losses.avg
def infer(valid_queue, model, criterion):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
for index, (inputs, targets) in enumerate(valid_queue):
inputs, targets = inputs.cuda(), targets.cuda()
logits = model(inputs)
loss = criterion(logits, targets)
prec1, prec5 = accuracy(logits.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
if index % args.report_freq == 0:
print_log('=> time: {}, valid index: {}, loss: {}, top1: {}, top5: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), index, losses.avg, top1.avg, top5.avg), log)
return top1.avg, losses.avg
def count_model_params(model):
    arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
cfg = []
    for k, m in enumerate(model.modules()):
if k in prune_index:
index_p = prune_index.index(k)
if index_p < 7:
channel = channel16[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
elif index_p < 13:
channel = channel32[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
else:
channel = channel64[prune_ratio[index_p][index[index_p][0].item()]]
cfg.append(channel)
'''pr = prune_ratio[index_p][index[index_p][0].item()]
oC = max(int(round((m.weight.data.shape[0] * (1 - pr)) / 2) * 2), 2)
cfg.append(oC)'''
total = measure_param(depth=20, cfg=cfg)
return total
'''total = sum(p.numel() for p in model.parameters())
arch_weights = F.softmax(model.module.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
for k, m in enumerate(model.module.modules()):
if k in prune_index:
index_p = prune_index.index(k)
if index_p == 0 :
pr = prune_ratio[index_p][index[index_p][0].item()]
oC = m.weight.data.shape[0] - int(round((m.weight.data.shape[0] * (1 - pr)) / 2) * 2)
total -= oC * m.weight.data.shape[1] * m.weight.data.shape[2] * m.weight.data.shape[3]
#total -= int(m.weight.data.numel() * (1 - prune_ratio[index_p][index[index_p][0].item()]))
else:
pr0 = prune_ratio[index_p-1][index[index_p-1][0].item()]
pr1 = prune_ratio[index_p][index[index_p][0].item()]
iC = m.weight.data.shape[1] - int(round((m.weight.data.shape[1] * (1 - pr0)) / 2) * 2)
oC = m.weight.data.shape[0] - int(round((m.weight.data.shape[0] * (1 - pr1)) / 2) * 2)
total -= oC * iC * m.weight.data.shape[2] * m.weight.data.shape[3]
#total -= int(m.weight.data.numel() * (1 - prune_ratio[index_p][index[index_p][0].item()]))
return total'''
def count_model_flops(model, total_flops, conv_list):
    arch_weights = F.softmax(model.arch_params, dim=-1)
_, index = arch_weights.topk(1, 1, True, True)
total = total_flops
#print(total)
#print('=> prune index: {}'.format(prune_index))
    for k, m in enumerate(model.modules()):
if k in prune_index:
if k == 1:
pr = prune_ratio[0][index[0][0].item()]
total -= int(conv_list[0] // 2 * pr)
#print('=> total: {}'.format(total))
elif k == 6:
pr0 = 1 - prune_ratio[0][index[0][0].item()]
pr1 = 1 - prune_ratio[1][index[1][0].item()]
total -= int(conv_list[1] // 2 * (1 - pr0 * pr1))
#print('=> total: {}'.format(total))
else:
index_p = prune_index.index(k)
pr = prune_ratio[index_p][index[index_p][0].item()]
total -= int(conv_list[2*index_p-1] // 2 * pr)
#print('=> total: {}'.format(total))
elif k-3 in prune_index and k-3 != 1:
index_p = prune_index.index(k-3)
pr = prune_ratio[index_p][index[index_p][0].item()]
total -= int(conv_list[2*index_p] // 2 * pr)
#print('=> total: {}'.format(total))
return total
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
if __name__ == '__main__':
main()
|
[
"1300302496@qq.com"
] |
1300302496@qq.com
|
6559913c8662fc497e27f7cebfb1cbc71854dcb1
|
4d83dbfc94ba1b0f098f990a1790df36fda66628
|
/Practice/pytorch_tut/pytorch_BatchTraining.py
|
bef2d35ff85610df1e529dc104d6cbd9b33a09c6
|
[] |
no_license
|
CTM-development/ML4B
|
31dfff27796ebfbeefe40f5eb687eb94d5c88acf
|
38a9de7a63bfd4fe77d4bb6af3e228f6dcfbe7f7
|
refs/heads/main
| 2023-05-08T09:42:17.604282
| 2021-06-01T18:12:01
| 2021-06-01T18:12:01
| 371,355,130
| 0
| 0
| null | 2021-05-31T17:45:58
| 2021-05-27T11:49:51
| null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
'''
epoch = 1 forward and backward pass of ALL training samples
batch_size = number of training samples in one forward& backward pass
number of iterations = number of passes, each pass using [batch_size] number of samples
e.g. 100 [samples, batch_size=20 --> 100/20 = 5 iterations for 1 epoch
'''
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
import numpy as np
import math
class WineDataset(Dataset):
def __init__(self):
# data loading
xy = np.loadtxt('../../data/wine.csv', delimiter=",", dtype=np.float32, skiprows=1)
self.x = torch.from_numpy(xy[:, 1:])
self.y = torch.from_numpy(xy[:, [0]])
self.n_samples = xy.shape[0]
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
# len(dataset)
return self.n_samples
dataset = WineDataset()
dataloader = DataLoader(dataset=dataset, batch_size=4, shuffle=True, num_workers=2)
# dataiter = iter(dataloader)
# data = next(dataiter)
# features, labels = data
# print(features, labels)
# training loop
num_epochs = 2
total_samples = len(dataset)
n_iterations = math.ceil(total_samples/4)
print(total_samples, n_iterations)
for epoch in range(num_epochs):
for i, (inputs, labels) in enumerate(dataloader):
#forward backward, update
if (i+1) % 5 == 0:
print(f'epoch {epoch+1}/{num_epochs}, step {i+1}/{n_iterations}, inputs {inputs.shape}')
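# A hedged sketch of the "forward backward, update" step above, assuming a
# hypothetical linear model and optimizer (neither is part of this tutorial file):
#
#     model = torch.nn.Linear(13, 1)                  # wine.csv has 13 feature columns
#     criterion = torch.nn.MSELoss()
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
#
#     outputs = model(inputs)                         # forward
#     loss = criterion(outputs, labels)
#     optimizer.zero_grad()
#     loss.backward()                                 # backward
#     optimizer.step()                                # update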
|
[
"christopher@mminderlein.de"
] |
christopher@mminderlein.de
|
41b90ed3b74875084c2c3186f9d9abf696281e63
|
85eb042f8370100b3bccbfc247746df3ddbbefa3
|
/data_control.py
|
6fe1a1e255788a0b67a010832e599db14f9b8962
|
[
"MIT"
] |
permissive
|
mic100/seloger.com
|
eda05f563bd6a8576f540bf4e393cae3ccba6ad1
|
c713c09783e74846b1ac73ea9cc6cfcddf0cc331
|
refs/heads/master
| 2021-01-10T20:47:22.297843
| 2015-10-30T21:01:05
| 2015-10-30T21:01:05
| 27,082,632
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,165
|
py
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------#
#!!!! #
# #
#Please note: this code is not up to date. It may no longer work due to #
#changes on the website from which the data is extracted. #
# #
#!!!! #
#-----------------------------------------------------------------------------#
from tool_kit import path_log, create_path, dispatch, curl, oldnew_ipadress
from bs4 import BeautifulSoup
import time
import pandas as pd
#-----------------------------------------------------------------------------#
# get urls from real estate announces #
#-----------------------------------------------------------------------------#
def function_2(path_log, file_name="backup_file1.txt") :
create_path()
url = ["http://www.seloger.com/immobilier/tout/immo-paris-%seme-75/" % (str(i)) for i in range(1,21)]
url_liste = dispatch(path_log + "dispatch1.txt", url)
backup_file2 = open(path_log + "backup_file2.txt", "w")
for url in url_liste :
pool = curl(url)
# oldnew_ipadress(path_log)
for c in pool :
data = c.body.getvalue()
soup1 = BeautifulSoup(data)
s1 = soup1.findAll('div', {'class' : 'content_infos othergroupsite'})
s1 = s1[0].findAll('li')
print "len(s1) : ", len(s1)
print "\n"
som_nbr_annonce = 0
som_list = []
for i in range(len(s1)) :
url = s1[i].findAll('a')[0]['href']
len_url = len(url.split("/"))
len_departement = len(url.split("/")[len_url-4].split("-"))
departement = url.split("/")[len_url-4].split("-")[len_departement-1]
type_bien1 = url.split("/")[len_url-3].replace("bien-", "")
nbr_annonce = s1[i].findAll('b')[0].string
                if nbr_annonce is None :
                    nbr_annonce = 0
som_nbr_annonce = float(som_nbr_annonce) + float(nbr_annonce)
som_list.append(float(som_nbr_annonce))
nbr_piece = s1[i].findAll('a')[0]['title'].replace("Immobilier ", "").replace(type_bien1, "").strip().split(" ")[2]
if nbr_piece == "studio" :
nbr_piece = '1'
else :
pass
type_transaction = s1[i].findAll('a')[0]['title'].replace("Immobilier ", "").replace(type_bien1, "").strip().split(" ")[0]
print i, str(som_nbr_annonce), departement, str(nbr_annonce), type_transaction, type_bien1, nbr_piece, url
backup_file2.write(departement + ";" + str(nbr_annonce)+ ";" + type_transaction + ";" + type_bien1 + ";" + nbr_piece + ";" + url + ";")
backup_file2.write("\n")
backup_file2.close()
print "\n"
#-----------------------------------------------------------------------------#
# Get number of page and urls to get through #
#-----------------------------------------------------------------------------#
def function_3(path_log) :
backup_file = open(path_log + "backup_file2.txt", "r").readlines()
print "len(backup_file) : ", len(backup_file)
print "\n"
urls_parcours = open(path_log + "urls_parcours.txt", "w")
urls_list = []
for i in range(len(backup_file)) :
url = backup_file[i].split(";")[5]
nbr = float(backup_file[i].split(";")[1])
nbr_page_init = nbr/10
partie_entiere = int(str(nbr_page_init).split(".")[0])
apres_dec = int(str(nbr_page_init).split(".")[1])
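        # Worked example (each result page is assumed to hold 10 announcements):
        # nbr = 25.0 -> nbr_page_init = 2.5 -> partie_entiere = 2, apres_dec = 5 > 0 -> nbr_page = 3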
if apres_dec == 0 :
nbr_page = partie_entiere
elif apres_dec > 0 :
nbr_page = partie_entiere + 1
        else :
            print "Problem with nbr_page"
            print "nbr : ", nbr
            print "url : ", url
        print nbr, nbr_page_init, "gives us :", nbr_page, "page(s)", "\n"
        if nbr_page == 1 or nbr_page == 0 :
            if nbr_page == 0 :
                print "Warning: handling the case of '0' page(s) found : ", "\n"
else :
b = url
urls_list.append(b)
urls_parcours.write(b + ";" + "\n")
print b
elif nbr_page == 2 :
b = url
c = url + "?ANNONCEpg=2"
urls_list.append(b)
urls_list.append(c)
urls_parcours.write(b + ";" + "\n")
urls_parcours.write(c + ";" + "\n")
print c
print b
elif nbr_page > 2 :
for j in range(2, nbr_page) :
b = url + "?ANNONCEpg=%s" %(str(j))
urls_list.append(b)
urls_parcours.write(b + ";" + "\n")
print b
else :
print "Problem nbr_page re construction"
print "len(urls_list) : ", len(urls_list)
#-----------------------------------------------------------------------------#
# get urls from real estate announces for each link #
#-----------------------------------------------------------------------------#
def function_4(path_log, file_name="urls_parcours.txt") :
# d = str(time.strftime('%d-%m-%y_%Hh%Mmin%Ssec',time.localtime()))
d2 = str(time.strftime('%d/%m/%y %H:%M:%S',time.localtime()))
d3 = str(time.strftime('%d-%m-%y',time.localtime()))
backup_file1 = open(path_log + file_name, "r").readlines()
url = []
for i in range(len(backup_file1)) :
a = backup_file1[i].split(";")[0].strip()
url.append(a)
url_liste = dispatch(path_log + "dispatch1.txt", url)
url_done = open(path_log + "url_done.txt", "w")
path_logout = "log/"
compteur = 0
for url in url_liste :
compteur += 1
print compteur, "/", len(url_liste)
for i in range(len(url)) :
url_done.write(url[i] + "\n")
pool = curl(url)
# oldnew_ipadress(path_log)
compteur1 = 0
for c in pool :
compteur1 += 1
print compteur1, "/", len(pool)
data = c.body.getvalue()
soup1 = BeautifulSoup(data)
d = str(time.strftime('%d-%m-%y_%Hh%Mmin%Ssec',time.localtime()))
l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17 = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
dico = {'TYPE_TRANSACTION' : l0, 'NOMBRE_PHOTOS' : l1 ,
'NOMBRE_PIECE' : l2, 'NOMBRE_M2' : l3, 'ETAGE' : l4,
'BALCON' : l5, 'CUISINE' : l6, 'AUTRE' : l7,
'CHAMBRE(S)' : l8, 'MEUBLE' : l9, 'TYPE_CHAUFFAGE' : l10,
'LOCALISATION' : l11, 'PROXIMITE' : l12, 'PRIX' : l13,
'CHARGE' : l14, 'NOM_AGENCE' : l15, 'URL' : l16,
'EXTRACTION_DATE' : l17}
#-----------------------------------------------------------------#
#HERE LOOKING FOR WORDS LOCATIONS / VENTES / INVESTISSEMENT / VIAGER :
s0 = soup1.findAll('div', {'class' : 'main'})
for i in range(len(s0)) :
if s0[i].findAll('span', {'class' : 'title_recherche'}) == [] :
transaction_type = "NA"
else :
transaction_type = s0[i].findAll('span', {'class' : 'title_recherche'})
transaction_type = transaction_type[0].text
if "locations" in transaction_type :
transaction_type = "LOCATION"
elif "ventes" in transaction_type :
transaction_type = "ACHAT"
elif "investissement" in transaction_type :
transaction_type = "INVESTISSEMENT"
elif "viager" in transaction_type :
transaction_type = "VIAGER"
else :
pass
#-----------------------------------------------------------------#
            #Look for the photo count in the HTML page, then append the TRANSACTION_TYPE value
s1 = soup1.findAll('div', {'class' : 'annonce__visuel__pictogrammes'})
for i in range(len(s1)) :
if s1[i].findAll('a', {'class' : 'annonce__visuel__picto picto__photo'}) == [] :
nbr_photo = 0
else :
nbr_photo = s1[i].findAll('a', {'class' : 'annonce__visuel__picto picto__photo'})
nbr_photo = nbr_photo[0]['title']
nbr_photo = nbr_photo.replace(" photos", "")
nbr_photo = int(nbr_photo)
l1.append(nbr_photo)
l0.append(transaction_type)
#-----------------------------------------------------------------#
s2 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s2)) :
details1 = s2[i].findAll('span', {'class' : 'annone__detail__param'})[0].text
details1 = details1.replace("\xe8", "e")
details1 = details1.replace("m\xb2", "m2")
details1 = details1.replace("\xe9", "e")
details1 = details1.split(",")
nbr_piece = "NA"
nbr_m2 = "NA"
etage = "NA"
balcon = "NA"
cuisine = "NA"
autre = "NA"
chambre = "NA"
meuble = "NA"
chauffage = "NA"
for j in details1 :
if "Piece" in j :
if nbr_piece == "NA" :
nbr_piece = j.replace(" Piece", "").replace("s", "").strip()
else :
pass
if "m2" in j :
if nbr_m2 == "NA" :
nbr_m2 = j.replace(" m2", "").strip()
else :
pass
if "Etage" in j :
if etage == "NA" :
etage = j.replace(" Etage", "").strip()
else :
pass
if "Balcon" in j :
if balcon == "NA" :
balcon = j.replace(" Balcon", "").strip()
balcon = j.replace("s", "").strip()
else :
pass
if "cuisine" in j :
if cuisine == "NA" :
cuisine = j.replace(" cuisine", "").strip()
else :
pass
if "Chambre" in j :
if chambre == "NA" :
chambre = j.replace(" Chambre", "")
chambre = chambre.replace("s", "").strip()
else :
pass
if "Meuble" in j :
if meuble == "NA" :
meuble = "YES"
else :
pass
if "chauffage" in j :
if chauffage == "NA" :
chauffage = j.replace("chauffage ", "")
chauffage = j.replace(" radiateur", "")
else :
pass
if "Piece" not in j and "m2" not in j and "Etage" not in j \
and "Balcon" not in j and "cuisine" not in j and "Chambre" not in j \
and "Meuble" not in j and "chauffage" not in j :
autre = j.strip()
else :
pass
l2.append(nbr_piece)
l3.append(nbr_m2)
l4.append(etage)
l5.append(balcon)
l6.append(cuisine)
l7.append(autre)
l8.append(chambre)
l9.append(meuble)
l10.append(chauffage)
#-----------------------------------------------------------------#
#LOCATION :
s3 = soup1.findAll('span', {'class' : 'annone__detail__localisation'})
for i in range(len(s3)) :
details2 = s3[i].findAll('span', {'class' : 'annone__detail__param'})[0].text
details2 = details2.replace(" (Paris)", "")
details2 = details2.replace(" ()", "")
l11.append(details2)
#-----------------------------------------------------------------#
#NEAR LOCATION :
s4 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s4)) :
details3 = s4[i].findAll('span', {'class' : 'annone__detail__proximite'})
if details3 != [] :
details3 = details3[0].text
details3 = details3.replace("É", "E")
details3 = details3.replace("é", "e")
details3 = details3.replace("ê", "e")
details3 = details3.replace("ë", "e")
details3 = details3.replace("â", "a")
details3 = details3.replace("ô", "o")
details3 = details3.replace(""", "")
details3 = details3.replace("Î", "")
details3 = details3.replace("ç", "c")
details3 = details3.replace("M°", "Metro ")
details3 = details3.replace("Metro ", "")
details3 = details3.replace("Metro", "")
details3 = details3.replace("'", "'")
details3 = details3.replace("&", "et")
details3 = details3.replace("è", "e")
details3 = details3.replace("/", ",")
details3 = details3.replace(": ", "")
details3 = details3.replace("metro", "")
details3 = details3.replace("à", "a")
details3 = details3.replace("î", "i")
details3 = details3.replace("ï", "i")
details3 = details3.replace("Centre ville,", "")
details3 = details3.replace("ecole,", "")
details3 = details3.replace("commerces,", "")
details3 = details3.replace("bus,", "")
details3 = details3.replace("*", "")
else :
details3 = "NA"
proximite = details3
l12.append(proximite)
#-----------------------------------------------------------------#
#PRICE AND DETAILS OF ADDITIVE PRICE CHARGES :
s5 = soup1.findAll('div', {'class' : 'annonce__agence'})
for i in range(len(s5)) :
details4 = s5[i].findAll('span', {'class' : 'annonce__agence__prix annonce__nologo'})
details5 = s5[i].findAll('span', {'class' : 'annonce__agence__prix '})
if details4 != [] :
details4 = details4[0].text
details4 = details4.replace("\xa0", "")
details4 = details4.replace("\x80", "")
details4 = details4.split(" ")
else :
details4 = 0
if details5 != [] :
details5 = details5[0].text
details5 = details5.replace("\xa0", "")
details5 = details5.replace("\x80", "")
details5 = details5.split(" ")
else :
details5 = 0
                if details4 == 0 :
                    detailsx = details5
                else :
                    # also covers the case where both are non-empty; prefer details4
                    detailsx = details4
try :
l13.append(float(detailsx[0].replace(",", ".").replace("Â", "")))
except :
l13.append(str(detailsx[0]))
if "FAI" in detailsx[1] :
new = detailsx[1].replace("FAI", "")
try :
l14.append(float(new))
except :
l14.append(new)
elif "+" in detailsx[1] :
new = detailsx[1].replace("+", "")
l14.append(new)
else :
l14.append(detailsx[1].strip())
#-----------------------------------------------------------------#
#REAL ESTATE AGENCY NAMES :
s6 = soup1.findAll('div', {'class' : 'annonce__agence'})
for i in range(len(s6)) :
details6 = s6[i].findAll('span', {'class' : 'annone__detail__nom'})
if details6 != [] :
details6 = details6[0].text
else :
details6 = "NA"
l15.append(details6)
#-----------------------------------------------------------------#
#GET THE URL VALUE :
s7 = soup1.findAll('div', {'class' : 'annonce__detail'})
for i in range(len(s7)) :
url_cible = s7[i].findAll('a', {'class' : 'annone__detail__title annonce__link'})
url_cible = url_cible[0]['href']
url_cible = url_cible.split("?")[0]
l16.append(url_cible)
#-----------------------------------#
#DATE :
l17.append(d2)
#-----------------------------------------------------------------#
#WRITE DATA IN FILE :
if dico['CUISINE'] == [] :
pass
else :
try :
df = pd.DataFrame(dico)
df.to_csv(path_logout + 'seloger_%s.txt' %(d3), mode="a", header=False)
print compteur, df
print "\n"
except :
print "ValueError : ", ValueError
print "dico : ", dico
log_dico = open(path_log + "log_dico.txt", "a")
for i in dico :
print "len(dico[i]) : ", str(len(dico[i])), str(i), str(dico[i])
log_dico.write(str(len(dico[i])) + ";" + str(i) + ";" + str(dico[i]))
log_dico.close()
print "\n"
|
[
"mic100@hotmail.fr"
] |
mic100@hotmail.fr
|
a26d87e83ed554ff4bfb8c5fe46b37fc647dc7a7
|
24cf672d6f5d8f43b42a847d0537e2feb38729c9
|
/SSModel/InternalSSClassifiers/BERTModel.py
|
cf423b5df71bfe2f145b1cfe985d4fe5c78849ff
|
[] |
no_license
|
chris414862/LiSSA
|
0ffe7670a432d6ee657c73b13dc9c63f8a32aa02
|
63bb3bfeed462453cda97d88f3f8b30d113d252d
|
refs/heads/main
| 2023-09-05T03:16:22.935872
| 2021-11-10T17:45:27
| 2021-11-10T17:45:27
| 327,984,243
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,413
|
py
|
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
from SSModel.InternalSSVectorizers.BERTVectorizer import BERTVectorizer
from SSModel.ModelInterface import Model
from SSModel.VectroizerInterface import Vectorizer
from transformers import AdamW
import pandas as pd
from utils.AnnUtils import get_df_from_csv
from tqdm import tqdm
import re
from SSModel.InternalSSVectorizers.BoWVectorizer import BoWVectorizer
class BERTModel(Model):
def __init__(self, model_class = None, tokenizer_class=None, pretrained_weights=None, num_man_feats=None
, trainable_bert_layers:tuple=None):
self.internal_model = self.BERTInternal( model_class, pretrained_weights, 768, 3, num_man_feats)
self.vectorizer = BERTVectorizer(tokenizer_class, pretrained_weights)
self.class_labels = None
self.model_type = 'BERT'
def my_filter(x):
mo = re.search(r"encoder\.layer\.(\d+)\.", x[0])
if mo is None:
return True
try:
layer_number = int(mo.group(1))
except ValueError as e:
print("Namespace conflict:", x[0], "\n'encoder.layer' should be reserved for bert layer.")
raise e
if trainable_bert_layers[0] <= layer_number+1 <= trainable_bert_layers[1]:
return True
else:
return False
        if trainable_bert_layers is not None:
            # named_parameters() yields (name, param) pairs; AdamW needs the params themselves
            training_params = [p for n, p in filter(my_filter, self.internal_model.named_parameters())]
        else:
            training_params = list(self.internal_model.parameters())
        self.optimizer = AdamW(training_params)
self.loss_function = nn.CrossEntropyLoss()
def get_class_labels(self) -> list:
return self.class_labels
def get_model_type(self) -> str:
return self.model_type
def get_internal_model(self):
return self.internal_model
def get_weights(self):
return self.internal_model.get_weights()
def train(self, X:pd.Series, y:pd.Series, batch_size=2, epochs =1, man_feats=None):
model = self.get_internal_model()
model.train()
self.class_labels:list = y.unique().tolist()
num_entries = y.shape[0]
for epoch in range(epochs):
X = X.sample(frac=1.0)
y = y[X.index]
y = self.vectorizer.transform_labels(y,labels= self.class_labels)
with tqdm(total=num_entries) as epoch_pbar:
epoch_pbar.set_description(f'Epoch {epoch}')
accum_loss = 0
for idx, i in enumerate(range(0,len(X), batch_size)):
batch_X, batch_y = X[i:i+batch_size], y[i:i+batch_size]
batch_man_feats = man_feats[i:i+batch_size]
batch_X = self.vectorizer.transform_methods(batch_X)
self.optimizer.zero_grad()
predictions : torch.Tensor = model.forward(batch_X, batch_man_feats)
loss = self.loss_function(predictions, batch_y)
loss.backward()
self.optimizer.step()
# Add loss to accumulated loss
accum_loss += loss
# Update progress bar description
avg_loss = accum_loss / (idx + 1)
desc = f'Epoch {epoch} - avg_loss {avg_loss:.4f} - curr_loss {loss:.4f}'
epoch_pbar.set_description(desc)
epoch_pbar.update(batch_size)
def get_vectorizer(self) -> Vectorizer:
raise NotImplementedError()
def predict(self, X):
self.vectorizer.transform_methods(X)
class BERTInternal(nn.Module):
def __init__(self, model_class, pretrained_weights, embed_dimensions, num_classes, num_man_feats):
super(BERTModel.BERTInternal, self).__init__()
self.L1 = model_class.from_pretrained(pretrained_weights)
self.L2 = self.CustomAttentionLayer(embed_dimensions, num_classes)
self.final = nn.Linear(embed_dimensions+num_man_feats, num_classes, bias=False)
self.final_bias = nn.Linear(num_classes, 1, bias=False)
self.softmax = nn.Softmax(dim=1)
def forward(self, encoded_input, man_feats:pd.DataFrame):
input_ids= torch.tensor(encoded_input['input_ids'], dtype=torch.long)
token_type_ids = torch.tensor(encoded_input['token_type_ids'], dtype=torch.long)
attention_mask = torch.tensor(encoded_input['attention_mask'], dtype=torch.long)
# Size of model output ==> (batch_size, seq_len, embed_dimensions)
model_output, _ = self.L1(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
# S1 size ==> (batch_size, num_classes, embed_dimensions)
# Each vector in the class dimension represents the document's feature with respect to that class
S1, word_attention_weights = self.L2.forward(model_output)
# FINAL LAYER DIMENSION TRACKING
            # output = softmax(sum(hadamard(S1, W_c), dim=-1) + b_c) ==>
            # X = hadamard(S1, W_c):
# (batch_size, num_classes, embed_dims + num_manual_features) \hadamard (batch_size, num_classes, embed_dims + num_manual_features)
# ==> (batch_size, num_classes, embed_dims + num_manual_features)
# X = Sum(X, dim=-1):
# \sigma (batch_size, num_classes, embed_dims + num_manual_features) ==> (batch_size, num_classes)
# X = X + b_c:
# (batch_size, num_classes) + (1, num_classes) ==> (batch_size, num_classes)
# softmax(X):
# \softmax (batch_size, num_classes) ==> (batch_size, num_classes)
man_feats_tens = torch.tensor(man_feats.to_numpy(dtype=int), dtype=torch.float32).unsqueeze(dim=1)
# Manual features are repeated for every class
man_feats_tens = man_feats_tens.repeat(1,S1.size()[1],1)
inter = torch.cat((S1,man_feats_tens), dim=-1)
# Using the Hadamard product and summation ensures there's no interaction between the document's
            # different class representations. This makes analysis more straightforward.
output = self.softmax(torch.sum(torch.mul(inter, self.final.weight), 2, keepdim=False)+self.final_bias.weight)
return output
def get_weights(self):
return None
class CustomAttentionLayer(nn.Module):
def __init__(self, dimensions, num_classes):
super(BERTModel.BERTInternal.CustomAttentionLayer, self).__init__()
self.linear_in = nn.Linear(dimensions, dimensions)
self.tanh = nn.Tanh()
self.queries = nn.Linear(dimensions, num_classes)
self.softmax = nn.Softmax(dim=2)
def forward(self, X:torch.Tensor):
# X.size() == (batch_size, seq_length, embed_dimensions)
# U = tanh(X*W_w) ==> (batch_size, seq_length, embed_dimensions)*(embed_dimensions, embed_dimensions) -->
# (batch_size, seq_length, embed_dimensions)
U = self.tanh(self.linear_in(X))
# A = softmax(X*Q +b_q) ==> (batch_size, seq_length, embed_dimensions)*(embed_dimensions, num_classes/queries) -->
# (batch_size, seq_length, num_classes/queries)
attention_weights = self.softmax(self.queries(U))
# S = A^T*X +b_a (batch_size, num_classes/queries, seq_length)*(batch_size, seq_length, embed_dimensions) -->
# (batch_size, num_classes/queries, embed_dimension)
S = torch.bmm(attention_weights.transpose(1, 2), X)
return S, attention_weights
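# Quick shape check for CustomAttentionLayer (a hedged sketch; the tensors and
# sizes below are assumptions for illustration, not part of the original file):
#
#     layer = BERTModel.BERTInternal.CustomAttentionLayer(dimensions=768, num_classes=3)
#     X = torch.randn(2, 10, 768)      # (batch_size, seq_length, embed_dimensions)
#     S, A = layer(X)
#     assert S.size() == (2, 3, 768)   # one summary vector per class
#     assert A.size() == (2, 10, 3)    # attention weight per token per class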
if __name__ == "__main__":
model_class, tokenizer_class, pretrained_weights = BertModel, BertTokenizer, 'bert-base-cased'
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
# model = model_class.from_pretrained(pretrained_weights)
class_descrip_file = '../../Inputs/class_descriptions_android.csv'
package_descrip_file = '../../Inputs/package_descriptions_android.csv'
ignore_if_next_contains = [r'^javax?\..*', r'^com\..*', r'^dalvic\..*', r'^junit\..*', r'^j\..*', r'^junit\..*']
package_descrip_cols = ['QualPackageName', 'NumMethods', 'Description']
class_descrip_cols = ['QualClassName', 'NumMethods', 'Description']
cols_4_class_sig = (0, 2)
cols_4_package_sig = (0, 1)
create_cache = False
cache_name = 'bert_debug_cache.pickle'
if create_cache:
df = pd.read_pickle('../../Inputs/Caches/cache2.pickle')
class_descrips = get_df_from_csv(class_descrip_file, aggregate_cols=cols_4_class_sig, col_names=class_descrip_cols
, ignore_if_next_contains=ignore_if_next_contains, index_col=class_descrip_cols[0])
package_descrips = get_df_from_csv(package_descrip_file, aggregate_cols=cols_4_package_sig,
col_names=package_descrip_cols
, ignore_if_next_contains=ignore_if_next_contains,
index_col=package_descrip_cols[0], add_period=True)
cols_to_embed = ['Description', "ClassDescription", "PackageDescription"]
df["PackageDescription"] = ''
df['ClassDescription'] = ''
df_qualified_classname = df['QualifiedPackage'].str.cat( df['Classname'].copy(), sep='.')
# print(df_qualified_classname)
for package in package_descrips.index.tolist():
df.loc[df['QualifiedPackage']== package, 'PackageDescription'] = package_descrips.loc[package, 'Description']
for classname in class_descrips.index.tolist():
df.loc[df_qualified_classname== classname, 'ClassDescription'] = class_descrips.loc[classname, 'Description']
def concat_str_cols(X:pd.DataFrame, columns:list=None):
combined_data = pd.Series(index=X.index, dtype='object')
combined_data = combined_data.fillna('')
for col in columns:
combined_data= combined_data.str.cat(X[col].copy().fillna(''), sep=' ')
return combined_data
s = concat_str_cols(df, cols_to_embed)
df2 = pd.DataFrame(index=s.index)
df2['X'] = s.copy()
df2['y'] = df['Source/Sink'].copy()
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
df2[mf_cols] = df[mf_cols].copy()
df2.to_pickle(cache_name)
df = df2
else:
print('reading cache')
df = pd.read_pickle(cache_name)
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
bm = BERTModel(model_class, tokenizer_class, pretrained_weights, len(mf_cols), trainable_bert_layers=(7,12))
bow = BoWVectorizer()
mf_cols = bow.find_man_feat_cols(df)
bm.train(df['X'],df['y'], man_feats = df[mf_cols])
# for little_s, enc in zip(s[:10],t['input_ids']):
# print(re.sub(r"\n", '',little_s))
# print(enc)
# print(len([e for e in enc if e != 0]))
# text = df['Description'].to_list()
# print(text[0])
# encs = tokenizer.batch_encode_plus(text[:2],add_special_tokens=True, max_length=512, pad_to_max_length=True, return_token_type_ids=True)
# doc_lens = []
# input_ids = torch.tensor(encs['input_ids'] , dtype=torch.long)
# print(input_ids.size())
# token_type_ids = torch.tensor(encs['token_type_ids'], dtype=torch.long)
# attention_mask = torch.tensor(encs['attention_mask'], dtype=torch.long)
# # model = model_class.from_pretrained(pretrained_weights)
# # last_hidden_state, pooler_output = model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
# # print(last_hidden_state.size())
# custom_bert = BERTModel(pretrained_weights, 768, 512, 3)
# custom_bert.forward(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
|
[
"chris414862@gmail.com"
] |
chris414862@gmail.com
|
77e79f9ef67d7b2a99e8a1c2d037a274848b9c17
|
ea3272d707f3a6e5d097301d300a0ea97ddd82b5
|
/psm/oop1/oop1_2/info_hiding_property.py
|
b12f77b06d357a78fd4c81646ba553fa9c6dce8c
|
[] |
no_license
|
gambler1541/BootCamp
|
d05850f256ed7a8baa02545551176959a66a9bb3
|
b025dd07a8fedd58366f96c9b516f134a95138f1
|
refs/heads/master
| 2020-04-07T07:21:51.363439
| 2019-06-08T11:10:27
| 2019-06-08T11:10:27
| 158,173,790
| 1
| 0
| null | 2018-11-19T06:38:36
| 2018-11-19T06:38:36
| null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
class Account:
def __init__(self, name, money):
self.user = name
        # Calls the setter method below rather than declaring an instance member directly
self.balance = money
@property
def balance(self):
return self._balance
@balance.setter
def balance(self, money):
if money < 0:
return
        # This is where the actual instance member is declared
self._balance = money
if __name__ == '__main__':
my_acnt = Account('greg', 5000)
    # The change is attempted through the setter, so the _balance member is never set to a negative value
    # Since it was not changed to a negative number, this prints 5000
    my_acnt.balance = -3000
    # Call the getter method balance() to access the _balance member
print(my_acnt.balance)
|
[
"sungmin3231@gmail.com"
] |
sungmin3231@gmail.com
|
e56b25cff63a25e70c4cdee0e6b01b2051686c7b
|
d6977cd01645d5259c4e33d8885ae279695d59fa
|
/django_tutorials/Django1.8Pro/mypro/mypro/settings.py
|
105f0902e48e1cf21ce668b3693c4d6029131ac0
|
[] |
no_license
|
Jadatravu/Tutorials
|
092cbea55a4906e5968037915caa29698270397f
|
b07262c46dd997ee43260ea006a7010644f95650
|
refs/heads/master
| 2021-01-17T13:11:21.119328
| 2019-11-01T08:49:53
| 2019-11-01T08:49:53
| 14,251,990
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
"""
Django settings for mypro project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*+kw(pk_khm##%c#bxck0x6vzf7#2b%rdcs2mmkgndp6_+9#6i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'mapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mypro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/home/ubuntu/developer/18Jul16/mypro/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mypro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR,'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT ='/home/ubuntu/developer/18Jul16/mypro/staticfiles/dfiles'
STATICFILES_DIRS = (
    '/home/ubuntu/developer/18Jul16/mypro/staticfiles/sfiles',
    os.path.join(BASE_DIR, "static"),
    '/var/www/static/',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters':{
'simple':{
'format':'%(levelname)s %(message)s'
}
},
'handlers': {
'file':{
'level':'DEBUG',
'class':'logging.FileHandler',
'filename':'/home/ubuntu/developer/18Jul16/mypro/logapp.log',
'formatter':'simple'
},
},
'loggers': {
'myapp.views':{
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
|
[
"ubuntu@ip-172-31-35-183.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-35-183.us-west-2.compute.internal
|
382ed93decb7215d582077e08f363fa17473c08f
|
dbb0a4d452ac0faf00411a09b7e32f13ffdb31e8
|
/tests/ssh_test.py
|
e2dda308c8bbe2ee15c26bb156ddb008f3404399
|
[] |
no_license
|
koverman47/EGEN_310
|
3ef66b7fb773b4e5fb833c250c87c7cf4fc84d49
|
f69e292baa48bca441dd0f7d9ba7789db417d42a
|
refs/heads/master
| 2020-04-18T00:39:47.999960
| 2019-04-24T20:14:44
| 2019-04-24T20:14:44
| 167,086,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
#!/usr/bin/env python3
import paramiko
import sys
import tty
import termios
# RPi Zero w SSH Credentials
host = "zeropythirty"
ip = "10.152.247.52"
user = "pi"
passw = "pi"
# Establish SSH tunnel
try:
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=ip, username=user, password=passw)
except paramiko.BadHostKeyException:
    print("Host could not be found.")
    sys.exit(0)
except paramiko.AuthenticationException:
    print("Could not authenticate host.")
    sys.exit(0)
except paramiko.SSHException:
    print("Unknown SSH error.")
    sys.exit(0)
# Give stdin to this script
tty.setcbreak(sys.stdin)
try:
while True:
result = None
        # Read one key and pass its code over the SSH tunnel
        # (exec_command expects a command string, so the integer key code is sent as text)
        key = ord(sys.stdin.read(1))
        result = ssh.exec_command(str(key))
except KeyboardInterrupt:
pass
except paramiko.SSHException:
if result:
print(result)
finally:
# Return stdin to ECHO
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
old[3] = old[3] | termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, old)
# Close SSH tunnel
ssh.close()
|
[
"knoverman@comcast.net"
] |
knoverman@comcast.net
|
b2ecd23d0488f2b3bea55b15c9d3b6bc34e739ab
|
98e7edf7a3f834580ba6a3c1058506e5a773d1bb
|
/fileHandling.py
|
b4d3e6ae4d6684d77027c28ed20cabc3eedba404
|
[] |
no_license
|
gauravhansda/InterviewQuestions
|
ef95363440d9eb0057ab0551f155793ea3e5e3b0
|
5134c59d3d543982a30465d7d4eaa1c8902185e2
|
refs/heads/master
| 2021-01-01T17:07:11.537838
| 2017-07-22T02:31:23
| 2017-07-22T02:31:23
| 98,001,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
class FILEHANDLING():
def __init__(self):
self.file_name = "file.txt"
# Remove blank lines from the file
def remBlanks(self):
with open(self.file_name, 'r') as inp:
lines = inp.readlines()
print lines
with open("out.txt", 'w') as out:
for line in lines:
if not line.isspace():
out.writelines(line)
if __name__ == '__main__':
fh = FILEHANDLING()
fh.remBlanks()
|
[
"GauravHansda@Gauravs-MacBook-Pro.local"
] |
GauravHansda@Gauravs-MacBook-Pro.local
|
1fe280eafbf7f4ca37046d98d4cf1d1ae08472ed
|
18059513f87f1adc5cae34ac74bd89835c4f6816
|
/Deck_of_cards.py
|
8bdec654c96079de29cafccbb7c1c2ada8122edc
|
[] |
no_license
|
PrzemyslawMisiura/Deck_of_cards
|
fcdc6d47ba10d352d7e29005d4b6ef23734870bf
|
ed0f941ec4456dc2435c87f1f8a111db43549861
|
refs/heads/master
| 2020-07-17T23:32:25.635935
| 2019-09-04T10:35:00
| 2019-09-04T10:35:00
| 206,124,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
# Specifications
# Card:
# Each instance of Card should have a suit ("Hearts", "Diamonds", "Clubs", or "Spades").
# Each instance of Card should have a value ("A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K").
# Card's __repr__ method should return the card's value and suit (e.g. "A
# of Clubs", "J of Diamonds", etc.)
class Card:
def __init__(self, value, suit):
self.value = value
self.suit = suit
def __repr__(self):
return "{} of {}".format(self.value, self.suit)
# Deck:
# Each instance of Deck should have a cards attribute with all 52 possible
# instances of Card.
# Deck should have an instance method called count which returns a count
# of how many cards remain in the deck.
# Deck's __repr__ method should return information on how many cards are
# in the deck (e.g. "Deck of 52 cards", "Deck of 12 cards", etc.)
# Deck should have an instance method called _deal which accepts a number
# and removes at most that many cards from the deck (it may need to remove
# fewer if you request more cards than are currently in the deck!). If
# there are no cards left, this method should raise a ValueError with the
# message "All cards have been dealt".
# Deck should have an instance method called shuffle which will shuffle a
# full deck of cards. If there are cards missing from the deck, this
# method should raise a ValueError with the message "Only full decks can
# be shuffled". Shuffle should return the shuffled deck.
# Deck should have an instance method called deal_card which uses the
# _deal method to deal a single card from the deck and return that single
# card.
# Deck should have an instance method called deal_hand which accepts a
# number and uses the _deal method to deal a list of cards from the deck
# and return that list of cards.
from random import shuffle
class Deck:
def __init__(self):
suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
values = [
"A",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"J",
"Q",
"K"]
self.cards = [Card(v, s) for v in values for s in suits]
print(self.cards)
def count(self):
return len(self.cards)
def __repr__(self):
return "Deck of {} cards".format(self.count())
def _deal(self, num):
count = self.count()
actual = min([count, num])
if count == 0:
raise ValueError("All cards have been dealt")
cards = self.cards[-actual:]
self.cards = self.cards[:-actual]
return cards
def deal_card(self):
return self._deal(1)[0]
def deal_hand(self,n):
return self._deal(n)
    def shuffle(self):
        if self.count() < 52:
            raise ValueError("Only full decks can be shuffled")
        shuffle(self.cards)
        return self
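# A minimal usage sketch against the spec above (kept as comments so importing
# this module stays side-effect free; the values shown are illustrative only):
#
#     deck = Deck()            # Deck of 52 cards
#     deck.shuffle()
#     card = deck.deal_card()  # e.g. K of Spades
#     hand = deck.deal_hand(5)
#     deck.count()             # 46 cards remain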
|
[
"przem.misiura@gmail.com"
] |
przem.misiura@gmail.com
|
c148d463b278c19a66dbc6bfe9f7997cdb5d3cb7
|
e517fcf60b982bb843ae846fa881102d504c368c
|
/poncho/postprocess/prepare_data.py
|
e6bf2e27d2a127b733c008f9ca472b820e8fb482
|
[
"MIT"
] |
permissive
|
waduhek/poncho
|
b79aa9087faf9809872eeb6a36a21de3e34bb5d8
|
47a8f2600d1afcfb8f9fa76ff0c68eb9f132f56c
|
refs/heads/master
| 2020-04-17T15:14:31.865951
| 2019-04-19T09:24:54
| 2019-04-19T09:24:54
| 166,690,766
| 4
| 1
|
MIT
| 2019-04-19T06:35:26
| 2019-01-20T17:45:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
import os
import time
from datetime import datetime
import pandas as pd
from poncho.utils.get_base_dir import get_base_dir
def main(unique_years):
(BASE_DIR) = get_base_dir()
# Log file
log = open(os.path.join(BASE_DIR, 'data', 'logs', 'train_{}.txt'.format(str(time.time()).split('.')[0])), mode='a')
log.write('Beginning to create training dataset. Time: {}\n\n'.format(str(datetime.now())))
print('Beginning to create training dataset. Time: {}\n'.format(str(datetime.now())))
for year in unique_years:
        log.write('Converting data of {} into train dataset. Time: {}\n'.format(year, str(datetime.now())))
print('Converting data of {} into train dataset. Time: {}'.format(year, str(datetime.now())))
# Open required CSV file
df = pd.read_csv(os.path.join(BASE_DIR, 'data', 'prepared', 'prepared_{}.csv'.format(year))).dropna()
# Convert the comments and replies to a Pandas DataFrame object
comment = pd.DataFrame(df['comment'])
reply = pd.DataFrame(df['reply'])
# Write the comments and replies to separate files in the directory of 'nmt-chatbot'
comment.to_csv(
os.path.join(os.path.dirname(BASE_DIR), 'nmt-chatbot', 'new_data', 'train.from'),
mode='w',
index=False,
header=None
)
reply.to_csv(
os.path.join(os.path.dirname(BASE_DIR), 'nmt-chatbot', 'new_data', 'train.to'),
mode='w',
index=False,
header=None
)
log.write('Finishing up... Time: {}\n'.format(str(datetime.now())))
log.write('==========================================================================================\n\n')
print('Finishing up... Time: {}'.format(str(datetime.now())))
print('==========================================================================================')
log.close()
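# Hypothetical invocation (the years list is an assumption; main() only needs an
# iterable of years matching the prepared_<year>.csv files):
#
#     if __name__ == '__main__':
#         main(unique_years=['2015', '2016'])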
|
[
"ryannor56a@gmail.com"
] |
ryannor56a@gmail.com
|
9c162425d5991391ecff90fd0305e2d82e9d7f23
|
1e8177c6fcb5f5035de5c463f4e07ba645f5ae49
|
/csv2xl.py
|
6a3be9daaabc2bfe0cf3c0a8670ca2f29678a951
|
[] |
no_license
|
sravanre/python
|
1a97669da262d3067fce6f7bc67141ba976f176c
|
248e3612be2890313be68886d02ff8a39e6c423d
|
refs/heads/master
| 2021-07-03T18:11:01.094397
| 2020-09-18T10:04:28
| 2020-09-18T10:04:28
| 172,246,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import csv
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
#ws = wb['Sheet1']
f = open("marks.csv","r")
for lines in csv.reader(f):
ws.append(lines)
f.close()
wb.save('marks1.xlsx')
wb.close()
##r=1
##for lines in csv.reader(f):
## for c,val in enumerate(lines):
## ws.cell(row=r,column=c+1, value=val)
## r+=1
|
[
"noreply@github.com"
] |
noreply@github.com
|
b4b2aa8f7d0110d5a1ee9b8e0de04c1e02146c12
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_zoologists.py
|
d5eaad0aa4529df66ccc13452502429859ae6960
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _ZOOLOGISTS():
def __init__(self,):
self.name = "ZOOLOGISTS"
        self.definitions = ['zoologist']  # was the undefined name `zoologist`; assumed to mirror self.basic
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['zoologist']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
267e66a7d5ee24cddd6245f2bb5aaf4c8f0bee87
|
4d051a2875532ee9825559261927218bbac3dbf4
|
/src/arm/joint/__init__.py
|
c5d36d433fbcca594e664ed8910fe826eec87b91
|
[] |
no_license
|
swol-kat/Arm_Code
|
4554b9f79460929515dc4e5c0dc7f0b7b23985da
|
389a8fe4875c6ab90e2ec79dedf445088b21c855
|
refs/heads/master
| 2023-04-11T11:06:38.759482
| 2021-04-23T00:28:15
| 2021-04-23T00:28:15
| 335,827,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
from .joint import Joint
from .virtual_joint import VirtualJoint
|
[
"arjungandhi06@gmail.com"
] |
arjungandhi06@gmail.com
|
f6b693f1370e3d80c736a6b08d507d671d4a8bc5
|
008c065391d766fec2f2af252dd8a5e9bf5cb815
|
/Even Matrix.py
|
7e545a6a78adeb1c5ec75a406ef4644cbe57e481
|
[] |
no_license
|
22Rahul22/Codechef
|
b261ab43ff5ff64648a75ad1195e33cac2cfec52
|
1f645c779a250a71d75598e1eabad7e52dd6b031
|
refs/heads/master
| 2022-11-29T21:51:09.578798
| 2020-08-19T06:20:23
| 2020-08-19T06:20:23
| 288,650,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
t = int(input())
for _ in range(t):
n = int(input())
arr = [[0 for i in range(n)] for j in range(n)]
sr = 0
er = n
sc = 0
ec = n
z = 0
num = 1
if n % 2 == 0:
x = n // 2
else:
x = 1 + (n // 2)
while z != x:
j = sc
while j < ec:
arr[sr][j] = num
num += 1
j += 1
sr += 1
i = sr
while i < er:
arr[i][ec - 1] = num
num += 1
i += 1
ec -= 1
j = ec - 1
while j >= sc:
arr[er - 1][j] = num
num += 1
j -= 1
er -= 1
i = er - 1
while i >= sr:
arr[i][sc] = num
num += 1
i -= 1
sc += 1
z += 1
for i in range(n):
for j in range(n):
print(arr[i][j], end=" ")
print()
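# Sample run (traced by hand from the loop above): for n = 3 the clockwise
# spiral comes out as
#   1 2 3
#   8 9 4
#   7 6 5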
|
[
"rahulbhl22@gmail.com"
] |
rahulbhl22@gmail.com
|
f7863d8927d006aaf6bb1f2450af7fe6550ab070
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/system_design_ladder/GeoHashII.py
|
b15bec1dd5ca21a631b684b5a96092a0772cec5f
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344
| 2020-01-13T02:29:02
| 2020-01-13T02:29:02
| 233,494,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
class GeoHash:
BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"
"""
@param: geohash: geohash a base32 string
@return: latitude and longitude a location coordinate pair
"""
def decode(self, geohash):
binary = self._to_bin(geohash)
lon_bin = [binary[i] for i in range(0, len(binary), 2)]
lat_bin = [binary[i] for i in range(1, len(binary), 2)]
longitude = self._bin_to_val(-180, 180, lon_bin)
latitude = self._bin_to_val(-90, 90, lat_bin)
return latitude, longitude
def _to_bin(self, geohash):
binary = ''
for c in geohash:
idx = GeoHash.BASE32.index(c)
b = ''
for i in range(5):
b = str(idx % 2) + b
idx = idx // 2
binary += b
return binary
def _bin_to_val(self, low, high, binary):
for b in binary:
mid = (high + low) / 2
if b == '1': # our value is higher
low = mid
else: # our value is lower
high = mid
return (low + high) / 2
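# Worked example of the bisection in _bin_to_val (hand-traced, assuming the
# 3-bit latitude string '101' over the range [-90, 90]):
#   '1' -> low = 0      (value is in the upper half [0, 90])
#   '0' -> high = 45    (value is in the lower half [0, 45])
#   '1' -> low = 22.5   (value is in the upper half [22.5, 45])
# returns (22.5 + 45) / 2 = 33.75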
if __name__ == '__main__':
    geoHash = GeoHash()
    print(geoHash.decode("wx4g0s"))
|
[
"linfenglee321@gmail.com"
] |
linfenglee321@gmail.com
|
5a8680c24a91be453228f2818013da9f4dc66067
|
05dc7b4b44200cc022b09f773dd868049ecfb3e1
|
/rss/admin.py
|
c0a8c6aaa5087612094e649d116e9e1783bec26f
|
[] |
no_license
|
micah66/rssFeed
|
e7cea9a3f68713585990aaa039ff80c289d01e75
|
0b17f3d13333bf7e6eae182fdeb6bc565ef8977a
|
refs/heads/master
| 2020-03-21T12:44:29.659306
| 2018-06-28T08:52:45
| 2018-06-28T08:52:45
| 138,569,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from django.contrib import admin
from .models import Headlines
# Register your models here.
admin.site.register(Headlines)
|
[
"micahgordon66@gmail.com"
] |
micahgordon66@gmail.com
|
0873be18a9c6f1322e01559626604b34c5ef88c1
|
fea6ceb798d612368a27888e6490b4f91c04384f
|
/continue.py
|
bdfc1b2cc6ab4f708e4b8266f81d187ed8caf26f
|
[] |
no_license
|
neogeolee/PythonWorkspace
|
b4a8d8cf0ef451bf3bc0e00ccaecaf48253bc0b8
|
f8b9aff2ce821142990acac1cd2406bbe140ab4b
|
refs/heads/master
| 2022-12-17T11:30:29.974230
| 2020-09-22T16:28:58
| 2020-09-22T16:28:58
| 297,708,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
absent = [2, 5] # absent students
no_book = [7] # didn't bring their book
for student in range(1, 11):
    if student in absent:
        continue
    elif student in no_book:
        print('Class ends here today. Student {0}, come with me.'.format(student))
        break
    print('{0}, read your book'.format(student))
|
[
"neogeolee@nate.com"
] |
neogeolee@nate.com
|
5e6f288758fa99fdf7f7a34041decfca9b7f7f42
|
dac906538145808a71e94e030d63c6f20753977a
|
/webapp/models.py
|
1816f0d14f43481ce923fe42af4e351a6fad5b98
|
[] |
no_license
|
nikofil/NMProject
|
3a0846b72cf66afb140feff9674d053e144f8087
|
150971c67e509b2bdc874b85c1f6abda4e27c793
|
refs/heads/master
| 2020-12-31T05:24:39.311930
| 2015-07-01T10:16:26
| 2015-07-01T10:16:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,832
|
py
|
from django.db import models
def toint(x):
try:
return int(x)
except ValueError:
return -1
def tofloat(x):
try:
return float(x)
except ValueError:
return -1.0
class BaseStation(models.Model):
# describes a base station record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
operator = models.CharField(max_length=100)
mcc = models.IntegerField(default=-1)
mnc = models.IntegerField(default=-1)
cid = models.IntegerField(default=-1)
lac = models.IntegerField(default=-1)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.operator = data[2]
self.mcc = toint(data[3])
self.mnc = toint(data[4])
self.cid = toint(data[5])
self.lac = toint(data[6])
self.latitude = tofloat(data[7])
self.longitude = tofloat(data[8])
self.timestamp = data[9] + "+03:00"
class BatteryStatus(models.Model):
# describes a battery status record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
level = models.IntegerField(default=-1)
plugged = models.IntegerField(default=-1)
temperature = models.IntegerField(default=-1)
voltage = models.IntegerField(default=-1)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.level = toint(data[2])
self.plugged = toint(data[3])
self.temperature = toint(data[4])
self.voltage = toint(data[5])
self.timestamp = data[6] + "+03:00"
class GPSStatus(models.Model):
    # describes a GPS position record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.latitude = tofloat(data[2])
self.longitude = tofloat(data[3])
self.timestamp = data[4] + "+03:00"
class WifiPos(models.Model):
    # describes a wifi position (average of positions from wifi statuses)
ssid = models.CharField(max_length=100)
bssid = models.CharField(max_length=100)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
def __str__(self):
return self.ssid + " - " + self.bssid
class WifiStatus(models.Model):
    # describes a wifi status record
rid = models.IntegerField(default=-1)
email = models.CharField(max_length=100)
ssid = models.CharField(max_length=100)
bssid = models.CharField(max_length=100)
level = models.IntegerField(default=-1)
frequency = models.IntegerField(default=-1)
latitude = models.FloatField(default=-1.0)
longitude = models.FloatField(default=-1.0)
timestamp = models.DateTimeField()
realpos = models.ForeignKey(WifiPos, null=True, blank=True)
def __str__(self):
return str(self.rid) + " " + self.email
def setdata(self, data):
self.rid = toint(data[0])
self.email = data[1]
self.ssid = data[2]
self.bssid = data[3]
self.level = toint(data[4])
self.frequency = toint(data[5])
self.latitude = tofloat(data[6])
self.longitude = tofloat(data[7])
self.timestamp = data[8] + "+03:00"
|
[
"aesmade@gmail.com"
] |
aesmade@gmail.com
|
bec4636056c0dc596b344cbdca2e4857ec559ff4
|
41299f375dbd79fc6e1163333de11a27623ab7fd
|
/server/dbpedia/__init__.py
|
cc74d451e46371ba2a1e49114ba091f920692a32
|
[] |
no_license
|
MatrixReloaded/ArtViz
|
b18315562f30e2f0388d824ee9fcdf02fcca3591
|
b479079287a4e3f82fb1e6f9b1b223ef977af73e
|
refs/heads/master
| 2021-01-14T09:45:47.122465
| 2015-10-03T17:02:31
| 2015-10-03T17:11:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
"""
DBpedia quepy.
"""
from basic import *
from people import *
from BasicQuestions import *
|
[
"oana.balaceanu@ymail.com"
] |
oana.balaceanu@ymail.com
|
dec87b22a15123ba554802e7dab90b8df69397a1
|
1a375cf927f2ffd33ef9087637d04ce6c83de566
|
/demo/backtest_f/main.py
|
bbb8fbc0b513361127ffd2d557cb3e313cb85193
|
[
"MIT"
] |
permissive
|
lgh0504/snake
|
af2bd25642f57a35442cfd41161d489db12019b9
|
0fd9929995327a1c23486c0dbc5421e18791eb88
|
refs/heads/master
| 2023-03-18T18:13:53.237850
| 2017-12-18T01:48:43
| 2017-12-18T01:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# coding: utf-8
from datetime import datetime
import pandas_datareader as pdr
from f.portfolios import MarketOnClosePortfolio
from f.strategy.ma_cross import MovingAverageCrossStrategy
# from f.strategy.random_forecast import RandomForecastingStrategy
def run_backtest(symbol, date_range=(datetime(2016, 8, 29), datetime.now())):
# get data from yahoo
bars = pdr.get_data_yahoo(symbol, start=date_range[0], end=date_range[1])
print 'stock bars: ', bars.head(10)
# create strategy class and get signals
strategy_inst = MovingAverageCrossStrategy(symbol, bars)
signals = strategy_inst.generate_signals()
print 'signals', signals.head()
# create a portfolio
portfolio_inst = MarketOnClosePortfolio(
symbol, bars, signals, initial_capital=100000.0, shares_per_position=1000
)
returns = portfolio_inst.backtest_portfolio()
print 'head returns:', returns.head(10)
print 'tail returns:', returns.tail(10)
return returns
if __name__ == '__main__':
run_backtest(
# symbol='000333.SZ',
# symbol='000034.SZ',
symbol='600016.SH',
)
|
[
"lbj.world@gmail.com"
] |
lbj.world@gmail.com
|
1b464ae08fac263159c6d7d58623f8f8b5db8153
|
2e3f7b74e5e14eb9b12d316f609c3f057de1e845
|
/ka_kun_blog/forms.py
|
92a17c224684df6f4661e7cfab7160cff51c5071
|
[] |
no_license
|
katsuhikonakano/blog
|
edf3de81b1cfe45d2739fa55375bafbf326a0263
|
2c46d7e8b7044b1ab05ba9c9e595f9d4861b2674
|
refs/heads/master
| 2021-03-08T08:46:11.347696
| 2020-07-27T03:36:39
| 2020-07-27T03:36:39
| 246,260,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from django import forms
from .models import Post
from django.forms import TextInput, Textarea, FileInput, Select, SelectMultiple
from django.contrib.auth.forms import AuthenticationForm
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ["title", "content", "thumnail", "image", "category", "tag"]
widgets = {
'title': TextInput(attrs={
'class': 'form-control',
'placeholder': 'タイトル'}),
'content': Textarea(attrs={
'class': 'form-control',
'placeholder': '本文',
'cols': 80, 'rows': 15}),
'thumnail': FileInput(attrs={
'class': 'form-control-file'}),
'image': FileInput(attrs={
'class': 'form-control-file'}),
'category': Select(attrs={
'class': 'form-control'}),
'tag': SelectMultiple(attrs={
'class': 'form-control'
})
}
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for fields in self.fields.values():
fields.widget.attrs['class'] = 'form-control'
fields.widget.attrs['placeholder']= fields.label
|
[
"katsuhiko1211@gmail.com"
] |
katsuhiko1211@gmail.com
|
6edc845cb4ebc87b7e5081731b569ca3d83813bd
|
f876ed037442b60e964bb53e4a0cc7e14818a746
|
/rnn.py
|
54a32e964cce09e970934f341f7459084c3d9bde
|
[] |
no_license
|
shksa/cs231n-Assignment3
|
b08913353372575ff8b04552387008b91e6a0a06
|
528581fdd47471390fcd95a79b626a0a738c9b58
|
refs/heads/master
| 2021-01-20T12:42:09.528428
| 2017-05-05T16:44:33
| 2017-05-05T16:44:33
| 90,397,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,020
|
py
|
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.items():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
if self.cell_type == 'rnn':
############################# Forward Pass #################################
# (1) Image features projection onto RNN by affine transformation with W_proj weights array.
IFP_to_RNN, cache_for_feature_projection = affine_forward(features, W_proj, b_proj)
# (2) Convert word indices to word vectors of the captions_in matrix
Embedded_captions_in, cache_for_word_embedding = word_embedding_forward(captions_in, W_embed)
        # (3) Run the RNN over the length-T sequence for the whole minibatch N.
h0 = IFP_to_RNN
HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1 = rnn_forward(Embedded_captions_in, h0, Wx, Wh, b)
            # (4) Compute scores over the vocabulary words at all time steps over the minibatch N.
Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2 = temporal_affine_forward(HdSV_ForAllTimeSteps_OverMiniBatch, W_vocab, b_vocab)
# (5) Compute loss using ground-truth captions_out matrix.
loss, d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch = temporal_softmax_loss(Scores_ForAllTimeSteps_OverMiniBatch, captions_out, mask)
############################# Backward Pass ################################
# (4) Backprop into temporal_affine_forward function.
d_HdSV_ForAllTimeSteps_OverMiniBatch, d_W_vocab, d_b_vocab = temporal_affine_backward(d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2)
# (3) Backprop into rnn_forward function.
d_Embedded_captions_in, d_h0, d_Wx, d_Wh, d_b = rnn_backward(d_HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1)
# (2) Backprop into word_embedding_forward function.
d_W_embed = word_embedding_backward(d_Embedded_captions_in, cache_for_word_embedding)
            # (1) Backprop into the image-feature affine transformation.
d_IFP_to_RNN = d_h0
d_features, d_W_proj, d_b_proj = affine_backward(d_IFP_to_RNN, cache_for_feature_projection)
elif self.cell_type == 'lstm':
            ############################# Forward Pass #################################
            # (1) Project the image features onto the initial LSTM hidden state via an affine transform with W_proj.
IFP_to_RNN, cache_for_feature_projection = affine_forward(features, W_proj, b_proj)
# (2) Convert word indices to word vectors of the captions_in matrix
Embedded_captions_in, cache_for_word_embedding = word_embedding_forward(captions_in, W_embed)
            # (3) Run the LSTM over the length-T input sequence for the minibatch of N examples.
h0 = IFP_to_RNN
HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1 = lstm_forward(Embedded_captions_in, h0, Wx, Wh, b)
            # (4) Compute scores over the vocabulary words at all time steps over the minibatch N.
Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2 = temporal_affine_forward(HdSV_ForAllTimeSteps_OverMiniBatch, W_vocab, b_vocab)
# (5) Compute loss using ground-truth captions_out matrix.
loss, d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch = temporal_softmax_loss(Scores_ForAllTimeSteps_OverMiniBatch, captions_out, mask)
############################# Backward Pass ################################
# (4) Backprop into temporal_affine_forward function.
d_HdSV_ForAllTimeSteps_OverMiniBatch, d_W_vocab, d_b_vocab = temporal_affine_backward(d_Loss__d_Scores_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_2)
            # (3) Backprop into the lstm_forward function.
d_Embedded_captions_in, d_h0, d_Wx, d_Wh, d_b = lstm_backward(d_HdSV_ForAllTimeSteps_OverMiniBatch, cache_for_rnn_1)
# (2) Backprop into word_embedding_forward function.
d_W_embed = word_embedding_backward(d_Embedded_captions_in, cache_for_word_embedding)
            # (1) Backprop into the image-feature affine transformation.
d_IFP_to_RNN = d_h0
d_features, d_W_proj, d_b_proj = affine_backward(d_IFP_to_RNN, cache_for_feature_projection)
grads.update({
'W_proj': d_W_proj,
'b_proj': d_b_proj,
'W_embed': d_W_embed,
'Wx': d_Wx,
'Wh': d_Wh,
'b': d_b,
'W_vocab': d_W_vocab,
'b_vocab': d_b_vocab
})
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
        # variable self._start. At each timestep you will need to:               #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
# Initial hidden state for the RNN
h0, cache = affine_forward(features, W_proj, b_proj)
# First word to the RNN should be the special <START> token
V, wordVec_dim = W_embed.shape
Start_Vector_Batch = np.zeros((N, wordVec_dim))
Start_Vector = W_embed[self._start]
Start_Vector_Batch[np.arange(N)] = Start_Vector
Hidden_States = {}
Hidden_States[0] = h0
H = h0.shape[1]
c0 = np.zeros((N, H))
Cell_States = {}
Cell_States[0] = c0
Scores = {}
        if self.cell_type == 'rnn':
            for t in range(1, max_length + 1):
                if t == 1:
                    x = Start_Vector_Batch
                else:
                    x = W_embed[word_indices]
                prev_h = Hidden_States[t - 1]
                Hidden_States[t], cache = rnn_step_forward(x, prev_h, Wx, Wh, b)
                Scores[t], cache = affine_forward(Hidden_States[t], W_vocab, b_vocab)
                # Greedily take the highest-scoring vocabulary word as word t,
                # writing it into column t-1 so the first sampled word fills
                # captions[:, 0] and the final word is not dropped.
                word_indices = np.argmax(Scores[t], axis=1)
                captions[range(N), t - 1] = word_indices
        elif self.cell_type == 'lstm':
            for t in range(1, max_length + 1):
                if t == 1:
                    x = Start_Vector_Batch
                else:
                    x = W_embed[word_indices]
                prev_h = Hidden_States[t - 1]
                prev_c = Cell_States[t - 1]
                Hidden_States[t], Cell_States[t], cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
                Scores[t], cache = affine_forward(Hidden_States[t], W_vocab, b_vocab)
                # Greedily take the highest-scoring vocabulary word as word t,
                # writing it into column t-1 so the first sampled word fills
                # captions[:, 0] and the final word is not dropped.
                word_indices = np.argmax(Scores[t], axis=1)
                captions[range(N), t - 1] = word_indices
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
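
# --- Illustration (not part of the model) -----------------------------------
# A minimal, self-contained sketch of the caption slicing and <NULL> mask that
# loss() relies on above. The toy token ids below are made up: assume 0 is the
# <NULL> padding token and 1 is the <START> token.
def _caption_offset_demo():
    import numpy as np
    toy = np.array([[1, 4, 5, 0, 0],
                    [1, 2, 3, 6, 0]])      # two padded ground-truth captions
    captions_in = toy[:, :-1]              # fed to the RNN (starts with <START>)
    captions_out = toy[:, 1:]              # what the RNN should produce next
    mask = (captions_out != 0)             # loss is only counted where True
    return captions_in, captions_out, mask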
|
[
"noreply@github.com"
] |
noreply@github.com
|
c4cf2cde8e5feb6718ee02fd9cd86914090cb1ec
|
55d9fd08d587c7a724a41c447bf7f57252788f5b
|
/lfp_causal/meso/stat_easy_vs_hard.py
|
3f46205f09f1dcec82319ae802294193264b9147
|
[] |
no_license
|
StanSStanman/lfp_causal
|
1f400b5d5da285eacc273e04ecd2fcc4ee274bc6
|
832c22dd16aab5650355b58b96dbd3743805f640
|
refs/heads/master
| 2022-05-10T03:03:28.256695
| 2022-04-21T14:38:01
| 2022-04-21T14:38:01
| 176,560,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,952
|
py
|
import os
import os.path as op
import xarray as xr
import numpy as np
import pandas as pd
from frites.dataset import DatasetEphy
from frites.workflow import WfMi
from itertools import product
from research.get_dirs import get_dirs
from lfp_causal.IO import read_session
from lfp_causal.compute_bad_epochs import get_ch_bad_epo, get_log_bad_epo
from lfp_causal.compute_power import normalize_power
# os.system("taskset -p 0xff %d" % os.getpid())
from lfp_causal.profiling import (RepeatedTimer, memory_usage, cpu_usage)
import time
import json
def prepare_data(powers, regressors, l_bad, e_bad, conditions, reg_name,
reg_val, times=None, freqs=None, avg_freq=False,
norm=None, bline=None, fbl=None):
if times is not None:
if isinstance(times, tuple):
tmin, tmax = times
elif isinstance(times, (list, np.ndarray)):
tmin, tmax = times[0], times[-1]
else:
            raise ValueError('times must be NoneType, '
                             'a tuple of values (tmin, tmax), '
                             'a list, or a numpy array')
if freqs is not None:
if isinstance(freqs, tuple):
fmin, fmax = freqs
else:
raise ValueError('freqs must be NoneType '
'or tuple of values (fmin, fmax)')
all_pow = []
for p in powers:
        # for p, r, lb, eb in zip(powers, regressors, l_bad, e_bad):
print('Opening', p)
pow = xr.open_dataset(p)
if fbl is not None:
_fbl = p.split('/')[:-1] + [fbl]
_fbl = '/' + op.join(*_fbl)
if norm is not None:
pow = normalize_power(pow, norm, bline, _fbl)
if isinstance(times, (tuple, list, np.ndarray)):
pow = pow.loc[dict(times=slice(tmin, tmax))]
all_times = pow.times.values
if isinstance(freqs, tuple):
pow = pow.loc[dict(freqs=slice(fmin, fmax))]
all_freqs = pow.freqs.values
pow = pow.to_array().values.transpose(1, 0, 2, 3)
# pow = pow[nans, :, :, :]
if avg_freq:
pow = pow.mean(2)
all_pow.append(pow)
all_reg = {}
for rn in reg_name:
# for rv in reg_val:
_reg = []
for r, lb, eb, cn, _idx in zip(regressors, l_bad, e_bad,
conditions, range(len(regressors))):
xls = pd.read_excel(r, index_col=0)
reg = xls[rn].values
if len(lb) != 0:
reg = np.delete(reg, lb)
if len(eb) != 0:
reg = np.delete(reg, eb)
            # I actually don't remember why these should be useful; consider eliminating them
# all_pow[_idx] = np.delete(all_pow[_idx], np.where(reg != reg_val),
# axis=0)
# reg = np.delete(reg, np.where(reg != reg_val))
if cn == 'easy':
reg = np.full_like(reg, 0)
elif cn == 'hard':
reg = np.full_like(reg, 1)
_reg.append(reg)
all_reg['{0}_{1}'.format(rn, reg_val)] = _reg
return all_pow, all_reg, all_times, all_freqs
def compute_stats_meso(fname_pow, fname_reg, rois, log_bads, bad_epo,
conditions, regressor, reg_vals, mi_type, inference,
times=None, freqs=None, avg_freq=True, norm=None):
power, regr, \
times, freqs = prepare_data(fname_pow, fname_reg, log_bads,
bad_epo, conditions=conditions,
reg_name=regressor, reg_val=reg_vals,
times=times, freqs=freqs,
avg_freq=avg_freq,
norm=norm, bline=(-.55, -0.05),
fbl='cue_on_pow_beta_mt.nc')
###########################################################################
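    # Quick hack (presumably to equalize trial counts across sessions): keep
    # only the first 25 trials of every session.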
for i, p in enumerate(power):
power[i] = p[:25, :, :]
for k in regr.keys():
for i, r in enumerate(regr[k]):
regr[k][i] = r[:25]
# for k in cond.keys():
# if cond[k] is not None:
# for i, c in enumerate(cond[k]):
# cond[k][i] = c[:25]
###########################################################################
mi_results = {}
pv_results = {}
conj_ss_results = {}
conj_results = {}
for _r, _mt, _inf in zip(regr, mi_type, inference):
if _mt == 'cc':
regr[_r] = [r.astype('float32') for r in regr[_r]]
elif _mt == 'cd':
regr[_r] = [r.astype('int32') for r in regr[_r]]
elif _mt == 'ccd':
regr[_r] = [r.astype('float32') for r in regr[_r]]
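            # NOTE: 'cond' is never defined in this function (the conditionals
            # above are commented out), so this 'ccd' branch would raise a
            # NameError if reached.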
cond[_r] = [c.astype('int32') for c in cond[_r]]
ds_ephy = DatasetEphy(x=power.copy(), y=regr[_r], roi=rois,
z=None, times=times)
wf = WfMi(mi_type=_mt, inference=_inf, kernel=np.hanning(20))
mi, pval = wf.fit(ds_ephy, n_perm=1000, n_jobs=-1)
mi['times'] = times
pval['times'] = times
if _inf == 'rfx':
conj_ss, conj = wf.conjunction_analysis(ds_ephy)
if not avg_freq:
            mi = mi.assign_coords({'freqs': freqs})
            pval = pval.assign_coords({'freqs': freqs})
mi_results[_r] = mi
pv_results[_r] = pval
if _inf == 'rfx':
conj_ss_results[_r] = conj_ss
conj_results[_r] = conj
ds_mi = xr.Dataset(mi_results)
ds_pv = xr.Dataset(pv_results)
if len(conj_ss_results) == len(conj_results) == 0:
return ds_mi, ds_pv
else:
ds_conj_ss = xr.Dataset(conj_ss_results)
ds_conj = xr.Dataset(conj_results)
return ds_mi, ds_pv, ds_conj_ss, ds_conj
if __name__ == '__main__':
from lfp_causal import MCH, PRJ
dirs = get_dirs(MCH, PRJ)
monkeys = ['teddy']
conditions = ['easy', 'hard']
event = 'trig_off'
norm = 'fbline_relchange'
n_power = '{0}_pow_beta_mt.nc'.format(event)
times = [(-1.5, 1.3)]
# freqs = [(5, 120)]
# freqs = [(8, 15), (15, 30), (25, 45), (40, 70), (60, 120)]
freqs = [(8, 12), (15, 35), (40, 65), (70, 120)]
freqs = [(15, 35)]
avg_frq = True
overwrite = True
# regressors = ['Correct', 'Reward',
# 'is_R|C', 'is_nR|C', 'is_R|nC', 'is_nR|nC',
# 'RnR|C', 'RnR|nC',
# '#R', '#nR', '#R|C', '#nR|C', '#R|nC', '#nR|nC',
# 'learn_5t', 'learn_2t', 'early_late_cons',
# 'P(R|C)', 'P(R|nC)', 'P(R|Cho)', 'P(R|A)',
# 'dP', 'log_dP', 'delta_dP',
# 'surprise', 'surprise_bayes', 'rpe',
# 'q_pcorr', 'q_pincorr', 'q_dP',
# 'q_entropy', 'q_rpe', 'q_absrpe',
# 'q_shann_surp', 'q_bayes_surp']
# conditionals = [None, None,
# None, None, None, None,
# None, None,
# None, None, None, None, None, None,
# None, None, None,
# None, None, None, None,
# None, None, None,
# None, None, None,
# None, None, None,
# None, None, None,
# None, None]
# conditionals = ['Condition' for r in regressors]
# mi_type = ['cd', 'cd',
# 'cd', 'cd', 'cd', 'cd',
# 'cd', 'cd',
# 'cc', 'cc', 'cc', 'cc', 'cc', 'cc',
# 'cd', 'cd', 'cd',
# 'cc', 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc', 'cc',
# 'cc', 'cc']
# mi_type = ['ccd' for r in regressors]
regressors = ['Condition']
regressors = ['q_rpe']
reg_vals = 0
conditionals = [None]
mi_type = ['cc']
inference = ['ffx' for r in regressors]
fn_pow_list = []
fn_reg_list = []
rois = []
log_bads = []
bad_epo = []
conds = []
rej_files = []
rej_files += ['1204', '1217', '1231', '0944', # Bad sessions
'0845', '0847', '0939', '0946', '0963', '1036', '1231',
'1233', '1234', '1514', '1699',
'0940', '0944', '0964', '0967', '0969', '0970', '0971',
'0977', '0985', '1037', '1280']
rej_files += ['0210', '0219', '0221', '0225', '0226', '0227', '0230',
'0252', '0268', '0276', '0277', '0279', '0281', '0282',
'0283', '0285', '0288', '0290', '0323', '0362', '0365',
'0393', '0415', '0447', '0449', '0450', '0456', '0541',
'0573', '0622', '0628', '0631', '0643', '0648', '0653',
'0660', '0688', '0689', '0690', '0692', '0697', '0706',
'0710', '0717', '0718', '0719', '0713', '0726', '0732',
'0220', '0223', '0271', '0273', '0278', '0280', '0284',
'0289', '0296', '0303', '0363', '0416', '0438', '0448',
'0521', '0618', '0656', '0691', '0693', '0698', '0705',
'0707', '0711', '0712', '0716', '0720', '0731']
for monkey in monkeys:
for condition in conditions:
epo_dir = dirs['epo'].format(monkey, condition)
power_dir = dirs['pow'].format(monkey, condition)
regr_dir = dirs['reg'].format(monkey, condition)
fname_info = op.join(dirs['ep_cnds'].format(monkey, condition),
'files_info.xlsx')
for d in os.listdir(power_dir):
if d in rej_files:
continue
if op.isdir(op.join(power_dir, d)):
fname_power = op.join(power_dir, d, n_power)
fname_regr = op.join(regr_dir, '{0}.xlsx'.format(d))
fname_epo = op.join(epo_dir,
'{0}_{1}_epo.fif'.format(d, event))
fn_pow_list.append(fname_power)
fn_reg_list.append(fname_regr)
# rois.append(read_session(fname_info, d)['sector'].values)
rois.append(np.array(['striatum']))
lb = get_log_bad_epo(fname_epo)
log_bads.append(lb)
be = get_ch_bad_epo(monkey, condition, d,
fname_info=fname_info)
bad_epo.append(be)
conds.append(condition)
mi_results = {}
pv_results = {}
for t, f in product(times, freqs):
ds_mi, ds_pv = compute_stats_meso(fn_pow_list, fn_reg_list, rois,
log_bads, bad_epo, conds,
regressors, reg_vals,
mi_type, inference,
t, f, avg_frq, norm)
mk = 'teddy'
cd = 'easy_hard_rpe_one_roi'
if avg_frq:
save_dir = op.join(dirs['st_prj'], mk, cd, event, norm,
'{0}_{1}_mt'.format(f[0], f[1]))
elif not avg_frq:
save_dir = op.join(dirs['st_prj'], mk, cd, event, norm,
'{0}_{1}_tf_mt'.format(f[0], f[1]))
os.makedirs(save_dir, exist_ok=True)
        fname_mi = op.join(save_dir, 'mi_results.nc')
        fname_pv = op.join(save_dir, 'pv_results.nc')
if not overwrite and op.exists(fname_mi):
mi = xr.load_dataset(fname_mi)
pv = xr.load_dataset(fname_pv)
ds_mi['times'] = mi['times']
ds_pv['times'] = pv['times']
ds_mi = mi.update(ds_mi)
ds_pv = pv.update(ds_pv)
ds_mi.to_netcdf(fname_mi)
ds_pv.to_netcdf(fname_pv)
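
# --- Illustration (not part of the analysis) ---------------------------------
# A minimal, self-contained sketch of the label-based xarray slicing used in
# prepare_data() above (pow.loc[dict(times=slice(tmin, tmax))]). The toy
# dimensions and values are made up for demonstration only.
def _xarray_slicing_demo():
    import numpy as np
    import xarray as xr
    times = np.linspace(-1.5, 1.5, 7)
    freqs = np.array([10., 20., 30.])
    ds = xr.Dataset({'power': (('freqs', 'times'),
                               np.random.rand(freqs.size, times.size))},
                    coords={'freqs': freqs, 'times': times})
    # Keep -0.5 <= times <= 0.5 and 10 <= freqs <= 20, selecting by label:
    return ds.loc[dict(times=slice(-0.5, 0.5), freqs=slice(10, 20))]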
|
[
"ruggero.basanisi@gmail.com"
] |
ruggero.basanisi@gmail.com
|
30afeecf7a442f626392bcc9b54728254bb8a8be
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Linked List/142.Linked List Cycle II.py
|
dec51f534aabccb931d8e8932d39d11aac643c6f
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393
| 2017-12-01T16:04:44
| 2017-12-01T16:04:44
| 46,968,756
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
# coding=utf-8
'''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Note: Do not modify the linked list.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
Use fast and slow pointers. If the list contains a cycle, the two pointers must
meet inside it; then move the slow pointer back to the head node and advance
both pointers at the same speed, and they meet again at the node where the
cycle begins.
In the diagram (http://www.cnblogs.com/zuoyuan/p/3701877.html), let K be the
distance from head to the cycle entry, M the distance from the entry to the
point where fast and slow meet, and L the cycle length. Suppose that when they
meet, fast has travelled Lfast and slow has travelled Lslow. Then:
Lslow = K + M;  Lfast = K + M + n*L (n a positive integer);  Lfast = 2*Lslow
which gives: Lslow = n*L;  K = n*L - M
So move slow back to head while fast stays at the meeting point, and step both
forward one node at a time. Slow walks K to reach the entry; fast also walks K,
and since K = (n-1)*L + (L-M), fast completes n-1 laps plus L-M and lands on
the entry as well. That is where the cycle begins.
'''
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        if head is None or head.next is None:
return None
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if fast == slow:
break
if slow == fast:
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return slow
return None
if __name__ == "__main__":
    # Demo: build 1 -> 2 -> 3 -> 4 with 4 pointing back at 2, then find the
    # cycle entry (node 2).
    nodes = [ListNode(i) for i in range(1, 5)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    nodes[-1].next = nodes[1]
    print(Solution().detectCycle(nodes[0]).val)  # -> 2
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
a8f1f9f4f7be1763c648b87bb38cc25de8b350de
|
d9b698e156c15bdc3da2190d20529e0acdf24190
|
/entregable/script2.py
|
096bd5a6367f2b9e6bbe68b8be07dccec8ddcfce
|
[] |
no_license
|
turtlean/sistemas-tp1
|
dc2fda54adb331a46962a59adc5867cc071f6e70
|
e0769d4234fb412cd9ca8c860f5bdf969155cf28
|
refs/heads/master
| 2021-05-27T06:29:31.538873
| 2013-11-19T01:30:58
| 2013-11-19T01:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
#!/usr/bin/python
from matplotlib.pyplot import *
import sys
import time
import os
import math
import matplotlib.pyplot as plt
#./simusched <lote.tsk> <num_cores> <costo_cs> <costo_mi> <sched> [<params_sched>]
os.system("rm -rf experimento")
os.system("mkdir experimento")
#for plotting
ywt=[] #waiting times
yta=[] #ta times
x=[] #quantums
desviosWt=[]
desviosTa=[]
#random tasks
wtProm=[]
taProm=[]
wtMin = 1000000
taMin = 1000000
fo = open("wtta.out", "a") #"wtta.out"
sched = str(sys.argv[1]) #scheduler
lote = str(sys.argv[2]) #task batch file
qm = int(sys.argv[3]) #quantum limit
cores = str(sys.argv[4]) #core limit
for k in range(1,int(cores)+1):
for i in range (1,qm):
j = str(i)
        print(j)
for s in range (0,40):
if (sched=="SchedLottery"):
os.system("./simusched "+lote+" "+str(k)+" 0 0 "+sched+" "+j+" "+j+" > test")
if (sched=="SchedRR" or sched=="SchedRR2"):
string = "./simusched "+lote+" "+str(k)+" 0 0 "+sched+" "
for h in range(0,k):
string = string+j+" "
string = string+"> test"
os.system(string)
#os.system("python graphsched.py test")
os.system("./wtta < test > tmp")
tmp1 = open("tmp",'r').read()
tmp = open("tmp",'r').readlines()
wt = tmp[0].split()
wt = float(wt[2])
ta = tmp[1].split()
ta = float(ta[2])
wtProm.append(wt)
taProm.append(ta)
wt = sum(wtProm) / float(len(wtProm))
ta = sum(taProm) / float(len(taProm))
desvioWt = [(z-wt)*(z-wt) for z in wtProm]
desvioWt = math.sqrt(sum(desvioWt) / (float(len(desvioWt))-1))
desviosWt.append(desvioWt)
desvioTa = [(b-ta)*(b-ta) for b in taProm]
desvioTa = math.sqrt(sum(desvioTa) / (float(len(desvioTa))-1))
desviosTa.append(desvioTa)
wtProm=[]
taProm=[]
ywt.append(wt)
yta.append(ta)
x.append(i)
if (taMin > ta):
taMin = ta
quantum1 = i
if (wtMin > wt):
wtMin = wt
quantum2 = i
fo.write("Quantum: "+j+"\n")
fo.write("Waiting time: "+str(wt)+"\n")
fo.write("Turnaround time: "+str(ta)+"\n")
#nombre_test = "test_"+j
#os.system("mv test.png experimento/"+nombre_test+".png")
#os.system("mv test experimento/"+nombre_test+".out")
#os.system("rm test")
fo.write("\n")
fo.write("Quantum con menor waiting time: "+str(quantum2)+"("+str(wtMin)+")\n")
fo.write("Quantum con menor turnaround time: "+str(quantum1)+"("+str(taMin)+")")
#os.system("mv wtta.out experimento/")
#Plotter
plt.figure(k)
plt.xlabel("Quantum")
plt.ylabel("Waiting time")
plt.errorbar(x, ywt, yerr=desviosWt, fmt='.', color='black')
plt.axis([0,qm,0,max(ywt)+2])
savefig("experimento/cores_"+str(k)+"_wt.jpg")
plt.figure(k+1)
plt.xlabel("Quantum")
plt.ylabel("Turnaround time")
plt.errorbar(x, yta, yerr=desviosTa, fmt='.', color='black')
plt.axis([0,qm,taMin-60,max(yta)+60])
savefig("experimento/cores_"+str(k)+"_ta.jpg")
wtMin = 1000000
taMin = 1000000
ywt=[] #waiting times
yta=[] #ta times
x=[] #quantums
desviosWt=[]
desviosTa=[]
os.system("rm tmp")
os.system("rm test")
os.system("mv wtta.out experimento/")
|
[
"martin@martin-pc.(none)"
] |
martin@martin-pc.(none)
|
ade97c5dd853443919aed5d2d9e2775c257ab39f
|
b7c788979e8f0f7cb9283874e46f9ec12d13819c
|
/app/Member.py
|
f035869d4271d59c55940c97a23e3e716c457110
|
[
"MIT"
] |
permissive
|
luwoldy/masonite-wcv
|
457ba95c7722f1434e35b31973f5b89102ce5434
|
c520f8b30c1775b3e337c3fa63ae8f08c1b0adf4
|
refs/heads/master
| 2023-01-09T05:34:49.048144
| 2019-12-20T02:55:54
| 2019-12-20T02:55:54
| 210,762,211
| 0
| 0
|
MIT
| 2023-01-04T11:19:06
| 2019-09-25T05:26:03
|
CSS
|
UTF-8
|
Python
| false
| false
| 110
|
py
|
"""Member Model."""
from config.database import Model
class Member(Model):
"""Member Model."""
pass
|
[
"leul.woldeab@gmail.com"
] |
leul.woldeab@gmail.com
|
3393ba44a415f8540c1fe8973955eef2de703ff1
|
c211c25b0dde4435f09357880ac0db97c05fd9a7
|
/Spider.py
|
9575f749de3597014c98b646d7e53ddc7af3777c
|
[] |
no_license
|
Monday666/maoyanSpider
|
2abfd912a56afeef9563b9f6f4e18b3377fc4c7f
|
68036975face7256d4b6286a325a28174943465c
|
refs/heads/master
| 2020-03-31T07:10:07.798737
| 2018-10-08T02:58:28
| 2018-10-08T02:58:28
| 152,010,437
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,377
|
py
|
import requests
from bs4 import BeautifulSoup
import re
import csv
def Gethtml(url):
"""
使用浏览器模拟的方式获取网页源码
:param url: 网页URL地址
:return:html
"""
    # Assemble the cookies string into a dict
cookies_str = '__mta=208903469.1537876239606.1537966087518.1537966271718.11; uuid_n_v=v1; uuid=3851AE40C0B911E895764F985E386DE202DFFDFED118403EB9BA5E7A9C9D6698; _lxsdk_cuid=16610912361c8-07a50566ed1d0e-8383268-1fa400-16610912362c8; _lxsdk=3851AE40C0B911E895764F985E386DE202DFFDFED118403EB9BA5E7A9C9D6698; _csrf=33645a5e9922420ef22609cd9965dd58afac2d82a9caca9afd817c97d4a41563; _lx_utm=utm_source%3Dmeituanweb; __mta=208903469.1537876239606.1537964122287.1537964124676.6; _lxsdk_s=16615ccbec7-4dc-ef6-e2a%7C%7C20'
cookies_dict = {}
for cookie in cookies_str.split(";"):
k, v = cookie.split("=", 1)
cookies_dict[k.strip()] = v.strip()
    # Other request-header parameters
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
    # Request the page
page = requests.get(url=url, cookies=cookies_dict, headers=headers)
    # Return the page HTML
return page.text
def FindMovie(sp):
"""
截取各部电影的所在标签返回列表,每个值是一部电影所在整个标签里的内容
"""
movie = sp.find_all('div', attrs={'class': re.compile(r"show-list(\s\w+)?")})
return movie
def FindPage(sp):
"""
寻找并截取每个日期的界面
:param sp:
:return:
"""
page = sp.find_all('div', attrs={'class': re.compile(r"plist-container(\s\w+)?")})
return page
def FindName(sp):
"""
找到电影名
:param sp:soup
:return:str name
"""
name = sp.find('h3', class_='movie-name')
return name.text
def FindDate(sp):
"""
找到日期
:param sp:
:return:
"""
lsdate = sp.find_all('span', attrs={'class': re.compile(r"date-item(\s\w+)?")})
data = []
for l in lsdate:
data.append(l.text)
return data
def FindTime(sp):
"""
找到时间
:param sp:
:return:
"""
time = []
page = FindPage(sp)
for i in range(len(page)):
lstime = page[i].find_all('span', attrs={'class': 'begin-time'})
timei = []
if lstime == []:
timei.append("无场次")
else:
for l in lstime:
timei.append(l.text)
time.append(timei)
return time
def FindPrice(sp):
"""
找到价格
:param sp:
:return:
"""
lsprice = sp.find('span', class_='value text-ellipsis', text=re.compile(r"(.\d+.\d.)张"))
return lsprice.text
def FindPeople(sp):
"""
找到人数,返回已售票,和未售票
:param sp:
:return:
"""
Npeople = sp.find_all('span', class_='seat selectable')
Hpeople = sp.find_all('span', class_='seat sold')
return Npeople, Hpeople
def ReturnPrice(sp):
"""
到价格界面找到价格并返回价格值
:param sp:
:return:
"""
page = FindPage(sp)
server = "http://maoyan.com"
price = []
for i in range(len(page)):
pricei = []
Url = []
a = page[i].find_all('a', attrs={'class': 'buy-btn normal'})
if a == []:
pricei.append('无')
else:
for each in a:
Url.append(server + each.get('href'))
for j in Url:
pricei.append(FindPrice(BeautifulSoup(Gethtml(j), 'html.parser')))
price.append(pricei)
return price
def ReturnPN(sp):
"""
到人数界面找到人数并返回已售票之和未售票值
:param sp:
:return:
"""
peopleN = []
page = FindPage(sp)
server = "http://maoyan.com"
for i in range(len(page)):
Url = []
peopleNi = []
a = page[i].find_all('a', attrs={'class': 'buy-btn normal'})
if a == []:
peopleNi.append('无')
else:
for each in a:
Url.append(server + each.get('href'))
for j in Url:
people = FindPeople(BeautifulSoup(Gethtml(j), 'html.parser'))
Npeople, Hpeople = people
peopleNi.append("已售出:" + str(len(Hpeople)) + "剩余票数:" + str(len(Npeople)))
peopleN.append(peopleNi)
return peopleN
# Fetch the page source
URL = "http://maoyan.com/cinema/2714?poi=2367020"
sp1 = BeautifulSoup(Gethtml(URL), 'html.parser')
movie = FindMovie(sp1)
name = []
data = []
time = []
price = []
peopleN = []
# Collect the data
for i in range(len(movie)):
name.append(FindName(movie[i]))
data.append(FindDate(movie[i]))
time.append(FindTime(movie[i]))
price.append(ReturnPrice(movie[i]))
peopleN.append(ReturnPN(movie[i]))
# Assemble the data
info = []
for i in range(len(movie)):
for j in range(len(data[i])):
for k in range(len(time[i][j])):
infok = [name[i], data[i][j], time[i][j][k], price[i][j][k], peopleN[i][j][k]]
info.append(infok)
# Save the data
with open('myinfo.csv', 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['电影名', '日期', '时间', '票价', '余票'])
for i in range(len(info)):
csvwriter.writerow(info[i])
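
# --- Illustration (not part of the spider) ------------------------------------
# A minimal sketch of the cookie parsing done in Gethtml() above, using a
# made-up cookie string: "k=v; k2=v2" becomes the dict that requests expects.
def _cookie_parse_demo():
    cookies_str = 'uuid=abc123; _lxsdk=def456; token=x=y'  # made-up values
    cookies_dict = {}
    for cookie in cookies_str.split(';'):
        # Split on the first '=' only, so values may themselves contain '='.
        k, v = cookie.split('=', 1)
        cookies_dict[k.strip()] = v.strip()
    return cookies_dict  # {'uuid': 'abc123', '_lxsdk': 'def456', 'token': 'x=y'}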
|
[
"[18361233771@163.com]"
] |
[18361233771@163.com]
|
9ec2452996d72632a51586c21518412a127f1081
|
95a9ee042c6d0aa9cc5ee44c6b9317a10e7b02ee
|
/day07/python/day07.py
|
4b491ffdeb3195bf4f11b03bf979dea285e470e7
|
[] |
no_license
|
ochronus/advent-of-code-2019
|
4e273a236a801cffc347c419d408ebd717366f7f
|
ba47c63913c6b85953d04eaad15f8fac56ed1c5b
|
refs/heads/master
| 2020-09-21T13:32:51.679255
| 2019-12-13T06:05:08
| 2019-12-13T06:05:08
| 224,802,962
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
from itertools import permutations
from queue import SimpleQueue
from computer import Computer, HaltException
def part1(program):
phase_settings = permutations(range(5))
highest_output_signal = 0
for phase_setting in phase_settings:
amplifiers = []
for i in range(5):
amplifiers.append(Computer(program))
amplifiers[i].give_signal(phase_setting[i])
signal = 0
for i in range(5):
amplifiers[i].give_signal(signal)
signal = amplifiers[i].run()
highest_output_signal = max(highest_output_signal, signal)
    return highest_output_signal
def part2(program):
phase_settings = permutations(range(5, 10))
highest_output_signal = 0
for phase_setting in phase_settings:
amplifiers = []
for i in range(5):
amplifiers.append(Computer(program))
amplifiers[i].give_signal(phase_setting[i])
signal = 0
should_halt = False
while not should_halt:
for i in range(5):
amplifiers[i].give_signal(signal)
try:
signal = amplifiers[i].run()
except HaltException:
should_halt = True
highest_output_signal = max(highest_output_signal, signal)
    return highest_output_signal
with open("../input.txt") as f:
acs = list([int(i) for i in f.read().split(",")])
print(part1(acs))
print(part2(acs))
|
[
"ochronus@ochronus.com"
] |
ochronus@ochronus.com
|
8dbfd84f504b14bdf50f58fb927e9dff65dae76d
|
4714f19916c27a49d3a29f9bd96bdf92ca5affea
|
/extensions/sympyex/__init__.py
|
223f96e875e112503e4003a6481b283447616b09
|
[] |
no_license
|
Sroka/jupyter_maths
|
803c6834eff5186f8262cbc5246c9aca80dbec41
|
81c385cceae192c23f3c33ccb203708b7fe349d6
|
refs/heads/master
| 2020-03-22T08:11:20.482729
| 2019-03-03T10:19:31
| 2019-03-03T10:19:31
| 139,750,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
from sympy import Expr, Equality, latex
from IPython.display import display, Latex
def expressionEqualityExtension(self: Expr, function):
return Equality(self, function, evaluate=False)
def latexify(self: Expr, mode='plain'):
return latex(self, mode=mode)
def latexify_inline(self: Expr):
return latex(self, mode='inline')
def latexify_equation(self: Expr):
return latex(self, mode='equation')
def display_latex(latex: str):
return display(Latex(latex))
Expr.eq = expressionEqualityExtension
Expr.latex = latexify
Expr.latex_inline = latexify_inline
Expr.latex_equation = latexify_equation
del expressionEqualityExtension
del latexify
del latexify_inline
del latexify_equation
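
# --- Illustration (not part of the extension) ---------------------------------
# A minimal sketch of how the monkey-patched helpers above can be used once
# this module has been imported; the symbol name is made up.
def _sympyex_demo():
    from sympy import symbols
    x = symbols('x')
    eq = (x + 1).eq(x ** 2)           # unevaluated Equality via the .eq() patch
    inline = (x ** 2).latex_inline()  # '$x^{2}$'
    display_latex(inline)             # renders the LaTeX in a Jupyter notebook
    return eq, inline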
|
[
"srokowski.maciej@gmail.com"
] |
srokowski.maciej@gmail.com
|
1c2f7f5a73f04021dcfaf564920e8a0ffd7f0374
|
93c43eb538a14261f1af29e91d1770a99184eb82
|
/adding milk routes v2.py
|
8b5e441034fca3677075ea48e0b054e984fc133c
|
[] |
no_license
|
VS-DavidSouth/FMD_Truck_python
|
e61bc0c2b278ee52a8848b5eed457ce7c1c775b0
|
6d241f454379936b7192d2422db19bc507d09e1c
|
refs/heads/master
| 2020-03-27T05:41:15.030656
| 2018-08-24T21:03:56
| 2018-08-24T21:03:56
| 146,039,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
#adding milk routes v2.py
#Created by David South -- Last updated: 6/1/17
#Adapted from "adding routes v3" for the Feed Truck Model
#Purpose: to create a Feature Class for each region with a specific number
#of blank rows with specific fields that will be used to make routes in the VRP
#(Vehicle Routing Problem) in the Network Analyst addon in ArcMap.
#Note: this can be run in the Python IDE or in the arcpy window.
######################SETUP###################
import arcpy, os
from arcpy import env
#set workplace environment
ws = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Python'
env.workspace = ws
env.overwriteOutput = True
######################PARAMETERS##############
#max capacity, in gallons per delivery truck
capacities = 6500
#location of routes folder
routes_folder = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Routes'
#location of the creameries folder
creameries_folder = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Creameries'
#location of the creameries file
creameries = r'F:\FMD_Project\Davids_work\Milk_Truck_Model\Creameries\creameries_v4_3.shp'
#define fields to search in the creameries file
fields1 = ['FID', 'Trucks_per']
#define fields for the routes file (EStartTime and LStartTime fields are for
#determining which day the route will run, Earliest and Latest start time)
fields2 = ['Name', 'StartDepot', 'EndDepot','EStartTime', 'LStartTime', \
'Capacities']
#these fields were not used but can be added in:
#'MaxOrder'
#name of the new file
newFile = 'MTM_routes.shp' #MTM means Milk Truck Model
newFileFull = os.path.join(routes_folder, newFile)
#define days of the week that the routes can run on. Note there is one extra
#day, this is to incorporate a 24 hour period for each day.
date = ['1994-08-22', '1994-08-23', '1994-08-24']
#define days of the week
DotW = ['Mon', 'Tues']
####################ACTUAL CODE#################
#count how many Depots there are in the region
creameries_C = arcpy.GetCount_management(in_rows= creameries)
creameries_Count = int(creameries_C.getOutput(0))
#define blank lists
trucks_per_creamery = []
#make a search cursor to save the info from the creameries and save it to a list
with arcpy.da.SearchCursor(creameries, fields1) as cursor1:
for row_1 in cursor1:
trucks_per_creamery += [row_1[1]]
#create a new completely blank shapefile for the routes
arcpy.CreateFeatureclass_management(out_path= \
routes_folder, out_name= newFile, geometry_type="POLYLINE", template= "", \
has_m="DISABLED", has_z="DISABLED")
#add new fields
for num1 in range(0, len(fields2)):
arcpy.AddField_management(newFileFull, fields2[num1], "TEXT", "", "", 15)
#define a cursor
cursor2 = arcpy.da.InsertCursor(newFileFull, fields2)
##add in a buncha blank rows for the routes##
#make two copies of the routes, one for each 24 hour period
for s in range (0, len(date)-1):
    #do this for each creamery
for q in range (0, creameries_Count):
        #do this for each route
for p in range (0, trucks_per_creamery[q]):
#fill the fields from fields2 with the following info
cursor2.insertRow(['Route_' + str(q)+ '_' + str(p) + DotW[s], \
str(q), str(q), date[s], date[s+1], str(capacities)])
#get outta here cursor. You ain't gotta go home, but you can't stay here.
del cursor2
print "Completed."
|
[
"12001003523326@FEDIDCARD.GOV"
] |
12001003523326@FEDIDCARD.GOV
|
29459d2f2495bd6eabb00953ccd6e2064a3749f5
|
d82a8844c7d46c752e567cca41a8ae1c15c975f7
|
/API/urls.py
|
aaae4d1d1c0b11959a544fed6876085e896c1700
|
[] |
no_license
|
golammahmud/job_evaluations_project
|
f1be9f8f8b27c0f9db6539294ccff25254ff08f3
|
fe362f2d6bc57e1d550c39263312ef046eb7754c
|
refs/heads/master
| 2023-08-04T10:20:59.442703
| 2021-09-27T02:31:03
| 2021-09-27T02:31:03
| 410,347,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.contrib import admin
from django.urls import path,include
from rest_framework import routers
from .views import UserInputView,UserBasedInputView
from rest_framework_simplejwt.views import TokenObtainPairView,TokenRefreshView
router=routers.DefaultRouter()
router.register('all-userinputs',UserInputView)
router.register('user-based-inputs',UserBasedInputView)
urlpatterns = [
path('', include(router.urls)),
path('api-auth/',include('rest_framework.urls')),
path('get_token/', TokenObtainPairView.as_view(), name='token_obtain_pair'), #get token
path('token_refresh/', TokenRefreshView.as_view(), name='token_refresh'),# get refresh token
]
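
# --- Illustration (not part of the URLconf) ------------------------------------
# A minimal sketch of how a client could call the JWT endpoints wired up above,
# assuming this URLconf is mounted at the site root on localhost:8000 and that
# a user 'demo' with password 'demo-pass' exists (all made-up assumptions).
def _jwt_client_demo():
    import requests
    base = 'http://localhost:8000/'
    r = requests.post(base + 'get_token/',
                      data={'username': 'demo', 'password': 'demo-pass'})
    tokens = r.json()                 # {'access': ..., 'refresh': ...}
    r = requests.post(base + 'token_refresh/',
                      data={'refresh': tokens['refresh']})
    return r.json()                   # {'access': <fresh access token>}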
|
[
"golam.mahmud99@gmail.com"
] |
golam.mahmud99@gmail.com
|
4cb017fbd99c77bd3a84cd8f89a44b62c7d24911
|
953eb11091193b2668cb71dd225ac80d86da0bc2
|
/src/meron_background.py
|
2d37a0076c413ed48a4322d583b0e2c2c6ad3a3f
|
[] |
no_license
|
haugstve/use-watson-ml-python
|
e9f750a1daa85ba9f83b83d9690d95d1beeb3c9c
|
6fcb2a2059c050de41286bdec90619f61e0fffa6
|
refs/heads/master
| 2020-08-10T14:21:14.586297
| 2019-10-11T11:34:25
| 2019-10-11T11:34:25
| 214,359,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
import schedule
import time
import random
import json
from think_meron import aim_token, evaluate_meron_model
from path import processed_data_path, root
def job():
data_path = processed_data_path / 'payloads_meron.json'
with open(data_path, 'r') as f:
payloads = json.load(f)
token = aim_token()
    selection = random.randrange(len(payloads))  # random index in [0, len(payloads))
response = evaluate_meron_model(token, payloads[selection])
log_path = root / 'response.log'
with open(log_path, 'a+') as f:
f.write(f'status: {response.status_code}'
f', content: {response.content}\n')
schedule.every().minute.at(":00").do(job)
while True:
schedule.run_pending()
time.sleep(1)
|
[
"daniel.haugstvedt@gmail.com"
] |
daniel.haugstvedt@gmail.com
|
4e6b2f5305a95ee8bd724518db163a25821cd145
|
6e637218f6bb9c9e9ede685665c47c655ee73c35
|
/addon/addon_dependencies/mod/46461-46487M/space_userpref.py
|
269ef41ed1fbed76e8c66c1294cc764d6828a137
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
littleneo/Blender-addon-dependencies
|
e5ffeff27e8f26b7f184c59fa70690f60f952167
|
cafd484ec42c3c5b603de7e04442a201f48375ea
|
refs/heads/master
| 2020-05-27T17:41:38.494409
| 2012-08-20T16:05:05
| 2012-08-20T16:05:05
| 2,091,693
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,958
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
lnmod = ('46461-46487M',(0, 72))
import bpy
from bpy.types import Header, Menu, Panel
import os
import addon_utils
def ui_items_general(col, context):
""" General UI Theme Settings (User Interface)
"""
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(context, "outline")
colsub.row().prop(context, "item", slider=True)
colsub.row().prop(context, "inner", slider=True)
colsub.row().prop(context, "inner_sel", slider=True)
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(context, "text")
colsub.row().prop(context, "text_sel")
colsub.prop(context, "show_shaded")
subsub = colsub.column(align=True)
subsub.active = context.show_shaded
subsub.prop(context, "shadetop")
subsub.prop(context, "shadedown")
col.separator()
def opengl_lamp_buttons(column, lamp):
split = column.split(percentage=0.1)
split.prop(lamp, "use", text="", icon='OUTLINER_OB_LAMP' if lamp.use else 'LAMP_DATA')
col = split.column()
col.active = lamp.use
row = col.row()
row.label(text="Diffuse:")
row.prop(lamp, "diffuse_color", text="")
row = col.row()
row.label(text="Specular:")
row.prop(lamp, "specular_color", text="")
col = split.column()
col.active = lamp.use
col.prop(lamp, "direction", text="")
class USERPREF_HT_header(Header):
bl_space_type = 'USER_PREFERENCES'
def draw(self, context):
layout = self.layout
layout.template_header(menus=False)
userpref = context.user_preferences
layout.operator_context = 'EXEC_AREA'
layout.operator("wm.save_homefile", text="Save As Default")
layout.operator_context = 'INVOKE_DEFAULT'
if userpref.active_section == 'INPUT':
layout.operator("wm.keyconfig_import")
layout.operator("wm.keyconfig_export")
elif userpref.active_section == 'ADDONS':
layout.operator("wm.addon_install")
layout.menu("USERPREF_MT_addons_dev_guides")
elif userpref.active_section == 'THEMES':
layout.operator("ui.reset_default_theme")
class USERPREF_PT_tabs(Panel):
bl_label = ""
bl_space_type = 'USER_PREFERENCES'
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
layout.prop(userpref, "active_section", expand=True)
class USERPREF_MT_interaction_presets(Menu):
bl_label = "Presets"
preset_subdir = "interaction"
preset_operator = "script.execute_preset"
draw = Menu.draw_preset
class USERPREF_MT_appconfigs(Menu):
bl_label = "AppPresets"
preset_subdir = "keyconfig"
preset_operator = "wm.appconfig_activate"
def draw(self, context):
self.layout.operator("wm.appconfig_default", text="Blender (default)")
# now draw the presets
Menu.draw_preset(self, context)
class USERPREF_MT_splash(Menu):
bl_label = "Splash"
def draw(self, context):
layout = self.layout
split = layout.split()
row = split.row()
row.label("")
row = split.row()
row.label("Interaction:")
# XXX, no redraws
# text = bpy.path.display_name(context.window_manager.keyconfigs.active.name)
# if not text:
# text = "Blender (default)"
row.menu("USERPREF_MT_appconfigs", text="Preset")
class USERPREF_PT_interface(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Interface"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'INTERFACE')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
view = userpref.view
row = layout.row()
col = row.column()
col.label(text="Display:")
col.prop(view, "show_tooltips")
col.prop(view, "show_tooltips_python")
col.prop(view, "show_object_info", text="Object Info")
col.prop(view, "show_large_cursors")
col.prop(view, "show_view_name", text="View Name")
col.prop(view, "show_playback_fps", text="Playback FPS")
col.prop(view, "use_global_scene")
col.prop(view, "object_origin_size")
col.separator()
col.separator()
col.separator()
col.prop(view, "show_mini_axis", text="Display Mini Axis")
sub = col.column()
sub.active = view.show_mini_axis
sub.prop(view, "mini_axis_size", text="Size")
sub.prop(view, "mini_axis_brightness", text="Brightness")
col.separator()
row.separator()
row.separator()
col = row.column()
col.label(text="View Manipulation:")
col.prop(view, "use_mouse_auto_depth")
col.prop(view, "use_zoom_to_mouse")
col.prop(view, "use_rotate_around_active")
col.prop(view, "use_global_pivot")
col.prop(view, "use_camera_lock_parent")
col.separator()
col.prop(view, "use_auto_perspective")
col.prop(view, "smooth_view")
col.prop(view, "rotation_angle")
col.separator()
col.separator()
col.label(text="2D Viewports:")
col.prop(view, "view2d_grid_spacing_min", text="Minimum Grid Spacing")
col.prop(view, "timecode_style")
row.separator()
row.separator()
col = row.column()
#Toolbox doesn't exist yet
#col.label(text="Toolbox:")
#col.prop(view, "show_column_layout")
#col.label(text="Open Toolbox Delay:")
#col.prop(view, "open_left_mouse_delay", text="Hold LMB")
#col.prop(view, "open_right_mouse_delay", text="Hold RMB")
col.prop(view, "show_manipulator")
sub = col.column()
sub.active = view.show_manipulator
sub.prop(view, "manipulator_size", text="Size")
sub.prop(view, "manipulator_handle_size", text="Handle Size")
sub.prop(view, "manipulator_hotspot", text="Hotspot")
col.separator()
col.separator()
col.separator()
col.label(text="Menus:")
col.prop(view, "use_mouse_over_open")
col.label(text="Menu Open Delay:")
col.prop(view, "open_toplevel_delay", text="Top Level")
col.prop(view, "open_sublevel_delay", text="Sub Level")
col.separator()
col.prop(view, "show_splash")
if os.name == 'nt':
col.prop(view, "quit_dialog")
class USERPREF_PT_edit(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Edit"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'EDITING')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
edit = userpref.edit
row = layout.row()
col = row.column()
col.label(text="Link Materials To:")
col.prop(edit, "material_link", text="")
col.separator()
col.separator()
col.separator()
col.label(text="New Objects:")
col.prop(edit, "use_enter_edit_mode")
col.label(text="Align To:")
col.prop(edit, "object_align", text="")
col.separator()
col.separator()
col.separator()
col.label(text="Undo:")
col.prop(edit, "use_global_undo")
col.prop(edit, "undo_steps", text="Steps")
col.prop(edit, "undo_memory_limit", text="Memory Limit")
row.separator()
row.separator()
col = row.column()
col.label(text="Grease Pencil:")
col.prop(edit, "grease_pencil_manhattan_distance", text="Manhattan Distance")
col.prop(edit, "grease_pencil_euclidean_distance", text="Euclidean Distance")
#~ col.prop(edit, "use_grease_pencil_simplify_stroke", text="Simplify Stroke")
col.prop(edit, "grease_pencil_eraser_radius", text="Eraser Radius")
col.prop(edit, "use_grease_pencil_smooth_stroke", text="Smooth Stroke")
col.separator()
col.separator()
col.separator()
col.label(text="Playback:")
col.prop(edit, "use_negative_frames")
col.separator()
col.separator()
col.separator()
col.label(text="Animation Editors:")
col.prop(edit, "fcurve_unselected_alpha", text="F-Curve Visibility")
row.separator()
row.separator()
col = row.column()
col.label(text="Keyframing:")
col.prop(edit, "use_visual_keying")
col.prop(edit, "use_keyframe_insert_needed", text="Only Insert Needed")
col.separator()
col.prop(edit, "use_auto_keying", text="Auto Keyframing:")
sub = col.column()
#~ sub.active = edit.use_keyframe_insert_auto # incorrect, time-line can enable
sub.prop(edit, "use_keyframe_insert_available", text="Only Insert Available")
col.separator()
col.label(text="New F-Curve Defaults:")
col.prop(edit, "keyframe_new_interpolation_type", text="Interpolation")
col.prop(edit, "keyframe_new_handle_type", text="Handles")
col.prop(edit, "use_insertkey_xyz_to_rgb", text="XYZ to RGB")
col.separator()
col.separator()
col.separator()
col.label(text="Transform:")
col.prop(edit, "use_drag_immediately")
row.separator()
row.separator()
col = row.column()
col.prop(edit, "sculpt_paint_overlay_color", text="Sculpt Overlay Color")
col.separator()
col.separator()
col.separator()
col.label(text="Duplicate Data:")
col.prop(edit, "use_duplicate_mesh", text="Mesh")
col.prop(edit, "use_duplicate_surface", text="Surface")
col.prop(edit, "use_duplicate_curve", text="Curve")
col.prop(edit, "use_duplicate_text", text="Text")
col.prop(edit, "use_duplicate_metaball", text="Metaball")
col.prop(edit, "use_duplicate_armature", text="Armature")
col.prop(edit, "use_duplicate_lamp", text="Lamp")
col.prop(edit, "use_duplicate_material", text="Material")
col.prop(edit, "use_duplicate_texture", text="Texture")
#col.prop(edit, "use_duplicate_fcurve", text="F-Curve")
col.prop(edit, "use_duplicate_action", text="Action")
col.prop(edit, "use_duplicate_particle", text="Particle")
class USERPREF_PT_system(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "System"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'SYSTEM')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
system = userpref.system
split = layout.split()
# 1. Column
column = split.column()
colsplit = column.split(percentage=0.85)
col = colsplit.column()
col.label(text="General:")
col.prop(system, "dpi")
col.prop(system, "frame_server_port")
col.prop(system, "scrollback", text="Console Scrollback")
col.separator()
col.separator()
col.label(text="Sound:")
col.row().prop(system, "audio_device", expand=True)
sub = col.column()
sub.active = system.audio_device != 'NONE'
#sub.prop(system, "use_preview_images")
sub.prop(system, "audio_channels", text="Channels")
sub.prop(system, "audio_mixing_buffer", text="Mixing Buffer")
sub.prop(system, "audio_sample_rate", text="Sample Rate")
sub.prop(system, "audio_sample_format", text="Sample Format")
col.separator()
col.separator()
col.label(text="Screencast:")
col.prop(system, "screencast_fps")
col.prop(system, "screencast_wait_time")
col.separator()
col.separator()
if hasattr(system, 'compute_device'):
col.label(text="Compute Device:")
col.row().prop(system, "compute_device_type", expand=True)
sub = col.row()
sub.active = system.compute_device_type != 'CPU'
sub.prop(system, "compute_device", text="")
# 2. Column
column = split.column()
colsplit = column.split(percentage=0.85)
col = colsplit.column()
col.label(text="OpenGL:")
col.prop(system, "gl_clip_alpha", slider=True)
col.prop(system, "use_mipmaps")
col.prop(system, "use_16bit_textures")
col.label(text="Anisotropic Filtering")
col.prop(system, "anisotropic_filter", text="")
col.prop(system, "use_vertex_buffer_objects")
# Anti-aliasing is disabled as it breaks border/lasso select
#~ col.prop(system, "use_antialiasing")
col.label(text="Window Draw Method:")
col.prop(system, "window_draw_method", text="")
col.label(text="Text Draw Options:")
col.prop(system, "use_text_antialiasing")
col.label(text="Textures:")
col.prop(system, "gl_texture_limit", text="Limit Size")
col.prop(system, "texture_time_out", text="Time Out")
col.prop(system, "texture_collection_rate", text="Collection Rate")
col.separator()
col.separator()
col.separator()
col.label(text="Sequencer:")
col.prop(system, "prefetch_frames")
col.prop(system, "memory_cache_limit")
# 3. Column
column = split.column()
column.label(text="Solid OpenGL lights:")
split = column.split(percentage=0.1)
split.label()
split.label(text="Colors:")
split.label(text="Direction:")
lamp = system.solid_lights[0]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[1]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[2]
opengl_lamp_buttons(column, lamp)
column.separator()
column.label(text="Color Picker Type:")
column.row().prop(system, "color_picker_type", text="")
column.separator()
column.prop(system, "use_weight_color_range", text="Custom Weight Paint Range")
sub = column.column()
sub.active = system.use_weight_color_range
sub.template_color_ramp(system, "weight_color_range", expand=True)
column.separator()
column.prop(system, "use_international_fonts")
if system.use_international_fonts:
column.prop(system, "language")
row = column.row()
row.label(text="Translate:")
row.prop(system, "use_translate_interface", text="Interface")
row.prop(system, "use_translate_tooltips", text="Tooltips")
class USERPREF_MT_interface_theme_presets(Menu):
bl_label = "Presets"
preset_subdir = "interface_theme"
preset_operator = "script.execute_preset"
preset_type = 'XML'
preset_xml_map = (("user_preferences.themes[0]", "Theme"), )
draw = Menu.draw_preset
class USERPREF_PT_theme(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Themes"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@staticmethod
def _theme_generic(split, themedata):
col = split.column()
def theme_generic_recurse(data):
col.label(data.rna_type.name)
row = col.row()
subsplit = row.split(percentage=0.95)
padding1 = subsplit.split(percentage=0.15)
padding1.column()
subsplit = row.split(percentage=0.85)
padding2 = subsplit.split(percentage=0.15)
padding2.column()
colsub_pair = padding1.column(), padding2.column()
props_type = {}
for i, prop in enumerate(data.rna_type.properties):
if prop.identifier == "rna_type":
continue
props_type.setdefault((prop.type, prop.subtype), []).append(prop)
for props_type, props_ls in sorted(props_type.items()):
if props_type[0] == 'POINTER':
for i, prop in enumerate(props_ls):
theme_generic_recurse(getattr(data, prop.identifier))
else:
for i, prop in enumerate(props_ls):
colsub_pair[i % 2].row().prop(data, prop.identifier)
theme_generic_recurse(themedata)
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'THEMES')
def draw(self, context):
layout = self.layout
theme = context.user_preferences.themes[0]
split_themes = layout.split(percentage=0.2)
sub = split_themes.column()
sub.label(text="Presets:")
subrow = sub.row(align=True)
subrow.menu("USERPREF_MT_interface_theme_presets", text=USERPREF_MT_interface_theme_presets.bl_label)
subrow.operator("wm.interface_theme_preset_add", text="", icon='ZOOMIN')
subrow.operator("wm.interface_theme_preset_add", text="", icon='ZOOMOUT').remove_active = True
sub.separator()
sub.prop(theme, "theme_area", expand=True)
split = layout.split(percentage=0.4)
layout.separator()
layout.separator()
split = split_themes.split()
if theme.theme_area == 'USER_INTERFACE':
col = split.column()
ui = theme.user_interface.wcol_regular
col.label(text="Regular:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_tool
col.label(text="Tool:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_radio
col.label(text="Radio Buttons:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_text
col.label(text="Text:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_option
col.label(text="Option:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_toggle
col.label(text="Toggle:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_num
col.label(text="Number Field:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_numslider
col.label(text="Value Slider:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_box
col.label(text="Box:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu
col.label(text="Menu:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_pulldown
col.label(text="Pulldown:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu_back
col.label(text="Menu Back:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_tooltip
col.label(text="Tooltip:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_menu_item
col.label(text="Menu Item:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_scroll
col.label(text="Scroll Bar:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_progress
col.label(text="Progress Bar:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_list_item
col.label(text="List Item:")
ui_items_general(col, ui)
ui = theme.user_interface.wcol_state
col.label(text="State:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "inner_anim")
colsub.row().prop(ui, "inner_anim_sel")
colsub.row().prop(ui, "inner_driven")
colsub.row().prop(ui, "inner_driven_sel")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "inner_key")
colsub.row().prop(ui, "inner_key_sel")
colsub.row().prop(ui, "blend")
col.separator()
col.separator()
ui = theme.user_interface
col.label("Icons:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "icon_file")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "icon_alpha")
col.separator()
col.separator()
ui = theme.user_interface.panel
col.label("Panels:")
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
rowsub = colsub.row()
rowsub.prop(ui, "show_header")
rowsub.label()
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "header")
layout.separator()
layout.separator()
elif theme.theme_area == 'BONE_COLOR_SETS':
col = split.column()
for i, ui in enumerate(theme.bone_color_sets):
col.label(text="Color Set" + " %d:" % (i + 1)) # i starts from 0
row = col.row()
subsplit = row.split(percentage=0.95)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "normal")
colsub.row().prop(ui, "select")
colsub.row().prop(ui, "active")
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
colsub.row().prop(ui, "show_colored_constraints")
else:
self._theme_generic(split, getattr(theme, theme.theme_area.lower()))
class USERPREF_PT_file(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Files"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'FILES')
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
paths = userpref.filepaths
system = userpref.system
split = layout.split(percentage=0.7)
col = split.column()
col.label(text="File Paths:")
colsplit = col.split(percentage=0.95)
col1 = colsplit.split(percentage=0.3)
sub = col1.column()
sub.label(text="Fonts:")
sub.label(text="Textures:")
sub.label(text="Texture Plugins:")
sub.label(text="Sequence Plugins:")
sub.label(text="Render Output:")
sub.label(text="Scripts:")
sub.label(text="Sounds:")
sub.label(text="Temp:")
sub.label(text="Image Editor:")
sub.label(text="Animation Player:")
sub = col1.column()
sub.prop(paths, "font_directory", text="")
sub.prop(paths, "texture_directory", text="")
sub.prop(paths, "texture_plugin_directory", text="")
sub.prop(paths, "sequence_plugin_directory", text="")
sub.prop(paths, "render_output_directory", text="")
sub.prop(paths, "script_directory", text="")
sub.prop(paths, "sound_directory", text="")
sub.prop(paths, "temporary_directory", text="")
sub.prop(paths, "image_editor", text="")
subsplit = sub.split(percentage=0.3)
subsplit.prop(paths, "animation_player_preset", text="")
subsplit.prop(paths, "animation_player", text="")
col.separator()
col.separator()
colsplit = col.split(percentage=0.95)
sub = colsplit.column()
sub.label(text="Author:")
sub.prop(system, "author", text="")
col = split.column()
col.label(text="Save & Load:")
col.prop(paths, "use_relative_paths")
col.prop(paths, "use_file_compression")
col.prop(paths, "use_load_ui")
col.prop(paths, "use_filter_files")
col.prop(paths, "show_hidden_files_datablocks")
col.prop(paths, "hide_recent_locations")
col.prop(paths, "show_thumbnails")
col.separator()
col.separator()
col.prop(paths, "save_version")
col.prop(paths, "recent_files")
col.prop(paths, "use_save_preview_images")
col.label(text="Auto Save:")
col.prop(paths, "use_auto_save_temporary_files")
sub = col.column()
sub.active = paths.use_auto_save_temporary_files
sub.prop(paths, "auto_save_time", text="Timer (mins)")
col.separator()
col.label(text="Scripts:")
col.prop(system, "use_scripts_auto_execute")
col.prop(system, "use_tabs_as_spaces")
from bl_ui.space_userpref_keymap import InputKeyMapPanel
class USERPREF_MT_ndof_settings(Menu):
# accessed from the window key-bindings in C (only)
bl_label = "3D Mouse Settings"
def draw(self, context):
layout = self.layout
input_prefs = context.user_preferences.inputs
layout.separator()
layout.prop(input_prefs, "ndof_sensitivity")
if context.space_data.type == 'VIEW_3D':
layout.separator()
layout.prop(input_prefs, "ndof_show_guide")
layout.separator()
layout.label(text="Orbit options")
if input_prefs.view_rotate_method == 'TRACKBALL':
layout.prop(input_prefs, "ndof_roll_invert_axis")
layout.prop(input_prefs, "ndof_tilt_invert_axis")
layout.prop(input_prefs, "ndof_rotate_invert_axis")
layout.prop(input_prefs, "ndof_zoom_invert")
layout.separator()
layout.label(text="Pan options")
layout.prop(input_prefs, "ndof_panx_invert_axis")
layout.prop(input_prefs, "ndof_pany_invert_axis")
layout.prop(input_prefs, "ndof_panz_invert_axis")
layout.label(text="Zoom options")
layout.prop(input_prefs, "ndof_zoom_updown")
layout.separator()
layout.label(text="Fly options")
layout.prop(input_prefs, "ndof_fly_helicopter", icon='NDOF_FLY')
layout.prop(input_prefs, "ndof_lock_horizon", icon='NDOF_DOM')
class USERPREF_PT_input(Panel, InputKeyMapPanel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Input"
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'INPUT')
def draw_input_prefs(self, inputs, layout):
# General settings
row = layout.row()
col = row.column()
sub = col.column()
sub.label(text="Presets:")
subrow = sub.row(align=True)
subrow.menu("USERPREF_MT_interaction_presets", text=bpy.types.USERPREF_MT_interaction_presets.bl_label)
subrow.operator("wm.interaction_preset_add", text="", icon='ZOOMIN')
subrow.operator("wm.interaction_preset_add", text="", icon='ZOOMOUT').remove_active = True
sub.separator()
sub.label(text="Mouse:")
sub1 = sub.column()
sub1.active = (inputs.select_mouse == 'RIGHT')
sub1.prop(inputs, "use_mouse_emulate_3_button")
sub.prop(inputs, "use_mouse_continuous")
sub.prop(inputs, "drag_threshold")
sub.prop(inputs, "tweak_threshold")
sub.label(text="Select With:")
sub.row().prop(inputs, "select_mouse", expand=True)
sub = col.column()
sub.label(text="Double Click:")
sub.prop(inputs, "mouse_double_click_time", text="Speed")
sub.separator()
sub.prop(inputs, "use_emulate_numpad")
sub.separator()
sub.label(text="Orbit Style:")
sub.row().prop(inputs, "view_rotate_method", expand=True)
sub.label(text="Zoom Style:")
sub.row().prop(inputs, "view_zoom_method", text="")
if inputs.view_zoom_method in {'DOLLY', 'CONTINUE'}:
sub.row().prop(inputs, "view_zoom_axis", expand=True)
sub.prop(inputs, "invert_mouse_zoom")
#sub.prop(inputs, "use_mouse_mmb_paste")
#col.separator()
sub = col.column()
sub.label(text="Mouse Wheel:")
sub.prop(inputs, "invert_zoom_wheel", text="Invert Wheel Zoom Direction")
#sub.prop(view, "wheel_scroll_lines", text="Scroll Lines")
col.separator()
sub = col.column()
sub.label(text="NDOF Device:")
sub.prop(inputs, "ndof_sensitivity", text="NDOF Sensitivity")
row.separator()
def draw(self, context):
layout = self.layout
#import time
#start = time.time()
userpref = context.user_preferences
inputs = userpref.inputs
split = layout.split(percentage=0.25)
# Input settings
self.draw_input_prefs(inputs, split)
# Keymap Settings
self.draw_keymaps(context, split)
#print("runtime", time.time() - start)
class USERPREF_MT_addons_dev_guides(Menu):
bl_label = "Development Guides"
# menu to open web-pages with addons development guides
def draw(self, context):
layout = self.layout
layout.operator("wm.url_open", text="API Concepts", icon='URL').url = "http://wiki.blender.org/index.php/Dev:2.5/Py/API/Intro"
layout.operator("wm.url_open", text="Addon Guidelines", icon='URL').url = "http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Guidelines/Addons"
layout.operator("wm.url_open", text="How to share your addon", icon='URL').url = "http://wiki.blender.org/index.php/Dev:Py/Sharing"
class USERPREF_PT_addons(Panel):
bl_space_type = 'USER_PREFERENCES'
bl_label = "Addons"
bl_region_type = 'WINDOW'
bl_options = {'HIDE_HEADER'}
_support_icon_mapping = {
'OFFICIAL': 'FILE_BLEND',
'COMMUNITY': 'POSE_DATA',
'TESTING': 'MOD_EXPLODE',
}
@classmethod
def poll(cls, context):
userpref = context.user_preferences
return (userpref.active_section == 'ADDONS')
@staticmethod
def is_user_addon(mod, user_addon_paths):
if not user_addon_paths:
user_script_path = bpy.utils.user_script_path()
if user_script_path is not None:
user_addon_paths.append(os.path.join(user_script_path, "addons"))
user_addon_paths.append(os.path.join(bpy.utils.resource_path('USER'), "scripts", "addons"))
for path in user_addon_paths:
if bpy.path.is_subdir(mod.__file__, path):
return True
return False
@staticmethod
def draw_error(layout, message):
lines = message.split("\n")
box = layout.box()
rowsub = box.row()
rowsub.label(lines[0])
rowsub.label(icon='ERROR')
for l in lines[1:]:
box.label(l)
def draw(self, context):
layout = self.layout
userpref = context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
# collect the categories that can be filtered on
addons = [(mod, addon_utils.module_bl_info(mod)) for mod in addon_utils.modules(addon_utils.addons_fake_modules)]
split = layout.split(percentage=0.2)
col = split.column()
col.prop(context.window_manager, "addon_search", text="", icon='VIEWZOOM')
col.label(text="Supported Level")
col.prop(context.window_manager, "addon_support", expand=True)
col.label(text="Categories")
col.prop(context.window_manager, "addon_filter", expand=True)
col = split.column()
# set in addon_utils.modules(...)
if addon_utils.error_duplicates:
self.draw_error(col,
"Multiple addons using the same name found!\n"
"likely a problem with the script search path.\n"
"(see console for details)",
)
if addon_utils.error_encoding:
self.draw_error(col,
"One or more addons do not have UTF-8 encoding\n"
"(see console for details)",
)
filter = context.window_manager.addon_filter
search = context.window_manager.addon_search.lower()
support = context.window_manager.addon_support
# initialized on demand
user_addon_paths = []
for mod, info in addons:
module_name = mod.__name__
is_enabled = module_name in used_ext
if info["support"] not in support:
continue
# check if addon should be visible with current filters
if ((filter == "All") or
(filter == info["category"]) or
(filter == "Enabled" and is_enabled) or
(filter == "Disabled" and not is_enabled)):
if search and search not in info["name"].lower():
if info["author"]:
if search not in info["author"].lower():
continue
else:
continue
# Addon UI Code
box = col.column().box()
colsub = box.column()
row = colsub.row()
row.operator("wm.addon_expand", icon='TRIA_DOWN' if info["show_expanded"] else 'TRIA_RIGHT', emboss=False).module = module_name
rowsub = row.row()
rowsub.active = is_enabled
rowsub.label(text='%s: %s' % (info['category'], info["name"]))
if info["warning"]:
rowsub.label(icon='ERROR')
# icon showing dependencies (child or parent).
disable_check = module_name
if info["dependencies"] :
rowsub.label(icon='LINKED')
# icon showing support level.
rowsub.label(icon=self._support_icon_mapping.get(info["support"], 'QUESTION'))
if info["childs"] and is_enabled :
row.label(icon='LINKED')
elif is_enabled:
row.operator("wm.addon_disable", icon='CHECKBOX_HLT', text="", emboss=False).module = module_name
else:
row.operator("wm.addon_enable", icon='CHECKBOX_DEHLT', text="", emboss=False).module = module_name
# Expanded UI (only if additional info is available)
if info["show_expanded"]:
if info["description"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Description:")
split.label(text=info["description"])
if info["location"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Location:")
split.label(text=info["location"])
if mod:
split = colsub.row().split(percentage=0.15)
split.label(text="File:")
split.label(text=mod.__file__)
if info["author"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Author:")
split.label(text=info["author"])
if info["version"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Version:")
split.label(text='.'.join(str(x) for x in info["version"]))
                    if info["dependencies"]:
                        split = colsub.row().split(percentage=0.15)
                        split.label(text='Dependencies:')
                        parent_list, msg = addon_utils.parent_list(info["dependencies"])
                        if parent_list:
                            txt = ", ".join("%s v%s" % (n, ".".join(str(x) for x in v))
                                            for n, v in parent_list)
                        else:
                            txt = msg  # show the full message rather than a sliced one
                        split.label(text=txt)
                    if info["childs"]:
                        split = colsub.row().split(percentage=0.15)
                        split.label(text='In use by:')
                        split.label(text=", ".join(info["childs"]))
if info["warning"]:
split = colsub.row().split(percentage=0.15)
split.label(text="Warning:")
split.label(text=' ' + info["warning"], icon='ERROR')
user_addon = USERPREF_PT_addons.is_user_addon(mod, user_addon_paths)
tot_row = bool(info["wiki_url"]) + bool(info["tracker_url"]) + bool(user_addon)
if tot_row:
split = colsub.row().split(percentage=0.15)
split.label(text="Internet:")
if info["wiki_url"]:
split.operator("wm.url_open", text="Link to the Wiki", icon='HELP').url = info["wiki_url"]
if info["tracker_url"]:
split.operator("wm.url_open", text="Report a Bug", icon='URL').url = info["tracker_url"]
if user_addon:
split.operator("wm.addon_remove", text="Remove", icon='CANCEL').module = mod.__name__
for i in range(4 - tot_row):
split.separator()
# Append missing scripts
# First collect scripts that are used but have no script file.
module_names = {mod.__name__ for mod, info in addons}
missing_modules = {ext for ext in used_ext if ext not in module_names}
if missing_modules and filter in {"All", "Enabled"}:
col.column().separator()
col.column().label(text="Missing script files")
module_names = {mod.__name__ for mod, info in addons}
for module_name in sorted(missing_modules):
is_enabled = module_name in used_ext
# Addon UI Code
box = col.column().box()
colsub = box.column()
row = colsub.row()
row.label(text=module_name, icon='ERROR')
if is_enabled:
row.operator("wm.addon_disable", icon='CHECKBOX_HLT', text="", emboss=False).module = module_name
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
|
[
"jerome.le.chat@free.fr"
] |
jerome.le.chat@free.fr
|
6f54f5939a8fda03c24dfa9d9dbe33c08f498424
|
096ccaca86872b03a137edf58221413073d770cb
|
/helpers/24_web_apis_sources.py
|
0a219f85661a944bd17fb1db67075e5cf05ea372
|
[] |
no_license
|
DH-heima/webscrapping
|
f142962b50deed2628052dd7a48098a4afbcbada
|
1dc8f81f45db0d4366391c3052c5ab36f4d4bc5d
|
refs/heads/master
| 2022-02-02T23:26:22.520064
| 2019-06-13T13:38:10
| 2019-06-13T13:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,587
|
py
|
import os
# from ers import shops, COLLECTION_DATE, web_apis_traffic_sources_csv, web_apis_traffic_sources_aggregates_csv
import os.path as op
import numpy as np
import pandas as pd
BASE_DIR = "/code/mhers"
WAVE_NUMBER = 8
shops = pd.read_excel(op.join(BASE_DIR, "ressources/ERS-referential-shops.xlsx"), index_col=None)
COLLECTION_DATE = "2018-06-10"
web_apis_traffic_sources_csv = os.path.join(BASE_DIR,'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_details - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))
web_apis_traffic_sources_aggregates_csv = os.path.join(BASE_DIR,'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_summary - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))
# #####################################################################################################################
# web_apis_demographics_csv
# #####################################################################################################################
# This generates the dummy data and shouldn't be in production
mask = pd.DataFrame({'to_delete': [1]})
parts = []
for c, row in shops.iterrows():
    tmp = mask.copy()
    for k in ['shop_id', 'continent', 'country', 'region', 'segment']:
        tmp[k] = row[k]
    parts.append(tmp)
df = pd.concat(parts, ignore_index=True)  # DataFrame.append was removed in pandas 2.0
df.drop(columns=['to_delete'], inplace=True)
# TODO : delete the random data creation and fetch the data in the proper dataset
df['direct'] = np.random.random(size=(df.shape[0], 1)) * 0.3
df['email'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['referrals'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['social'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['paid_search'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['display_ads'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['organic_search'] = 1 - df['direct'] - df['email'] - df['referrals'] - df['social'] - df['paid_search'] - df['display_ads']
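# organic_search is the residual, so the seven traffic-source shares sum to 1 by construction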
# Todo : Time Span is the time over which the aggregates are calculated
df['time_span'] = "Apr. 2016 - Aug. 2018"
# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
df['collection_date'] = COLLECTION_DATE
final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'shop_id', 'direct', 'email',
'referrals', 'social', 'paid_search', 'display_ads', 'organic_search']
df = df[final_cols]
df.to_csv(web_apis_traffic_sources_csv, sep=';', index=False, encoding='utf-8')
print("File web_apis_traffic_sources_csv stored at : ", web_apis_traffic_sources_csv)
# #####################################################################################################################
# web_apis_demographics_aggregates_csv
# #####################################################################################################################
df['region'].fillna("", inplace=True)
# Aggregating
res = []
agregation_levels_list = [
['continent', 'country', 'region', 'segment'],
['continent', 'country', 'segment'],
['continent', 'segment'],
['segment'],
['continent', 'country', 'region'],
['continent', 'country'],
['continent'],
['collection_date']
]
# All aggregations
for agg_level in agregation_levels_list:
    # flat column->function mapping (nested-dict agg specs were removed in pandas 0.25)
    dfG2 = df.groupby(agg_level, as_index=False).agg({
        'direct': 'mean',
        'email': 'mean',
        'referrals': 'mean',
        'social': 'mean',
        'paid_search': 'mean',
        'display_ads': 'mean',
        'organic_search': 'mean',
    })
print(agg_level, 'adding', dfG2.shape)
res.append(dfG2)
# Aggregate on all-levels
all_dfs = pd.concat(res, axis=0, ignore_index=True)
# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
all_dfs['collection_date'] = COLLECTION_DATE
# Todo : Time Span is the time over which the aggregates are calculated
all_dfs['time_span'] = "Apr. 2016 - Aug. 2018"
final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'direct', 'display_ads',
'email', 'organic_search', 'paid_search', 'referrals', 'social']
all_dfs = all_dfs[final_cols]
all_dfs.to_csv(web_apis_traffic_sources_aggregates_csv, sep=';', index=None, encoding='utf-8')
print("File web_apis_traffic_sources_aggregates_csv stored at : ", web_apis_traffic_sources_aggregates_csv, " -")
|
[
"pierre.chevalier@epitech.eu"
] |
pierre.chevalier@epitech.eu
|
ea147069bc358894d57aadf2f0981d4f2f8e0902
|
8d6418d8f813961318a962638467cdea5adec882
|
/test.py
|
4e4d2e798b46c69735d1b4597e4283d944ae03bd
|
[] |
no_license
|
demakaituan/code
|
6d7045d7a085ff843b636ecb3e88d9959f68d32e
|
6578bc0dbd7018449e9f8c83f4fc5304126036a6
|
refs/heads/master
| 2021-01-21T14:04:26.549322
| 2016-05-30T08:54:51
| 2016-05-30T08:54:51
| 54,120,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
import unittest
import seuif97
class Region1_Test(unittest.TestCase):
def setUp(self):
self.tab5 = [[3, 300, 0.100215168e-2, 0.115331273e3, 0.112324818e3, 0.392294792, 4.17301218, 0.150773921e4],
[80, 300, 0.971180894e-3, 0.184142828e3, 0.106448356e3,
0.368563852, 4.01008987, 0.163469054e4],
[3, 500, 0.120241800e-2, 0.975542239e3, 0.971934985e3, 2.58041912, 4.65580682, 0.124071337e4]]
def test_specific_volume(self):
places = 11
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_volume(item[0], item[1]), item[2], places)
def test_specific_enthalpy(self):
places = 6
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_enthalpy(item[0], item[1]), item[3], places)
def test_specific_internal_energy(self):
places = 6
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_internal_energy(
item[0], item[1]), item[4], places)
def test_specific_entropy(self):
places = 8
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_entropy(item[0], item[1]), item[5], places)
def test_specific_isobaric_heat_capacity(self):
places = 8
for item in self.tab5:
self.assertAlmostEqual(seuif97.specific_isobaric_heat_capacity(
item[0], item[1]), item[6], places)
def test_speed_of_sound(self):
places = 5
for item in self.tab5:
self.assertAlmostEqual(seuif97.speed_of_sound(item[0], item[1]), item[7], places)
class additional_Test(unittest.TestCase):
def setUp(self):
self.tab6 = [[0.001, 0, 9.800980612e-4],
[90, 0, 91.92954727],
[1500, 3.4, 58.68294423]]
def test_backward_equations(self):
places = 8
for item in self.tab6:
self.assertAlmostEqual(seuif97.backward_equations(item[0], item[1]), item[2], places)
def suite_test():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Region1_Test))
suite.addTest(unittest.makeSuite(additional_Test))
return suite
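
# unittest.makeSuite() is deprecated (and removed in Python 3.13); on newer
# interpreters an equivalent loader-based suite (a sketch) would be:
def suite_test_loader():
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(Region1_Test))
    suite.addTests(loader.loadTestsFromTestCase(additional_Test))
    return suite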
if __name__ == '__main__':
unittest.main(defaultTest = 'suite_test')
|
[
"599146992@qq.com"
] |
599146992@qq.com
|
5e9e166be4bb079b5d0ce28085ede2532003ca6d
|
0428aa38f62004e25134596c30280fa038f65f1d
|
/trigger_lambda_on_s3_put.py
|
2ad68100488a42aaddafb2351b57700f48d7b608
|
[] |
no_license
|
smusongwe/s3
|
fa762c1f4b62895a0c93055dbce7a2dd6ffef38c
|
540aa034fd0afbe30b998b60db7ab6efaf8063d4
|
refs/heads/main
| 2023-01-19T03:25:22.171853
| 2020-12-01T00:34:29
| 2020-12-01T00:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
import json
import urllib.parse
import boto3
print('Loading function')
s3 = boto3.client('s3')
def lambda_handler(event, context):
#1 - Get the bucket name
bucket = event['Records'][0]['s3']['bucket']['name']
#2 - Get the file/key name
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
#3 - Fetch the file from S3
response = s3.get_object(Bucket=bucket, Key=key)
#4 - Deserialize the file's content
text = response["Body"].read().decode()
data = json.loads(text)
#5 - Print the content
print(data)
#6 - Parse and print the transactions
transactions = data['transactions']
for record in transactions:
print(record['transactionType'])
return 'Success!'
except Exception as e:
print(e)
raise e
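
# Minimal local smoke test (a sketch: the bucket and key below are made-up
# values, and s3.get_object still needs real AWS credentials and an object):
if __name__ == '__main__':
    fake_event = {
        'Records': [{
            's3': {
                'bucket': {'name': 'my-example-bucket'},
                'object': {'key': 'transactions/sample.json'},
            }
        }]
    }
    print(lambda_handler(fake_event, None))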
|
[
"noreply@github.com"
] |
noreply@github.com
|
08ca6256aa20cb142ea6d49f2471e8c6aa0fec33
|
9021d47bb5a47cfd6704161c7db43585808f1d2b
|
/application.py
|
fe6c6a5c21031d4afd693e55c450d5d1180a3606
|
[] |
no_license
|
Mohamed24444/HW-todo-api
|
456d6ad4b4e75b0dcbc9e5c990703b0c9c961d83
|
9db9efb587c1bb7864893e5811eeda3416d1322e
|
refs/heads/main
| 2023-07-18T06:28:06.760694
| 2021-08-31T20:46:49
| 2021-08-31T20:46:49
| 400,887,854
| 0
| 0
| null | 2021-08-28T20:50:12
| 2021-08-28T20:50:12
| null |
UTF-8
|
Python
| false
| false
| 2,998
|
py
|
import helper
from flask import Flask, request, Response
import json
application = app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/item/new', methods=['POST'])
def add_item():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
# Add item to the list
res_data = helper.add_to_list(item)
# Return error if item not added
if res_data is None:
response = Response("{'error': 'Item not added - " + item + "'}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/items/all')
def get_all_items():
# Get items from the helper
res_data = helper.get_all_items()
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/item/status', methods=['GET'])
def get_item():
# Get parameter from the URL
item_name = request.args.get('name')
# Get items from the helper
status = helper.get_item(item_name)
# Return 404 if item not found
if status is None:
response = Response("{'error': 'Item Not Found - %s'}" % item_name, status=404 , mimetype='application/json')
return response
# Return status
res_data = {
'status': status
}
response = Response(json.dumps(res_data), status=200, mimetype='application/json')
return response
@app.route('/item/update', methods=['PUT'])
def update_status():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
status = req_data['status']
# Update item in the list
res_data = helper.update_status(item, status)
# Return error if the status could not be updated
if res_data is None:
response = Response("{'error': 'Error updating item - '" + item + ", " + status + "}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/item/remove', methods=['DELETE'])
def delete_item():
# Get item from the POST body
req_data = request.get_json()
item = req_data['item']
# Delete item from the list
res_data = helper.delete_item(item)
# Return error if the item could not be deleted
if res_data is None:
response = Response("{'error': 'Error deleting item - '" + item + "}", status=400 , mimetype='application/json')
return response
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
@app.route('/items/delall', methods=['DELETE'])
def del_all():
# Get items from the helper
res_data = helper.del_all_items()
# Return response
response = Response(json.dumps(res_data), mimetype='application/json')
return response
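
# For local experimentation only (a sketch, not part of the original app).
# With the server running, the endpoints can be exercised with e.g.:
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"item": "buy milk"}' http://127.0.0.1:5000/item/new
#   curl http://127.0.0.1:5000/items/all
if __name__ == '__main__':
    app.run(debug=True)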
|
[
"noreply@github.com"
] |
noreply@github.com
|
8a53b8e8eadc3fdb0cd8371ce351adebce79def2
|
0fc6370708a3407255a667f29095e287db2fb454
|
/MockGvh/agentThread.py
|
fc31b2f25b3fd0f9404ea200f569b5ab4b715599
|
[] |
no_license
|
jzw2/KoordLanguage
|
257af6a8175319cec5369126e168708bc5934baa
|
d0c56c0124c70e9dc61886080f09ffae7da9583a
|
refs/heads/master
| 2020-05-30T08:28:30.936176
| 2019-07-26T19:13:33
| 2019-07-26T19:20:40
| 187,888,056
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
import random
from abc import ABC, abstractmethod
class Pos:
    pass
class AgentThread(ABC):
def __init__(self, config):
self._pid = config.pid
self._num_agents = config.num_agents
self._pos = (random.randrange(1, 10), random.randrange(1, 10))
self.shared_vars = config.shared_vars
@abstractmethod
def loop_body(self):
pass
@abstractmethod
def initialize_vars(self):
pass
def pos3d(self, x, y, z):
pass
def write_to_shared(self, var_name, index, value):
self.shared_vars[var_name][index] = value
def read_from_shared(self, var_name, index):
return self.shared_vars[var_name][index]
def read_from_sensor(self, var_name):
if var_name == "Motion.position":
return self._pos
def write_to_actuator(self, var_name, value):
if var_name == "Motion.target":
self._pos = value
def create_ar_var(self, name, type, initial_value=None):
if name not in self.shared_vars:
self.shared_vars[name] = [initial_value] * self._num_agents
def create_aw_var(self, name, type, initial_value=None):
if name not in self.shared_vars:
self.shared_vars[name] = [initial_value] * self._num_agents
def log(self, message):
pass
def pid(self):
return self._pid
def num_agents(self):
return self._num_agents
def start(self):
pass
def run(self):
for i in range(10):
self.loop_body()
def midpoint(self, x, y):
a, b = x
c, d = y
return (a + c) / 2, (b + d) / 2
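
# A minimal concrete agent built on AgentThread (an illustrative sketch; the
# class name and the "positions" variable are made up for this example):
class RandomWalkAgent(AgentThread):
    def initialize_vars(self):
        # one shared slot per agent, written in loop_body below
        self.create_aw_var("positions", tuple, initial_value=(0, 0))

    def loop_body(self):
        x, y = self.read_from_sensor("Motion.position")
        self.write_to_actuator("Motion.target", (x + 1, y))
        self.write_to_shared("positions", self.pid(), (x + 1, y))

# usage sketch: agent = RandomWalkAgent(config); agent.initialize_vars(); agent.run()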
|
[
"hc825b@gmail.com"
] |
hc825b@gmail.com
|
6ffd2e78972b8fd0b9f8b3259fae0c6b13ecaf63
|
0f3c7d268349336160d592eaa3acf56a1bb12cc2
|
/reports/ISOPE_outline/create_latex_report.py
|
42c30523f9cc8383a9d80aa0358fe68d4ba36499
|
[
"MIT"
] |
permissive
|
ramisetti/Prediction-of-roll-motion-using-fully-nonlinear-potential-flow-and-Ikedas-method
|
ac0f7eac820e5f381d1073bd52556ac313026727
|
cce8abde16a15a2ae45008e48b1bba9f4aeaaad4
|
refs/heads/main
| 2023-05-19T05:54:41.448268
| 2021-06-11T09:48:15
| 2021-06-11T09:48:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
import os.path
from src.notebook_to_latex import convert_notebook_to_latex
import reports
notebook_path = os.path.join(reports.path,'ISOPE_outline','01.1.outline.ipynb')
build_directory = os.path.join(reports.path,'ISOPE')
if not os.path.exists(build_directory):
os.mkdir(build_directory)
skip_figures=True
convert_notebook_to_latex(notebook_path=notebook_path, build_directory=build_directory, save_main=False, skip_figures=skip_figures)
if not skip_figures:
## Special treatment
import joblib
import graphviz
from sklearn import tree
clf = joblib.load('models/C_r_tree.pkl')
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=[r'sigma', r'a_1', r'a_3'], rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.render("reports/ISOPE/figures/decision_tree")
|
[
"marale@kth.se"
] |
marale@kth.se
|
280a05ec34f5669a8ef15a8fd75fa5be7a8a3981
|
723c3e005d76db1cdc706b2169cc6cc441974201
|
/PDF_Sales/Old_files/PDF_IND_Dates_Old/PDF_IND_Dates_2.py
|
67caa425edbe7d0d1ed6ae4c752ca3cda8b8e0e1
|
[] |
no_license
|
GolovanPriherel/Python_Projects
|
0b801d753efabbe0ca4a0d47bd532cc316024799
|
6108e10cefb05d521f8b4969fed57e92f61ab753
|
refs/heads/main
| 2023-08-28T10:59:35.320556
| 2021-10-13T20:44:33
| 2021-10-13T20:44:33
| 416,886,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,909
|
py
|
import re, fitz, pytesseract, urllib.request
# ---- Working with PDFs
from PIL import Image
# Search patterns (raw strings, so the regex escapes are not mangled)
patternsIND = [r' IND ([0-9,]+)', r' IND Number.([0-9,]+)', r' IND No. ([0-9,]+)']
patternsINDDate = [r'IND\D{4}(\d{1,2}.\d{1,2}.\d{1,4})', r'IND \d{5,6}..(\d{1,2}.\D+.\d{1,4})',
                   r'IND\D{4}(\d{1,2}.\D+.\d{1,4})', r'IND \d{5,6}..(\d{1,2}.\d{1,2}.\d{1,4})',
                   r'IND.[0-9,]+.Review #01 dated\s\s(\w+.\d+..\d+)', 'was originally submitted']
# Path to the tesseract executable
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# Yields the PDF text page by page
def Extract_PDF_Text(pdf_file):
pdf_document = pdf_file
doc = fitz.open(pdf_document)
for pages in range(doc.pageCount):
page = doc.loadPage(pages)
page_text = page.getText("text")
yield page_text
# print(page_text)
# Finds the IND number and date in the PDF text
def extract_text(pdf_file, pattern1, pattern2):
Date, IND = [], []
for page in Extract_PDF_Text(pdf_file):
for patt in pattern1:
result_IND = re.findall(patt, page)
for sub in range(len(result_IND)):
IND.append(result_IND[sub].replace('\n', ''))
for patt in pattern2:
result_IND_Date = re.findall(patt, page)
for sub in range(len(result_IND_Date)):
Date.append(result_IND_Date[sub].replace('\n', ''))
    if IND:
        IND = max(set(IND), key=IND.count)
    if Date:  # checked independently of the number
        Date = max(set(Date), key=Date.count)
    # print('Text won')
    return IND, Date
# Extracts the images from the PDF
def extract_png(PDF):
ind_num, ind_date = '',''
doc = fitz.open(PDF)
for i in range(0, len(doc)):
for img in doc.getPageImageList(i):
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if pix.n < 5: # this is GRAY or RGB
pix.writePNG("Text1.png")
else: # CMYK: convert to RGB first
pix1 = fitz.Pixmap(fitz.csRGB, pix)
pix1.writePNG("Text1.png")
pix1 = None
pix = None
text = Find_IND_Date_Tess("Text1.png")
IND_Num, IND_Date = extract_text_png(text, patternsIND, patternsINDDate)
# print('---', i)
if IND_Num and IND_Date:
return IND_Num, IND_Date
elif IND_Num:
ind_num = IND_Num
elif IND_Date:
ind_date = IND_Date
return ind_num, ind_date
# OCR: recognizes text in the images
def Find_IND_Date_Tess(picture):
img = Image.open(f'{picture}')
file_name = img.filename
file_name = file_name.split(".")[0]
custom_config = r'--oem 3 --psm 6'
text = pytesseract.image_to_string(img, config=custom_config)
return text
# Finds the IND number and date from the images
def extract_text_png(text, pattern1, pattern2):
IND, Date = [], []
for patt in pattern1:
result_IND = re.findall(patt, text)
for sub in range(len(result_IND)):
IND.append(result_IND[sub].replace('\n', ''))
for patt in pattern2:
result_IND_Date = re.findall(patt, text)
for sub in range(len(result_IND_Date)):
Date.append(result_IND_Date[sub].replace('\n', ''))
    if IND:
        IND = max(set(IND), key=IND.count)
    if Date:  # must not be `elif`, or the date is skipped whenever a number is found
        Date = max(set(Date), key=Date.count)
    # print('Images won')
    return IND, Date
# Calls all the parser functions
def Find_IND_Date(url):  # just pass in a link to the PDF
    urllib.request.urlretrieve(url, "../IND1.pdf")  # download the PDF
    IND_Num, IND_Date = extract_text("../IND1.pdf", patternsIND, patternsINDDate)  # look at the text
    TrueNum, TrueDate = None, None
    if IND_Num and IND_Date:  # check whether the text alone is enough
return IND_Num, IND_Date
elif IND_Num:
TrueNum = IND_Num
elif IND_Date:
TrueDate = IND_Date
else:
        IND_Num1, IND_Date1 = extract_png("../IND1.pdf")  # fall back to OCR (path matches the download above)
if IND_Num1:
TrueNum = IND_Num1
elif IND_Date1:
TrueDate = IND_Date1
    # TODO: finish recognizing the tricky cases
return TrueNum, TrueDate
# Verified PDF links
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2013/202971Orig1s000PharmR.pdf'  # IND (5 digits) and date (space-separated)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2021/212887Orig1s000,212888Orig1s000Approv.pdf'  # IND (6 digits)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2012/202428Orig1s000PharmR.pdf'  # IND (6 digits)
# url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2002/21-456_Aciphex_Medr_P1.pdf'  # image with an IND date but no number
url = 'https://www.accessdata.fda.gov/drugsatfda_docs/nda/2010/022518Orig1s000PharmR.pdf'  # many different IND numbers with a date
IND_Num, IND_Date = Find_IND_Date(url)
print(IND_Num, '|', IND_Date)
|
[
"noreply@github.com"
] |
noreply@github.com
|
baba79af33bbf688b0cc90d14d78060c6b946973
|
3a771b72dae1aae406b94726bcbcf73915577b18
|
/q56.py
|
e7701a5af9e0d8749ed043cc4977a73042423870
|
[] |
no_license
|
SHANK885/Python-Basic-Programs
|
4fcb29280412baa63ffd33efba56d9f59770c9dc
|
157f0f871b31c4523b6873ce5dfe0d6e26a6dc61
|
refs/heads/master
| 2021-07-18T18:24:10.455282
| 2018-11-19T07:02:27
| 2018-11-19T07:02:27
| 138,009,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
unicodeString =u"Heello world"
print(unicodeString)
|
[
"shashankshekhar885@gmail.com"
] |
shashankshekhar885@gmail.com
|
41255f5976ab13155649263d540c618488794b94
|
08a329d07172a384be41eb58a0586032b18787d2
|
/if1.py
|
c5867a5c4dee5850f5bb2049812193dbe20c31e6
|
[] |
no_license
|
atsuhisa-i/Python_study1
|
9bc39d058fe8bdd00adb35324758ad8fa08f4ca1
|
439a654f09e81208658355d99c8ce1c3cd4bcc4e
|
refs/heads/main
| 2023-04-06T12:44:12.099067
| 2021-04-14T13:24:56
| 2021-04-14T13:24:56
| 348,309,405
| 0
| 0
| null | 2021-04-14T13:24:57
| 2021-03-16T10:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 69
|
py
|
number = '123456'
if number == '123456':
print('1st Prize:Money')
|
[
"atsuhisa.1124@gmail.com"
] |
atsuhisa.1124@gmail.com
|
6932b26cbfad7fb87ae3a6ac07694d091c54d719
|
e35594083b7dfc15a8e790efa26fc36ac264ccce
|
/pages/base_page.py
|
0583a1adc7f65adf2a1972a4236615998f514a99
|
[] |
no_license
|
quiprodest/testauto-study-project
|
4f10bc8552fe8b8ca354bc856bab2ddcf76982e5
|
b338ea8d83dfa046c38507e2524e47431d172b26
|
refs/heads/master
| 2021-06-19T16:46:40.660719
| 2019-07-30T12:43:27
| 2019-07-30T12:43:27
| 199,621,325
| 0
| 0
| null | 2021-04-20T18:26:23
| 2019-07-30T09:35:41
|
Python
|
UTF-8
|
Python
| false
| false
| 496
|
py
|
from selenium.common.exceptions import NoSuchElementException
class BasePage(object):
def __init__(self, browser, url): # , timeout=10
self.browser = browser
self.url = url
# self.browser.implicitly_wait(timeout)
def open(self):
self.browser.get(self.url)
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
except (NoSuchElementException):
return False
return True
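
# An illustrative page object built on BasePage (a sketch; the selector and
# URL in the usage note are made-up values):
from selenium.webdriver.common.by import By

class MainPage(BasePage):
    def should_have_login_link(self):
        assert self.is_element_present(By.CSS_SELECTOR, "#login_link")

# usage sketch:
#   page = MainPage(webdriver.Chrome(), "http://example.com")
#   page.open(); page.should_have_login_link()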
|
[
"qui_p@tutanota.com"
] |
qui_p@tutanota.com
|
06b52d5b79166ba67bb054b31a199bbe635cff9f
|
62fb574c97c16645dc029b60014d48c88c1714df
|
/1 - 9/Problem 8.py
|
49ca444c85e1107eb2aadd0a38c01cab8f952d6d
|
[] |
no_license
|
kadirsefaunal/projectEuler
|
ee16faf3161961d4f2adec2ad5466ed7b3127713
|
710ad4112a4d3b7350f33c206db8baa60b5cf9a8
|
refs/heads/master
| 2021-01-01T06:34:58.613504
| 2017-07-21T13:56:42
| 2017-07-21T13:56:42
| 97,455,744
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
def main():
sayi = "731671765313306249192251196744265747423553491949349698352031277450632623957831801698480186947885184385861560789112949495459501737958331952853208805511125406987471585238630507156932909632952274430435576689664895044524452316173185640309871112172238311362229893423380308135336276614282806444486645238749303589072962904915604407723907138105158593079608667017242712188399879790879227492190169972088809377665727333001053367881220235421809751254540594752243525849077116705560136048395864467063244157221553975369781797784617406495514929086256932197846862248283972241375657056057490261407972968652414535100474821663704844031998900088952434506585412275886668811642717147992444292823086346567481391912316282458617866458359124566529476545682848912883142607690042242190226710556263211111093705442175069416589604080719840385096245544436298123098787992724428490918884580156166097919133875499200524063689912560717606058861164671094050775410022569831552000559357297257163626956188267042825248360082325753"
    # The last 13 digits didn't fit.
    enBuyuk = 0
    for i in range(0, len(sayi) - 12):  # every 13-digit window, including the last one
sayilar = []
carpim = 1
for j in range(0, 13):
carpim *= int(sayi[j + i])
sayilar.append(int(sayi[j + i]))
if carpim > enBuyuk:
enBuyuk = carpim
print(sayilar)
print(enBuyuk)
if __name__ == "__main__":
main()
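
# The same search written compactly (a sketch; math.prod needs Python 3.8+,
# and `digits` stands for any digit string such as `sayi` above):
from math import prod

def max_window_product(digits, width=13):
    return max(prod(int(d) for d in digits[i:i + width])
               for i in range(len(digits) - width + 1))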
|
[
"kadirsefau@gmail.com"
] |
kadirsefau@gmail.com
|
53afd2bce9ba50e5033d6390c1079e0d1ae46806
|
87eae6cd14dd360d6d8b8e78e6e1e9952d6cd0c1
|
/p5.py
|
424933937bbb58f010d4b44920badcd8fe10f517
|
[] |
no_license
|
karthikgvsk/project-euler
|
afd8a62b6067d2236a9a6b58b0ed01c23136c5e3
|
01e562a97b5c9cd03e19fbea29a63fc0bfcaac46
|
refs/heads/master
| 2020-04-25T21:18:19.061986
| 2013-06-08T17:23:24
| 2013-06-08T17:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# smallest multiple
# 2520 is the smallest number that can be divided by each of the numbers
# from 1 to 10 without any remainder.
#
# What is the smallest positive number that is evenly divisible by all of
# the numbers from 1 to 20?
from math import log
def primelist(n):
l = [0] * (n + 1)
p = 2
while p <= n:
q = 2
while q <= n // p:
l[p * q] = 1
q = q + 1
p = p + 1
l[0], l[1], l[2], l[3] = 1, 1, 0, 0
return l
n = 20
l = primelist(n)
prod = 1
for i in range(len(l)):
if l[i] == 0:
power = int(log(n, i))
prod = prod * (i ** power)
print(prod)
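
# Cross-check via lcm(1..n) built from gcd (a sketch using only the stdlib):
from functools import reduce
from math import gcd

def smallest_multiple(n):
    return reduce(lambda a, b: a * b // gcd(a, b), range(1, n + 1), 1)

print(smallest_multiple(20))  # 232792560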
|
[
"karthikgvsk@gmail.com"
] |
karthikgvsk@gmail.com
|
732875b53794690f71a61540b24b81ee5edee29e
|
75e2e9a6fcb2962a9502bf5c3237db35fd62d9da
|
/web_indexer_starter.py
|
a50c9220eb694b1f06745e40104181050539bf12
|
[] |
no_license
|
undefinedobjects/web_indexer.py
|
6a41508fe050b58858b5e9645e41c7adde1f7f96
|
c031a29fd8ba7930b7be36bdcf1b9a3d1dc5c1a7
|
refs/heads/master
| 2023-03-02T23:28:24.635419
| 2021-02-15T11:21:51
| 2021-02-15T11:21:51
| 272,031,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import os
for number in range(90,100):
start = "172." + str(number) + ".0.0"
end = "172." + str((number + 1)) + ".0.0"
os.system("start web_indexer.py -s " + start + " -e " + end)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ef2e1856553562a52e2e69951f8546c91328ccd8
|
5c41073897f791bc6915c874a9304d7d7f6c13db
|
/gravitate/domain/bookings/view_mediator.py
|
853566c6ab0c9429b78407c69be23fce3ed1e7bc
|
[] |
no_license
|
billyrrr/gravitate-backend
|
5c8ce8b95607b06bd2d850b085e9129d9dc8632b
|
f1e98f0002046cb4c932f9f1badbdf2eb8af92d1
|
refs/heads/master
| 2023-05-11T10:58:07.871694
| 2020-05-07T23:04:50
| 2020-05-07T23:04:50
| 156,035,235
| 0
| 0
| null | 2020-01-19T02:32:39
| 2018-11-04T00:19:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
import time
from flask_boiler.utils import snapshot_to_obj
from flask_boiler.view.query_delta import ViewMediatorDeltaDAV, ProtocolBase
from google.cloud.firestore import DocumentSnapshot
from gravitate import CTX
from gravitate.domain.user import User
from . import RiderBooking, BookingStoreBpss, RiderTarget, RiderBookingView, \
RiderBookingForm, RiderBookingReadModel
from google.cloud.firestore import Query
from ..location.models import Sublocation
class UserBookingMediator(ViewMediatorDeltaDAV):
"""
Forwards a rider booking to a user subcollection
"""
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
obj = RiderBookingReadModel.new(snapshot=snapshot)
mediator.notify(obj=obj)
@staticmethod
def on_update(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot)
if obj.status == "removed":
RiderBookingReadModel.remove_one(obj=obj)
model_cls = RiderBooking
class UserBookingEditMediator(ViewMediatorDeltaDAV):
"""
Forwards a rider booking to a user subcollection
"""
def notify(self, obj):
obj.propagate_change()
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
assert isinstance(snapshot, DocumentSnapshot)
path = snapshot.reference
booking_id = path.id
user_id = path.parent.parent.id
d = snapshot.to_dict()
obj = RiderBookingForm.from_dict(doc_id=booking_id,
d=dict(**d, user_id=user_id))
mediator.notify(obj=obj)
snapshot.reference.delete()
class BookingTargetMediator(ViewMediatorDeltaDAV):
"""
Generate booking target from a rider booking newly added or edited.
"""
model_cls = RiderBooking
def notify(self, obj):
obj.save()
class Protocol(ProtocolBase):
@staticmethod
def on_create(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot=snapshot)
for from_sublocation_ref in obj.from_location.sublocations:
from_sublocation = Sublocation.get(doc_ref=from_sublocation_ref)
for to_sublocation_ref in obj.to_location.sublocations:
to_sublocation = Sublocation.get(
doc_ref=to_sublocation_ref)
d = dict(
r_ref=obj.doc_ref,
from_lat=from_sublocation.coordinates[
"latitude"],
from_lng=from_sublocation.coordinates[
"longitude"],
from_id=from_sublocation.doc_id,
to_lat=to_sublocation.coordinates["latitude"],
to_lng=to_sublocation.coordinates["longitude"],
to_id=to_sublocation.doc_id,
user_id=obj.user_id
)
ts = dict(
earliest_arrival=obj.earliest_arrival,
latest_arrival=obj.latest_arrival,
earliest_departure=obj.earliest_departure,
latest_departure=obj.latest_departure,
)
ts = {k: v for k, v in ts.items() if v is not None}
target = RiderTarget.new(
**d, **ts
)
mediator.notify(obj=target)
@staticmethod
def on_delete(snapshot, mediator):
obj: RiderBooking = snapshot_to_obj(snapshot=snapshot)
if obj.status in {"matched", }:
"""
Delete targets for matched rider bookings
"""
booking_ref = obj.doc_ref
for target in RiderTarget.where(r_ref=booking_ref):
target.delete()
def _get_query(self):
query = Query(parent=self.model_cls._get_collection())
return query
|
[
"billrao@me.com"
] |
billrao@me.com
|
fe329d17aaaa67a21a1b92f02c926bb41085e379
|
28df0dfb458a3c0f37d5a597307017717f9b1cc3
|
/auth_utils.py
|
4880fdd53294b5dadd961c64186bd5575b88e666
|
[] |
no_license
|
dagou1992/flask
|
5533aa7c48a98c561b4235cbfcc6675ebfd81471
|
67afd661e0d14a62c55789b844ae11e790ca531d
|
refs/heads/master
| 2020-03-24T00:02:55.456559
| 2018-07-25T08:12:49
| 2018-07-25T08:12:49
| 142,269,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# coding: utf-8
from flask_login import LoginManager, UserMixin
from flask_login import login_user
from app import json_response_error
login_manager = LoginManager()
def init_auth(app):
login_manager.init_app(app)
class User(UserMixin):
def __init__(self, user):
self.info = user
def get_id(self):
"""登录成功后,就会调用get_id()获取到id存入session中"""
return self.info
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
# Record in the session that the user is logged in
def register_login(user):
login_user(User(user))
@login_manager.unauthorized_handler
def unauthorized_handler():
    return json_response_error(401, "User is not logged in."), 401
|
[
"jbq19920903@163.com"
] |
jbq19920903@163.com
|
e1bb0830c777662e0bc15e9c46b3a760597a28be
|
1e710f5ddc4d4b89ff601412e4a7e97d92423fe3
|
/lesson_1/example_4.py
|
609ef451896008c74c8f80517aaf02d220e649b0
|
[] |
no_license
|
Evgeniy-Nagornyy/Python_1
|
b8b176eafa83d7c7962ee244a9cecef4bbb0b601
|
8ce10ea075f5336460e85a6d75e89e28f377d19f
|
refs/heads/master
| 2022-10-22T09:23:52.062377
| 2020-06-10T13:27:23
| 2020-06-10T13:27:23
| 264,262,317
| 0
| 0
| null | 2020-06-09T23:32:21
| 2020-05-15T17:54:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
line_numbers = int(input('Enter a positive number - '))
check_number = line_numbers % 10  # digit currently checked for the maximum (a do-while loop is sorely missed here)
check_line_number = str(check_number)  # reference number used to decide when to exit the loop
max_number = 0  # largest digit so far
i = 1  # counter for stepping through the digits
if line_numbers == 0:  # in case 0 is entered
    print(max_number)
else:
    while int(check_line_number) <= line_numbers:
        check_number = ((line_numbers % (10 ** i)) // (10 ** (i - 1)))  # move on to the next digit
        if check_number == 9:  # no point in checking further
            max_number = check_number
            break
        elif check_number > max_number:  # compare the digit
            max_number = check_number
        check_line_number = str(check_number) + check_line_number  # build the number used for the loop-exit check
        i += 1
    print(f'The largest digit in the number is {max_number}')
# second ("forbidden") solution
# line_numbers = input('Enter a positive number - ')
# print(f'The largest digit in the number is {max([int(line_numbers[i]) for i in range(len(line_numbers))])}')
|
[
"Evgeniy_kott@mail.ru"
] |
Evgeniy_kott@mail.ru
|
9a21127696172a92f9de67e17b6cab869625b037
|
f25a62033ce864f9fd22cf85c9abf92a280fca01
|
/CodersLab/MSP/buffor_23.10.2017/workspace/SCIAGA/D8_MySql_laczenie_relacje/__init__.py
|
43f784fec55793c35338081529aea1f341bac558
|
[] |
no_license
|
AleksandraSandor/Learning-Python
|
e397a68e2a3e1103e537016c65a280c6b9a1a90d
|
387befaadbb84a8d76961893208b504ddeccf0ce
|
refs/heads/master
| 2020-04-11T09:46:23.425149
| 2018-02-04T17:03:43
| 2018-02-04T17:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,407
|
py
|
# ===============================================================================
# MySQL: joining tables and relations
# ===============================================================================
from mysql.connector import connect
cnx = connect(user="root", password="1", host="localhost")
cur = cnx.cursor()
sql = "use test_domowy;"
cur.execute(sql)
# cur = cnx.cursor(dictionary=False)
# sql = """create table customers(customer_id int not null auto_increment,
# name varchar(255) not null,
# primary key(customer_id));"""
# cur.execute(sql)
#
# sql = """create table addresses(customer_id int not null,
# street varchar(255),
# primary key(customer_id),
# foreign key(customer_id) references customers(customer_id) on delete cascade);""" #<--------- one-to-one
# cur.execute(sql)
#
# sql = """create table orders(order_id int not null auto_increment,
# customer_id int not null,
# order_details varchar(255),
# primary key(order_id),
# foreign key(customer_id) REFERENCES customers(customer_id));""" #<--------- one-to-many
# cur.execute(sql)
# #
# sql = """create table items(item_id int not null auto_increment,
# description varchar(255),
# primary key(item_id));"""
# cur.execute(sql)
#
# sql = """create table items_orders(id int auto_increment,
# item_id int not null,
# order_id int not null,
# primary key(id),
# foreign key(order_id) REFERENCES orders(order_id), #<--------- extra junction table: many-to-many
# FOREIGN KEY(item_id) REFERENCES items(item_id))"""
# cur.execute(sql)
# sql = """insert into items_orders(order_id, item_id) VALUES (1,1), (2,1), (2,2);"""
# cur.execute(sql)
# sql = """insert into customers(name) VALUES ("Januszaaa"), ("Kubaaaa"), ("Wojtekkkk");"""
# cur.execute(sql)
# sql = """insert into addresses(customer_id, street) VALUES (1, "ul. jeden"), (2, "ulica dwa");"""
# cur.execute(sql)
# sql = """insert into orders(customer_id, order_details) VALUES (3, "zam1"), (3, "zam2"), (1, "zam3");"""
# cur.execute(sql)
# sql = """insert into items(description) VALUES ("itm1"), ("itm2"), ("itm3");"""
# cur.execute(sql);
# sql = """select * from customers join addresses on customers.customer_id=addresses.customer_id
# where customers.customer_id=2;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# sql = """select * from customers join orders on customers.customer_id=orders.customer_id
# where customers.customer_id=3;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# sql = """select * from orders join items_orders on orders.order_id=items_orders.order_id;"""
# cur.execute(sql)
# for row in cur:
# print(row)
# insert into addresses(customer_id, street) values (6,"ul. zaciszna"); #<--- 6: adds the address only if that ID exists in customers
# delete from customers where customer_id = 6; # <--- this also deleted the address (cascade)
# ===============================================================================
# Query formatting
# ===============================================================================
a = "user_name"
b = "user_id"
i = 1
ii = 6
sql = """"select {}, {} from user where user_id = %s or user_id = %s""".format(b, a) #/todo dlaczegoto nie działą 1
cur.execute(sql,(i, ii))
for row in cur:
print(row)
cnx.commit()
cur.close()
cnx.close()
|
[
"wojciech.gaudnik@gmail.com"
] |
wojciech.gaudnik@gmail.com
|
f7e2098e769e91b1838c62aee43d87264b9aa9cb
|
052d6ac57f2026aba22249368149b18027c78342
|
/frontstage_api/resources/register/confirm_organisation_survey.py
|
6331b7150306a3ab3887cebaf9c1d5eb733780ca
|
[
"MIT"
] |
permissive
|
ONSdigital/ras-frontstage-api
|
c34b41185cc825b49262c1879ad559778a54dbfc
|
7bb32a85868e2a241b8a0331b884155a36450669
|
refs/heads/master
| 2018-07-15T00:35:22.130352
| 2018-06-01T14:09:13
| 2018-06-01T14:09:13
| 105,001,932
| 2
| 1
|
MIT
| 2018-06-01T14:09:14
| 2017-09-27T09:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
import logging
from flask import jsonify, make_response, request
from flask_restplus import Resource, fields
from structlog import wrap_logger
from frontstage_api import auth, register_api
from frontstage_api.controllers import case_controller, collection_exercise_controller, iac_controller, party_controller, survey_controller
logger = wrap_logger(logging.getLogger(__name__))
enrolment_details = register_api.model('EnrolmentDetails', {
'enrolment_code': fields.String(required=True),
})
@register_api.route('/confirm-organisation-survey')
class ConfirmOrganisationSurvey(Resource):
@staticmethod
@auth.login_required
@register_api.expect(enrolment_details, validate=True)
def post():
logger.info('Attempting to retrieve organisation and survey data')
enrolment_code = request.get_json().get('enrolment_code')
# Verify enrolment code is active
iac = iac_controller.get_iac_from_enrolment(enrolment_code)
if not iac['active']:
return make_response(jsonify(iac), 401)
# Get organisation name
case = case_controller.get_case_by_enrolment_code(enrolment_code)
business_party_id = case['caseGroup']['partyId']
organisation_name = party_controller.get_party_by_business_id(business_party_id).get('name')
# Get survey name
collection_exercise_id = case['caseGroup']['collectionExerciseId']
collection_exercise = collection_exercise_controller.get_collection_exercise(collection_exercise_id)
survey_id = collection_exercise['surveyId']
survey_name = survey_controller.get_survey(survey_id).get('longName')
response_json = {
"organisation_name": organisation_name,
"survey_name": survey_name
}
logger.info('Successfully retrieved organisation and survey data')
return make_response(jsonify(response_json), 200)
|
[
"noreply@github.com"
] |
noreply@github.com
|
39784b00ffbcca86d07d92e619feaf2a638306a7
|
ff81b6f0e467db2dde8afdf8370fae4bae179460
|
/flaskapp-docker/flaskapp/setup.py
|
d9ae9d271b4cf8c9b708fd4af8d5733387fb17a4
|
[
"MIT"
] |
permissive
|
hammadyasir/my_flask_docker
|
c1ca66bf0be482de82e8fc4c9018fbb926524bc0
|
ca142c0bac4ff99e6765630fb4b38e09142fd30a
|
refs/heads/main
| 2023-06-27T06:28:02.711996
| 2021-07-22T14:03:46
| 2021-07-22T14:03:46
| 388,434,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
from setuptools import setup
setup(
name='project',
packages=['project'],
include_package_data=True,
install_requires=[
'flask',
],
)
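
# usage sketch, from the directory containing this setup.py:
#   pip install -e .
# installs the `project` package in editable mode for local development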
|
[
"49805944+hammadyasir@users.noreply.github.com"
] |
49805944+hammadyasir@users.noreply.github.com
|
a35d8d4bfeb2701f3c8a5b9ffc1d88786aa08395
|
41106cdb25e50720dd7e06c2a9cfea5928a42c6c
|
/scripts/convert/converter/convert_caffe2.py
|
17275491fa576107a2b251d691d776863ed6f85b
|
[
"Apache-2.0"
] |
permissive
|
judgeeeeee/klever-model-registry
|
58edb1da904667d429bb35b3ebeaa3361e860364
|
7ae97a4babf0861132976494fc8ac04ca40d4af3
|
refs/heads/master
| 2023-02-22T22:47:01.736458
| 2020-12-03T06:25:24
| 2020-12-03T06:25:24
| 317,134,591
| 0
| 1
|
Apache-2.0
| 2020-11-30T06:46:06
| 2020-11-30T06:46:05
| null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
import os
import onnx
import caffe2.python.onnx.frontend
from caffe2.proto import caffe2_pb2
from .base_convert import BaseConverter
INIT_NET = 'init_net.pb'
PREDICT_NET = 'predict_net.pb'
DEL_ATTR = 'ws_nbytes_limit'
MODEL_NAME = 'model.onnx'
def del_attr(netdef):
for op in netdef.op:
for i, attr in enumerate(op.arg):
if attr.name == DEL_ATTR:
op.arg.pop(i)
def np2onnx(s):
def _modified2np(_s):
if _s == 'float32':
return 'float'
if _s == 'float64':
return 'double'
return _s
s = _modified2np(s)
return onnx.TensorProto.DataType.Value(s.upper())
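# e.g. np2onnx('float32') -> onnx.TensorProto.FLOAT: numpy dtype names are first
# mapped to ONNX's 'float'/'double' spelling, then upper-cased into the enum.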
class Caffe2ToONNX(BaseConverter):
def _load_model(self):
self.init_net = self._find_with_name(INIT_NET)
self.predict_net = self._find_with_name(PREDICT_NET)
def _parse_input(self):
value_info = {}
for input in self.input_value:
value_info[input['name']] = (np2onnx(input['dType']),
tuple(input['size']))
return value_info
def _convert(self):
self._load_model()
value_info = self._parse_input()
predict_net = caffe2_pb2.NetDef()
with open(self.predict_net, 'rb') as f:
predict_net.ParseFromString(f.read())
init_net = caffe2_pb2.NetDef()
with open(self.init_net, 'rb') as f:
init_net.ParseFromString(f.read())
del_attr(predict_net)
out_path = os.path.join(self.output_dir, 'model', MODEL_NAME)
onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
predict_net,
init_net,
value_info,
)
onnx.save(onnx_model, out_path)
if __name__ == '__main__':
convert = Caffe2ToONNX()
convert.convert()
|
[
"noreply@github.com"
] |
noreply@github.com
|
135b320f762c1a45836db74f49c2c6bc2fe6c8fe
|
d5ed53c58da60caba413869572f7c4abb0e5f666
|
/class_factorial.py
|
89845687e40e9b96da1996073fc246e5a315afcc
|
[] |
no_license
|
Me-Pri/Python-programs
|
e8269f06ef30e7360fda1fa4398577d4845b8fa6
|
96fa0d17e4de4a21640105b6387a483de652987f
|
refs/heads/main
| 2023-01-31T14:04:33.327260
| 2020-12-17T06:38:54
| 2020-12-17T06:38:54
| 322,203,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
class factorial:
    no = 0
    fact = 1

    def input(self):
        self.no = int(input("Enter the no of terms: "))

    def factor(self):
        for i in range(1, self.no + 1):
            self.fact = self.fact * i
        print("The factorial of {0} is {1}".format(self.no, self.fact))

fac = factorial()
fac.input()
fac.factor()
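
# For reference (separate from the exercise): the standard library computes
# the same value directly, e.g. math.factorial(5) == 120.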
|
[
"noreply@github.com"
] |
noreply@github.com
|
97d8d09c16f1edd913cb3db8bb84efd98193c1b5
|
fb41b080244208be9aedfeed517c93289ca0ecca
|
/files/shop/migrations/0030_delete_kurs_d.py
|
f70850f386c5e18501cb5637dd5953982618d2a6
|
[] |
no_license
|
EddieMorra/mebli
|
8824b7943f81a7738dea3f65397e97e9e98f0f62
|
a3b3ba7aa0abc82ab688f53263dd7a3a6164c1f6
|
refs/heads/master
| 2022-12-12T01:37:27.306728
| 2020-04-06T17:36:48
| 2020-04-06T17:36:48
| 253,088,733
| 0
| 0
| null | 2022-12-08T04:00:04
| 2020-04-04T20:03:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
# Generated by Django 3.0.2 on 2020-03-28 02:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0029_remove_product_kurs_d'),
]
operations = [
migrations.DeleteModel(
name='Kurs_d',
),
]
|
[
"rom4egful@gmail.com"
] |
rom4egful@gmail.com
|
b0e53f0386e068a848db093380db2c99a669e4ea
|
5f4cf695ffa0460aa42d245b77cbe273d249bd9c
|
/lists/tests.py
|
c23480c4e2089804ef59e85ce28fc7ffb23b2c88
|
[] |
no_license
|
codebleeder/superlist
|
d616ad77f601ac28db9f075fb8f4547534927b27
|
fc850ab7d4f8bea90805a79117509726ae323192
|
refs/heads/master
| 2021-01-10T13:10:02.072566
| 2015-12-30T09:00:13
| 2015-12-30T09:00:13
| 48,778,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.test import TestCase
from django.core.urlresolvers import resolve
from lists.views import home_page
# Create your tests here.
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
found = resolve('/')
self.assertEqual(found.func, home_page)
|
[
"sharad.shivmath@gmail.com"
] |
sharad.shivmath@gmail.com
|
6464b7dec09ac442b5f4aa129661e465bc584d1b
|
bbd44d1f91bf1ed83778bf5086b9fa625794849c
|
/test2.py
|
69b16dc8ba9c36cb9c54fead66cd5bbe9fc83cc5
|
[] |
no_license
|
hyperloop11/flask-blog
|
a1c1175653a2183285d737a119021a1ab6a72519
|
a701f3995a96da72da6dbff2c024265cc9438f35
|
refs/heads/main
| 2023-03-03T19:02:38.717568
| 2021-02-15T17:59:26
| 2021-02-15T17:59:26
| 335,084,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from test1 import age
class Person:
p_age = age
def __init__(self, name):
self.name = name
|
[
"shirin.kaul11@gmail.com"
] |
shirin.kaul11@gmail.com
|
60b4a65b29fce7b704390b16827b172a69b43b49
|
00105bf59b9f4b250fdcc33c01aef954173fd4a3
|
/7. Linear_Regression/Linear Regression.py
|
e7232e2263e99d9bae5b2ce670d43dccf0c8bd9c
|
[] |
no_license
|
up-data-science/ML_1_Exercise
|
7305410e25b159813c70dc05141c9dee2f75b189
|
417ede59dd32370b30b0fc5b8305f71da6a5774d
|
refs/heads/master
| 2020-06-28T16:15:53.243595
| 2019-08-02T18:18:31
| 2019-08-02T18:18:31
| 200,279,229
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
# coding: utf-8
# # Linear Regression
#
# In this tutorial we will implement a linear regression model. We will also implement a function that splits the available data into a training and a testting part.
#
# ## Problem Setting
#
# We will use the Boston Housing Dataset. This dataset contains information collected by the U.S Census Service concerning housing in the city of Boston in the state of Massachusetts in 1978. Our goal is to predict the median value of the houses in a particular town in the city of Boston given its attributes. Check the file ’housing.names’ for more information on the attributes.
# In[ ]:
from urllib.request import urlretrieve
import pandas as pd
import numpy as np
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
get_ipython().magic(u'load_ext autoreload')
get_ipython().magic(u'autoreload 2')
from sklearn.datasets import load_boston  # removed in scikit-learn 1.2; an older sklearn is assumed here
boston=load_boston()
urllib.request.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.names", "housing.names")
df=pd.DataFrame(boston.data)
df.columns=['crime_rate','res_land_zoned','industry','charles_river','nox','avg_num_rooms','prop_bf_1940','dst_emply_center','rd_highway_idx','tax_rate','stdnt_tchr_ratio','prop_blacks','low_status_pct']
X=boston.data
y=boston.target
# In[ ]:
df.head(10)
# ### Exercise 1
#
# Write the function *split_train_test(X,y,split,seed)* which, given an instance matrix $X \in \mathbb{R}^{N \times D}$, labels $y \in Y^N$, a split ratio in $[0, 1]$ and a random seed $\in \mathbb{Z}$, splits the dataset into $(\mathrm{split} \times 100)\%$ of the instances for training our model and the rest for testing, i.e.
#
# $$ \left|X_{\text{train}}\right| = \lceil \text{split} \cdot N \rceil, \qquad |X_{\text{train}}| + |X_{\text{test}}| = N. $$
# Make sure you use the given random number generator seed so we all get the same results. The function is supposed to return:
#
# - X_train, y_train: the training instances and labels;
# - X_test, y_test: the test instances and labels,
#
# in the order given above.
#
# Hint: It may be helpful to use shuffling functionality (e.g. np.random.shuffle).
# In[ ]:
def split_train_test(X,y,split,seed):
##################
#INSERT CODE HERE#
##################
return None # X_train, y_train, X_test, y_test
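# One possible implementation for cross-checking your own (the helper name
# split_train_test_sketch is illustrative, not part of the exercise). A seeded
# permutation is equivalent to the np.random.shuffle hint above:
def split_train_test_sketch(X, y, split, seed):
    rng = np.random.RandomState(seed)            # reproducible shuffling
    idx = rng.permutation(X.shape[0])            # shuffled row indices
    n_train = int(np.ceil(split * X.shape[0]))   # |X_train| = ceil(split * N)
    return (X[idx[:n_train]], y[idx[:n_train]],
            X[idx[n_train:]], y[idx[n_train:]])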
# ### Exercise 2
#
# Write the function *train_linear_reg(X_train,y_train,lmbd)*.
# Implement the ridge regression model (slide 24). The function should output the learned weight vector $\theta \in \mathbb{R}^D$ or $\mathbb{R}^{D+1}$ (depending on whether you are adding *bias*).
# In[ ]:
def train_linear_reg(X, y, lmbd):
##################
#INSERT CODE HERE#
##################
return None # theta
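# One possible implementation for cross-checking your own (the helper name
# train_linear_reg_sketch is illustrative, not part of the exercise). It uses
# the closed-form ridge solution theta = (X^T X + lmbd*I)^(-1) X^T y with an
# appended bias column whose weight is left unpenalized:
def train_linear_reg_sketch(X, y, lmbd):
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])  # add bias column
    I = np.eye(Xb.shape[1])
    I[-1, -1] = 0.0                                # do not penalize the bias
    return np.linalg.solve(Xb.T.dot(Xb) + lmbd * I, Xb.T.dot(y))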
# ### Exercise 3
#
# Write the function *predict(X,theta)* which predicts the housing-value vector $y\_pred$ for a dataset $X$ and a previously trained parameter vector $\theta$.
# In[ ]:
def predict(X, theta):
##################
#INSERT CODE HERE#
##################
return None # y_pred
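# One possible implementation, matching the bias convention of the ridge
# sketch above (predict_sketch is an illustrative name, not part of the
# exercise):
def predict_sketch(X, theta):
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])  # same bias column as in training
    return Xb.dot(theta)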
# ### Exercise 4
#
# Write the function *mean_abs_loss(y_true,y_pred)* which computes the mean of the absolute differences between our prediction vector $y\_pred$ and the real housing values $y\_true$.
# In[ ]:
def mean_abs_loss(y_true,y_pred):
##################
#INSERT CODE HERE#
##################
return 0
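# One possible implementation (mean_abs_loss_sketch is an illustrative name,
# not part of the exercise):
def mean_abs_loss_sketch(y_true, y_pred):
    return np.mean(np.abs(y_true - y_pred))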
# ### Exercise 5
#
# Evaluate your solutions by running the following code.
#
# Moreover, answer the following questions: What is the most important feature in your model? Are there features that are not so important? What happens if you remove them? Are there outliers with a high absolute loss?
# In[ ]:
seed = 3
lmbd=1
split=0.7
X_train,y_train,X_test,y_test=split_train_test(X,y,split,seed)
theta=train_linear_reg(X_train,y_train,lmbd)
y_pred=predict(X_test,theta)
mae=mean_abs_loss(y_test,y_pred)
print('The mean absolute loss is {loss:0.3f}'.format(loss=mae*1000))
|
[
"noreply@github.com"
] |
noreply@github.com
|
3bafddf779602141a9656eb0ef245fd5e9719bcd
|
f5752707e33e456adecb1f6f20f8bcb53f320adf
|
/Utility_Scripts/USGS2018_spectrum.py
|
f43787449ac48dd70da6ef41c4546f2c56eb2704
|
[] |
no_license
|
alborzgh/Work_Scripts
|
4eb22f39d3ff1377a2f2fab629f65b359fda250a
|
7aa3a2e5853a4b5c050be72df3056c3fdf60dd6e
|
refs/heads/master
| 2022-12-06T08:48:50.673196
| 2020-07-15T20:06:36
| 2020-07-15T20:06:36
| 276,767,977
| 0
| 1
| null | 2020-07-15T20:06:37
| 2020-07-03T00:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
import numpy as np
import matplotlib.pylab as plt
from io import BytesIO
from io import StringIO
from zipfile import ZipFile
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
site_classes = {
'A/B':{'Vs30':1500, 'name':'AB'},
'AB':{'Vs30':1500, 'name':'AB'},
'B' :{'Vs30':1080, 'name':'B'},
'B/C':{'Vs30':760 , 'name':'BC'},
'BC':{'Vs30':760 , 'name':'BC'},
'C' :{'Vs30':530 , 'name':'C'},
'C/D':{'Vs30':365 , 'name':'CD'},
'CD':{'Vs30':365 , 'name':'CD'},
'D' :{'Vs30':260 , 'name':'D'},
'D/E':{'Vs30':185 , 'name':'DE'},
'DE':{'Vs30':185 , 'name':'DE'},
'E' :{'Vs30':150 , 'name':'E'},
}
main_zip_file_address = r'C:\AlborzFiles\MyDesktop\Literature\USGS-Hazard-Map\0p01 Degree WUS Basin Map Data.zip'
def _get_hazard_curve(site_class='B', ordinate='PGA'):
with ZipFile(main_zip_file_address, 'r') as USGS_zip_file:
lower_zip_name = fr'0p01 Degree WUS Basin Map Data/2018_nshm_{site_classes[site_class]["name"]}_vs30_{str(site_classes[site_class]["Vs30"])}_0p01_degree_seattle_basin_maps.zip'
lower_zip = BytesIO(USGS_zip_file.read(lower_zip_name))
with ZipFile(lower_zip) as lower_zip_file:
csv_address = fr'2018_nshm_{site_classes[site_class]["name"]}_vs30_{str(site_classes[site_class]["Vs30"])}_0p01_degree_seattle_basin_maps/{ordinate}/curves.csv'
with lower_zip_file.open(csv_address, 'r') as curve_file:
top_row = curve_file.readline().decode('utf-8').rstrip().split(',')[3:]
hazard_x = np.array([float(x) for x in top_row])
phantom_file = StringIO(curve_file.read().decode('utf-8'))
data = np.loadtxt(phantom_file, delimiter=',', usecols=tuple(range(1,23)))
lon = data[:,0]
lat = data[:,1]
hazard_y = data[:,2:]
del data
return (lat, lon, hazard_x, hazard_y)
def get_USGS_hazard_2018(lat, lon, site_class='B', return_period=2475):
x_vals = np.array([0.0,0.01,0.02,0.03,0.05,0.075,0.1,0.15,0.2,0.25,0.3,0.4,0.5,0.75,1.0,1.5,2.0,3.0,4.0,5.0,7.5,10.0])
y_vals = np.zeros(x_vals.shape)
for ii, x in enumerate(x_vals):
ordinate_text = ''
if x == 0:
ordinate_text = 'PGA'
else:
ordinate_text = 'SA' + str(x).replace('.','P')
lat_list, lon_list, hazard_x, hazard_y = _get_hazard_curve(site_class, ordinate_text)
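        # Window the hazard grid to points within 0.02 degrees of the
        # requested location before interpolating.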
loc_to_del = np.where(np.abs(lat_list - lat) > 0.02)
lat_list = np.delete(lat_list,loc_to_del)
lon_list = np.delete(lon_list,loc_to_del)
hazard_y = np.delete(hazard_y,loc_to_del, 0)
loc_to_del = np.where(np.abs(lon_list - lon) > 0.02)
lat_list = np.delete(lat_list,loc_to_del)
lon_list = np.delete(lon_list,loc_to_del)
hazard_y = np.delete(hazard_y,loc_to_del, 0)
cur_loc_hazard = np.zeros(hazard_x.shape)
for jj, _ in enumerate(hazard_x):
z = hazard_y[:,jj]
f = interp2d(lat_list, lon_list, z, kind='linear')
cur_loc_hazard[jj] = f(lat, lon)
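        # Interpolate the hazard curve in log-log space to recover the spectral
        # ordinate whose annual exceedance frequency equals 1/return_period.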
y_vals[ii] = np.exp(interp1d(np.log(cur_loc_hazard), np.log(hazard_x), kind='linear')(np.log(1.0/return_period)))
return x_vals, y_vals
def main():
print('150 year return period:')
x_vals, y_vals = get_USGS_hazard_2018(lat=47.572260,lon = -122.347509, site_class='E', return_period=150)
for x, y in zip(x_vals, y_vals):
print(f'{x} {y}')
print('2500 year return period:')
x_vals, y_vals = get_USGS_hazard_2018(lat=47.572260,lon = -122.347509, site_class='E', return_period=2500)
for x, y in zip(x_vals, y_vals):
print(f'{x} {y}')
if __name__ == "__main__":
main()
|
[
"alborzgh@uw.edu"
] |
alborzgh@uw.edu
|
a507e0c2ed7cc0b1606723d494231252e4ea77cc
|
ef0f296e4615d3e2109e4b906a81cc4ba24b2b29
|
/fusuma/Fusuma/DataMan.py
|
b65f16ad2cf1119ad280760c9722ffb77933ba67
|
[] |
no_license
|
hylom/fusuma
|
48504235db0fb086ecda3d711c510c700207fe42
|
0d776ae29f56826c33942fae17c45468b8353a09
|
refs/heads/master
| 2021-01-23T19:45:49.387978
| 2009-06-23T16:52:21
| 2009-06-23T16:52:21
| 159,778
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
#!/usr/bin/env python
#######################################################################
# This file is part of Fusuma website management system.
#
# Copyright (c) hylom <hylomm at gmail.com>, 2008.
#
# This file is released under the GPL.
#
# $Id: DataMan.py,v 1.1.1.1 2008/11/27 17:15:36 hylom Exp $
#######################################################################
"""
This is Fusuma's article data management module.
"""
__revision__ = "$Revision: 1.1.1.1 $"
import datetime
import sys
import os
import locale
VERSION = "0.0.1"
VERSION_DATE = VERSION + " 09/26/2008"
VERSION_SPLIT = tuple(VERSION.split('.'))
class DataMan(object):
"""
This is Fusuma's story data management class.
"""
def __init__(self, document_root):
"""
Initialize Data Manager.
@param document_root: document root directory
@type document_root: string
"""
self._document_root = document_root
def new_story(self):
"""
        Create a new story.
        """
        story = Story()
        story.set_output_dir(self._get_output_path())
        # Story.property()[""] =
        return story
def _get_output_path(self):
"""
        Return the directory where the story is saved.
        Default is: <document_root>/yyyy/mm/dd/
        """
        dt = datetime.datetime.today()
        current_locale = locale.getlocale(locale.LC_CTYPE)
        locale.setlocale(locale.LC_CTYPE, "")
        path = dt.strftime("%Y/%m/%d")
        locale.setlocale(locale.LC_CTYPE, current_locale)
        return path
class Story:
"""
DataMan's Story object.
"""
def __init__(self):
"""
Initialize Story.
"""
# store session object
# TODO: This session-data-file-name-rule may be insecure!
self._output_file = ""
self._output_dir = ""
self._story = ""
self._property = {}
def save(self):
"""
Save story to file.
"""
## accessor
def property(self):
return self._property
def output_file(self):
return self._output_file
def set_output_file(self, str):
self._output_file = str
def output_dir(self):
return self._output_dir
def set_output_dir(self, str):
self._output_dir = str
def story(self):
return self._story
def set_story(self, str):
self._story = str
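# Hypothetical usage sketch (not part of the original module; the document
# root path below is an assumption, and it relies on the new_story fix above):
if __name__ == "__main__":
    man = DataMan("/var/www/fusuma")
    story = man.new_story()             # Story with output_dir pre-set
    story.set_story("Hello, Fusuma!")
    print(story.output_dir())           # e.g. "2009/06/23"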
|
[
"hylom@users.sourceforge.jp"
] |
hylom@users.sourceforge.jp
|
6c47ab3f00e510c29034f7c38073fbb1375a1270
|
2672228cd79938f112802a7d3c6209e907c46935
|
/testsuite/tests/ui/devel/test_devel_smoke.py
|
3a477c44d2a2e96d67c821889012d437d2b338e3
|
[
"Apache-2.0"
] |
permissive
|
Hchrast/3scale-tests
|
82233d4015fc5ec9f1cad82ce411e6d48f3c056f
|
ab64592f1438a6cb878b81897164a0e495fed961
|
refs/heads/main
| 2023-08-27T21:04:35.108744
| 2021-11-08T10:08:59
| 2021-11-08T10:08:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
"""Developer portal smoke tests"""
import pytest
from testsuite import settings
from testsuite.ui.views.admin.audience import BaseAudienceView
from testsuite.ui.views.devel import BaseDevelView, AccessView, LandingView
@pytest.fixture(scope="module")
def provider_account(provider_account):
"""Fixture returns Provider account.
If `site_access_code` was changed in tests, it is restored to its original value"""
access_code = provider_account['site_access_code']
yield provider_account
provider_account.update(dict(site_access_code=access_code))
# pylint: disable=unused-argument
@pytest.mark.smoke
def test_devel_from_admin(login, navigator, browser):
"""Tests if developer portal is accessible via navigation menu (Developer portal > Visit Portal)"""
audience = navigator.navigate(BaseAudienceView)
with browser.new_tab(audience.visit_portal):
assert settings["threescale"]["devel"]["url"] in browser.url
view = LandingView(browser)
view.post_navigate()
assert view.is_displayed
# pylint: disable=unused-argument
@pytest.mark.smoke
def test_devel_login(devel_login, browser):
"""Tests simple developer portal login"""
assert BaseDevelView(browser).is_displayed
@pytest.mark.smoke
def test_empty_access_code(browser, provider_account):
"""Test developer portal accessibility when `site_access_code` is empty"""
browser.selenium.delete_all_cookies()
browser.url = settings["threescale"]["devel"]["url"]
assert AccessView(browser).is_displayed
provider_account.update(dict(site_access_code=""))
browser.selenium.refresh()
assert LandingView(browser).is_displayed
|
[
"jsmolar@redhat.com"
] |
jsmolar@redhat.com
|
998358bbb4e00e3a37420a1318e0751c5ae23214
|
edeb309cefeddfaac8dbad653a71f32d97a29d35
|
/FireModules/Websurfing/google_search_deleting_files.py
|
f6540aeef174471b43e0493f53af5b2dd6897f27
|
[
"MIT"
] |
permissive
|
alex14324/DumpsterFire
|
746c0b42734abb3a8539bdb96477d6a6488d3079
|
58a6b94d4beadb43776610bbb3bcb2a2416efe8a
|
refs/heads/master
| 2020-08-07T05:33:15.058377
| 2019-10-07T07:23:40
| 2019-10-07T07:23:40
| 213,318,310
| 0
| 0
|
MIT
| 2019-10-11T04:39:20
| 2019-10-07T07:15:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
#!/usr/bin/python
#
# Filename:
#
# Version: 1.0.0
#
# Author: Joe Gervais (TryCatchHCF)
#
# Summary:
#
# Part of the DumpsterFire Toolset. See documentation at https://github.com/TryCatchHCF/DumpsterFire
#
#
# Description:
#
#
# Example:
#
#
import urllib, time, random
from FireModules.fire_module_base_class import *
class google_search_deleting_files( FireModule ):
    def __init__(self, moofStr=""):
        # Merged the two constructors; the second definition used to shadow
        # the first, so a default argument preserves both call styles.
        self.moofStr = moofStr
        self.commentsStr = "Websurfing/google_search_deleting_files"
    def Description( self ):
        # Return directly; assigning to self.Description would shadow this method.
        return "Performs Google search on securely deleting files"
def Configure( self ):
return
def GetParameters( self ):
return ""
def SetParameters( self, parametersStr ):
print parametersStr
return
def ActivateLogging( self, logFlag ):
print self.commentsStr + ": Setting Logging flag!"
print logFlag
return
def Ignite( self ):
print self.commentsStr + ": Opening URL session for Google search on securely deleting files"
self.webSession = urllib.urlopen( 'https://www.google.com/search?q=securely+deleting+files&oq=securely+deleting+files' )
trash = self.webSession.read()
return
|
[
"noreply@github.com"
] |
noreply@github.com
|
94ebbf777a68a1f870eb8d7132960630fcd5f534
|
7573bbf969a0b90ba9015a35b0ab59c29db4688f
|
/architectures/cnn_utils.py
|
49b39fa00c4c11420eafaf715574aa4f712894ee
|
[] |
no_license
|
Huetous/pytoss
|
525adf877e5b6d1e089e718a814d0645a2890b12
|
329ac86bc7f6289cba6e25c0b9df6cfc0eb00eb8
|
refs/heads/master
| 2023-05-05T21:26:13.495008
| 2021-05-27T02:30:42
| 2021-05-27T02:30:42
| 335,869,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
import torch.nn as nn
def init_cnn(module):
if getattr(module, "bias", None) is not None:
nn.init.constant_(module.bias, 0)
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(module.weight)
for m in module.children():
init_cnn(m)
def conv(n_in, nf, ks=3, stride=1, bias=False):
return nn.Conv2d(n_in, nf, kernel_size=ks,
stride=stride, padding=ks // 2, bias=bias)
def conv_layer(n_in, n_out, kernel_size=3, stride=1, bn=True, zero_bn=False, act=True):
layers = [conv(n_in, n_out, kernel_size, stride)]
if bn:
bn = nn.BatchNorm2d(n_out)
nn.init.constant_(bn.weight, 0. if zero_bn else 1.)
layers.append(bn)
if act:
layers.append(nn.ReLU())
return nn.Sequential(*layers)
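# Hypothetical usage sketch (not part of the original module; the dummy shapes
# below are assumptions): compose conv_layer blocks into a small stem and apply
# the recursive Kaiming initialisation.
if __name__ == "__main__":
    import torch
    stem = nn.Sequential(
        conv_layer(3, 16, stride=2),        # 3 -> 16 channels, halves H and W
        conv_layer(16, 32, zero_bn=True),   # gamma-zero BN, as in residual branches
    )
    init_cnn(stem)                          # Kaiming weights, zero biases
    out = stem(torch.randn(1, 3, 32, 32))   # dummy CIFAR-sized batch
    print(out.shape)                        # -> torch.Size([1, 32, 16, 16])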
|
[
"daddudota22@mail.ru"
] |
daddudota22@mail.ru
|
d8cb4d738e3fca2d4ddb17040fa4fe5a789e0334
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_flow_falcon_visualization.py
|
51a57732e471078c158cccc29b73d4aae5586ecf
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.flow_falcon_visualization import FlowFalconVisualization # noqa: E501
from swagger_client.rest import ApiException
class TestFlowFalconVisualization(unittest.TestCase):
"""FlowFalconVisualization unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFlowFalconVisualization(self):
"""Test FlowFalconVisualization"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.flow_falcon_visualization.FlowFalconVisualization() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
8b69611ea55eff694e4f24935e45e153a3593a8b
|
fbd9cf0b31e5d433b1d2d7dfe562a660d60d27de
|
/taskmanager_app/migrations/0001_initial.py
|
23410d47e27a30020cf91198ba0ddd1c98b785c6
|
[] |
no_license
|
pragatisinghdev/taskmanager
|
b18c8f8323031e583a990aa62b0bc282a7f592c3
|
64daa154be39285996aeb4c94e58c01e49b5fbc6
|
refs/heads/master
| 2020-09-12T11:54:33.437867
| 2019-12-28T17:29:52
| 2019-12-28T17:29:52
| 222,417,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
# Generated by Django 2.2.5 on 2019-12-27 05:25
from django.db import migrations, models
import taskmanager_app.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(blank=True, max_length=50)),
('phone', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('file', models.FilePathField(path=taskmanager_app.models.images_path)),
('end_date', models.DateField()),
                ('author', models.ForeignKey(on_delete=models.CASCADE, to='taskmanager_app.User')),
],
),
]
|
[
"psingh@endosoft.com"
] |
psingh@endosoft.com
|
a6feea4e0041758fbcfcdf08169d6272e1d4ea41
|
bdba52c756cc09f192b720ea318510c265665dcd
|
/swagger_client/api/character_api.py
|
c1e1cd12abcec0f72554fd46436981b2dad6fbd7
|
[
"MIT"
] |
permissive
|
rseichter/bootini-star
|
6b38195890f383615cc2b422c365ac28c5b87292
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
refs/heads/master
| 2020-03-14T03:17:11.385048
| 2018-06-28T17:23:23
| 2018-06-28T17:23:23
| 131,416,504
| 0
| 0
|
MIT
| 2018-05-01T14:26:04
| 2018-04-28T14:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 94,170
|
py
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CharacterApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_characters_character_id(self, character_id, **kwargs): # noqa: E501
"""Get character's public information # noqa: E501
Public information about a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character's public information # noqa: E501
Public information about a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v4/characters/{character_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_agents_research(self, character_id, **kwargs): # noqa: E501
"""Get agents research # noqa: E501
Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate) --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_agents_research(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdAgentsResearch200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_agents_research_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_agents_research_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_agents_research_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get agents research # noqa: E501
Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate) --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_agents_research_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdAgentsResearch200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_agents_research" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_agents_research`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_agents_research`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/agents_research/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdAgentsResearch200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_blueprints(self, character_id, **kwargs): # noqa: E501
"""Get blueprints # noqa: E501
Return a list of blueprints the character owns --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_blueprints(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_blueprints_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_blueprints_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_blueprints_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get blueprints # noqa: E501
Return a list of blueprints the character owns --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_blueprints_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_blueprints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_blueprints`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_blueprints`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/blueprints/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdBlueprints200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_chat_channels(self, character_id, **kwargs): # noqa: E501
"""Get chat channels # noqa: E501
Return chat channels that a character is the owner or operator of --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_chat_channels(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdChatChannels200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_chat_channels_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_chat_channels_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_chat_channels_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get chat channels # noqa: E501
Return chat channels that a character is the owner or operator of --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_chat_channels_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdChatChannels200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_chat_channels" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_chat_channels`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_chat_channels`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/chat_channels/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdChatChannels200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_corporationhistory(self, character_id, **kwargs): # noqa: E501
"""Get corporation history # noqa: E501
Get a list of all the corporations a character has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_corporationhistory(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdCorporationhistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_corporationhistory_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_corporationhistory_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_corporationhistory_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get corporation history # noqa: E501
Get a list of all the corporations a character has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_corporationhistory_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdCorporationhistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_corporationhistory" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_corporationhistory`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_corporationhistory`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/corporationhistory/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdCorporationhistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_fatigue(self, character_id, **kwargs): # noqa: E501
"""Get jump fatigue # noqa: E501
Return a character's jump activation and fatigue information --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_fatigue(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdFatigueOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_fatigue_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_fatigue_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_fatigue_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get jump fatigue # noqa: E501
Return a character's jump activation and fatigue information --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_fatigue_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdFatigueOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_fatigue" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_fatigue`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_fatigue`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/fatigue/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdFatigueOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_medals(self, character_id, **kwargs): # noqa: E501
"""Get medals # noqa: E501
Return a list of medals the character has --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_medals(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_medals_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_medals_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_medals_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get medals # noqa: E501
Return a list of medals the character has --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_medals_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_medals" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_medals`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_medals`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/medals/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdMedals200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_notifications(self, character_id, **kwargs): # noqa: E501
"""Get character notifications # noqa: E501
Return character notifications --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotifications200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_notifications_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_notifications_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_notifications_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character notifications # noqa: E501
Return character notifications --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotifications200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_notifications" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_notifications`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_notifications`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/notifications/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdNotifications200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_notifications_contacts(self, character_id, **kwargs): # noqa: E501
"""Get new contact notifications # noqa: E501
Return notifications about having been added to someone's contact list --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_contacts(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotificationsContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_notifications_contacts_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_notifications_contacts_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_notifications_contacts_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get new contact notifications # noqa: E501
Return notifications about having been added to someone's contact list --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_contacts_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotificationsContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_notifications_contacts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_notifications_contacts`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_notifications_contacts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/notifications/contacts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdNotificationsContacts200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
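# Usage sketch (illustrative; same assumed 'api' object as above). This route is
# also authenticated, so the underlying ApiClient needs an evesso token:
#   contact_notes = api.get_characters_character_id_notifications_contacts(95465499)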
def get_characters_character_id_portrait(self, character_id, **kwargs): # noqa: E501
"""Get character portraits # noqa: E501
Get portrait urls for a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_portrait(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdPortraitOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_portrait_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_portrait_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_portrait_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character portraits # noqa: E501
Get portrait urls for a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_portrait_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdPortraitOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_portrait" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_portrait`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_portrait`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/portrait/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdPortraitOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
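# Usage sketch (illustrative names). The portrait route has empty auth_settings,
# so an unauthenticated ApiClient is enough; the attribute below follows the
# generated model's naming convention and is assumed here:
#   portrait = api.get_characters_character_id_portrait(95465499)
#   print(portrait.px512x512)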
def get_characters_character_id_roles(self, character_id, **kwargs): # noqa: E501
"""Get character corporation roles # noqa: E501
Returns a character's corporation roles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_roles(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdRolesOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_roles_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_roles_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_roles_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character corporation roles # noqa: E501
Returns a character's corporation roles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_roles_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdRolesOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_roles`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_roles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/roles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdRolesOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
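# Usage sketch (illustrative; authenticated route): the response is a single
# model object whose role lists (e.g. roles, roles_at_hq -- attribute names
# assumed from the response type) can be read directly:
#   roles = api.get_characters_character_id_roles(95465499)
#   print(roles.roles)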
def get_characters_character_id_standings(self, character_id, **kwargs): # noqa: E501
"""Get standings # noqa: E501
Return character standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_standings(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_standings_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_standings_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_standings_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get standings # noqa: E501
Return character standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_standings_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_standings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_standings`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_standings`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/standings/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdStandings200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
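# Usage sketch (illustrative): standings come back as a list, one entry per
# agent, NPC corporation, or faction (attribute names assumed):
#   for standing in api.get_characters_character_id_standings(95465499):
#       print(standing.from_type, standing.from_id, standing.standing)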
def get_characters_character_id_stats(self, character_id, **kwargs): # noqa: E501
"""Yearly aggregate stats # noqa: E501
Returns aggregate yearly stats for a character --- This route is cached for up to 86400 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_stats(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStats200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_stats_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_stats_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_stats_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Yearly aggregate stats # noqa: E501
Returns aggregate yearly stats for a character --- This route is cached for up to 86400 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_stats_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStats200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_stats`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_stats`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/stats/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdStats200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
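# Usage sketch (illustrative): the stats route returns one aggregate entry per
# year and is cached for 86400 seconds, so polling more than daily is wasted:
#   yearly_stats = api.get_characters_character_id_stats(95465499)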
def get_characters_character_id_titles(self, character_id, **kwargs): # noqa: E501
"""Get character corporation titles # noqa: E501
Returns a character's titles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_titles(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_titles_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_titles_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_titles_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character corporation titles # noqa: E501
Returns a character's titles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_titles_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_titles`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
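# Usage sketch (illustrative; authenticated route): each returned entry pairs a
# title ID with its name (attribute names assumed from the response model):
#   titles = api.get_characters_character_id_titles(95465499)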
def get_characters_names(self, character_ids, **kwargs): # noqa: E501
"""Get character names # noqa: E501
Resolve a set of character IDs to character names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_names(character_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] character_ids: A comma separated list of character IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_names_with_http_info(character_ids, **kwargs) # noqa: E501
else:
(data) = self.get_characters_names_with_http_info(character_ids, **kwargs) # noqa: E501
return data
def get_characters_names_with_http_info(self, character_ids, **kwargs): # noqa: E501
"""Get character names # noqa: E501
Resolve a set of character IDs to character names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_names_with_http_info(character_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] character_ids: A comma separated list of character IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_ids', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_names" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_ids' is set
if ('character_ids' not in params or
params['character_ids'] is None):
raise ValueError("Missing the required parameter `character_ids` when calling `get_characters_names`") # noqa: E501
if ('character_ids' in params and
len(params['character_ids']) > 1000):
raise ValueError("Invalid value for parameter `character_ids` when calling `get_characters_names`, number of items must be less than or equal to `1000`") # noqa: E501
if ('character_ids' in params and
len(params['character_ids']) < 1):
raise ValueError("Invalid value for parameter `character_ids` when calling `get_characters_names`, number of items must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'character_ids' in params:
query_params.append(('character_ids', params['character_ids'])) # noqa: E501
collection_formats['character_ids'] = 'csv' # noqa: E501
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/names/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersNames200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
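# Usage sketch (illustrative): character_ids is passed as a Python list of 1 to
# 1000 IDs and is serialized as a comma-separated query parameter, per
# collection_formats['character_ids'] = 'csv' above:
#   names = api.get_characters_names([95465499, 90000001])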
def post_characters_affiliation(self, characters, **kwargs): # noqa: E501
"""Character affiliation # noqa: E501
Bulk lookup of character IDs to corporation, alliance and faction --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_affiliation(characters, async=True)
>>> result = thread.get()
:param async bool
:param list[int] characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned. (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[PostCharactersAffiliation200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.post_characters_affiliation_with_http_info(characters, **kwargs) # noqa: E501
else:
(data) = self.post_characters_affiliation_with_http_info(characters, **kwargs) # noqa: E501
return data
def post_characters_affiliation_with_http_info(self, characters, **kwargs): # noqa: E501
"""Character affiliation # noqa: E501
Bulk lookup of character IDs to corporation, alliance and faction --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_affiliation_with_http_info(characters, async=True)
>>> result = thread.get()
:param async bool
:param list[int] characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned. (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[PostCharactersAffiliation200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['characters', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_characters_affiliation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'characters' is set
if ('characters' not in params or
params['characters'] is None):
raise ValueError("Missing the required parameter `characters` when calling `post_characters_affiliation`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'characters' in params:
body_params = params['characters']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/affiliation/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[PostCharactersAffiliation200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
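# Usage sketch (illustrative): the ID list travels in the POST body rather than
# the query string, and the route is all-or-nothing -- if any ID does not
# exist, nothing is returned:
#   affiliations = api.post_characters_affiliation([95465499, 90000001])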
def post_characters_character_id_cspa(self, character_id, characters, **kwargs): # noqa: E501
"""Calculate a CSPA charge cost # noqa: E501
Takes a source character ID in the URL and a set of target character IDs in the body; returns a CSPA charge cost --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_character_id_cspa(character_id, characters, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] characters: The target characters to calculate the charge for (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: float
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.post_characters_character_id_cspa_with_http_info(character_id, characters, **kwargs) # noqa: E501
else:
(data) = self.post_characters_character_id_cspa_with_http_info(character_id, characters, **kwargs) # noqa: E501
return data
def post_characters_character_id_cspa_with_http_info(self, character_id, characters, **kwargs): # noqa: E501
"""Calculate a CSPA charge cost # noqa: E501
Takes a source character ID in the URL and a set of target character IDs in the body; returns a CSPA charge cost --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_character_id_cspa_with_http_info(character_id, characters, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] characters: The target characters to calculate the charge for (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: float
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'characters', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_characters_character_id_cspa" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `post_characters_character_id_cspa`") # noqa: E501
# verify the required parameter 'characters' is set
if ('characters' not in params or
params['characters'] is None):
raise ValueError("Missing the required parameter `characters` when calling `post_characters_character_id_cspa`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `post_characters_character_id_cspa`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'characters' in params:
body_params = params['characters']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v4/characters/{character_id}/cspa/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='float', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
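# Usage sketch (illustrative; authenticated route): the source character goes in
# the URL, the target characters in the body, and the result is the CSPA charge
# as a float:
#   cost = api.post_characters_character_id_cspa(95465499, [90000001, 90000002])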
# --- next file: /NetworksLabCode/httpclient.py (from repo sambathkumar02/Python) ---
import socket

# Minimal HTTP/1.1 client over a raw TCP socket.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = ('saranathan.ac.in', 80)
client.connect(server)

# Build the request by hand; 'Connection: close' asks the server to close the
# socket once the response is complete, so recv() can detect end-of-response.
header = b'GET / HTTP/1.1\r\n'
header += b'Host: saranathan.ac.in\r\n'
header += b'Accept: text/html\r\n'
header += b'Connection: close\r\n'
header += b'\r\n'
client.sendall(header)  # sendall() keeps writing until every byte is sent

# Read until the server closes the connection.
response = b''
while True:
    buffer = client.recv(4096)
    if not buffer:
        break
    response += buffer

print(response.decode(errors='replace'))
client.close()
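# For comparison, a sketch of the same request using the standard library's
# http.client instead of a raw socket (same illustrative host as above):
import http.client

conn = http.client.HTTPConnection('saranathan.ac.in', 80, timeout=10)
conn.request('GET', '/', headers={'Accept': 'text/html', 'Connection': 'close'})
resp = conn.getresponse()
print(resp.status, resp.reason)
print(resp.read().decode(errors='replace'))
conn.close()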