blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d36e69caa543c23bad4fe19a08123f7541511399 | 673f9b85708affe260b892a4eb3b1f6a0bd39d44 | /Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/joblib/_store_backends.py | ddbd209f9fedb00f32d3e93e0fcf6f572bf2c58d | [
"MIT"
] | permissive | i2tResearch/Ciberseguridad_web | feee3fe299029bef96b158d173ce2d28ef1418e4 | e6cccba69335816442c515d65d9aedea9e7dc58b | refs/heads/master | 2023-07-06T00:43:51.126684 | 2023-06-26T00:53:53 | 2023-06-26T00:53:53 | 94,152,032 | 14 | 0 | MIT | 2023-09-04T02:53:29 | 2017-06-13T00:21:00 | Jupyter Notebook | UTF-8 | Python | false | false | 14,530 | py | """Storage providers backends for Memory caching."""
import re
import os
import os.path
import datetime
import json
import shutil
import warnings
import collections
import operator
import threading
from abc import ABCMeta, abstractmethod
from ._compat import with_metaclass, _basestring
from .backports import concurrency_safe_rename
from .disk import mkdirp, memstr_to_bytes, rm_subdirs
from . import numpy_pickle
CacheItemInfo = collections.namedtuple('CacheItemInfo',
'path size last_access')
def concurrency_safe_write(object_to_write, filename, write_func):
    """Write *object_to_write* to a uniquely named temporary file.

    The temporary name embeds the current thread id and process id so that
    concurrent writers never collide on the same path.  The caller is then
    expected to atomically move the returned file onto *filename*.

    Parameters
    ----------
    object_to_write: object
        Payload handed verbatim to *write_func*.
    filename: str
        Final destination path; used as the prefix of the temporary name.
    write_func: callable
        Invoked as ``write_func(object_to_write, temporary_filename)``.

    Returns
    -------
    str
        The path of the temporary file that was written.
    """
    unique_suffix = 'thread-{}-pid-{}'.format(
        id(threading.current_thread()), os.getpid())
    temporary_filename = '{}.{}'.format(filename, unique_suffix)
    write_func(object_to_write, temporary_filename)
    return temporary_filename
class StoreBackendBase(with_metaclass(ABCMeta)):
    """Helper Abstract Base Class which defines all methods that
    a StorageBackend must implement."""

    # Base location of the store (e.g. a directory path on a filesystem);
    # set by configure().
    location = None

    @abstractmethod
    def _open_item(self, f, mode):
        """Opens an item on the store and return a file-like object.

        This method is private and only used by the StoreBackendMixin object.

        Parameters
        ----------
        f: a file-like object
            The file-like object where an item is stored and retrieved
        mode: string, optional
            the mode in which the file-like object is opened; allowed values
            are 'rb', 'wb'

        Returns
        -------
        a file-like object
        """

    @abstractmethod
    def _item_exists(self, location):
        """Checks if an item location exists in the store.

        This method is private and only used by the StoreBackendMixin object.

        Parameters
        ----------
        location: string
            The location of an item. On a filesystem, this corresponds to the
            absolute path, including the filename, of a file.

        Returns
        -------
        True if the item exists, False otherwise
        """

    @abstractmethod
    def _move_item(self, src, dst):
        """Moves an item from src to dst in the store.

        This method is private and only used by the StoreBackendMixin object.

        Parameters
        ----------
        src: string
            The source location of an item
        dst: string
            The destination location of an item
        """

    @abstractmethod
    def create_location(self, location):
        """Creates a location on the store.

        Parameters
        ----------
        location: string
            The location in the store. On a filesystem, this corresponds to a
            directory.
        """

    @abstractmethod
    def clear_location(self, location):
        """Clears a location on the store.

        Parameters
        ----------
        location: string
            The location in the store. On a filesystem, this corresponds to a
            directory or a filename absolute path
        """

    @abstractmethod
    def get_items(self):
        """Returns the whole list of items available in the store.

        Returns
        -------
        The list of items identified by their ids (e.g filename in a
        filesystem).
        """

    @abstractmethod
    def configure(self, location, verbose=0, backend_options=dict()):
        """Configures the store.

        Parameters
        ----------
        location: string
            The base location used by the store. On a filesystem, this
            corresponds to a directory.
        verbose: int
            The level of verbosity of the store
        backend_options: dict
            Contains a dictionary of named parameters used to configure the
            store backend.
        """
class StoreBackendMixin(object):
    """Class providing all logic for managing the store in a generic way.

    The StoreBackend subclass has to implement 3 methods: create_location,
    clear_location and configure. The StoreBackend also has to provide
    a private _open_item, _item_exists and _move_item methods. The _open_item
    method has to have the same signature as the builtin open and return a
    file-like object.
    """

    def load_item(self, path, verbose=1, msg=None):
        """Load an item from the store given its path as a list of
        strings."""
        full_path = os.path.join(self.location, *path)

        if verbose > 1:
            if verbose < 10:
                print('{0}...'.format(msg))
            else:
                print('{0} from {1}'.format(msg, full_path))

        # mmap_mode is only defined by memory-mappable backends (e.g. the
        # filesystem backend); fall back to None otherwise.
        mmap_mode = (None if not hasattr(self, 'mmap_mode')
                     else self.mmap_mode)

        filename = os.path.join(full_path, 'output.pkl')
        if not self._item_exists(filename):
            raise KeyError("Non-existing item (may have been "
                           "cleared).\nFile %s does not exist" % filename)

        # file-like object cannot be used when mmap_mode is set
        if mmap_mode is None:
            with self._open_item(filename, "rb") as f:
                item = numpy_pickle.load(f)
        else:
            item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
        return item

    def dump_item(self, path, item, verbose=1):
        """Dump an item in the store at the path given as a list of
        strings."""
        try:
            item_path = os.path.join(self.location, *path)
            if not self._item_exists(item_path):
                self.create_location(item_path)
            filename = os.path.join(item_path, 'output.pkl')
            if verbose > 10:
                print('Persisting in %s' % item_path)

            def write_func(to_write, dest_filename):
                with self._open_item(dest_filename, "wb") as f:
                    numpy_pickle.dump(to_write, f,
                                      compress=self.compress)

            self._concurrency_safe_write(item, filename, write_func)
        except:  # noqa: E722
            # NOTE(review): deliberate best-effort write -- any failure
            # (typically a race on directory creation by a concurrent
            # process) is silently ignored; the bare string below is the
            # original author's pseudo-comment kept for byte compatibility.
            " Race condition in the creation of the directory "

    def clear_item(self, path):
        """Clear the item at the path, given as a list of strings."""
        item_path = os.path.join(self.location, *path)
        if self._item_exists(item_path):
            self.clear_location(item_path)

    def contains_item(self, path):
        """Check if there is an item at the path, given as a list of
        strings"""
        item_path = os.path.join(self.location, *path)
        filename = os.path.join(item_path, 'output.pkl')

        return self._item_exists(filename)

    def get_item_info(self, path):
        """Return information about item."""
        return {'location': os.path.join(self.location,
                                         *path)}

    def get_metadata(self, path):
        """Return actual metadata of an item."""
        try:
            item_path = os.path.join(self.location, *path)
            filename = os.path.join(item_path, 'metadata.json')
            with self._open_item(filename, 'rb') as f:
                return json.loads(f.read().decode('utf-8'))
        except:  # noqa: E722
            # Missing or unreadable metadata is not an error: return an
            # empty mapping.
            return {}

    def store_metadata(self, path, metadata):
        """Store metadata of a computation."""
        try:
            item_path = os.path.join(self.location, *path)
            self.create_location(item_path)
            filename = os.path.join(item_path, 'metadata.json')

            def write_func(to_write, dest_filename):
                with self._open_item(dest_filename, "wb") as f:
                    f.write(json.dumps(to_write).encode('utf-8'))

            self._concurrency_safe_write(metadata, filename, write_func)
        except:  # noqa: E722
            # Best-effort: metadata is auxiliary, failures are ignored.
            pass

    def contains_path(self, path):
        """Check cached function is available in store."""
        func_path = os.path.join(self.location, *path)
        # NOTE(review): ``object_exists`` is not defined by this mixin nor by
        # FileSystemStoreBackend in this file -- presumably supplied by a
        # concrete backend; verify (it looks like it should be
        # ``self._item_exists``).
        return self.object_exists(func_path)

    def clear_path(self, path):
        """Clear all items with a common path in the store."""
        func_path = os.path.join(self.location, *path)
        if self._item_exists(func_path):
            self.clear_location(func_path)

    def store_cached_func_code(self, path, func_code=None):
        """Store the code of the cached function."""
        func_path = os.path.join(self.location, *path)
        if not self._item_exists(func_path):
            self.create_location(func_path)

        if func_code is not None:
            filename = os.path.join(func_path, "func_code.py")
            with self._open_item(filename, 'wb') as f:
                f.write(func_code.encode('utf-8'))

    def get_cached_func_code(self, path):
        """Return the code of the cached function."""
        path += ['func_code.py', ]
        filename = os.path.join(self.location, *path)
        try:
            with self._open_item(filename, 'rb') as f:
                return f.read().decode('utf-8')
        except:  # noqa: E722
            # Propagate the original error unchanged to the caller.
            raise

    def get_cached_func_info(self, path):
        """Return information related to the cached function if it exists."""
        return {'location': os.path.join(self.location, *path)}

    def clear(self):
        """Clear the whole store content."""
        self.clear_location(self.location)

    def reduce_store_size(self, bytes_limit):
        """Reduce store size to keep it under the given bytes limit."""
        items_to_delete = self._get_items_to_delete(bytes_limit)

        for item in items_to_delete:
            if self.verbose > 10:
                print('Deleting item {0}'.format(item))
            try:
                self.clear_location(item.path)
            except (OSError, IOError):
                # Even with ignore_errors=True shutil.rmtree
                # can raise OSError (IOError in python 2) with
                # [Errno 116] Stale file handle if another process
                # has deleted the folder already.
                pass

    def _get_items_to_delete(self, bytes_limit):
        """Get items to delete to keep the store under a size limit."""
        if isinstance(bytes_limit, _basestring):
            bytes_limit = memstr_to_bytes(bytes_limit)

        items = self.get_items()
        size = sum(item.size for item in items)

        to_delete_size = size - bytes_limit
        if to_delete_size < 0:
            return []

        # We want to delete first the cache items that were accessed a
        # long time ago
        items.sort(key=operator.attrgetter('last_access'))

        items_to_delete = []
        size_so_far = 0

        for item in items:
            if size_so_far > to_delete_size:
                break

            items_to_delete.append(item)
            size_so_far += item.size

        return items_to_delete

    def _concurrency_safe_write(self, to_write, filename, write_func):
        """Writes an object into a file in a concurrency-safe way."""
        # Write to a unique temporary name, then atomically move into place.
        temporary_filename = concurrency_safe_write(to_write,
                                                    filename, write_func)
        self._move_item(temporary_filename, filename)

    def __repr__(self):
        """Printable representation of the store location."""
        return '{class_name}(location="{location}")'.format(
            class_name=self.__class__.__name__, location=self.location)
class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
    """A StoreBackend used with local or network file systems."""

    # Concrete store primitives map directly onto plain filesystem calls.
    _open_item = staticmethod(open)
    _item_exists = staticmethod(os.path.exists)
    _move_item = staticmethod(concurrency_safe_rename)

    def clear_location(self, location):
        """Delete location on store."""
        if (location == self.location):
            # Clearing the root of the store: keep the root directory
            # itself, only remove its sub-directories.
            rm_subdirs(location)
        else:
            shutil.rmtree(location, ignore_errors=True)

    def create_location(self, location):
        """Create object location on store"""
        mkdirp(location)

    def get_items(self):
        """Returns the whole list of items available in the store."""
        items = []

        for dirpath, _, filenames in os.walk(self.location):
            # Cache item directories are named after a 32-char hex hash.
            is_cache_hash_dir = re.match('[a-f0-9]{32}',
                                         os.path.basename(dirpath))

            if is_cache_hash_dir:
                output_filename = os.path.join(dirpath, 'output.pkl')
                try:
                    last_access = os.path.getatime(output_filename)
                except OSError:
                    try:
                        last_access = os.path.getatime(dirpath)
                    except OSError:
                        # The directory has already been deleted
                        continue

                last_access = datetime.datetime.fromtimestamp(last_access)

                try:
                    full_filenames = [os.path.join(dirpath, fn)
                                      for fn in filenames]
                    dirsize = sum(os.path.getsize(fn)
                                  for fn in full_filenames)
                except OSError:
                    # Either output_filename or one of the files in
                    # dirpath does not exist any more. We assume this
                    # directory is being cleaned by another process already
                    continue

                items.append(CacheItemInfo(dirpath, dirsize,
                                           last_access))

        return items

    def configure(self, location, verbose=1, backend_options=None):
        """Configure the store backend.

        For this backend, valid store options are 'compress' and 'mmap_mode'
        """
        if backend_options is None:
            backend_options = {}

        # setup location directory
        self.location = location
        if not os.path.exists(self.location):
            mkdirp(self.location)

        # item can be stored compressed for faster I/O
        self.compress = backend_options.get('compress', False)

        # FileSystemStoreBackend can be used with mmap_mode options under
        # certain conditions.
        mmap_mode = backend_options.get('mmap_mode')
        if self.compress and mmap_mode is not None:
            warnings.warn('Compressed items cannot be memmapped in a '
                          'filesystem store. Option will be ignored.',
                          stacklevel=2)

        self.mmap_mode = mmap_mode
        self.verbose = verbose
| [
"ulcamilo@gmail.com"
] | ulcamilo@gmail.com |
133d9271892fc9ec2092352c9c8710004c210f59 | c69b8b1ac98f26de39292c5e058f8de5d6776204 | /ditto/utils/commands.py | 4bc2c63b8d5f0846600f8b3629a42eaf0ff5006d | [
"MIT"
] | permissive | Kuchenmampfer/Ditto | 1ba9c2909d43c4d7f36f62126523b510d76500db | 435d539c059a25207c70c74818233afe26ad4a38 | refs/heads/master | 2023-07-10T19:29:47.375307 | 2021-08-10T10:52:39 | 2021-08-10T10:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from typing import TypeVar
from discord.ext import commands
from .. import Context
__all__ = ("auto_help",)
_CT = TypeVar("_CT", bound=commands.Command)
_GT = TypeVar("_GT", bound=commands.Group)
async def _call_help(ctx: Context):
    """Send the help page for the parent group of the current command.

    Used as the callback of the auto-generated ``help`` sub-command
    installed by :func:`auto_help`.
    """
    await ctx.send_help(ctx.command.parent)
def auto_help(group: _GT, *, cls: type[_CT] = commands.Command) -> _GT:
    """Attach a hidden ``help`` sub-command to *group* and return it.

    The sub-command simply shows the group's own help page, so invoking
    ``<group> help`` behaves like asking for help on the group itself.

    Parameters
    ----------
    group:
        The command group to decorate; must be a :class:`commands.Group`.
    cls:
        Command class used to build the ``help`` sub-command.

    Raises
    ------
    TypeError
        If *group* is not a command group.
    """
    if not isinstance(group, commands.Group):
        raise TypeError("Auto help can only be applied to groups.")

    help_command = cls(_call_help, name="help", hidden=True)
    group.add_command(help_command)
    return group
| [
"josh.ja.butt@gmail.com"
] | josh.ja.butt@gmail.com |
888ab9a479623576501f7242269eb64cd0e309a9 | 8a043d4e36122ba36d0650506bf52e8c7af7611e | /backend/manage.py | 93e859dce800959c22b4e590f9797dbb5cfd025c | [] | no_license | crowdbotics-apps/test-20218 | 6f033b2d05d8ae8f6261f06de499aa5f5482cf7b | 6fdf428586e05ba97e9ef4030a93c0ac9e464770 | refs/heads/master | 2022-12-16T18:10:21.201149 | 2020-09-12T10:29:19 | 2020-09-12T10:29:19 | 294,920,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line interface.

    Points ``DJANGO_SETTINGS_MODULE`` at the project settings (unless the
    caller already set it) and forwards ``sys.argv`` to Django's command
    dispatcher.

    Raises
    ------
    ImportError
        If Django cannot be imported, with a hint about PYTHONPATH and
        virtual environments (chained to the original error).
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_20218.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4fbb695f1765d44bcbfa2e7b38fbc1edd9e9df08 | c719e7be97de57f4ffaebbf4edbd15ef0bebac75 | /webapp/__init__.py | 5adc2d1b61f9e4d194c416b843910fd80dd30945 | [] | no_license | jehiah/gtfs-data-exchange | 01feccec5da08afed61e78f51ae46fd2a9a650f6 | e9eb891d0067e20c2b157c6093c57654849c87e9 | refs/heads/master | 2021-01-21T12:11:54.754952 | 2016-04-04T00:58:58 | 2016-04-04T00:58:58 | 828,058 | 7 | 3 | null | 2015-04-28T00:32:09 | 2010-08-10T05:21:41 | Python | UTF-8 | Python | false | false | 4,133 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# import wsgiref.headers
# import wsgiref.util
from google.appengine.ext.webapp import*
class RequestHandler(RequestHandler):
    """Extended webapp RequestHandler with before/after hooks.

    Subclasses the App Engine ``RequestHandler`` brought in by the star
    import above; ``WSGIApplication2`` invokes the ``__before__`` /
    ``__after__`` hooks around each get/post/... method.
    """

    def __init__(self):
        # Default template and the dict of values eventually passed to it.
        self.template_file = 'index.html'
        self.template_vals = {}

    def __before__(self, *args):
        """
        Allows common code to be used for all get/post/delete methods
        """
        pass

    def __after__(self, *args):
        """
        This runs AFTER response is returned to browser.
        If you have follow up work that you don't want to do while
        browser is waiting put it here such as sending emails etc
        """
        pass

    def render(self, template_file=None, template_vals={}):
        """
        Helper method to render the appropriate template
        """
        # BUG FIX: the original raised ``NotImplemented()``; NotImplemented
        # is a singleton, not an exception type -- calling it is a
        # TypeError and raising it would fail anyway.  NotImplementedError
        # is the correct exception for an abstract/unfinished method.
        # (Note: the mutable ``template_vals={}`` default is kept for
        # interface compatibility; it is never mutated before the raise.)
        raise NotImplementedError()
        # if not template_file == None:
        #     self.template_file = template_file
        # self.template_vals.update(template_vals)
        # path = os.path.join(os.path.dirname(__file__), self.template_file)
        # self.response.out.write(template.render(path, self.template_vals))
class WSGIApplication2(WSGIApplication):
    """
    Modified WSGIApplication that calls the handler's ``__before__`` hook
    before the get/post/delete/etc methods and ``__after__`` AFTER the
    RESPONSE has been written. This is important because it means you
    can do work after the response has been returned to the browser.
    """

    def __init__(self, url_mapping, debug=False):
        """Initializes this application with the given URL mapping.

        Args:
            url_mapping: list of (URI, RequestHandler) pairs (e.g., [('/', ReqHan)])
            debug: if true, we send Python stack traces to the browser on errors
        """
        self._init_url_mappings(url_mapping)
        self.__debug = debug
        # Mirror the base class behaviour: remember the active instance.
        WSGIApplication.active_instance = self
        self.current_request_args = ()

    def __call__(self, environ, start_response):
        """Called by WSGI when a request comes in."""
        request = Request(environ)
        response = Response()
        WSGIApplication.active_instance = self

        # Find the first URL pattern matching the request path.
        handler = None
        groups = ()
        for regexp, handler_class in self._url_mapping:
            match = regexp.match(request.path)
            if match:
                handler = handler_class()
                handler.initialize(request, response)
                groups = match.groups()
                break

        self.current_request_args = groups

        if handler:
            try:
                # Run the common pre-dispatch hook, then dispatch on the
                # HTTP method name.
                handler.__before__(*groups)

                method = environ['REQUEST_METHOD']
                if method == 'GET':
                    handler.get(*groups)
                elif method == 'POST':
                    handler.post(*groups)
                elif method == 'HEAD':
                    handler.head(*groups)
                elif method == 'OPTIONS':
                    handler.options(*groups)
                elif method == 'PUT':
                    handler.put(*groups)
                elif method == 'DELETE':
                    handler.delete(*groups)
                elif method == 'TRACE':
                    handler.trace(*groups)
                else:
                    handler.error(501)

                # Flush the response to the client first, THEN run the
                # post-response hook so the browser is not kept waiting.
                response.wsgi_write(start_response)
                handler.__after__(*groups)
            except Exception, e:
                # Python 2 syntax; delegate error rendering to the handler.
                handler.handle_exception(e, self.__debug)
        else:
            response.set_status(404)
            response.wsgi_write(start_response)
        return ['']
| [
"jehiah@gmail.com"
] | jehiah@gmail.com |
e5ad9ee4ab2db3069159bc1728396b460821eb9d | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/shared/actions/__init__.py | 8ea7bfcba92ea4a75fd6dbc98c3916a8d11dd437 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,979 | py | # 2016.11.19 19:52:36 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/actions/__init__.py
import BigWorld
from ConnectionManager import connectionManager
from adisp import process
from debug_utils import LOG_DEBUG, LOG_ERROR
from gui.LobbyContext import g_lobbyContext
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.framework import ViewTypes
from gui.app_loader import g_appLoader
from gui.prb_control.settings import PREBATTLE_ACTION_NAME, FUNCTIONAL_FLAG
from gui.shared import g_eventBus, EVENT_BUS_SCOPE
from gui.shared.actions.chains import ActionsChain
from gui.shared.events import LoginEventEx, GUICommonEvent
from helpers import dependency
from predefined_hosts import g_preDefinedHosts, getHostURL
from skeletons.gui.login_manager import ILoginManager
__all__ = ['LeavePrbModalEntity',
'DisconnectFromPeriphery',
'ConnectToPeriphery',
'PrbInvitesInit',
'ActionsChain']
class Action(object):
    """Base class for asynchronous GUI actions.

    A freshly created action is neither running nor completed; subclasses
    flip the ``_running`` / ``_completed`` flags as their work progresses.
    """

    def __init__(self):
        super(Action, self).__init__()
        # True once the action has finished successfully.
        self._completed = False
        # True while the action is still in progress.
        self._running = False

    def invoke(self):
        """Start the action.  The base implementation does nothing."""
        pass

    def isInstantaneous(self):
        """Return True if the action completes within a single call."""
        return True

    def isRunning(self):
        """Return True while the action is still in progress."""
        return self._running

    def isCompleted(self):
        """Return True once the action has finished."""
        return self._completed
CONNECT_TO_PERIPHERY_DELAY = 2.0
class LeavePrbModalEntity(Action):
    """Action leaving the player's current modal prebattle entity
    (prebattle/unit) so that another one can be entered."""

    def __init__(self):
        super(LeavePrbModalEntity, self).__init__()
        self._running = False

    def invoke(self):
        # Imported lazily -- presumably to avoid a circular import at
        # module load time; TODO confirm.
        from gui.prb_control.dispatcher import g_prbLoader
        dispatcher = g_prbLoader.getDispatcher()
        if dispatcher:
            state = dispatcher.getFunctionalState()
            if state.hasModalEntity:
                factory = dispatcher.getControlFactories().get(state.ctrlTypeID)
                if factory:
                    ctx = factory.createLeaveCtx(flags=FUNCTIONAL_FLAG.SWITCH)
                    if ctx:
                        self._running = True
                        self.__doLeave(dispatcher, ctx)
                    else:
                        LOG_ERROR('Leave modal entity. Can not create leave ctx', state)
                else:
                    LOG_ERROR('Leave modal entity. Factory is not found', state)
            else:
                # Nothing to leave: the player is not in a prebattle.
                LOG_DEBUG('Leave modal entity. Player has not prebattle')
                self._completed = True

    def isInstantaneous(self):
        return False

    @process
    def __doLeave(self, dispatcher, ctx):
        # adisp-style asynchronous call: 'yield' waits for the leave
        # operation and receives its boolean result.
        self._completed = yield dispatcher.leave(ctx)
        if self._completed:
            LOG_DEBUG('Leave modal entity. Player left prebattle/unit.')
        else:
            LOG_DEBUG('Leave modal entity. Action was failed.')
        self._running = False
class DisconnectFromPeriphery(Action):
    """Action returning the player to the login screen; completes once the
    login view is created and the connection is closed."""

    def __init__(self):
        super(DisconnectFromPeriphery, self).__init__()

    def isInstantaneous(self):
        return False

    def invoke(self):
        self._running = True
        g_appLoader.goToLoginByRQ()

    def isRunning(self):
        # Polled by the action chain: the action finishes when the login
        # view exists, is fully created, and the connection is down.
        app = g_appLoader.getApp()
        if app:
            from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
            view = app.containerManager.getView(ViewTypes.DEFAULT)
            if view and view.settings.alias == VIEW_ALIAS.LOGIN and view._isCreated() and connectionManager.isDisconnected():
                LOG_DEBUG('Disconnect action. Player came to login')
                self._completed = True
                self._running = False
        return self._running
class ConnectToPeriphery(Action):
    """Action connecting the player to another periphery server using the
    stored login/token2 credentials, after a short fixed delay."""

    loginManager = dependency.descriptor(ILoginManager)

    def __init__(self, peripheryID):
        super(ConnectToPeriphery, self).__init__()
        self.__host = g_preDefinedHosts.periphery(peripheryID)
        # Absolute time at which the connection attempt should start.
        self.__endTime = None
        self.__credentials = g_lobbyContext.getCredentials()
        return

    def isInstantaneous(self):
        return False

    def isRunning(self):
        # Polled by the action chain: once the delay elapsed, fire the
        # actual connection attempt exactly once.
        if self.__endTime and self.__endTime <= BigWorld.time():
            self.__endTime = None
            self.__doConnect()
        return super(ConnectToPeriphery, self).isRunning()

    def invoke(self):
        if self.__host and self.__credentials:
            if len(self.__credentials) < 2:
                self._completed = False
                LOG_ERROR('Connect action. Login info is invalid')
                return
            login, token2 = self.__credentials
            if not login or not token2:
                self._completed = False
                LOG_ERROR('Connect action. Login info is invalid')
                return
            self._running = True
            # Defer the connection; isRunning() triggers it after the delay.
            self.__endTime = BigWorld.time() + CONNECT_TO_PERIPHERY_DELAY
            Waiting.show('login')
        else:
            LOG_ERROR('Connect action. Login info is invalid')
            self._completed = False
            self._running = False

    def __doConnect(self):
        login, token2 = self.__credentials
        self.__addHandlers()
        self.loginManager.initiateRelogin(login, token2, getHostURL(self.__host, token2))

    def __addHandlers(self):
        g_eventBus.addListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
        connectionManager.onConnected += self.__onConnected
        connectionManager.onRejected += self.__onRejected

    def __removeHandlers(self):
        # Must mirror __addHandlers exactly to avoid dangling listeners.
        g_eventBus.removeListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
        connectionManager.onConnected -= self.__onConnected
        connectionManager.onRejected -= self.__onRejected

    def __onConnected(self):
        self.__removeHandlers()
        self._completed = True
        self._running = False

    def __onRejected(self, status, responseData):
        self.__removeHandlers()
        self._completed = False
        self._running = False

    def __onLoginQueueClosed(self, _):
        self.__removeHandlers()
        self._completed = False
        self._running = False
        LOG_DEBUG('Connect action. Player exit from login queue')
class PrbInvitesInit(Action):
    """Action waiting for the prebattle invites manager to finish building
    its list of invites."""

    def __init__(self):
        super(PrbInvitesInit, self).__init__()

    def isInstantaneous(self):
        return False

    def invoke(self):
        from gui.prb_control.dispatcher import g_prbLoader
        invitesManager = g_prbLoader.getInvitesManager()
        if invitesManager:
            if invitesManager.isInited():
                LOG_DEBUG('Invites init action. Invites init action. List of invites is build')
                self._completed = True
            else:
                # Not ready yet: subscribe and complete in the callback.
                self._running = True
                invitesManager.onInvitesListInited += self.__onInvitesListInited
        else:
            LOG_ERROR('Invites init action. Invites manager not found')
            self._completed = False

    def __onInvitesListInited(self):
        from gui.prb_control.dispatcher import g_prbLoader
        invitesManager = g_prbLoader.getInvitesManager()
        if invitesManager:
            LOG_DEBUG('Invites init action. List of invites is build')
            invitesManager.onInvitesListInited -= self.__onInvitesListInited
        else:
            LOG_ERROR('Invites manager not found')
        self._completed = True
        self._running = False
class WaitFlagActivation(Action):
    """Action that blocks an action chain until an external flag is set.

    Some other piece of code calls :meth:`activate` / :meth:`inactivate`;
    the action completes as soon as the flag is observed active.
    """

    def __init__(self):
        super(WaitFlagActivation, self).__init__()
        # External completion flag toggled by activate()/inactivate().
        self._isActive = False

    def activate(self):
        """Set the flag; the action will complete on the next poll."""
        LOG_DEBUG('Flag is activated')
        self._isActive = True

    def inactivate(self):
        """Clear the flag; the action keeps waiting."""
        LOG_DEBUG('Flag is inactivated')
        self._isActive = False

    def invoke(self):
        # Complete immediately if the flag is already set, otherwise start
        # waiting for it.
        if self._isActive:
            self._completed = True
        else:
            self._running = True

    def isRunning(self):
        # Polled by the action chain: transition to completed once the
        # flag becomes active.
        if self._isActive:
            self._running = False
            self._completed = True
        return self._running

    def isInstantaneous(self):
        return False
class ShowCompanyWindow(Action):
    """Action opening the companies list window, deferring until the lobby
    view has finished loading if necessary."""

    def __init__(self):
        super(ShowCompanyWindow, self).__init__()
        self.__isLobbyInited = False
        # Re-run invoke() once the lobby view finishes loading.
        g_eventBus.addListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)

    def invoke(self):
        self._running = True
        self._completed = False
        if self.__isLobbyInited:
            from gui.Scaleform.daapi.view.lobby.header import battle_selector_items
            battle_selector_items.getItems().select(PREBATTLE_ACTION_NAME.COMPANIES_LIST)
            self._completed = True
            self._running = False

    def __onLobbyInited(self, _):
        self.__isLobbyInited = True
        g_eventBus.removeListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)
        self.invoke()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\actions\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:52:36 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
ae0e9dfa494523f579bf8050481c1ecf75a95d7c | 286c7b7dd9bd48c73fd94f8e89bde99a8d3f74c5 | /modelscript/base/brackets.py | 69de0b6d4787b90d2ada466c53d75e57287aad8b | [
"MIT"
] | permissive | ScribesZone/ModelScript | e7738471eff24a74ee59ec88d8b66a81aae16cdc | a36be1047283f2e470dc2dd4353f2a714377bb7d | refs/heads/master | 2023-03-18T02:43:57.953318 | 2021-03-08T15:26:40 | 2021-03-08T15:26:40 | 31,960,218 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,158 | py | # coding=utf-8
""" Conversion of a regular indented file into a bracketed file.
This module makes it possible to use regular parser like textX
with indented based language. Basically BracketedScript is a
preprocesssor that replace all indentations by some "brackets"
like { and } so that a parser can find these block markers.
The preprocessor is aware of comments (//) and documentation
lines (starting with |).
Consider for instance the following text ::
class model Cl_association01
class Elephant
class Banana
class Tree
abstract association class Meal
| Take record of the bananas eaten by elephants.
roles
eater : Elephant[0..1]
bananas : Banana[*]
The text above is bracketed as following. The character separator
are prefixed with special character to avoid confusion with the text
itself. ::
class model Cl_association01 ;
class Elephant ;
class Banana ;
class Tree ;
abstract association class Meal {
| Take record of the bananas eaten by elephants. | ;
roles {
eater : Elephant[0..1] ;
bananas : Banana[*] ; } ; } ;
For more examples see for instance testcases/cls/.mdl/*.clsb
"""
__all__ = (
'BracketError',
'BracketedScript'
)
from typing import Match, ClassVar, List
import re
# The following dependencies could be removed if necessary.
# The environment is used only to save bracketed file in a
# convenient way.
from modelscript.interfaces.environment import Environment
class BracketError(Exception):
    """Error raised for an illegal indentation in the source script.

    The 1-based number of the offending line is kept on the ``line``
    attribute; the message is carried by the Exception base class.
    """

    def __init__(self, message, line):
        super().__init__(message)
        # 1-based line number where the bad indentation was found.
        self.line = line
class BracketedScript(object):
    """ Converter of an indented file into a bracketed file.
    """

    # -- input parameters ------------------------------------------

    SPACE_INDENT: int = 4
    """ Number of spaces for each indentation. """

    IS_BLANK_LINE: ClassVar[Match[str]] = \
        re.compile('^ *((--[^@|]*)|(//.*))?$')
    """ Regular expressions matching blank lines (includes comments).
    Comments are just ignored for indentation purposes but they are still
    taken into account for regular parsing.
    The definition of comments is also implemented in the grammar
    textblocks/parser/grammar.tx
    """
    # TODO:2 remove support for ModelScript. See below
    # ModelScript1:
    # added [^@\|] so that --@ and --| are not treated as comment

    IS_DOC_LINE_REGEX: ClassVar[Match[str]] = re.compile('^ *\|')
    """ Regular expression for a documentation line. """

    # -- output parameters -----------------------------------------

    # All separators are prefixed with NUL to avoid clashing with the text.
    OPENING_BRACKET: ClassVar[str] = '\000{'
    """ Opening bracket string. """

    CLOSING_BRACKET: ClassVar[str] = '\000}'
    """ Closing bracket string. """

    EOL: ClassVar[str] = '\000;'
    """ End of line string. """

    CLOSING_DOC_LINE: ClassVar[str] = '\000|'
    """ Closing documentation line string. """

    DOC_LINE_CONTENT: ClassVar[Match[str]] = \
        re.compile(' *\| ?(?P<content>.*)\000\|\000;(\000}\000;)*$')
    """ Regular expression for a documentation line. """

    # -- output parameters -----------------------------------------

    file: str
    """ Name of the input file. """

    lines: List[str]
    """ Content of the input file represented as list of lines. """

    bracketedLines: List[str]
    """ Bracketed lines; filled when the `text` property is computed. """

    targetFilename: str
    """ Name of the output file.
    The location of the output file is computed by the Environment.
    See modelscript.interfaces.environment. """

    def __init__(self, file: str) -> None:
        self.file = file
        # Read the whole input eagerly, stripping trailing newlines.
        self.lines = [line.rstrip('\n') for line in open(file)]
        self.bracketedLines = []
        basic_file_name = self.file+'b'
        self.targetFilename = Environment.getWorkerFileName(basic_file_name)

    def _is_blank_line(self, index: int) -> bool:
        """ Check if the line is blank or a comment line """
        m = re.match(self.IS_BLANK_LINE, self.lines[index])
        return m is not None

    def _is_doc_line(self, index: int) -> bool:
        # True if the line starts (after indentation) with '|'.
        m = re.match(self.IS_DOC_LINE_REGEX, self.lines[index])
        return m is not None

    def _terminate_doc_line(self, docLine: str) -> str:
        # Append the closing doc-line marker.
        return docLine + self.CLOSING_DOC_LINE

    @classmethod
    def extractDocLineText(cls, docLine: str) -> str:
        # Inverse operation: strip markers from a bracketed doc line.
        m = re.match(cls.DOC_LINE_CONTENT, docLine)
        assert m is not None
        return m.group('content')

    def _nb_spaces(self, index: int) -> int:
        # Number of leading spaces of the given line.
        m = re.match(' *', self.lines[index])
        if m:
            return len(m.group(0))
        else:
            return 0

    def _line_indent(self, index: int) -> int:
        # Indentation level (in units of SPACE_INDENT); raises if the
        # leading spaces are not an exact multiple.
        blanks = self._nb_spaces(index)
        if blanks % self.SPACE_INDENT == 0:
            return blanks // self.SPACE_INDENT
        else:
            raise BracketError(  # raise:OK
                message='%i spaces found. Multiple of %i expected.'
                        % (blanks, self.SPACE_INDENT),
                line=index+1)

    def _suffix(self, delta: int) -> str:
        # Markers to append to the previous significant line given the
        # change of indentation level (delta).
        if delta == 1:
            return self.OPENING_BRACKET
        elif delta == 0:
            return self.EOL
        else:
            # Dedent: close one bracket per level left.
            return (
                self.EOL
                + (self.CLOSING_BRACKET+self.EOL) * - delta
            )

    @property
    def text(self) -> str:
        """ Returns the bracketed text. """
        self.bracketedLines = list(self.lines)
        # LNBL = Last Non Black Line
        lnbl_index = -1
        lnbl_indent = 0
        # take all lines + a extra virtual line to close everything
        for (index, line) in enumerate(self.lines):
            if not self._is_blank_line(index):
                indent = self._line_indent(index)
                delta = indent-lnbl_indent
                if self._is_doc_line(index):
                    self.bracketedLines[index] = (
                        self._terminate_doc_line(self.bracketedLines[index])
                    )
                if delta > 1:
                    # this will never happened for the last line
                    raise BracketError(  # raise:OK
                        message='"%s"' % line,
                        line=index+1)
                else:
                    if lnbl_index != -1:
                        self.bracketedLines[lnbl_index] \
                            += self._suffix(delta)
                    lnbl_index = index
                    lnbl_indent = indent
        # close the last line if any
        if lnbl_index != -1:
            delta = 0-lnbl_indent
            self.bracketedLines[lnbl_index] += self._suffix(delta)
        return '\n'.join(self.bracketedLines)

    def save(self) -> str:
        """ Save the bracketed text into the output file.

        :return: the name of the output file
        """
        f = open(self.targetFilename, "w")
        f.write(self.text)
        f.close()
        return self.targetFilename
import sys
if __name__ == "__main__":
source=sys.argv[1]
text = BracketedScript(source).save()
| [
"escribis@users.noreply.github.com"
] | escribis@users.noreply.github.com |
da328818e0e708e72e7445267b75ac3eacc9d658 | 67b3a18730887046d67b4930ffc6fa0793a28011 | /integration_tests/test_drawing_matplotlib_backend.py | defd0e53a041e686422c1ea694239173d19faffd | [
"MIT"
] | permissive | hh-wu/ezdxf | 38eeef4e4498411758ef87039532d9df2d5bb178 | 62509ba39b826ee9b36f19c0a5abad7f3518186a | refs/heads/master | 2022-11-11T17:53:15.144144 | 2020-07-02T10:31:36 | 2020-07-02T10:31:36 | 266,539,503 | 0 | 0 | NOASSERTION | 2020-07-02T10:31:37 | 2020-05-24T12:48:03 | null | UTF-8 | Python | false | false | 443 | py | # Created: 06.2020
# Copyright (c) 2020, Matthew Broadway
# License: MIT License
import pytest
plt = pytest.importorskip('matplotlib.pyplot')
from ezdxf.addons.drawing.matplotlib_backend import MatplotlibBackend
@pytest.fixture()
def backend():
fig, ax = plt.subplots()
return MatplotlibBackend(ax)
def test_get_text_width(backend):
assert backend.get_text_line_width(' abc', 100) > backend.get_text_line_width('abc', 100)
| [
"me@mozman.at"
] | me@mozman.at |
c459f9c7006657b277a0ba92551c3cbb372ba3a9 | 78d5a6e0846cb6b03544e4f717651ca59dfc620c | /treasury-admin/treasury/migrations/0024_auto_20180327_1612.py | c9a2524b3f70c92aa04a2a876672520105bf4e78 | [] | no_license | bsca-bank/treasury-admin | 8952788a9a6e25a1c59aae0a35bbee357d94e685 | 5167d6c4517028856701066dd5ed6ac9534a9151 | refs/heads/master | 2023-02-05T12:45:52.945279 | 2020-12-13T08:07:41 | 2020-12-13T08:07:41 | 320,323,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-27 15:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('util', '0011_auto_20180326_1157'),
('contenttypes', '0002_remove_content_type_name'),
('treasury', '0023_auto_20180325_1515'),
]
operations = [
migrations.AlterField(
model_name='fxcli',
name='content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='fxcli',
name='object_id',
field=models.PositiveIntegerField(),
),
migrations.AlterUniqueTogether(
name='fxcli',
unique_together=set([('type_product', 'content_type', 'object_id')]),
),
migrations.AlterIndexTogether(
name='fxcli',
index_together=set([('content_type', 'object_id')]),
),
]
| [
"cn.makodo@gmail.com"
] | cn.makodo@gmail.com |
0749ecae97425b881a47a22d2d5edaa1aac3752d | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/jmoiron-humanize/allPythonContent.py | 9d04b076f717a85afdaf5edba219c06ce9b45538 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,548 | py | __FILENAME__ = conf
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
projpath = os.path.abspath('..')
sys.path.append(projpath)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'humanize'
copyright = u'2010 Jason Moiron'
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
version = None
for line in open(os.path.join(projpath, 'setup.py'), 'r'):
if line.startswith('version'):
exec line
if version is None:
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = version
print ("Building release: %s, version: %s" % (release, version))
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
html_theme = 'nature'
html_theme_path = ['_theme']
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'humanizedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'humanize.tex', u'humanize Documentation',
u'Jason Moiron', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
########NEW FILE########
__FILENAME__ = compat
import sys
if sys.version_info < (3,):
string_types = (basestring,)
else:
string_types = (str,)
########NEW FILE########
__FILENAME__ = filesize
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bits & Bytes related humanization."""
suffixes = {
'decimal': ('kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
'binary': ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'),
'gnu': "KMGTPEZY",
}
def naturalsize(value, binary=False, gnu=False, format='%.1f'):
"""Format a number of byteslike a human readable filesize (eg. 10 kB). By
default, decimal suffixes (kB, MB) are used. Passing binary=true will use
binary suffixes (KiB, MiB) are used and the base will be 2**10 instead of
10**3. If ``gnu`` is True, the binary argument is ignored and GNU-style
(ls -sh style) prefixes are used (K, M) with the 2**10 definition.
Non-gnu modes are compatible with jinja2's ``filesizeformat`` filter."""
if gnu: suffix = suffixes['gnu']
elif binary: suffix = suffixes['binary']
else: suffix = suffixes['decimal']
base = 1024 if (gnu or binary) else 1000
bytes = float(value)
if bytes == 1 and not gnu: return '1 Byte'
elif bytes < base and not gnu: return '%d Bytes' % bytes
elif bytes < base and gnu: return '%dB' % bytes
for i,s in enumerate(suffix):
unit = base ** (i+2)
if bytes < unit and not gnu:
return (format + ' %s') % ((base * bytes / unit), s)
elif bytes < unit and gnu:
return (format + '%s') % ((base * bytes / unit), s)
if gnu:
return (format + '%s') % ((base * bytes / unit), s)
return (format + ' %s') % ((base * bytes / unit), s)
########NEW FILE########
__FILENAME__ = i18n
# -*- coding: utf-8 -*-
import gettext as gettext_module
from threading import local
import os.path
__all__ = ['activate', 'deactivate', 'gettext', 'ngettext']
_TRANSLATIONS = {None: gettext_module.NullTranslations()}
_CURRENT = local()
_DEFAULT_LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
def get_translation():
try:
return _TRANSLATIONS[_CURRENT.locale]
except (AttributeError, KeyError):
return _TRANSLATIONS[None]
def activate(locale, path=None):
"""Set 'locale' as current locale. Search for locale in directory 'path'
@param locale: language name, eg 'en_GB'"""
if path is None:
path = _DEFAULT_LOCALE_PATH
if locale not in _TRANSLATIONS:
translation = gettext_module.translation('humanize', path, [locale])
_TRANSLATIONS[locale] = translation
_CURRENT.locale = locale
return _TRANSLATIONS[locale]
def deactivate():
_CURRENT.locale = None
def gettext(message):
return get_translation().gettext(message)
def pgettext(msgctxt, message):
"""'Particular gettext' function.
It works with 'msgctxt' .po modifiers and allow duplicate keys with
different translations.
Python 2 don't have support for this GNU gettext function, so we
reimplement it. It works by joining msgctx and msgid by '4' byte."""
key = msgctxt + '\x04' + message
translation = get_translation().gettext(key)
return message if translation == key else translation
def ngettext(message, plural, num):
return get_translation().ngettext(message, plural, num)
def gettext_noop(message):
"""Example usage:
CONSTANTS = [gettext_noop('first'), gettext_noop('second')]
def num_name(n):
return gettext(CONSTANTS[n])"""
return message
########NEW FILE########
__FILENAME__ = number
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Humanizing functions for numbers."""
import re
from fractions import Fraction
from .import compat
from .i18n import gettext as _, gettext_noop as N_, pgettext as P_
def ordinal(value):
"""Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer or anything int() will turn into an
integer. Anything other value will have nothing done to it."""
try:
value = int(value)
except (TypeError, ValueError):
return value
t = (P_('0', 'th'),
P_('1', 'st'),
P_('2', 'nd'),
P_('3', 'rd'),
P_('4', 'th'),
P_('5', 'th'),
P_('6', 'th'),
P_('7', 'th'),
P_('8', 'th'),
P_('9', 'th'))
if value % 100 in (11, 12, 13): # special case
return "%d%s" % (value, t[0])
return '%d%s' % (value, t[value % 10])
def intcomma(value):
"""Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'. To maintain
some compatability with Django's intcomma, this function also accepts
floats."""
try:
if isinstance(value, compat.string_types):
float(value.replace(',', ''))
else:
float(value)
except (TypeError, ValueError):
return value
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
powers = [10 ** x for x in (6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]
human_powers = (N_('million'), N_('billion'), N_('trillion'), N_('quadrillion'),
N_('quintillion'), N_('sextillion'), N_('septillion'),
N_('octillion'), N_('nonillion'), N_('decillion'), N_('googol'))
def intword(value, format='%.1f'):
"""Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'. Supports up to
decillion (33 digits) and googol (100 digits). You can pass format to change
the number of decimal or general format of the number portion. This function
returns a string unless the value passed was unable to be coaxed into an int."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < powers[0]:
return str(value)
for ordinal, power in enumerate(powers[1:], 1):
if value < power:
chopped = value / float(powers[ordinal - 1])
return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped
return str(value)
def apnumber(value):
"""For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style. This always returns a string
unless the value was not int-able, unlike the Django filter."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return str(value)
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),
_('seven'), _('eight'), _('nine'))[value - 1]
def fractional(value):
'''
There will be some cases where one might not want to show
ugly decimal places for floats and decimals.
This function returns a human readable fractional number
in form of fractions and mixed fractions.
Pass in a string, or a number or a float, and this function returns
a string representation of a fraction
or whole number
or a mixed fraction
Examples:
fractional(0.3) will return '1/3'
fractional(1.3) will return '1 3/10'
fractional(float(1/3)) will return '1/3'
fractional(1) will return '1'
This will always return a string.
'''
try:
number = float(value)
except (TypeError, ValueError):
return value
wholeNumber = int(number)
frac = Fraction(number - wholeNumber).limit_denominator(1000)
numerator = frac._numerator
denominator = frac._denominator
if wholeNumber and not numerator and denominator == 1:
return '%.0f' % wholeNumber # this means that an integer was passed in (or variants of that integer like 1.0000)
elif not wholeNumber:
return '%.0f/%.0f' % (numerator, denominator)
else:
return '%.0f %.0f/%.0f' % (wholeNumber, numerator, denominator)
########NEW FILE########
__FILENAME__ = time
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Time humanizing functions. These are largely borrowed from Django's
``contrib.humanize``."""
import time
from datetime import datetime, timedelta, date
from .i18n import ngettext, gettext as _
__all__ = ['naturaldelta', 'naturaltime', 'naturalday', 'naturaldate']
def _now():
return datetime.now()
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta
def date_and_delta(value):
"""Turn a value into a date and a timedelta which represents how long ago
it was. If that's not possible, return (None, value)."""
now = _now()
if isinstance(value, datetime):
date = value
delta = now - value
elif isinstance(value, timedelta):
date = now - value
delta = value
else:
try:
value = int(value)
delta = timedelta(seconds=value)
date = now - delta
except (ValueError, TypeError):
return (None, value)
return date, abs_timedelta(delta)
def naturaldelta(value, months=True):
"""Given a timedelta or a number of seconds, return a natural
representation of the amount of time elapsed. This is similar to
``naturaltime``, but does not add tense to the result. If ``months``
is True, then a number of months (based on 30.5 days) will be used
for fuzziness between years."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
use_months = months
seconds = abs(delta.seconds)
days = abs(delta.days)
years = days // 365
days = days % 365
months = int(days // 30.5)
if not years and days < 1:
if seconds == 0:
return _("a moment")
elif seconds == 1:
return _("a second")
elif seconds < 60:
return ngettext("%d second", "%d seconds", seconds) % seconds
elif 60 <= seconds < 120:
return _("a minute")
elif 120 <= seconds < 3600:
minutes = seconds // 60
return ngettext("%d minute", "%d minutes", minutes) % minutes
elif 3600 <= seconds < 3600 * 2:
return _("an hour")
elif 3600 < seconds:
hours = seconds // 3600
return ngettext("%d hour", "%d hours", hours) % hours
elif years == 0:
if days == 1:
return _("a day")
if not use_months:
return ngettext("%d day", "%d days", days) % days
else:
if not months:
return ngettext("%d day", "%d days", days) % days
elif months == 1:
return _("a month")
else:
return ngettext("%d month", "%d months", months) % months
elif years == 1:
if not months and not days:
return _("a year")
elif not months:
return ngettext("1 year, %d day", "1 year, %d days", days) % days
elif use_months:
if months == 1:
return _("1 year, 1 month")
else:
return ngettext("1 year, %d month",
"1 year, %d months", months) % months
else:
return ngettext("1 year, %d day", "1 year, %d days", days) % days
else:
return ngettext("%d year", "%d years", years) % years
def naturaltime(value, future=False, months=True):
"""Given a datetime or a number of seconds, return a natural representation
of that time in a resolution that makes sense. This is more or less
compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
datetimes, where the tense is always figured out based on the current time.
If an integer is passed, the return value will be past tense by default,
unless ``future`` is set to True."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
# determine tense by value only if datetime/timedelta were passed
if isinstance(value, (datetime, timedelta)):
future = date > now
ago = _('%s from now') if future else _('%s ago')
delta = naturaldelta(delta)
if delta == _("a moment"):
return _("now")
return ago % delta
def naturalday(value, format='%b %d'):
"""For date values that are tomorrow, today or yesterday compared to
present day returns representing string. Otherwise, returns a string
formatted according to ``format``."""
try:
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't date-ish
return value
except (OverflowError, ValueError):
# Date arguments out of range
return value
delta = value - date.today()
if delta.days == 0:
return _('today')
elif delta.days == 1:
return _('tomorrow')
elif delta.days == -1:
return _('yesterday')
return value.strftime(format)
def naturaldate(value):
"""Like naturalday, but will append a year for dates that are a year
ago or more."""
try:
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't date-ish
return value
except (OverflowError, ValueError):
# Date arguments out of range
return value
delta = abs_timedelta(value - date.today())
if delta.days >= 365:
return naturalday(value, '%b %d %Y')
return naturalday(value)
########NEW FILE########
__FILENAME__ = base
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests base classes."""
from unittest import TestCase
class HumanizeTestCase(TestCase):
def assertManyResults(self, function, args, results):
"""Goes through a list of arguments and makes sure that function called
upon them lists a similarly ordered list of results. If more than one
argument is required, each position in args may be a tuple."""
for arg, result in zip(args, results):
if isinstance(arg, tuple):
self.assertEqual(function(*arg), result)
else:
self.assertEqual(function(arg), result)
def assertEqualDatetime(self, dt1, dt2):
self.assertEqual((dt1 - dt2).seconds, 0)
def assertEqualTimedelta(self, td1, td2):
self.assertEqual(td1.days, td2.days)
self.assertEqual(td1.seconds, td2.seconds)
########NEW FILE########
__FILENAME__ = filesize
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for filesize humanizing."""
from humanize import filesize
from .base import HumanizeTestCase
class FilesizeTestCase(HumanizeTestCase):
def test_naturalsize(self):
tests = (300, 3000, 3000000, 3000000000, 3000000000000, (300, True),
(3000, True), (3000000, True), (300, False, True), (3000, False, True),
(3000000, False, True), (1024, False, True), (10**26 * 30, False, True),
(10**26 * 30, True), 10**26 * 30,
(3141592, False, False, '%.2f'), (3000, False, True, '%.3f'),
(3000000000, False, True, '%.0f'), (10**26 * 30, True, False, '%.3f'),)
results = ('300 Bytes', '3.0 kB', '3.0 MB', '3.0 GB', '3.0 TB',
'300 Bytes', '2.9 KiB', '2.9 MiB', '300B', '2.9K', '2.9M', '1.0K', '2481.5Y',
'2481.5 YiB', '3000.0 YB',
'3.14 MB', '2.930K', '3G', '2481.542 YiB')
self.assertManyResults(filesize.naturalsize, tests, results)
########NEW FILE########
__FILENAME__ = number
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Number tests."""
from humanize import number
from .base import HumanizeTestCase
class NumberTestCase(HumanizeTestCase):
def test_ordinal(self):
test_list = ('1', '2', '3', '4', '11', '12', '13', '101', '102', '103',
'111', 'something else', None)
result_list = ('1st', '2nd', '3rd', '4th', '11th', '12th', '13th',
'101st', '102nd', '103rd', '111th', 'something else', None)
self.assertManyResults(number.ordinal, test_list, result_list)
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25, '100',
'1000', '10123', '10311', '1000000', '1234567.1234567', None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000',
'1,234,567.25', '100', '1,000', '10,123', '10,311', '1,000,000',
'1,234,567.1234567', None)
self.assertManyResults(number.intcomma, test_list, result_list)
def test_intword(self):
# make sure that powers & human_powers have the same number of items
self.assertEqual(len(number.powers), len(number.human_powers))
# test the result of intword
test_list = ('100', '1000000', '1200000', '1290000', '1000000000',
'2000000000', '6000000000000', '1300000000000000',
'3500000000000000000000', '8100000000000000000000000000000000',
None, ('1230000', '%0.2f'), 10**101)
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion', '1.3 quadrillion',
'3.5 sextillion', '8.1 decillion', None, '1.23 million',
'1'+'0'*101)
self.assertManyResults(number.intword, test_list, result_list)
def test_apnumber(self):
test_list = (1, 2, 4, 5, 9, 10, '7', None)
result_list = ('one', 'two', 'four', 'five', 'nine', '10', 'seven', None)
self.assertManyResults(number.apnumber, test_list, result_list)
def test_fractional(self):
test_list = (1, 2.0, (4.0/3.0), (5.0/6.0), '7', '8.9', 'ten', None)
result_list = ('1', '2', '1 1/3', '5/6', '7', '8 9/10', 'ten', None)
self.assertManyResults(number.fractional, test_list, result_list)
########NEW FILE########
__FILENAME__ = time
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for time humanizing."""
from mock import patch
from humanize import time
from datetime import date, datetime, timedelta
from .base import HumanizeTestCase
today = date.today()
one_day = timedelta(days=1)
class fakedate(object):
def __init__(self, year, month, day):
self.year, self.month, self.day = year, month, day
class TimeUtilitiesTestCase(HumanizeTestCase):
"""These are not considered "public" interfaces, but require tests anyway."""
def test_date_and_delta(self):
now = datetime.now()
td = timedelta
int_tests = (3, 29, 86399, 86400, 86401*30)
date_tests = [now - td(seconds=x) for x in int_tests]
td_tests = [td(seconds=x) for x in int_tests]
results = [(now - td(seconds=x), td(seconds=x)) for x in int_tests]
for t in (int_tests, date_tests, td_tests):
for arg, result in zip(t, results):
dt, d = time.date_and_delta(arg)
self.assertEqualDatetime(dt, result[0])
self.assertEqualTimedelta(d, result[1])
self.assertEqual(time.date_and_delta("NaN"), (None, "NaN"))
class TimeTestCase(HumanizeTestCase):
"""Tests for the public interface of humanize.time"""
def test_naturaldelta_nomonths(self):
now = datetime.now()
test_list = [
timedelta(days=7),
timedelta(days=31),
timedelta(days=230),
timedelta(days=400),
]
result_list = [
'7 days',
'31 days',
'230 days',
'1 year, 35 days',
]
with patch('humanize.time._now') as mocked:
mocked.return_value = now
nd_nomonths = lambda d: time.naturaldelta(d, months=False)
self.assertManyResults(nd_nomonths, test_list, result_list)
def test_naturaldelta(self):
now = datetime.now()
test_list = [
0,
1,
30,
timedelta(minutes=1, seconds=30),
timedelta(minutes=2),
timedelta(hours=1, minutes=30, seconds=30),
timedelta(hours=23, minutes=50, seconds=50),
timedelta(days=1),
timedelta(days=500),
timedelta(days=365*2 + 35),
timedelta(seconds=1),
timedelta(seconds=30),
timedelta(minutes=1, seconds=30),
timedelta(minutes=2),
timedelta(hours=1, minutes=30, seconds=30),
timedelta(hours=23, minutes=50, seconds=50),
timedelta(days=1),
timedelta(days=500),
timedelta(days=365*2 + 35),
# regression tests for bugs in post-release humanize
timedelta(days=10000),
timedelta(days=365+35),
30,
timedelta(days=365*2 + 65),
timedelta(days=365 + 4),
timedelta(days=35),
timedelta(days=65),
timedelta(days=9),
timedelta(days=365),
"NaN",
]
result_list = [
'a moment',
'a second',
'30 seconds',
'a minute',
'2 minutes',
'an hour',
'23 hours',
'a day',
'1 year, 4 months',
'2 years',
'a second',
'30 seconds',
'a minute',
'2 minutes',
'an hour',
'23 hours',
'a day',
'1 year, 4 months',
'2 years',
'27 years',
'1 year, 1 month',
'30 seconds',
'2 years',
'1 year, 4 days',
'a month',
'2 months',
'9 days',
'a year',
"NaN",
]
with patch('humanize.time._now') as mocked:
mocked.return_value = now
self.assertManyResults(time.naturaldelta, test_list, result_list)
def test_naturaltime(self):
now = datetime.now()
test_list = [
now,
now - timedelta(seconds=1),
now - timedelta(seconds=30),
now - timedelta(minutes=1, seconds=30),
now - timedelta(minutes=2),
now - timedelta(hours=1, minutes=30, seconds=30),
now - timedelta(hours=23, minutes=50, seconds=50),
now - timedelta(days=1),
now - timedelta(days=500),
now - timedelta(days=365*2 + 35),
now + timedelta(seconds=1),
now + timedelta(seconds=30),
now + timedelta(minutes=1, seconds=30),
now + timedelta(minutes=2),
now + timedelta(hours=1, minutes=30, seconds=30),
now + timedelta(hours=23, minutes=50, seconds=50),
now + timedelta(days=1),
now + timedelta(days=500),
now + timedelta(days=365*2 + 35),
# regression tests for bugs in post-release humanize
now + timedelta(days=10000),
now - timedelta(days=365+35),
30,
now - timedelta(days=365*2 + 65),
now - timedelta(days=365 + 4),
"NaN",
]
result_list = [
'now',
'a second ago',
'30 seconds ago',
'a minute ago',
'2 minutes ago',
'an hour ago',
'23 hours ago',
'a day ago',
'1 year, 4 months ago',
'2 years ago',
'a second from now',
'30 seconds from now',
'a minute from now',
'2 minutes from now',
'an hour from now',
'23 hours from now',
'a day from now',
'1 year, 4 months from now',
'2 years from now',
'27 years from now',
'1 year, 1 month ago',
'30 seconds ago',
'2 years ago',
'1 year, 4 days ago',
"NaN",
]
with patch('humanize.time._now') as mocked:
mocked.return_value = now
self.assertManyResults(time.naturaltime, test_list, result_list)
def test_naturalday(self):
tomorrow = today + one_day
yesterday = today - one_day
if today.month != 3:
someday = date(today.year, 3, 5)
someday_result = 'Mar 05'
else:
someday = date(today.year, 9, 5)
someday_result = 'Sep 05'
valerrtest = fakedate(290149024, 2, 2)
overflowtest = fakedate(120390192341, 2, 2)
test_list = (today, tomorrow, yesterday, someday, '02/26/1984',
(date(1982, 6, 27), '%Y.%M.%D'), None, "Not a date at all.",
valerrtest, overflowtest
)
result_list = ('today', 'tomorrow', 'yesterday', someday_result, '02/26/1984',
date(1982, 6, 27).strftime('%Y.%M.%D'), None, "Not a date at all.",
valerrtest, overflowtest
)
self.assertManyResults(time.naturalday, test_list, result_list)
def test_naturaldate(self):
tomorrow = today + one_day
yesterday = today - one_day
if today.month != 3:
someday = date(today.year, 3, 5)
someday_result = 'Mar 05'
else:
someday = date(today.year, 9, 5)
someday_result = 'Sep 05'
test_list = (today, tomorrow, yesterday, someday, date(1982, 6, 27))
result_list = ('today', 'tomorrow', 'yesterday', someday_result, 'Jun 27 1982')
self.assertManyResults(time.naturaldate, test_list, result_list)
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
4042a684d5c1f62a2bcd39fd6130f48e889cee75 | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2019/13/common.py | 671c2ca179b9cdb8fcb560b79faf104e864337eb | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from enum import Enum
import pandas as pd
class Tile(Enum):
EMPTY = 0
WALL = 1
BLOCK = 2
PADDLE = 3
BALL = 4
pass
tile_content = {
Tile.EMPTY: " ",
Tile.WALL: "|",
Tile.BLOCK: "#",
Tile.PADDLE: "_",
Tile.BALL: "o",
}
def print_game(tiles):
    """Render a sequence of (x, y, tile_code) triples as ASCII art on stdout."""
    frame = pd.DataFrame(tiles, columns=["x", "y", "c"])
    frame["cell"] = frame["c"].map(Tile)
    # Later writes to the same coordinate win (aggfunc="last").
    grid = frame.pivot_table(
        index="y",
        columns="x",
        values="cell",
        aggfunc="last",
    ).values
    for screen_row in grid:
        print("".join(tile_content[cell] for cell in screen_row))
| [
"jimhendy88@gmail.com"
] | jimhendy88@gmail.com |
c2dcce0e83d9778b483b0c915a696f05e3d0666b | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/LeetCode518.py | e1374f7eb905e1a098517a80b3af1c7968d5d55b | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | '''
518. Coin Change 2
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Note: You can assume that
0 <= amount <= 5000
1 <= coin <= 5000
the number of coins is less than 500
the answer is guaranteed to fit into signed 32-bit integer
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
'''
import collections
def LeetCode518(coins, amt):
    """Return every distinct combination of *coins* summing to *amt*.

    Each combination is a sorted list; the result preserves the original
    discovery order. NOTE: despite the LeetCode-518 name ("count the
    combinations"), this returns the combinations themselves, matching the
    original behaviour. Assumes *coins* is sorted ascending (the early
    ``break`` in the helper relies on it).
    """
    fnl_lst = []
    Combinations_recur(coins, fnl_lst, [], amt)
    return fnl_lst

def Combinations_recur(coins, fnl_lst, tmp, amt, start=0):
    """DFS helper: extend *tmp* with coins[start:] until *amt* is exhausted.

    *start* (new parameter, default 0 for backward compatibility) restricts
    each level to coin indices >= start, so every combination is generated
    exactly once in non-decreasing order instead of being produced in every
    permutation and de-duplicated afterwards (exponential waste in the
    original implementation). For ascending *coins* the output list and its
    ordering are unchanged.
    """
    if amt == 0:
        combo = sorted(tmp)
        if combo not in fnl_lst:  # safety net; duplicates cannot occur with *start*
            fnl_lst.append(combo)
        return
    for i in range(start, len(coins)):
        if coins[i] > amt:  # coins sorted ascending: no later coin can fit either
            break
        tmp.append(coins[i])
        Combinations_recur(coins, fnl_lst, tmp, amt - coins[i], i)
        tmp.pop()
def main():
    """Print the combination lists for the two sample cases."""
    for coins, amt in (([1, 2, 5], 5), ([2], 3)):
        print(LeetCode518(coins, amt))
# Run the sample cases only when executed as a script (no-op on import).
if __name__=='__main__':
    main()
"kartiksayee@gmail.com"
] | kartiksayee@gmail.com |
062a1ea6d662fa8571cfb07a3a76e6dd8640867c | 64d8d80c9a292f1552190af17cf1fe984968d5dc | /python/8kyu/8kyu - Calculate BMI.py | a2730d1915df136e63922afb1959c1b6dedf05a2 | [] | no_license | zurgis/codewars | 3acc880e0f3a40fc77532bcac537452d419fc268 | 045d74d6a36f4bc8a69a76dd3f21fef22c338ca2 | refs/heads/master | 2021-04-20T22:47:45.833147 | 2020-05-19T14:01:55 | 2020-05-19T14:01:55 | 249,723,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Write function bmi that calculates body mass index (bmi = weight / height ^ 2).
# if bmi <= 18.5 return "Underweight"
# if bmi <= 25.0 return "Normal"
# if bmi <= 30.0 return "Overweight"
# if bmi > 30 return "Obese"
def bmi(weight, height):
    """Classify body mass index (weight / height ** 2) into a category.

    weight: body mass in kilograms; height: height in metres.
    Returns 'Underweight' (<= 18.5), 'Normal' (<= 25.0),
    'Overweight' (<= 30.0) or 'Obese' (> 30).
    """
    index = weight / (height ** 2)
    # Inclusive upper bounds, checked in ascending order.
    thresholds = ((18.5, "Underweight"), (25.0, "Normal"), (30.0, "Overweight"))
    for upper_bound, category in thresholds:
        if index <= upper_bound:
            return category
    return "Obese"
"khdr437@gmail.com"
] | khdr437@gmail.com |
70b39f393505ebedcff0c5d67930ca3c6fb34989 | 0b3e9b3bd28a611ac4081931c8434590eba2898c | /DiabeticRetinopathyApp/DiabeticRetinopathy/settings.py | 06b2f24f286c039994631c25961621a5a61b2eda | [] | no_license | Ram-Aditya/Diabetic-Retinopathy-Application | 4b8d2fdf95cd554b1bd9305dcff2f719e3326f95 | aafd1b858a213f53d5f3bb80f216533b23f1d004 | refs/heads/master | 2022-12-03T03:23:23.340165 | 2019-11-25T02:43:26 | 2019-11-25T02:43:26 | 223,847,563 | 0 | 0 | null | 2022-11-22T04:50:56 | 2019-11-25T02:42:09 | Python | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for DiabeticRetinopathy project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control - rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'yr^xgob_*_1n*$873^6-y_4m4gr_+e=$i145xfx1)_65du6pjv'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True combined with ALLOWED_HOSTS=['*'] below is a
# development-only configuration.
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'api',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DiabeticRetinopathy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DiabeticRetinopathy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'api/static')
# Uploaded-media location served under /media/.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
bd93eb04763d5659ef3de8964ef2be3b14b6a7d4 | cec0cdfbd057c2d2ba153aa6f163adb250565e9a | /Core_Python_Programming/chapter-2/simple/tsUclnt.py | fecf61afe58d25842614847570dd8ee0473fa0d1 | [] | no_license | Best1s/python_re | 91117cd5b1f896c2b2f3987f1625663aa1952354 | abd526743c67a1bf72ddce39a0268b8e9fe15d26 | refs/heads/master | 2020-05-05T13:37:41.428881 | 2020-02-25T03:41:00 | 2020-02-25T03:41:00 | 180,086,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/env python
from socket import *
# NOTE(review): Python 2 script (raw_input, print statement) - it will not
# run under Python 3 unmodified.
HOST = 'localhost'
PORT = 22223
BUFSIZ = 1024                       # receive buffer size in bytes
ADDR = (HOST,PORT)
# Connectionless UDP client socket.
udpCliSock = socket(AF_INET,SOCK_DGRAM)
while True:
    data = raw_input('> ')
    if not data:                    # empty input line ends the session
        break
    udpCliSock.sendto(data,ADDR)
    # Wait for the server's reply; also rebinds ADDR to the sender address.
    data,ADDR = udpCliSock.recvfrom(BUFSIZ)
    if not data:
        break
    print data
udpCliSock.close()
"best.oneself@foxmail.com"
] | best.oneself@foxmail.com |
776eba5668f5e00eb940c9681271776ff85d72c6 | ff984452d6c3584e230f09885000a123ebf02d78 | /venv/bin/python-config | a58bde950ef6ec83b31a881e89bf0b1377743b8e | [
"MIT"
] | permissive | dushyantRathore/Image_Downloader | 6ba2822a59ae1f59d60fd532c2890b1b8493b1ea | 177b89b84d2c792e2892977816093474812a002c | refs/heads/master | 2021-01-11T21:46:46.534190 | 2017-03-14T14:03:18 | 2017-03-14T14:03:18 | 78,847,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | #!/home/dushyant/Desktop/Github/Image_Downloader/venv/bin/python
import sys
import getopt
import sysconfig
# NOTE(review): generated `python-config` helper committed from a virtualenv's
# bin/ directory; venv contents generally should not be version-controlled.
# Long options accepted on the command line (each used as "--<name>").
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print the usage line on stderr and terminate with *code*.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of build/link configuration per requested flag, in the order
# the flags were given.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # Older config variable name, pre-3.4.
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"dushyant.bgs@gmail.com"
] | dushyant.bgs@gmail.com | |
21d38e67dbf5c49ef0e73d458e7debff36a17027 | d46b04d4a25a4a1fbabe092c93f7ae3302e94b27 | /py/ztools/Fs/Nca.py | e2145aef163d91efbfa10c985247558afff5c0b1 | [
"MIT"
] | permissive | SwitchTools/NSC_BUILDER | 633867070609b0eb62caf1e4fce18dc147de8f0b | 7f4e96e3417f7a1156bb269ebaa869ccd30f9e0d | refs/heads/master | 2022-12-31T07:20:37.122435 | 2020-10-08T12:22:15 | 2020-10-08T12:22:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104,513 | py | import aes128
import Title
import Titles
import Hex
import math
from binascii import hexlify as hx, unhexlify as uhx
from struct import pack as pk, unpack as upk
from hashlib import sha256
import Fs.Type
import os
import re
import pathlib
import Keys
import Config
import Print
import Nsps
from tqdm import tqdm
import Fs
from Fs import Type
from Fs.File import File
from Fs.File import MemoryFile
from Fs.Rom import Rom
from Fs.Pfs0 import Pfs0
from Fs.BaseFs import BaseFs
from Fs.Ticket import Ticket
from Fs.Nacp import Nacp
import sq_tools
import pykakasi
from Fs.pyNCA3 import NCA3
import io
from googletrans import Translator
MEDIA_SIZE = 0x200                  # NCA section offsets are stored in 0x200-byte media units
RSA_PUBLIC_EXPONENT = 0x10001
FS_HEADER_LENGTH = 0x200            # size of one FS-section header inside the NCA header
# Fixed RSA-2048 moduli used to verify NCA header signatures (key generations 0 and 1).
nca_header_fixed_key_modulus_00 = 0xBFBE406CF4A780E9F07D0C99611D772F96BC4B9E58381B03ABB175499F2B4D5834B005A37522BE1A3F0373AC7068D116B904465EB707912F078B26DEF60007B2B451F80D0A5E58ADEBBC9AD649B964EFA782B5CF6D7013B00F85F6A908AA4D676687FA89FF7590181E6B3DE98A68C92604D980CE3F5E92CE01FF063BF2C1A90CCE026F16BC92420A4164CD52B6344DAEC02EDEA4DF27683CC1A060AD43F3FC86C13E6C46F77C299FFAFDF0E3CE64E735F2F656566F6DF1E242B08340A5C3202BCC9AAECAED4D7030A8701C70FD1363290279EAD2A7AF3528321C7BE62F1AAA407E328C2742FE8278EC0DEBE6834B6D8104401A9E9A67F67229FA04F09DE4F403
nca_header_fixed_key_modulus_01 = 0xADE3E1FA0435E5B6DD49EA8929B1FFB643DFCA96A04A13DF43D9949796436548705833A27D357B96745E0B5C32181424C258B36C227AA1B7CB90A7A3F97D4516A5C8ED8FAD395E9E4B51687DF80C35C63F91AE44A592300D46F840FFD0FF06D21C7F9618DCB71D663ED173BC158A2F94F300C183F1CDD78188ABDF8CEF97DD1B175F58F69AE9E8C22F3815F52107F837905D2E024024150D25B7265D09CC4CF4F21B94705A9EEEED7777D45199F5DC761EE36C8CD112D457D1B683E4E4FEDAE9B43B33E5378ADFB57F89F19B9EB015B23AFEEA61845B7D4B23120B8312F2226BB922964B260B635E965752A3676422CAD0563E74B5981F0DF8B334E698685AAD
indent = 1
tabs = '\t' * indent                # default indentation for Print.info output
from Crypto.Hash import SHA256
from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5, PKCS1_PSS
class SectionTableEntry:
	"""One entry of the NCA header's section table.

	The raw 0x10-byte entry stores the section start/end as 0x200-byte media
	units; byte offsets are derived by scaling with MEDIA_SIZE.
	"""

	def __init__(self, d):
		def u32(off):
			# Little-endian unsigned 32-bit field at *off* within the entry.
			return int.from_bytes(d[off:off + 4], 'little')

		self.mediaOffset = u32(0x0)
		self.mediaEndOffset = u32(0x4)
		self.offset = self.mediaOffset * MEDIA_SIZE
		self.endOffset = self.mediaEndOffset * MEDIA_SIZE
		self.unknown1 = u32(0x8)
		self.unknown2 = u32(0xc)
		self.sha1 = None
def GetSectionFilesystem(buffer, cryptoKey):
	"""Instantiate the filesystem wrapper matching one FS-section header.

	Byte 0x3 of the section header selects the partition type; anything
	that is neither PFS0 nor ROMFS falls back to the generic BaseFs.
	"""
	fsType = buffer[0x3]
	if fsType == Fs.Type.Fs.PFS0:
		fsClass = Fs.Pfs0
	elif fsType == Fs.Type.Fs.ROMFS:
		fsClass = Fs.Rom
	else:
		fsClass = BaseFs
	return fsClass(buffer, cryptoKey = cryptoKey)
class NcaHeader(File):
	"""Parsed view of the decrypted 0xC00-byte NCA header.

	The accessors below seek to fixed offsets within the decrypted header:
	0x204 gamecard flag, 0x206/0x220 the two key-generation fields, 0x210
	title ID, 0x230 rights ID, 0x300 encrypted key area, 0x400 first
	FS-section header.
	"""
	def __init__(self, path = None, mode = None, cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
		# All fields are populated by open(); initialise placeholders first.
		self.signature1 = None
		self.signature2 = None
		self.magic = None
		self.isGameCard = None
		self.contentType = None
		self.cryptoType = None
		self.keyIndex = None
		self.size = None
		self.titleId = None
		self.sdkVersion1 = None
		self.sdkVersion2 = None
		self.sdkVersion3 = None
		self.sdkVersion4 = None
		self.cryptoType2 = None
		self.sigKeyGen = None
		self.rightsId = None
		self.titleKeyDec = None
		self.masterKey = None
		self.sectionTables = []
		self.keys = []
		super(NcaHeader, self).__init__(path, mode, cryptoType, cryptoKey, cryptoCounter)
	def open(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
		"""Read the fixed-layout header fields, section tables and the key area."""
		super(NcaHeader, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
		self.rewind()
		self.signature1 = self.read(0x100)	# RSA-2048 header signature
		self.signature2 = self.read(0x100)	# second RSA-2048 signature
		self.magic = self.read(0x4)
		self.isGameCard = self.readInt8()
		self.contentType = self.readInt8()
		try:
			# Map the raw byte onto the Content enum when possible.
			self.contentType = Fs.Type.Content(self.contentType)
		except:
			# NOTE(review): bare except - the raw int is kept when the value
			# is not a known Content member.
			pass
		self.cryptoType = self.readInt8()
		self.keyIndex = self.readInt8()
		self.size = self.readInt64()
		# Title ID is stored little-endian; reverse and render as upper hex.
		self.titleId = hx(self.read(8)[::-1]).decode('utf-8').upper()
		self.readInt32() # padding
		self.sdkVersion1 = self.readInt8()
		self.sdkVersion2 = self.readInt8()
		self.sdkVersion3 = self.readInt8()
		self.sdkVersion4 = self.readInt8()
		self.cryptoType2 = self.readInt8()
		self.sigKeyGen = self.readInt8()
		self.read(0xE) # padding
		self.rightsId = hx(self.read(0x10))
		if self.magic not in [b'NCA3', b'NCA2']:
			raise Exception('Failed to decrypt NCA header: ' + str(self.magic))
		self.sectionHashes = []
		for i in range(4):
			self.sectionTables.append(SectionTableEntry(self.read(0x10)))
		for i in range(4):
			# NOTE(review): this stores the table entries themselves, not
			# separate hash values - confirm intended.
			self.sectionHashes.append(self.sectionTables[i])
		# Effective key generation is the larger of the two crypto-type fields.
		if self.cryptoType > self.cryptoType2:
			self.masterKeyRev=self.cryptoType
		else:
			self.masterKeyRev=self.cryptoType2
		self.masterKey = self.masterKeyRev-1
		if self.masterKey < 0:
			self.masterKey = 0
		self.encKeyBlock = self.getKeyBlock()
		#for i in range(4):
		#	offset = i * 0x10
		#	key = encKeyBlock[offset:offset+0x10]
		#	Print.info('enc %d: %s' % (i, hx(key)))
		if Keys.keyAreaKey(self.masterKey, self.keyIndex):
			# Decrypt the 0x40-byte key area (four 0x10-byte key slots)
			# with the matching key-area key.
			crypto = aes128.AESECB(Keys.keyAreaKey(self.masterKey, self.keyIndex))
			self.keyBlock = crypto.decrypt(self.encKeyBlock)
			self.keys = []
			for i in range(4):
				offset = i * 0x10
				key = self.keyBlock[offset:offset+0x10]
				#Print.info('dec %d: %s' % (i, hx(key)))
				self.keys.append(key)
		else:
			self.keys = [None, None, None, None, None, None, None]
		if self.hasTitleRights():
			# Titlekey crypto: resolve the decrypted title key from the
			# Titles database when available.
			if self.titleId.upper() in Titles.keys() and Titles.get(self.titleId.upper()).key:
				rid=self.titleId.upper()
				if not str(self.titleId).endswith('0'):
					# NOTE(review): `nca` is not defined in this scope; this
					# branch would raise NameError if reached - confirm.
					if str(nca.header.contentType) == 'Content.PROGRAM' or str(nca.header.contentType) == 'Content.DATA':
						rid=str(self.titleId.upper())[:-1]+'0'
				self.titleKeyDec = Keys.decryptTitleKey(uhx(Titles.get(rid).key), self.masterKey)
			else:
				pass
				#Print.info('could not find title key!')
		else:
			self.titleKeyDec = self.key()
	def key(self):
		# Always returns key-area slot 2.
		return self.keys[2]
		# NOTE(review): unreachable - superseded by the fixed slot above.
		return self.keys[self.cryptoType]
	def hasTitleRights(self):
		# True when the rights-ID hex string is non-zero (titlekey crypto).
		return self.rightsId != (b'0' * 32)
	def getTitleID(self):
		# 0x210: title ID, stored little-endian on disk.
		self.seek(0x210)
		return self.read(8)[::-1]
	def setTitleID(self,tid):
		self.seek(0x210)
		tid=bytes.fromhex(tid)[::-1]
		return self.write(tid)
	def getKeyBlock(self):
		# 0x300: full 0x40-byte encrypted key area.
		self.seek(0x300)
		return self.read(0x40)
	def getKB1L(self):
		# 0x300: first 0x10-byte slot of the key area only.
		self.seek(0x300)
		return self.read(0x10)
	def setKeyBlock(self, value):
		if len(value) != 0x40:
			raise IOError('invalid keyblock size')
		self.seek(0x300)
		return self.write(value)
	def getCryptoType(self):
		self.seek(0x206)
		return self.readInt8()
	def setCryptoType(self, value):
		self.seek(0x206)
		self.writeInt8(value)
	def setgamecard(self, value):
		self.seek(0x204)
		self.writeInt8(value)
	def getgamecard(self):
		self.seek(0x204)
		return self.readInt8()
	def getCryptoType2(self):
		self.seek(0x220)
		return self.readInt8()
	def setCryptoType2(self, value):
		self.seek(0x220)
		self.writeInt8(value)
	def getSigKeyGen(self):
		self.seek(0x221)
		return self.readInt8()
	def setSigKeyGen(self, value):
		self.seek(0x221)
		self.writeInt8(value)
	def getRightsId(self):
		self.seek(0x230)
		return self.readInt128('big')
	def setRightsId(self, value):
		self.seek(0x230)
		self.writeInt128(value, 'big')
	def get_hblock_hash(self):
		# 0x280: stored SHA-256 of the first FS-section header.
		self.seek(0x280)
		return self.read(0x20)
	def set_hblock_hash(self, value):
		self.seek(0x280)
		return self.write(value)
	def calculate_hblock_hash(self):
		"""SHA-256 over the first FS-section header (0x400..0x600)."""
		indent = 2
		tabs = '\t' * indent
		self.seek(0x400)
		hblock = self.read(0x200)
		sha=sha256(hblock).hexdigest()
		sha_hash= bytes.fromhex(sha)
		#Print.info(tabs + 'calculated header block hash: ' + str(hx(sha_hash)))
		return sha_hash
	# The remaining accessors read fields of the first FS-section header
	# (base offset 0x400).
	def get_hblock_version(self):
		self.seek(0x400)
		return self.read(0x02)
	def get_hblock_filesystem(self):
		self.seek(0x403)
		return self.read(0x01)
	def get_hblock_hash_type(self):
		self.seek(0x404)
		return self.read(0x01)
	def get_hblock_crypto_type(self):
		self.seek(0x405)
		return self.read(0x01)
	def get_htable_hash(self):
		self.seek(0x408)
		return self.read(0x20)
	def set_htable_hash(self, value):
		self.seek(0x408)
		return self.write(value)
	def get_hblock_block_size(self):
		self.seek(0x428)
		return self.readInt32()
	def get_hblock_uk1(self):
		self.seek(0x42C)
		return self.read(0x04)
	def get_htable_offset(self):
		self.seek(0x430)
		return self.readInt64()
	def get_htable_size(self):
		self.seek(0x438)
		return self.readInt64()
	def get_pfs0_offset(self):
		self.seek(0x440)
		return self.readInt64()
	def get_pfs0_size(self):
		self.seek(0x448)
		return self.readInt64()
class Nca(File):
def __init__(self, path = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
self.header = None
self.sectionFilesystems = []
super(Nca, self).__init__(path, mode, cryptoType, cryptoKey, cryptoCounter)
def __iter__(self):
return self.sectionFilesystems.__iter__()
def __getitem__(self, key):
return self.sectionFilesystems[key]
def open(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.header = NcaHeader()
self.partition(0x0, 0xC00, self.header, Fs.Type.Crypto.XTS, uhx(Keys.get('header_key')))
#Print.info('partition complete, seeking')
self.header.seek(0x400)
#Print.info('reading')
#Hex.dump(self.header.read(0x200))
#exit()
for i in range(4):
fs = GetSectionFilesystem(self.header.read(0x200), cryptoKey = self.header.titleKeyDec)
#Print.info('fs type = ' + hex(fs.fsType))
#Print.info('fs crypto = ' + hex(fs.cryptoType))
#Print.info('st end offset = ' + str(self.header.sectionTables[i].endOffset - self.header.sectionTables[i].offset))
#Print.info('fs offset = ' + hex(self.header.sectionTables[i].offset))
#Print.info('fs section start = ' + hex(fs.sectionStart))
#Print.info('titleKey = ' + str(hx(self.header.titleKeyDec)))
try:
self.partition(self.header.sectionTables[i].offset + fs.sectionStart, self.header.sectionTables[i].endOffset - self.header.sectionTables[i].offset, fs, cryptoKey = self.header.titleKeyDec)
except BaseException as e:
pass
#Print.info(e)
#raise
if fs.fsType:
self.sectionFilesystems.append(fs)
self.titleKeyDec = None
self.masterKey = None
def get_hblock(self):
version = self.header.get_hblock_version()
Print.info('version: ' + str(int.from_bytes(version, byteorder='little')))
filesystem = self.header.get_hblock_filesystem()
Print.info('filesystem: ' + str(int.from_bytes(filesystem, byteorder='little')))
hash_type = self.header.get_hblock_hash_type()
Print.info('hash type: ' + str(int.from_bytes(hash_type, byteorder='little')))
crypto_type = self.header.get_hblock_crypto_type()
Print.info('crypto type: ' + str(int.from_bytes(crypto_type, byteorder='little')))
hash_from_htable = self.header.get_htable_hash()
Print.info('hash from hash table: ' + str(hx(hash_from_htable)))
block_size = self.header.get_hblock_block_size()
Print.info('block size in bytes: ' + str(hx(block_size.to_bytes(8, byteorder='big'))))
v_unkn1 = self.header.get_hblock_uk1()
htable_offset = self.header.get_htable_offset()
Print.info('hash table offset: ' + str(hx(htable_offset.to_bytes(8, byteorder='big'))))
htable_size = self.header.get_htable_size()
Print.info('Size of hash-table: ' + str(hx(htable_size.to_bytes(8, byteorder='big'))))
pfs0_offset = self.header.get_pfs0_offset()
Print.info('Pfs0 offset: ' + str(hx(pfs0_offset.to_bytes(8, byteorder='big'))))
pfs0_size = self.header.get_pfs0_size()
Print.info('Pfs0 size: ' + str(hx(pfs0_size.to_bytes(8, byteorder='big'))))
def get_pfs0_hash_data(self):
block_size = self.header.get_hblock_block_size()
#Print.info('block size in bytes: ' + str(hx(block_size.to_bytes(8, byteorder='big'))))
pfs0_size = self.header.get_pfs0_size()
#Print.info('Pfs0 size: ' + str(hx(pfs0_size.to_bytes(8, byteorder='big'))))
multiplier=pfs0_size/block_size
multiplier=math.ceil(multiplier)
#Print.info('Multiplier: ' + str(multiplier))
return pfs0_size,block_size,multiplier
def pfs0_MULT(self):
block_size = self.header.get_hblock_block_size()
#Print.info('block size in bytes: ' + str(hx(block_size.to_bytes(8, byteorder='big'))))
pfs0_size = self.header.get_pfs0_size()
#Print.info('Pfs0 size: ' + str(hx(pfs0_size.to_bytes(8, byteorder='big'))))
multiplier=pfs0_size/block_size
multiplier=math.ceil(multiplier)
#Print.info('Multiplier: ' + str(multiplier))
return multiplier
def get_pfs0_hash(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):
mult=self.pfs0_MULT()
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(0xC00+self.header.get_htable_offset())
hash_from_pfs0=self.read(0x20*mult)
return hash_from_pfs0
def extract(self,ofolder,buffer):
ncaname = str(self._path)[:-4]+'_nca'
ncafolder = os.path.join(ofolder,ncaname)
if not os.path.exists(ncafolder):
os.makedirs(ncafolder)
nca3 = NCA3(open(str(self._path), 'rb'))
nca3.extract_conts(ncafolder, disp=True)
def calc_htable_hash(self):
indent = 2
tabs = '\t' * indent
htable = self.get_pfs0_hash()
sha=sha256(htable).hexdigest()
sha_hash= bytes.fromhex(sha)
#Print.info(tabs + 'calculated table hash: ' + str(hx(sha_hash)))
return sha_hash
def calc_pfs0_hash(self, file = None, mode = 'rb'):
mult=self.pfs0_MULT()
indent = 2
tabs = '\t' * indent
for f in self:
cryptoType2=f.get_cryptoType()
cryptoKey2=f.get_cryptoKey()
cryptoCounter2=f.get_cryptoCounter()
super(Nca, self).open(file, mode, cryptoType2, cryptoKey2, cryptoCounter2)
pfs0_offset = self.header.get_pfs0_offset()
pfs0_size = self.header.get_pfs0_size()
block_size = self.header.get_hblock_block_size()
self.seek(0xC00+self.header.get_htable_offset()+pfs0_offset)
if mult>1:
pfs0=self.read(block_size)
else:
pfs0=self.read(pfs0_size)
sha=sha256(pfs0).hexdigest()
#Print.info('caculated hash from pfs0: ' + sha)
sha_signature = bytes.fromhex(sha)
#Print.info(tabs + 'calculated hash from pfs0: ' + str(hx(sha_signature)))
return sha_signature
def set_pfs0_hash(self,value):
file = None
mode = 'r+b'
for f in self:
cryptoType2=f.get_cryptoType()
cryptoKey2=f.get_cryptoKey()
cryptoCounter2=f.get_cryptoCounter()
super(Nca, self).open(file, mode, cryptoType2, cryptoKey2, cryptoCounter2)
self.seek(0xC00+self.header.get_htable_offset())
self.write(value)
def test(self,titleKeyDec, file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
self.header = NcaHeader()
self.partition(0x0, 0xC00, self.header, Fs.Type.Crypto.XTS, uhx(Keys.get('header_key')))
Print.info('partition complete, seeking')
self.rewind()
self.header.seek(0x400)
Print.info('reading')
Hex.dump(self.header.read(0x200))
#exit()
for i in range(4):
fs = GetSectionFilesystem(self.header.read(0x200), cryptoKey = titleKeyDec)
Print.info('fs type = ' + hex(fs.fsType))
Print.info('fs crypto = ' + hex(fs.cryptoType))
Print.info('st end offset = ' + str(self.header.sectionTables[i].endOffset - self.header.sectionTables[i].offset))
Print.info('fs offset = ' + hex(self.header.sectionTables[i].offset))
Print.info('fs section start = ' + hex(fs.sectionStart))
Print.info('titleKey = ' + str(hx(titleKeyDec)))
try:
self.partition(self.header.sectionTables[i].offset + fs.sectionStart, self.header.sectionTables[i].endOffset - self.header.sectionTables[i].offset, fs, cryptoKey = titleKeyDec)
except BaseException as e:
pass
#Print.info(e)
#raise
if fs.fsType:
self.sectionFilesystems.append(fs)
for f in self:
print(type(f))
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
for g in f:
print(type(g))
if type(g) == File:
print(str(g._path))
print(g.read(0x10))
def pr_noenc_check(self, file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
check = False
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
for g in f:
if type(g) == File:
if (str(g._path)) == 'main.npdm':
check = True
break
if check==False:
for f in self:
if f.fsType == Type.Fs.ROMFS and f.cryptoType == Type.Crypto.CTR:
if f.magic==b'IVFC':
check=True
return check
def pr_noenc_check_dlc(self, file = None, mode = 'rb'):
crypto1=self.header.getCryptoType()
crypto2=self.header.getCryptoType2()
if crypto1 == 2:
if crypto1 > crypto2:
masterKeyRev=crypto1
else:
masterKeyRev=crypto2
else:
masterKeyRev=crypto2
decKey = Keys.decryptTitleKey(self.header.titleKeyDec, Keys.getMasterKeyIndex(masterKeyRev))
for f in self.sectionFilesystems:
#print(f.fsType);print(f.cryptoType)
if f.fsType == Type.Fs.ROMFS and f.cryptoType == Type.Crypto.CTR:
ncaHeader = NcaHeader()
self.header.rewind()
ncaHeader = self.header.read(0x400)
#Hex.dump(ncaHeader)
pfs0=f
#Hex.dump(pfs0.read())
sectionHeaderBlock = f.buffer
levelOffset = int.from_bytes(sectionHeaderBlock[0x18:0x20], byteorder='little', signed=False)
levelSize = int.from_bytes(sectionHeaderBlock[0x20:0x28], byteorder='little', signed=False)
pfs0Header = pfs0.read(levelSize)
if sectionHeaderBlock[8:12] == b'IVFC':
data = pfs0Header;
#Hex.dump(pfs0Header)
if hx(sectionHeaderBlock[0xc8:0xc8+0x20]).decode('utf-8') == str(sha256(data).hexdigest()):
return True
else:
return False
else:
data = pfs0Header;
#Hex.dump(pfs0Header)
magic = pfs0Header[0:4]
if magic != b'PFS0':
return False
else:
return True
def get_cnmt_titleid(self, file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset)
titleid=self.read(8)[::-1]
return titleid
def get_cnmt_updateid(self, file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset+0x20)
updtitleid=self.read(8)[::-1]
return updtitleid
def write_cnmt_titleid(self, value,file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset)
titleid=bytes.fromhex(value)[::-1]
self.write(titleid)
return(titleid)
def exchange_idoffset(self,old,new,file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset+0xE)
offset=self.readInt16()
content_entries=self.readInt16()
self.seek(cmt_offset+offset+0x20)
for i in range(content_entries):
cc=i+1
vhash = self.read(0x20)
NcaId = self.read(0x10)
size = self.read(0x6)
ncatype = self.read(0x1)
IdOffset = self.read(0x1)
IdOffset=int.from_bytes(IdOffset, byteorder='little', signed=True)
print(IdOffset)
# print(self.tell())
if IdOffset==int(old):
print(IdOffset)
self.seek(self.tell()-0x1)
try:
self.write(bytes.fromhex(str(new))[::-1])
except:
self.write(bytes.fromhex('0'+str(new))[::-1])
elif IdOffset==int(new):
self.seek(self.tell()-0x1)
print(IdOffset)
try:
self.write(bytes.fromhex(str(old))[::-1])
except:
self.write(bytes.fromhex('0'+str(old))[::-1])
def write_cnmt_updid(self,value,file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset+0x20)
updid=bytes.fromhex(value)[::-1]
self.write(updid)
return(updid)
def get_req_system(self, file = None, mode = 'rb'):
indent = 1
tabs = '\t' * indent
for f in self:
cryptoType=f.get_cryptoType()
cryptoKey=f.get_cryptoKey()
cryptoCounter=f.get_cryptoCounter()
pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
self.seek(pfs0_offset+0x8)
pfs0_table_size=self.readInt32()
cmt_offset=pfs0_offset+0x28+pfs0_table_size
self.seek(cmt_offset+0x28)
min_sversion=self.readInt32()
#Print.info(tabs + 'RequiredSystemVersion = ' + str(min_sversion))
return min_sversion
def write_req_system(self, verNumber):
    """Overwrite the RequiredSystemVersion field (offset +0x28) of the CNMT
    inside this meta NCA and return the value read back after writing.

    verNumber -- new required system version, written as a 32-bit int.
    """
    file = None
    mode = 'r+b'
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    # PFS0 superblock starts 0xC00 past the header tables.
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    # CNMT payload begins after the single-entry PFS0 header + string table.
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    self.seek(cmt_offset+0x28)
    self.writeInt32(verNumber)
    # Read back to confirm the write landed.
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    return min_sversion
def write_version(self, verNumber):
    """Overwrite the title-version field of the CNMT inside this meta NCA.

    verNumber -- new version, written as 4 little-endian bytes right after
    the 8-byte title id.
    Returns the 4 version bytes read back after writing.
    """
    file = None
    mode = 'r+b'
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    # PFS0 superblock starts 0xC00 past the header tables.
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    # CNMT payload begins after the single-entry PFS0 header + string table.
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    # Reading the title id advances the cursor to the version field.
    titleid=self.readInt64()
    tnumber = verNumber.to_bytes(0x04, byteorder='little')
    titleversion = self.write(tnumber)
    # Read back to confirm the write landed.
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    Print.info('version = ' + str(int.from_bytes(titleversion, byteorder='little')))
    return titleversion
def removeTitleRightsnca(self, masterKeyRev, titleKeyDec):
    """Strip title-rights from this NCA header: clear the rights id and
    store the decrypted title key re-encrypted into the header key block.

    masterKeyRev -- master key revision used to derive the key-area key.
    titleKeyDec  -- decrypted 16-byte title key.
    """
    Print.info('titleKeyDec =\t' + str(hx(titleKeyDec)))
    Print.info('masterKeyRev =\t' + hex(masterKeyRev))
    Print.info('writing masterKeyRev for %s, %d' % (str(self._path), masterKeyRev))
    # ECB cipher keyed with the key-area key for this header's key index.
    crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex))
    # Fill the key block with four copies of the title key, encrypted.
    encKeyBlock = crypto.encrypt(titleKeyDec * 4)
    self.header.setRightsId(0)
    self.header.setKeyBlock(encKeyBlock)
    Hex.dump(encKeyBlock)
def printtitleId(self, indent = 0):
    """Print this NCA's title id (the `indent` argument is unused)."""
    title_id = self.header.titleId
    Print.info(str(title_id))
def print_nca_type(self, indent = 0):
    """Print this NCA's content type (the `indent` argument is unused)."""
    content_type = self.header.contentType
    Print.info(str(content_type))
def cardstate(self, indent = 0):
    """Print the isGameCard header flag in hex (the `indent` argument is unused)."""
    flag = self.header.isGameCard
    Print.info(hex(flag))
def read_pfs0_header(self, file = None, mode = 'rb'):
    """Print the PFS0 superblock header and its per-file entries (debug aid)."""
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    # PFS0 superblock starts 0xC00 past the header tables.
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset)
    pfs0_magic = self.read(4)
    pfs0_nfiles=self.readInt32()
    pfs0_table_size=self.readInt32()
    pfs0_reserved=self.read(0x4)
    Print.info('PFS0 Magic = ' + str(pfs0_magic))
    Print.info('PFS0 number of files = ' + str(pfs0_nfiles))
    Print.info('PFS0 string table size = ' + str(hx(pfs0_table_size.to_bytes(4, byteorder='big'))))
    for i in range(pfs0_nfiles):
        Print.info('........................')
        Print.info('PFS0 Content number ' + str(i+1))
        Print.info('........................')
        f_offset = self.readInt64()
        Print.info('offset = ' + str(hx(f_offset.to_bytes(8, byteorder='big'))))
        f_size = self.readInt32()
        # Fix: print this entry's own size (previously printed pfs0_table_size).
        Print.info('Size =\t' + str(hx(f_size.to_bytes(4, byteorder='big'))))
        filename_offset = self.readInt32()
        # Fix: print the filename offset (previously re-printed f_offset).
        Print.info('offset of filename = ' + str(hx(filename_offset.to_bytes(4, byteorder='big'))))
        f_reserved= self.read(0x4)
def read_cnmt(self, file = None, mode = 'rb'):
    """Parse the CNMT inside this meta NCA, print a human-readable dump and
    return the same dump as one string ('feed')."""
    feed=''
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    # PFS0 superblock starts 0xC00 past the header tables.
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    pfs0_size = self.header.get_pfs0_size()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    # CNMT payload begins after the single-entry PFS0 header + string table.
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    type_n = self.read(0x1)
    self.seek(cmt_offset+0xE)
    offset=self.readInt16()  # used below as the content-table offset (relative, +0x20)
    content_entries=self.readInt16()
    meta_entries=self.readInt16()
    self.seek(cmt_offset+0x20)
    original_ID=self.readInt64()
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    length_of_emeta=self.readInt32()
    basename=str(os.path.basename(os.path.abspath(self._path)))
    message='...........................................';print(message);feed+=message+'\n'
    message='Reading: ' + str(basename);print(message);feed+=message+'\n'
    message='...........................................';print(message);feed+=message+'\n'
    message='Titleid = ' + (str(hx(titleid.to_bytes(8, byteorder='big')))[2:-1]).upper();print(message);feed+=message+'\n'
    message='Version = ' + str(int.from_bytes(titleversion, byteorder='little'));print(message);feed+=message+'\n'
    message='Table offset = '+ str(hx((offset+0x20).to_bytes(2, byteorder='big')));print(message);feed+=message+'\n'
    message='Number of content = '+ str(content_entries);print(message);feed+=message+'\n'
    message='Number of meta entries = '+ str(meta_entries);print(message);feed+=message+'\n'
    message='Application id\Patch id = ' + (str(hx(original_ID.to_bytes(8, byteorder='big')))[2:-1]).upper();print(message);feed+=message+'\n'
    message='RequiredVersion = ' + str(min_sversion);print(message);feed+=message+'\n'
    message='Length of exmeta = ' + str(length_of_emeta);print(message);feed+=message+'\n'
    # Content records: 0x38 bytes each (hash, NcaId, size, type, id offset).
    self.seek(cmt_offset+offset+0x20)
    for i in range(content_entries):
        message='........................';print(message);feed+=message+'\n'
        message='Content number ' + str(i+1);print(message);feed+=message+'\n'
        message='........................';print(message);feed+=message+'\n'
        vhash = self.read(0x20)
        message='Hash =\t' + str(hx(vhash));print(message);feed+=message+'\n'
        NcaId = self.read(0x10)
        message='NcaId =\t' + str(hx(NcaId));print(message);feed+=message+'\n'
        size = self.read(0x6)
        message='Size =\t' + str(int.from_bytes(size, byteorder='little', signed=True));print(message);feed+=message+'\n'
        ncatype = self.read(0x1)
        message='Ncatype = ' + str(int.from_bytes(ncatype, byteorder='little', signed=True));print(message);feed+=message+'\n'
        IdOffset = self.read(0x1)
        message='IdOffset = ' + str(int.from_bytes(IdOffset, byteorder='little', signed=True));print(message);feed+=message+'\n'
    # Digest lives in the last 0x20 bytes of the PFS0 partition.
    self.seek(pfs0_offset+pfs0_size-0x20)
    digest = self.read(0x20)
    message='\ndigest= '+str(hx(digest))+'\n';print(message);feed+=message+'\n'
    # Extended (delta) metadata follows the content table, if present.
    self.seek(cmt_offset+offset+0x20+content_entries*0x38)
    if length_of_emeta>0:
        message='----------------';print(message);feed+=message+'\n'
        message='Extended meta';print(message);feed+=message+'\n'
        message='----------------';print(message);feed+=message+'\n'
        num_prev_cnmt=self.read(0x4)
        num_prev_delta=self.read(0x4)
        num_delta_info=self.read(0x4)
        num_delta_application =self.read(0x4)
        num_previous_content=self.read(0x4)
        num_delta_content=self.read(0x4)
        self.read(0x4)  # padding / unknown
        message='Number of previous cnmt entries = ' + str(int.from_bytes(num_prev_cnmt, byteorder='little'));print(message);feed+=message+'\n'
        message='Number of previous delta entries = ' + str(int.from_bytes(num_prev_delta, byteorder='little'));print(message);feed+=message+'\n'
        message='Number of delta info entries = ' + str(int.from_bytes(num_delta_info, byteorder='little'));print(message);feed+=message+'\n'
        message='Number of previous content entries = ' + str(int.from_bytes(num_previous_content, byteorder='little'));print(message);feed+=message+'\n'
        message='Number of delta content entries = ' + str(int.from_bytes(num_delta_content, byteorder='little'));print(message);feed+=message+'\n'
        for i in range(int.from_bytes(num_prev_cnmt, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Previous cnmt records: '+ str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            titleid=self.readInt64()
            titleversion = self.read(0x4)
            type_n = self.read(0x1)
            unknown1=self.read(0x3)
            vhash = self.read(0x20)
            unknown2=self.read(0x2)
            unknown3=self.read(0x2)
            unknown4=self.read(0x4)
            message='Titleid = ' + str(hx(titleid.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='Version = ' + str(int.from_bytes(titleversion, byteorder='little'));print(message);feed+=message+'\n'
            message='Type number = ' + str(hx(type_n));print(message);feed+=message+'\n'
            message='Hash =\t' + str(hx(vhash));print(message);feed+=message+'\n'
            message='Content nca number = ' + str(int.from_bytes(unknown2, byteorder='little'));print(message);feed+=message+'\n'
        for i in range(int.from_bytes(num_prev_delta, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Previous delta records: '+ str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            oldtitleid=self.readInt64()
            newtitleid=self.readInt64()
            oldtitleversion = self.read(0x4)
            newtitleversion = self.read(0x4)
            size = self.read(0x8)
            unknown1=self.read(0x8)
            message='Old titleid = ' + str(hx(oldtitleid.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='New titleid = ' + str(hx(newtitleid.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='Old version = ' + str(int.from_bytes(oldtitleversion, byteorder='little'));print(message);feed+=message+'\n'
            message='New version = ' + str(int.from_bytes(newtitleversion, byteorder='little'));print(message);feed+=message+'\n'
            message='Size = ' + str(int.from_bytes(size, byteorder='little', signed=True));print(message);feed+=message+'\n'
            #Print.info('unknown1 = ' + str(int.from_bytes(unknown1, byteorder='little')))
        for i in range(int.from_bytes(num_delta_info, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Delta info: '+ str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            oldtitleid=self.readInt64()
            newtitleid=self.readInt64()
            oldtitleversion = self.read(0x4)
            newtitleversion = self.read(0x4)
            index1=self.readInt64()
            index2=self.readInt64()
            message='Old titleid = ' + str(hx(oldtitleid.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='New titleid = ' + str(hx(newtitleid.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='Old version = ' + str(int.from_bytes(oldtitleversion, byteorder='little'));print(message);feed+=message+'\n'
            message='New version = ' + str(int.from_bytes(newtitleversion, byteorder='little'));print(message);feed+=message+'\n'
            message='Index1 = ' + str(hx(index1.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            message='Index2 = ' + str(hx(index2.to_bytes(8, byteorder='big')));print(message);feed+=message+'\n'
            #Print.info('unknown1 = ' + str(int.from_bytes(unknown1, byteorder='little')))
        for i in range(int.from_bytes(num_delta_application, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Delta application info: '+ str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            OldNcaId = self.read(0x10)
            NewNcaId = self.read(0x10)
            old_size = self.read(0x6)
            up2bytes = self.read(0x2)
            low4bytes = self.read(0x4)
            unknown1 = self.read(0x2)
            ncatype = self.read(0x1)
            installable = self.read(0x1)
            unknown2 = self.read(0x4)
            message='OldNcaId = ' + str(hx(OldNcaId));print(message);feed+=message+'\n'
            message='NewNcaId = ' + str(hx(NewNcaId));print(message);feed+=message+'\n'
            message='Old size = ' + str(int.from_bytes(old_size, byteorder='little', signed=True));print(message);feed+=message+'\n'
            message='Unknown1 = ' + str(int.from_bytes(unknown1, byteorder='little'));print(message);feed+=message+'\n'
            message='Ncatype = ' + str(int.from_bytes(ncatype, byteorder='little', signed=True));print(message);feed+=message+'\n'
            message='Installable = ' + str(int.from_bytes(installable, byteorder='little'));print(message);feed+=message+'\n'
            message='Upper 2 bytes of the new size=' + str(hx(up2bytes));print(message);feed+=message+'\n'
            message='Lower 4 bytes of the new size=' + str(hx(low4bytes));print(message);feed+=message+'\n'
            #Print.info('unknown2 =\t' + str(int.from_bytes(unknown2, byteorder='little')))
        for i in range(int.from_bytes(num_previous_content, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Previous content records: '+ str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            NcaId = self.read(0x10)
            size = self.read(0x6)
            ncatype = self.read(0x1)
            unknown1 = self.read(0x1)
            message='NcaId = '+ str(hx(NcaId));print(message);feed+=message+'\n'
            message='Size = '+ str(int.from_bytes(size, byteorder='little', signed=True));print(message);feed+=message+'\n'
            message='Ncatype = '+ str(int.from_bytes(ncatype, byteorder='little', signed=True));print(message);feed+=message+'\n'
            #Print.info('unknown1 = '+ str(int.from_bytes(unknown1, byteorder='little')))
        for i in range(int.from_bytes(num_delta_content, byteorder='little')):
            message='...........................................';print(message);feed+=message+'\n'
            message='Delta content entry ' + str(i+1);print(message);feed+=message+'\n'
            message='...........................................';print(message);feed+=message+'\n'
            vhash = self.read(0x20)
            message='Hash =\t' + str(hx(vhash));print(message);feed+=message+'\n'
            NcaId = self.read(0x10)
            message='NcaId =\t' + str(hx(NcaId));print(message);feed+=message+'\n'
            size = self.read(0x6)
            message='Size =\t' + str(int.from_bytes(size, byteorder='little', signed=True));print(message);feed+=message+'\n'
            ncatype = self.read(0x1)
            message='Ncatype = ' + str(int.from_bytes(ncatype, byteorder='little', signed=True));print(message);feed+=message+'\n'
            unknown = self.read(0x1)
    return feed
def xml_gen(self, ofolder, nsha):
    """Write a <ContentMeta> XML description of this CNMT NCA into `ofolder`
    and return the path of the generated .xml file.

    nsha -- hash string for this meta NCA, embedded verbatim in the XML.
    """
    file = None
    mode = 'rb'
    crypto1=self.header.getCryptoType()
    crypto2=self.header.getCryptoType2()
    # Effective key generation = max of the two header crypto fields.
    if crypto2>crypto1:
        keygeneration=crypto2
    if crypto2<=crypto1:
        keygeneration=crypto1
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    pfs0_size = self.header.get_pfs0_size()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    type_n = self.read(0x1)
    self.seek(cmt_offset+0xE)
    offset=self.readInt16()
    content_entries=self.readInt16()
    meta_entries=self.readInt16()
    self.seek(cmt_offset+0x18)
    RDSV=self.readInt64()
    self.seek(cmt_offset+0x20)
    original_ID=self.readInt64()
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    length_of_emeta=self.readInt32()
    self.seek(cmt_offset+offset+0x20)
    # Map the raw meta-type byte to its ContentMeta type name.
    # NOTE(review): hx() of bytes 0x01..0x05 renders as "b'01'".."b'05'",
    # so the single-digit comparisons below may never match -- confirm.
    if str(hx(type_n)) == "b'1'":
        type='SystemProgram'
    if str(hx(type_n)) == "b'2'":
        type='SystemData'
    if str(hx(type_n)) == "b'3'":
        type='SystemUpdate'
    if str(hx(type_n)) == "b'4'":
        type='BootImagePackage'
    if str(hx(type_n)) == "b'5'":
        type='BootImagePackageSafe'
    if str(hx(type_n)) == "b'80'":
        type='Application'
    if str(hx(type_n)) == "b'81'":
        type='Patch'
    if str(hx(type_n)) == "b'82'":
        type='AddOnContent'
    if str(hx(type_n)) == "b'83'":
        type='Delta'
    titleid=str(hx(titleid.to_bytes(8, byteorder='big')))
    titleid='0x'+titleid[2:-1]
    version = str(int.from_bytes(titleversion, byteorder='little'))
    RDSV=str(RDSV)
    # Output file name: swap the '.nca' suffix for '.xml'.
    xmlname = str(self._path)
    xmlname = xmlname[:-4] + '.xml'
    outfolder = str(ofolder)+'\\'
    textpath = os.path.join(outfolder, xmlname)
    with open(textpath, 'w+') as tfile:
        tfile.write('<?xml version="1.0" encoding="utf-8"?>' + '\n')
        tfile.write('<ContentMeta>' + '\n')
        tfile.write('  <Type>'+ type +'</Type>' + '\n')
        tfile.write('  <Id>'+ titleid +'</Id>' + '\n')
        tfile.write('  <Version>'+ version +'</Version>' + '\n')
        tfile.write('  <RequiredDownloadSystemVersion>'+ RDSV +'</RequiredDownloadSystemVersion>' + '\n')
    # One <Content> element per CNMT content record (0x38 bytes each).
    for i in range(content_entries):
        vhash = self.read(0x20)
        NcaId = self.read(0x10)
        size = self.read(0x6)
        ncatype = self.readInt8()
        unknown = self.read(0x1)
        if ncatype==0:
            type='Meta'
        if str(ncatype)=="1":
            type='Program'
        if ncatype==2:
            type='Data'
        if ncatype==3:
            type='Control'
        if ncatype==4:
            type='HtmlDocument'
        if ncatype==5:
            type='LegalInformation'
        if ncatype==6:
            type='DeltaFragment'
        NcaId=str(hx(NcaId))
        NcaId=NcaId[2:-1]
        size=str(int.from_bytes(size, byteorder='little'))
        vhash=str(hx(vhash))
        vhash=vhash[2:-1]
        with open(textpath, 'a') as tfile:
            tfile.write('  <Content>' + '\n')
            tfile.write('    <Type>'+ type +'</Type>' + '\n')
            tfile.write('    <Id>'+ NcaId +'</Id>' + '\n')
            tfile.write('    <Size>'+ size +'</Size>' + '\n')
            tfile.write('    <Hash>'+ vhash +'</Hash>' + '\n')
            tfile.write('    <KeyGeneration>'+ str(self.header.cryptoType2) +'</KeyGeneration>' + '\n')
            tfile.write('  </Content>' + '\n')
    # Digest lives in the last 0x20 bytes of the PFS0 partition.
    self.seek(pfs0_offset+pfs0_size-0x20)
    digest = str(hx(self.read(0x20)))
    digest=digest[2:-1]
    original_ID=str(hx(original_ID.to_bytes(8, byteorder='big')))
    original_ID='0x'+original_ID[2:-1]
    # The meta NCA itself is listed as a final <Content> entry; its id is
    # the filename minus the '.cnmt.nca' suffix.
    metaname=os.path.basename(os.path.abspath(self._path))
    metaname = metaname[:-9]
    size=str(os.path.getsize(self._path))
    with open(textpath, 'a') as tfile:
        tfile.write('  <Content>' + '\n')
        tfile.write('    <Type>'+ 'Meta' +'</Type>' + '\n')
        tfile.write('    <Id>'+ metaname +'</Id>' + '\n')
        tfile.write('    <Size>'+ size +'</Size>' + '\n')
        tfile.write('    <Hash>'+ nsha +'</Hash>' + '\n')
        tfile.write('    <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
        tfile.write('  </Content>' + '\n')
        tfile.write('  <Digest>'+ digest +'</Digest>' + '\n')
        tfile.write('  <KeyGenerationMin>'+ str(keygeneration) +'</KeyGenerationMin>' + '\n')
        tfile.write('  <RequiredSystemVersion>'+ str(min_sversion) +'</RequiredSystemVersion>' + '\n')
        tfile.write('  <OriginalId>'+ original_ID +'</OriginalId>' + '\n')
        tfile.write('</ContentMeta>')
    return textpath
def xml_gen_mod(self, ofolder, nsha, keygeneration):
    """Variant of xml_gen that takes the key generation explicitly and
    rewrites RequiredSystemVersion via sq_tools.getMinRSV. Returns the
    path of the generated .xml file.

    nsha          -- hash string for this meta NCA, embedded verbatim.
    keygeneration -- key generation to report in the XML.
    """
    file = None
    mode = 'rb'
    crypto1=self.header.getCryptoType()
    crypto2=self.header.getCryptoType2()
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    pfs0_size = self.header.get_pfs0_size()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    type_n = self.read(0x1)
    self.seek(cmt_offset+0xE)
    offset=self.readInt16()
    content_entries=self.readInt16()
    meta_entries=self.readInt16()
    self.seek(cmt_offset+0x18)
    RDSV=self.readInt64()
    self.seek(cmt_offset+0x20)
    original_ID=self.readInt64()
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    length_of_emeta=self.readInt32()
    self.seek(cmt_offset+offset+0x20)
    # Map the raw meta-type byte to its ContentMeta type name.
    # NOTE(review): hx() of bytes 0x01..0x05 renders as "b'01'".."b'05'",
    # so the single-digit comparisons below may never match -- confirm.
    if str(hx(type_n)) == "b'1'":
        type='SystemProgram'
    if str(hx(type_n)) == "b'2'":
        type='SystemData'
    if str(hx(type_n)) == "b'3'":
        type='SystemUpdate'
    if str(hx(type_n)) == "b'4'":
        type='BootImagePackage'
    if str(hx(type_n)) == "b'5'":
        type='BootImagePackageSafe'
    if str(hx(type_n)) == "b'80'":
        type='Application'
    if str(hx(type_n)) == "b'81'":
        type='Patch'
    if str(hx(type_n)) == "b'82'":
        type='AddOnContent'
    if str(hx(type_n)) == "b'83'":
        type='Delta'
    titleid=str(hx(titleid.to_bytes(8, byteorder='big')))
    titleid='0x'+titleid[2:-1]
    version = str(int.from_bytes(titleversion, byteorder='little'))
    RDSV=str(RDSV)
    # Output file name: swap the '.nca' suffix for '.xml'.
    xmlname = str(self._path)
    xmlname = xmlname[:-4] + '.xml'
    outfolder = str(ofolder)+'\\'
    textpath = os.path.join(outfolder, xmlname)
    with open(textpath, 'w+') as tfile:
        tfile.write('<?xml version="1.0" encoding="utf-8"?>' + '\n')
        tfile.write('<ContentMeta>' + '\n')
        tfile.write('  <Type>'+ type +'</Type>' + '\n')
        tfile.write('  <Id>'+ titleid +'</Id>' + '\n')
        tfile.write('  <Version>'+ version +'</Version>' + '\n')
        tfile.write('  <RequiredDownloadSystemVersion>'+ RDSV +'</RequiredDownloadSystemVersion>' + '\n')
    # One <Content> element per CNMT content record (0x38 bytes each).
    for i in range(content_entries):
        vhash = self.read(0x20)
        NcaId = self.read(0x10)
        size = self.read(0x6)
        ncatype = self.readInt8()
        unknown = self.read(0x1)
        if ncatype==0:
            type='Meta'
        if str(ncatype)=="1":
            type='Program'
        if ncatype==2:
            type='Data'
        if ncatype==3:
            type='Control'
        if ncatype==4:
            type='HtmlDocument'
        if ncatype==5:
            type='LegalInformation'
        if ncatype==6:
            type='DeltaFragment'
        NcaId=str(hx(NcaId))
        NcaId=NcaId[2:-1]
        size=str(int.from_bytes(size, byteorder='little'))
        vhash=str(hx(vhash))
        vhash=vhash[2:-1]
        with open(textpath, 'a') as tfile:
            tfile.write('  <Content>' + '\n')
            tfile.write('    <Type>'+ type +'</Type>' + '\n')
            tfile.write('    <Id>'+ NcaId +'</Id>' + '\n')
            tfile.write('    <Size>'+ size +'</Size>' + '\n')
            tfile.write('    <Hash>'+ vhash +'</Hash>' + '\n')
            tfile.write('    <KeyGeneration>'+ str(self.header.cryptoType2) +'</KeyGeneration>' + '\n')
            tfile.write('  </Content>' + '\n')
    # Digest lives in the last 0x20 bytes of the PFS0 partition.
    self.seek(pfs0_offset+pfs0_size-0x20)
    digest = str(hx(self.read(0x20)))
    digest=digest[2:-1]
    original_ID=str(hx(original_ID.to_bytes(8, byteorder='big')))
    original_ID='0x'+original_ID[2:-1]
    # The meta NCA itself is listed as a final <Content> entry; its id is
    # the filename minus the '.cnmt.nca' suffix.
    metaname=os.path.basename(os.path.abspath(self._path))
    metaname = metaname[:-9]
    size=str(os.path.getsize(self._path))
    with open(textpath, 'a') as tfile:
        tfile.write('  <Content>' + '\n')
        tfile.write('    <Type>'+ 'Meta' +'</Type>' + '\n')
        tfile.write('    <Id>'+ metaname +'</Id>' + '\n')
        tfile.write('    <Size>'+ size +'</Size>' + '\n')
        tfile.write('    <Hash>'+ nsha +'</Hash>' + '\n')
        tfile.write('    <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
        tfile.write('  </Content>' + '\n')
        tfile.write('  <Digest>'+ digest +'</Digest>' + '\n')
        # NOTE(review): xml_gen writes <KeyGenerationMin> at this point;
        # this variant repeats <KeyGeneration> -- possibly unintended.
        tfile.write('  <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
        # RequiredSystemVersion is recomputed from the target key generation.
        min_sversion=sq_tools.getMinRSV(keygeneration,min_sversion)
        tfile.write('  <RequiredSystemVersion>'+ str(min_sversion) +'</RequiredSystemVersion>' + '\n')
        tfile.write('  <OriginalId>'+ original_ID +'</OriginalId>' + '\n')
        tfile.write('</ContentMeta>')
    return textpath
def ret_xml(self):
    """Build the <ContentMeta> XML for this CNMT NCA in memory and return
    (xmlname, size_of_xml_string, xml_string) without writing to disk."""
    file = None
    mode = 'rb'
    crypto1=self.header.getCryptoType()
    crypto2=self.header.getCryptoType2()
    # Effective key generation = max of the two header crypto fields.
    if crypto2>crypto1:
        keygeneration=crypto2
    if crypto2<=crypto1:
        keygeneration=crypto1
    # Output name: swap the '.nca' suffix for '.xml'; the meta id is the
    # filename minus the '.cnmt.nca' suffix.
    xmlname = str(self._path)
    xmlname = xmlname[:-4] + '.xml'
    metaname=str(self._path)
    metaname = metaname[:-9]
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    pfs0_size = self.header.get_pfs0_size()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    type_n = self.read(0x1)
    self.seek(cmt_offset+0xE)
    offset=self.readInt16()
    content_entries=self.readInt16()
    meta_entries=self.readInt16()
    self.seek(cmt_offset+0x18)
    RDSV=self.readInt64()
    self.seek(cmt_offset+0x20)
    original_ID=self.readInt64()
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    length_of_emeta=self.readInt32()
    self.seek(cmt_offset+offset+0x20)
    # Map the raw meta-type byte to its ContentMeta type name.
    # NOTE(review): hx() of bytes 0x01..0x05 renders as "b'01'".."b'05'",
    # so the single-digit comparisons below may never match -- confirm.
    if str(hx(type_n)) == "b'1'":
        type='SystemProgram'
    if str(hx(type_n)) == "b'2'":
        type='SystemData'
    if str(hx(type_n)) == "b'3'":
        type='SystemUpdate'
    if str(hx(type_n)) == "b'4'":
        type='BootImagePackage'
    if str(hx(type_n)) == "b'5'":
        type='BootImagePackageSafe'
    if str(hx(type_n)) == "b'80'":
        type='Application'
    if str(hx(type_n)) == "b'81'":
        type='Patch'
    if str(hx(type_n)) == "b'82'":
        type='AddOnContent'
    if str(hx(type_n)) == "b'83'":
        type='Delta'
    titleid=str(hx(titleid.to_bytes(8, byteorder='big')))
    titleid='0x'+titleid[2:-1]
    version = str(int.from_bytes(titleversion, byteorder='little'))
    RDSV=str(RDSV)
    xml_string = '<?xml version="1.0" encoding="utf-8"?>' + '\n'
    xml_string += '<ContentMeta>' + '\n'
    xml_string +='  <Type>'+ type +'</Type>' + '\n'
    xml_string +=('  <Id>'+ titleid +'</Id>' + '\n')
    xml_string +=('  <Version>'+ version +'</Version>' + '\n')
    xml_string +=('  <RequiredDownloadSystemVersion>'+ RDSV +'</RequiredDownloadSystemVersion>' + '\n')
    # One <Content> element per CNMT content record (0x38 bytes each).
    for i in range(content_entries):
        vhash = self.read(0x20)
        NcaId = self.read(0x10)
        size = self.read(0x6)
        ncatype = self.readInt8()
        unknown = self.read(0x1)
        if ncatype==0:
            type='Meta'
        if str(ncatype)=="1":
            type='Program'
        if ncatype==2:
            type='Data'
        if ncatype==3:
            type='Control'
        if ncatype==4:
            type='HtmlDocument'
        if ncatype==5:
            type='LegalInformation'
        if ncatype==6:
            type='DeltaFragment'
        NcaId=str(hx(NcaId))
        NcaId=NcaId[2:-1]
        size=str(int.from_bytes(size, byteorder='little'))
        vhash=str(hx(vhash))
        vhash=vhash[2:-1]
        xml_string +=('  <Content>' + '\n')
        xml_string +=('    <Type>'+ type +'</Type>' + '\n')
        xml_string +=('    <Id>'+ NcaId +'</Id>' + '\n')
        xml_string +=('    <Size>'+ size +'</Size>' + '\n')
        xml_string +=('    <Hash>'+ vhash +'</Hash>' + '\n')
        xml_string +=('    <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
        xml_string +=('  </Content>' + '\n')
    # Digest lives in the last 0x20 bytes of the PFS0 partition.
    self.seek(pfs0_offset+pfs0_size-0x20)
    digest = str(hx(self.read(0x20)))
    digest=digest[2:-1]
    original_ID=str(hx(original_ID.to_bytes(8, byteorder='big')))
    original_ID='0x'+original_ID[2:-1]
    size=str(os.path.getsize(self._path))
    xml_string +=('  <Content>' + '\n')
    xml_string +=('    <Type>'+ 'Meta' +'</Type>' + '\n')
    xml_string +=('    <Id>'+ metaname +'</Id>' + '\n')
    xml_string +=('    <Size>'+ size +'</Size>' + '\n')
    # NOTE(review): `nsha` is not a parameter and is never assigned in this
    # method -- this line raises NameError unless a global `nsha` exists.
    # It was likely meant to be a parameter, as in xml_gen / xml_gen_mod.
    xml_string +=('    <Hash>'+ nsha +'</Hash>' + '\n')
    xml_string +=('    <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
    xml_string +=('  </Content>' + '\n')
    xml_string +=('  <Digest>'+ digest +'</Digest>' + '\n')
    # NOTE(review): xml_gen writes <KeyGenerationMin> here; this method
    # repeats <KeyGeneration> -- possibly unintended.
    xml_string +=('  <KeyGeneration>'+ str(keygeneration) +'</KeyGeneration>' + '\n')
    xml_string +=('  <RequiredSystemVersion>'+ str(min_sversion) +'</RequiredSystemVersion>' + '\n')
    xml_string +=('  <OriginalId>'+ original_ID +'</OriginalId>' + '\n')
    xml_string +=('</ContentMeta>')
    #print(xml_string)
    size=len(xml_string)
    return xmlname,size,xml_string
def printInfo(self, indent = 0):
    """Print a human-readable summary of this NCA's header fields, key
    block and partitions, indented by `indent` tab stops."""
    tabs = '\t' * indent
    Print.info('\n%sNCA Archive\n' % (tabs))
    super(Nca, self).printInfo(indent)
    # (kept for debugging) hash/table verification helpers:
    # Print.info(tabs + 'Header Block Hash: ' + str(hx(self.header.get_hblock_hash())))
    # self.header.calculate_hblock_hash()
    # self.get_hblock()
    # self.calc_htable_hash()
    # Print.info('hash from pfs0: ' + str(hx(self.get_pfs0_hash())))
    # self.calc_pfs0_hash()
    # self.get_req_system()
    # Print.info(tabs + 'RSA-2048 signature 1 = ' + str(hx(self.header.signature1)))
    # Print.info(tabs + 'RSA-2048 signature 2 = ' + str(hx(self.header.signature2)))
    Print.info(tabs + 'magic = ' + str(self.header.magic))
    Print.info(tabs + 'titleId = ' + str(self.header.titleId))
    Print.info(tabs + 'rightsId = ' + str(self.header.rightsId))
    Print.info(tabs + 'isGameCard = ' + hex(self.header.isGameCard))
    Print.info(tabs + 'contentType = ' + str(self.header.contentType))
    #Print.info(tabs + 'cryptoType = ' + str(self.header.getCryptoType()))
    Print.info(tabs + 'SDK version = ' + self.get_sdkversion())
    Print.info(tabs + 'Size: ' + str(self.header.size))
    Print.info(tabs + 'Crypto-Type1: ' + str(self.header.cryptoType))
    Print.info(tabs + 'Crypto-Type2: ' + str(self.header.cryptoType2))
    Print.info(tabs + 'key Index: ' + str(self.header.keyIndex))
    #Print.info(tabs + 'key Block: ' + str(self.header.getKeyBlock()))
    # Dump each entry of the header key block.
    for key in self.header.keys:
        Print.info(tabs + 'key Block: ' + str(hx(key)))
    Print.info('\n%sPartitions:' % (tabs))
    for s in self:
        s.printInfo(indent+1)
    #self.read_pfs0_header()
    #self.read_cnmt()
def ncalist_bycnmt(self, file = None, mode = 'rb'):
    """Return the list of '.nca' filenames referenced by this CNMT's
    content records, with this meta NCA's own path appended last."""
    # Collect decryption parameters from the NCA sections (last one wins).
    for f in self:
        cryptoType=f.get_cryptoType()
        cryptoKey=f.get_cryptoKey()
        cryptoCounter=f.get_cryptoCounter()
    # PFS0 superblock starts 0xC00 past the header tables.
    pfs0_offset=0xC00+self.header.get_htable_offset()+self.header.get_pfs0_offset()
    pfs0_size = self.header.get_pfs0_size()
    super(Nca, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.seek(pfs0_offset+0x8)
    pfs0_table_size=self.readInt32()
    # CNMT payload begins after the single-entry PFS0 header + string table.
    cmt_offset=pfs0_offset+0x28+pfs0_table_size
    self.seek(cmt_offset)
    titleid=self.readInt64()
    titleversion = self.read(0x4)
    type_n = self.read(0x1)
    self.seek(cmt_offset+0xE)
    offset=self.readInt16()
    content_entries=self.readInt16()
    meta_entries=self.readInt16()
    self.seek(cmt_offset+0x20)
    original_ID=self.readInt64()
    self.seek(cmt_offset+0x28)
    min_sversion=self.readInt32()
    length_of_emeta=self.readInt32()
    # Walk the content table; each record names one NCA by its id.
    self.seek(cmt_offset+offset+0x20)
    ncalist=list()
    for i in range(content_entries):
        vhash = self.read(0x20)
        NcaId = self.read(0x10)
        size = self.read(0x6)
        ncatype = self.read(0x1)
        unknown = self.read(0x1)
        # hx() renders as b'...'; strip the wrapper and add the extension.
        nca2append=str(hx(NcaId))
        nca2append=nca2append[2:-1]+'.nca'
        ncalist.append(nca2append)
    nca_meta=str(self._path)
    ncalist.append(nca_meta)
    return ncalist
def return_cnmt(self, file = None, mode = 'rb'):
    """Return the raw CNMT payload bytes of this meta NCA."""
    # Pick up the decryption parameters from the NCA's sections
    # (the last section's values win, as in the sibling readers).
    for section in self:
        crypto_type = section.get_cryptoType()
        crypto_key = section.get_cryptoKey()
        crypto_counter = section.get_cryptoCounter()
    # The PFS0 superblock starts 0xC00 past the header tables.
    pfs0_start = 0xC00 + self.header.get_htable_offset() + self.header.get_pfs0_offset()
    super(Nca, self).open(file, mode, crypto_type, crypto_key, crypto_counter)
    # The string-table size at +0x8 tells us where the CNMT payload begins:
    # right after the single-entry PFS0 header (0x28 bytes) + string table.
    self.seek(pfs0_start + 0x8)
    string_table_len = self.readInt32()
    self.seek(pfs0_start + 0x28 + string_table_len)
    return self.read()
def copy_files(self, buffer, ofolder=False, filepath=False, io=0, eo=False):
    """Dump each section of this NCA to disk in chunks.

    buffer   -- chunk size in bytes for the copy loop.
    ofolder  -- destination folder; default False writes to './ofolder'.
    filepath -- explicit output path; default False names files '0', '1', ...
                (once set, the same path is reused for every section,
                matching the historical behaviour).
    io       -- starting offset inside each section.
    eo       -- unused; kept for interface compatibility.
    """
    i = 0
    # Fix: the original only assigned `outfolder` when ofolder was False,
    # raising NameError for callers that passed a real folder.
    outfolder = 'ofolder' if ofolder == False else str(ofolder)
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    for f in self:
        if filepath == False:
            filename = str(i)
            i += 1
            filepath = os.path.join(outfolder, filename)
        fp = open(filepath, 'w+b')
        try:
            self.rewind()
            f.seek(io)
            # Fix: sections yield bytes, so the original '' sentinel never
            # matched and the loop relied only on the inner break; use b''.
            for data in iter(lambda: f.read(int(buffer)), b''):
                if not data:
                    break
                fp.write(data)
                fp.flush()
        finally:
            # Always release the handle, even if a read/write fails.
            fp.close()
def get_nacp_offset(self):
    """Heuristically locate the NACP title-entry base offset inside this
    control NCA and return it.

    The probing below assumes NACP language entries are 0x300 bytes apart
    (0x200 name + 0x100 publisher) starting near 0x14000/0x14200/0x14400,
    and that the display-version string sits at base+0x3060 -- offsets are
    derived from the arithmetic in this code, not from a spec; confirm
    against the NACP format documentation.
    """
    for f in self:
        self.rewind()
        f.rewind()
        Langue = list()
        Langue = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
        SupLg=list()
        regionstr=""
        offset=0x14200
        # Pass 1: find a base offset where a language entry decodes to a
        # non-empty title and publisher.
        for i in Langue:
            try:
                f.seek(offset+i*0x300)
                test=f.read(0x200)
                test = test.split(b'\0', 1)[0].decode('utf-8')
                test = (re.sub(r'[\/\\]+', ' ', test))
                test = test.strip()
                test2=f.read(0x100)
                test2 = test2.split(b'\0', 1)[0].decode('utf-8')
                test2 = (re.sub(r'[\/\\]+', ' ', test2))
                test2 = test2.strip()
                if test == "" or test2 == "":
                    # Retry with the alternate base 0x14400.
                    offset=0x14400
                    f.seek(offset+i*0x300)
                    test=f.read(0x200)
                    test = test.split(b'\0', 1)[0].decode('utf-8')
                    test = (re.sub(r'[\/\\]+', ' ', test))
                    test = test.strip()
                    test2=f.read(0x100)
                    test2 = test2.split(b'\0', 1)[0].decode('utf-8')
                    test2 = (re.sub(r'[\/\\]+', ' ', test2))
                    test2 = test2.strip()
                    if test == "" or test2 == "":
                        # Last resort: scan upward from 0x14000 in 0x100 steps.
                        offset=0x14000
                        while offset<=(0x14200+i*0x300):
                            offset=offset+0x100
                            f.seek(offset+i*0x300)
                            test=f.read(0x200)
                            test = test.split(b'\0', 1)[0].decode('utf-8')
                            test = (re.sub(r'[\/\\]+', ' ', test))
                            test = test.strip()
                            test2=f.read(0x100)
                            test2 = test2.split(b'\0', 1)[0].decode('utf-8')
                            test2 = (re.sub(r'[\/\\]+', ' ', test2))
                            test2 = test2.strip()
                            if test != "" and test2 != "" :
                                offset=offset
                                break
                        if test != "":
                            offset=offset
                            break
                    if test != "":
                        offset=offset
                        break
                else:
                    break
            except:
                # Decode failures just move on to the next language slot.
                pass
        # Pass 2: validate the candidate by checking that base+0x3060 holds
        # a display-version string that starts with a digit.
        try:
            f.seek(offset+0x3060)
            ediver = f.read(0x10)
            #print('here2')
            #print(hx(ediver))
            try:
                ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
                ediver = (re.sub(r'[\/\\]+', ' ', ediver))
                ediver = ediver.strip()
            except:
                # Decode failed: fall back to a fixed alternate base.
                offset=0x16900-0x300*14
                f.seek(offset+0x3060)
                ediver = f.read(0x10)
                ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
                ediver = (re.sub(r'[\/\\]+', ' ', ediver))
                ediver = ediver.strip()
            try:
                int(ediver[0])+1
            except:
                ediver="-"
            if ediver == '-':
                # Step the base downward one language slot at a time.
                for i in Langue:
                    try:
                        i=i+1
                        offset2=offset-0x300*i
                        f.seek(offset2+0x3060)
                        ediver = f.read(0x10)
                        ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
                        ediver = (re.sub(r'[\/\\]+', ' ', ediver))
                        ediver = ediver.strip()
                        try:
                            int(ediver[0])+1
                            offset=offset2
                            break
                        except:
                            ediver="-"
                    except:
                        pass
            if ediver == '-':
                # Final sweep upward in 0x100 steps until 0x18600.
                try:
                    while (offset2+0x3060)<=0x18600:
                        offset2+=0x100
                        f.seek(offset2+0x3060)
                        ediver = f.read(0x10)
                        ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
                        ediver = (re.sub(r'[\/\\]+', ' ', ediver))
                        ediver = ediver.strip()
                        if ediver != '':
                            if str(ediver[0])!='v' and str(ediver[0])!='V':
                                try:
                                    int(ediver[0])+1
                                    offset=offset2
                                    break
                                except:
                                    ediver="-"
                            break
                except:
                    ediver="-"
        except:
            pass
        f.seek(offset)
        #data=f.read()
        return offset
	def get_langueblock(self,title,roman=True,trans=False):
		"""Extract title/publisher info from the NACP of this (CONTROL) NCA.

		The NACP holds 15 per-language entries of 0x300 bytes each
		(0x200 title + 0x100 publisher).  Because the payload base is not
		always at the same position, several candidate base offsets are
		probed (0x14200, 0x14400, then a 0x100-step scan) until a
		non-empty entry is found.  CJK names can be romanized via
		pykakasi (*roman*) and machine-translated via Translator
		(*trans*) — both provided elsewhere in the file.

		Returns ``(title, editor, ediver, SupLg, regionstr, isdemo)``:
		the chosen title and publisher strings, the display-version
		string (``'-'`` if undetectable), the list of supported-language
		labels, a '|'-separated region bitmask string and the demo flag.
		On total failure returns placeholders with ediver == '-'.
		"""
		for f in self:
			self.rewind()
			f.rewind()
			Langue = list()
			Langue = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
			SupLg=list()
			regionstr=""
			# First candidate base offset for the language entries.
			offset=0x14200
			# Pass 1: adjust 'offset' until a language slot decodes to a
			# non-empty title (and publisher).
			for i in Langue:
				try:
					f.seek(offset+i*0x300)
					test=f.read(0x200)
					test = test.split(b'\0', 1)[0].decode('utf-8')
					test = (re.sub(r'[\/\\]+', ' ', test))
					test = test.strip()
					test2=f.read(0x100)
					test2 = test2.split(b'\0', 1)[0].decode('utf-8')
					test2 = (re.sub(r'[\/\\]+', ' ', test2))
					test2 = test2.strip()
					if test == "" or test2 == "":
						# Second candidate base offset.
						offset=0x14400
						f.seek(offset+i*0x300)
						test=f.read(0x200)
						test = test.split(b'\0', 1)[0].decode('utf-8')
						test = (re.sub(r'[\/\\]+', ' ', test))
						test = test.strip()
						test2=f.read(0x100)
						test2 = test2.split(b'\0', 1)[0].decode('utf-8')
						test2 = (re.sub(r'[\/\\]+', ' ', test2))
						test2 = test2.strip()
						if test == "" or test2 == "":
							# Last resort: step the base in 0x100 increments.
							offset=0x14000
							while offset<=(0x14200+i*0x300):
								offset=offset+0x100
								f.seek(offset+i*0x300)
								test=f.read(0x200)
								test = test.split(b'\0', 1)[0].decode('utf-8')
								test = (re.sub(r'[\/\\]+', ' ', test))
								test = test.strip()
								test2=f.read(0x100)
								test2 = test2.split(b'\0', 1)[0].decode('utf-8')
								test2 = (re.sub(r'[\/\\]+', ' ', test2))
								test2 = test2.strip()
								if test != "" and test2 != "" :
									offset=offset
									break
							if test != "":
								offset=offset
								break
						if test != "":
							offset=offset
							break
					else:
						break
				except:
					pass
			# Pass 2: locate a plausible display-version string at
			# base+0x3060; first char must be a digit (not 'v'/'V').
			try:
				f.seek(offset+0x3060)
				ediver = f.read(0x10)
				#print('here2')
				#print(hx(ediver))
				try:
					ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
					ediver = (re.sub(r'[\/\\]+', ' ', ediver))
					ediver = ediver.strip()
				except:
					# Decode failed: retry from a fixed fallback base.
					offset=0x16900-0x300*14
					f.seek(offset+0x3060)
					ediver = f.read(0x10)
					ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
					ediver = (re.sub(r'[\/\\]+', ' ', ediver))
					ediver = ediver.strip()
				try:
					# Cheap "starts with a digit" check.
					int(ediver[0])+1
				except:
					ediver="-"
				if ediver == '-':
					# Walk backwards one entry (0x300) at a time.
					for i in Langue:
						try:
							i=i+1
							offset2=offset-0x300*i
							f.seek(offset2+0x3060)
							ediver = f.read(0x10)
							ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
							ediver = (re.sub(r'[\/\\]+', ' ', ediver))
							ediver = ediver.strip()
							try:
								int(ediver[0])+1
								offset=offset2
								break
							except:
								ediver="-"
						except:
							pass
				if ediver == '-':
					# Walk forwards in 0x100 steps up to 0x18600.
					try:
						while (offset2+0x3060)<=0x18600:
							offset2+=0x100
							f.seek(offset2+0x3060)
							ediver = f.read(0x10)
							ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
							ediver = (re.sub(r'[\/\\]+', ' ', ediver))
							ediver = ediver.strip()
							if ediver != '':
								if str(ediver[0])!='v' and str(ediver[0])!='V':
									try:
										int(ediver[0])+1
										offset=offset2
										break
									except:
										ediver="-"
								break
					except:
						ediver="-"
			except:
				pass
			# Pass 3: build the supported-language list and region bitmask.
			Langue = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
			for i in Langue:
				try:
					f.seek(offset+i*0x300)
					title = f.read(0x200)
					title = title.split(b'\0', 1)[0].decode('utf-8')
					title = (re.sub(r'[\/\\]+', ' ', title))
					title = title.strip()
					if title != "":
						if i==0:
							SupLg.append("US (eng)")
							regionstr+='1|'
						if i==1:
							SupLg.append("UK (eng)")
							regionstr+='1|'
						if i==2:
							SupLg.append("JP")
							regionstr+='1|'
						if i==3:
							SupLg.append("FR")
							regionstr+='1|'
						if i==4:
							SupLg.append("DE")
							regionstr+='1|'
						if i==5:
							SupLg.append("LAT (spa)")
							regionstr+='1|'
						if i==6:
							SupLg.append("SPA")
							regionstr+='1|'
						if i==7:
							SupLg.append("IT")
							regionstr+='1|'
						if i==8:
							SupLg.append("DU")
							regionstr+='1|'
						if i==9:
							SupLg.append("CAD (fr)")
							regionstr+='1|'
						if i==10:
							SupLg.append("POR")
							regionstr+='1|'
						if i==11:
							SupLg.append("RU")
							regionstr+='1|'
						if i==12:
							SupLg.append("KOR")
							regionstr+='1|'
						if i==13:
							SupLg.append("TW (ch)")
							regionstr+='1|'
						if i==14:
							SupLg.append("CH")
							regionstr+='1|'
					else:
						regionstr+='0|'
				except:
					pass
			# Pass 4: pick the best language (preference order below) and
			# return its title/publisher, romanized/translated if asked.
			Langue = [0,1,6,5,7,10,3,4,9,8,2,11,12,13,14]
			for i in Langue:
				try:
					f.seek(offset+i*0x300)
					title = f.read(0x200)
					editor = f.read(0x100)
					title = title.split(b'\0', 1)[0].decode('utf-8')
					title = (re.sub(r'[\/\\]+', ' ', title))
					title = title.strip()
					editor = editor.split(b'\0', 1)[0].decode('utf-8')
					editor = (re.sub(r'[\/\\]+', ' ', editor))
					editor = editor.strip()
					if title == "":
						title = 'DLC'
					if title != 'DLC':
						title = title
					f.seek(offset+0x3060)
					ediver = f.read(0x10)
					ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
					ediver = (re.sub(r'[\/\\]+', ' ', ediver))
					ediver = ediver.strip()
					if ediver == '':
						try:
							while (offset+0x3060)<=0x18600:
								offset+=0x100
								f.seek(offset+0x3060)
								ediver = f.read(0x10)
								ediver = ediver.split(b'\0', 1)[0].decode('utf-8')
								ediver = (re.sub(r'[\/\\]+', ' ', ediver))
								ediver = ediver.strip()
								if ediver != '':
									if str(ediver[0])!='v' and str(ediver[0])!='V':
										try:
											int(ediver[0])+1
											break
										except:
											ediver="-"
									break
						except:
							ediver="-"
					# Demo flag lives at base+0x3028; only values 0-2 are kept.
					f.seek(offset+0x3028)
					isdemo = f.readInt8('little')
					if ediver !='-':
						if isdemo == 0:
							isdemo = 0
						elif isdemo == 1:
							isdemo = 1
						elif isdemo == 2:
							isdemo = 2
						else:
							isdemo = 0
					else:
						isdemo = 0
					if i == 2:
						# Japanese entry: romanize, optionally translate.
						if roman == True:
							kakasi = pykakasi.kakasi()
							kakasi.setMode("H", "a")
							kakasi.setMode("K", "a")
							kakasi.setMode("J", "a")
							kakasi.setMode("s", True)
							kakasi.setMode("E", "a")
							kakasi.setMode("a", None)
							kakasi.setMode("C", False)
							converter = kakasi.getConverter()
							title=converter.do(title)
							title=title[0].upper()+title[1:]
							editor=converter.do(editor)
							editor=editor[0].upper()+editor[1:]
							if trans==True:
								try:
									translator = Translator()
									translation=translator.translate(title,src='ja',dest='en')
									title=translation.text
									translation=translator.translate(editor,src='ja',dest='en')
									editor=translation.text
								except BaseException as e:
									Print.error('Exception: ' + str(e))
						else:pass
					elif i == 14 or i == 13 or i==12:
						# Korean/Chinese entries: same romanize/translate path.
						if roman == True:
							kakasi = pykakasi.kakasi()
							kakasi.setMode("H", "a")
							kakasi.setMode("K", "a")
							kakasi.setMode("J", "a")
							kakasi.setMode("s", True)
							kakasi.setMode("E", "a")
							kakasi.setMode("a", None)
							kakasi.setMode("C", False)
							converter = kakasi.getConverter()
							title=converter.do(title)
							title=title[0].upper()+title[1:]
							editor=converter.do(editor)
							editor=editor[0].upper()+editor[1:]
							if trans==True:
								try:
									translator = Translator()
									translation=translator.translate(title,src='ja',dest='en')
									title=translation.text
									translation=translator.translate(editor,src='ja',dest='en')
									editor=translation.text
								except BaseException as e:
									Print.error('Exception: ' + str(e))
							else:pass
					else:
						# Latin-script entries: romanize then keep the original
						# if the romanization is equivalent (choose_name).
						if roman == True:
							kakasi = pykakasi.kakasi()
							kakasi.setMode("H", "a")
							kakasi.setMode("K", "a")
							kakasi.setMode("J", "a")
							kakasi.setMode("s", True)
							kakasi.setMode("E", "a")
							kakasi.setMode("a", None)
							kakasi.setMode("C", False)
							converter = kakasi.getConverter()
							ogname=title
							ogeditor=editor
							title=converter.do(title)
							title=title[0].upper()+title[1:]
							editor=converter.do(editor)
							editor=editor[0].upper()+editor[1:]
							title=self.choose_name(title,ogname)
							editor=self.choose_name(editor,ogeditor)
						else:pass
					title=re.sub(' +', ' ',title)
					editor=re.sub(' +', ' ',editor)
					return(title,editor,ediver,SupLg,regionstr[:-1],isdemo)
				except:
					pass
		# Nothing usable was found: return placeholders.
		regionstr="0|0|0|0|0|0|0|0|0|0|0|0|0|0"
		ediver='-'
		return(title,"",ediver,"",regionstr,"")
def choose_name(self,name,ogname):
from difflib import SequenceMatcher
_name=name;_ogname=ogname
name = re.sub(r'[àâá@äå]', 'a', name);name = re.sub(r'[ÀÂÁÄÅ]', 'A', name)
name = re.sub(r'[èêéë]', 'e', name);name = re.sub(r'[ÈÊÉË]', 'E', name)
name = re.sub(r'[ìîíï]', 'i', name);name = re.sub(r'[ÌÎÍÏ]', 'I', name)
name = re.sub(r'[òôóöø]', 'o', name);name = re.sub(r'[ÒÔÓÖØ]', 'O', name)
name = re.sub(r'[ùûúü]', 'u', name);name = re.sub(r'[ÙÛÚÜ]', 'U', name)
name=name.lower()
name = list([val for val in name if val.isalnum()])
name = "".join(name)
ogname = re.sub(r'[àâá@äå]', 'a', ogname);ogname = re.sub(r'[ÀÂÁÄÅ]', 'A', ogname)
ogname = re.sub(r'[èêéë]', 'e', ogname);ogname = re.sub(r'[ÈÊÉË]', 'E', ogname)
ogname = re.sub(r'[ìîíï]', 'i', ogname);ogname = re.sub(r'[ÌÎÍÏ]', 'I', ogname)
ogname = re.sub(r'[òôóöø]', 'o', ogname);ogname = re.sub(r'[ÒÔÓÖØ]', 'O', ogname)
ogname = re.sub(r'[ùûúü]', 'u', ogname);ogname = re.sub(r'[ÙÛÚÜ]', 'U', ogname)
ogname=ogname.lower()
ogname = list([val for val in ogname if val.isalnum()])
ogname = "".join(ogname)
ratio=SequenceMatcher(None, name, ogname).ratio()
if ratio==1.0:
return _ogname
return _name
def get_sdkversion(self):
sdkversion=str(self.header.sdkVersion4)+'.'+str(self.header.sdkVersion3)+'.'+str(self.header.sdkVersion2)+'.'+str(self.header.sdkVersion1)
return sdkversion
def simple_sig_check(self):
self.rewind()
sign1 = self.header.signature1
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
self.header.rewind()
orig_header= self.header.read(0xC00)
self.header.seek(0x200)
headdata = self.header.read(0x200)
if self.header.getSigKeyGen() == 0:
pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_00, e=RSA_PUBLIC_EXPONENT)
else:
pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_01, e=RSA_PUBLIC_EXPONENT)
rsapss = PKCS1_PSS.new(pubkey)
digest = SHA256.new(headdata)
verification=rsapss.verify(digest, sign1)
return verification
	def verify(self,feed,targetkg=False,endcheck=False,progress=False,bar=False):
		"""Verify this NCA's fixed-key header signature and report on it.

		Human-readable progress lines are appended to *feed* and either
		printed or written to *bar* (when *progress* is truthy).

		Returns an 8-tuple
		``(status, fixed_header, path, feed, keygen, rightsId, titlekey, isgamecard)``:
		*status* is True (PROPER), False (MODIFIED) or 'BADCNMT';
		*fixed_header* is the re-encrypted restored header when a
		reconstructed header variant verified, else False; *rightsId*
		and *titlekey* are only filled on the titlerights-restored path.

		targetkg: candidate original keygeneration for CNMT checks.
		endcheck: when the signature fails for non-META content, controls
		whether a final MODIFIED verdict is emitted.
		"""
		if feed == False:
			feed=''
		indent=' > '
		if self._path.endswith('cnmt.nca'):
			arrow=' -> '
		else:
			# 'tabs' is a module-level constant defined elsewhere in the file.
			arrow=tabs+' -> '
		self.rewind()
		#print('Signature 1:')
		sign1 = self.header.signature1
		#Hex.dump(sign1)
		#print('')
		#print('Header data:')
		hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
		self.header.rewind()
		orig_header= self.header.read(0xC00)
		# Signed region: bytes 0x200-0x3FF of the decrypted header.
		self.header.seek(0x200)
		headdata = self.header.read(0x200)
		#print(hx(orig_header))
		#Hex.dump(headdata)
		if self.header.getSigKeyGen() == 0:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_00, e=RSA_PUBLIC_EXPONENT)
		else:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_01, e=RSA_PUBLIC_EXPONENT)
		rsapss = PKCS1_PSS.new(pubkey)
		digest = SHA256.new(headdata)
		verification=rsapss.verify(digest, sign1)
		# Effective keygeneration is the larger of the two crypto fields.
		crypto1=self.header.getCryptoType()
		crypto2=self.header.getCryptoType2()
		if crypto2>crypto1:
			masterKeyRev=crypto2
		if crypto2<=crypto1:
			masterKeyRev=crypto1
		currkg=masterKeyRev
		if os.path.exists(self._path):
			printname=str(os.path.basename(os.path.abspath(self._path)))
		else:
			printname=str(self._path)
		if verification == True:
			try:
				bar.close()
			except:pass
			message=(indent+printname+arrow+'is PROPER');print(message);feed+=message+'\n'
			#print(hx(headdata))
			return True,False,self._path,feed,currkg,False,False,self.header.getgamecard()
		else:
			# Signature failed: try to reconstruct the pre-modification header.
			crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex))
			KB1L=self.header.getKB1L()
			KB1L = crypto.decrypt(KB1L)
			if sum(KB1L) != 0:
				# Non-zero decrypted key block: titlerights were likely
				# removed; try to restore rights id + key block.
				#print(hx(headdata))
				checkrights,kgchg,titlekey,tr,headdata,orkg=self.restorehead_tr()
				if headdata != False:
					# Splice the restored signed region back and re-encrypt.
					orig_header=orig_header[0x00:0x200]+headdata+orig_header[0x400:]
					#print(hx(orig_header))
					orig_header=hcrypto.encrypt(orig_header)
				else:
					orig_header = False
				if checkrights == True:
					message=(indent+printname+arrow+'is PROPER');print(message);feed+=message+'\n'
					message=(tabs+'* '+"TITLERIGHTS WERE REMOVED");print(message);feed+=message+'\n'
					if kgchg == False:
						message=(tabs+'* '+"Original titlerights id is : "+(str(hx(tr)).upper())[2:-1]);print(message);feed+=message+'\n'
						message=(tabs+'* '+"Original titlekey is : "+(str(hx(titlekey)).upper())[2:-1]);print(message);feed+=message+'\n'
						tcheck=(str(hx(titlekey)).upper())[2:-1]
						if tcheck == '00000000000000000000000000000000':
							message=(tabs+'* '+"WARNING: sum(titlekey)=0 -> S.C. conversion may be incorrect and come from nsx file");print(message);feed+=message+'\n'
					elif kgchg == True:
						message=(tabs+'* '+"KEYGENERATION WAS CHANGED FROM "+str(orkg)+" TO "+str(currkg));print(message);feed+=message+'\n'
						message=(tabs+'* '+"Original titlerights id is -> "+(str(hx(tr)).upper())[2:-1]);print(message);feed+=message+'\n'
						message=(tabs+'* '+"Original titlekey is -> "+(str(hx(titlekey)).upper())[2:-1]);print(message);feed+=message+'\n'
						tcheck=(str(hx(titlekey)).upper())[2:-1]
						if tcheck == '00000000000000000000000000000000':
							message=(tabs+'* '+"WARNING: sum(titlekey)=0 -> S.C. conversion may be incorrect and come from nsx file");print(message);feed+=message+'\n'
					return True,orig_header,self._path,feed,orkg,tr,titlekey,self.header.getgamecard()
				else:
					message=(indent+self._path+arrow+'was MODIFIED');print(message);feed+=message+'\n'
					message=(tabs+'* '+"NOT VERIFIABLE COULD'VE BEEN TAMPERED WITH");print(message);feed+=message+'\n'
					return False,False,self._path,feed,False,False,False,self.header.getgamecard()
			else:
				# No titlerights: try keygen/gamecard-flag restoration.
				# NOTE(review): when targetkg is set but content is not
				# META, 'ver' is never assigned before being used below —
				# possible NameError; confirm against upstream callers.
				if targetkg != False:
					if self.header.contentType == Type.Content.META:
						ver,kgchg,cardchange,headdata,orkg=self.verify_cnmt_withkg(targetkg)
				else:
					ver,kgchg,cardchange,headdata,orkg=self.restorehead_ntr()
				if headdata != False:
					orig_header=orig_header[0x00:0x200]+headdata+orig_header[0x400:]
					#print(hx(orig_header))
					orig_header=hcrypto.encrypt(orig_header)
				else:
					orig_header = False
				if ver == True:
					OGGC=self.header.getgamecard()
					chkkg=currkg
					if targetkg == False:
						message=(indent+printname+arrow+'is PROPER');print(message);feed+=message+'\n'
					else:
						if progress != False:
							message=(tabs+'* '+"ORIGIN OF CNMT FILE IS PROPER");bar.write(message);feed+=message+'\n'
						else:
							message=(tabs+'* '+"ORIGIN OF CNMT FILE IS PROPER");print(message);feed+=message+'\n'
					if kgchg == True:
						if progress != False:
							message=(tabs+'* '+"KEYGENERATION WAS CHANGED FROM "+str(orkg)+" TO "+str(currkg));bar.write(message);feed+=message+'\n'
						else:
							message=(tabs+'* '+"KEYGENERATION WAS CHANGED FROM "+str(orkg)+" TO "+str(currkg));print(message);feed+=message+'\n'
						chkkg=orkg
					if cardchange == True:
						if self.header.getgamecard() != 0:
							OGGC=0
							if progress != False:
								message=(tabs+'* '+"ISGAMECARD WAS CHANGED FROM 0 TO 1");bar.write(message);feed+=message+'\n'
							else:
								message=(tabs+'* '+"ISGAMECARD WAS CHANGED FROM 0 TO 1");print(message);feed+=message+'\n'
						else:
							OGGC=1
							if progress != False:
								message=(tabs+'* '+"ISGAMECARD WAS CHANGED FROM 1 TO 0");bar.write(message);feed+=message+'\n'
							else:
								message=(tabs+'* '+"ISGAMECARD WAS CHANGED FROM 1 TO 0");print(message);feed+=message+'\n'
					return True,orig_header,self._path,feed,chkkg,False,False,OGGC
				else:
					if self.header.contentType == Type.Content.META:
						# CNMT: fall back to checking its internal hashes.
						if targetkg == False:
							if progress != False:
								pass
							else:
								message=(indent+printname+arrow+'needs RSV check');print(message);feed+=message+'\n'
								message=(tabs+'* '+"CHECKING INTERNAL HASHES");print(message);feed+=message+'\n'
						if progress == False:
							feed,correct=self.check_cnmt_hashes(feed)
							if correct == True:
								if progress != False:
									message=(tabs+'* '+"INTERNAL HASHES MATCH");bar.write(message);feed+=message+'\n'
								else:
									message=(tabs+'* '+"INTERNAL HASHES MATCH");print(message);feed+=message+'\n'
							if correct == False:
								if progress != False:
									message=(tabs+'* '+"INTERNAL HASH MISSMATCH");bar.write(message);feed+=message+'\n'
									message=(tabs+'* '+"BAD CNMT FILE!!!");bar.write(message);feed+=message+'\n'
								else:
									message=(tabs+'* '+"INTERNAL HASH MISSMATCH");print(message);feed+=message+'\n'
									message=(tabs+'* '+"BAD CNMT FILE!!!");print(message);feed+=message+'\n'
								return 'BADCNMT',False,self._path,feed,False,False,False,self.header.getgamecard()
					else:
						if endcheck == False:
							pass
						elif endcheck == True:
							if progress != False:
								message=(indent+printname+arrow+'was MODIFIED');bar.write(message);feed+=message+'\n'
								message=(tabs+'* '+"NOT VERIFIABLE!!!");bar.write(message);feed+=message+'\n'
							else:
								message=(indent+printname+arrow+'was MODIFIED');print(message);feed+=message+'\n'
								message=(tabs+'* '+"NOT VERIFIABLE!!!");print(message);feed+=message+'\n'
							return False,False,self._path,feed,False,False,False,self.header.getgamecard()
						else:
							if os.path.exists(self._path):
								printname=str(os.path.basename(os.path.abspath(self._path)))
							else:
								printname=str(self._path)
							if progress != False:
								message=(indent+printname+arrow+'was MODIFIED');bar.write(message);feed+=message+'\n'
								message=(tabs+'* '+"NOT VERIFIABLE!!!");bar.write(message);feed+=message+'\n'
							else:
								message=(indent+printname+arrow+'was MODIFIED');print(message);feed+=message+'\n'
								message=(tabs+'* '+"NOT VERIFIABLE!!!");print(message);feed+=message+'\n'
							return False,False,self._path,feed,False,False,False,self.header.getgamecard()
					if progress != False:
						message=(indent+printname+arrow+'was MODIFIED');bar.write(message);feed+=message+'\n'
						message=(tabs+'* '+"NOT VERIFIABLE!!!");bar.write(message);feed+=message+'\n'
					else:
						message=(indent+printname+arrow+'was MODIFIED');print(message);feed+=message+'\n'
						message=(tabs+'* '+"NOT VERIFIABLE!!!");print(message);feed+=message+'\n'
					return False,False,self._path,feed,False,False,False,self.header.getgamecard()
	def verify_cnmt_withkg(self,targetkg):
		"""Check whether this CNMT header verifies after restoring a
		candidate original keygeneration and/or isgamecard flag.

		Returns ``(verified, kg_changed, card_changed, headdata, keygen)``.
		First only the isgamecard byte (offset 0x04 of the signed region)
		is toggled; if that fails, the key block is re-encrypted under
		*targetkg* and both gamecard variants are tried.
		"""
		sign1 = self.header.signature1
		self.header.seek(0x200)
		headdata = self.header.read(0x200)
		card='01';card=bytes.fromhex(card)
		eshop='00';eshop=bytes.fromhex(eshop)
		#print(hx(headdata))
		#Hex.dump(headdata)
		if self.header.getSigKeyGen() == 0:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_00, e=RSA_PUBLIC_EXPONENT)
		else:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_01, e=RSA_PUBLIC_EXPONENT)
		rsapss = PKCS1_PSS.new(pubkey)
		digest = SHA256.new(headdata)
		verification=rsapss.verify(digest, sign1)
		# Effective keygeneration is the larger of the two crypto fields.
		crypto1=self.header.getCryptoType()
		crypto2=self.header.getCryptoType2()
		if crypto2>crypto1:
			masterKeyRev=crypto2
		if crypto2<=crypto1:
			masterKeyRev=crypto1
		# Step 1: try flipping only the isgamecard byte at offset 0x04.
		if self.header.getgamecard() == 0:
			headdata2 = b''
			headdata2=headdata[0x00:0x04]+eshop+headdata[0x05:]
			digest2 = SHA256.new(headdata2)
			verification2=rsapss.verify(digest2, sign1)
			if verification2 == True:
				return True,False,False,headdata2,masterKeyRev
			else:
				headdata2 = b''
				headdata2=headdata[0x00:0x04]+card+headdata[0x05:]
				digest2 = SHA256.new(headdata2)
				verification2=rsapss.verify(digest2, sign1)
				if verification2 == True:
					return True,False,True,headdata2,masterKeyRev
		else:
			headdata2 = b''
			headdata2=headdata[0x00:0x04]+eshop+headdata[0x05:]
			digest2 = SHA256.new(headdata2)
			verification2=rsapss.verify(digest2, sign1)
			if verification2 == True:
				return True,False,True,headdata2,masterKeyRev
			else:
				headdata2 = b''
				headdata2=headdata[0x00:0x04]+card+headdata[0x05:]
				digest2 = SHA256.new(headdata2)
				verification2=rsapss.verify(digest2, sign1)
				if verification2 == True:
					return True,False,False,headdata2,masterKeyRev
		# Step 2: re-encrypt the key area under the target keygeneration.
		key = Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex)
		crypto = aes128.AESECB(key)
		encKeyBlock = self.header.getKeyBlock()
		decKeyBlock = crypto.decrypt(encKeyBlock)
		newMasterKeyRev=targetkg
		key = Keys.keyAreaKey(Keys.getMasterKeyIndex(newMasterKeyRev), self.header.keyIndex)
		crypto = aes128.AESECB(key)
		reEncKeyBlock = crypto.encrypt(decKeyBlock)
		# Encode the crypto-type byte pair for the target keygeneration
		# (kg < 3 goes in cryptoType, kg >= 3 in cryptoType2).
		i=targetkg
		cr2=str(hex(i))[2:]
		if i<3:
			crypto1='0'+str(i)
			crypto2='00'
		else:
			cr2=str(hex(i))[2:]
			if len(str(cr2))==1:
				crypto1='02'
				crypto2='0'+str(cr2)
			elif len(str(cr2))==2:
				crypto1='02'
				crypto2=str(cr2)
		crypto1=bytes.fromhex(crypto1);crypto2=bytes.fromhex(crypto2)
		headdata1 = b''
		headdata1=headdata[0x00:0x04]+card+headdata[0x05:0x06]+crypto1+headdata[0x07:0x20]+crypto2+headdata[0x21:0x100]+reEncKeyBlock+headdata[0x140:]
		#print(hx(headdata1))
		headdata2 = b''
		headdata2=headdata[0x00:0x04]+eshop+headdata1[0x05:]
		#print(hx(headdata2))
		digest1 = SHA256.new(headdata1)
		digest2 = SHA256.new(headdata2)
		verification1=rsapss.verify(digest1, sign1)
		verification2=rsapss.verify(digest2, sign1)
		if verification1 == True:
			if self.header.getgamecard() == 0:
				return True,True,True,headdata1,newMasterKeyRev
			else:
				return True,True,False,headdata1,newMasterKeyRev
		if verification2 == True:
			if self.header.getgamecard() == 0:
				return True,True,False,headdata2,newMasterKeyRev
			else:
				return True,True,True,headdata2,newMasterKeyRev
		return False,False,False,False,masterKeyRev
	def check_cnmt_hashes(self,feed):
		"""Check the CNMT NCA's internal hashes and report into *feed*.

		Returns ``(feed, correct)`` where *correct* is False if any of the
		PFS0, hash-table or header-block checks mismatched.

		NOTE(review): each pair below compares two calls to the SAME
		function (e.g. calc_pfs0_hash() twice), so a mismatch can only
		occur if the routine is non-deterministic.  It looks like one side
		was meant to read the STORED hash instead — confirm against the
		hash getters defined elsewhere in this class.
		"""
		sha=self.calc_pfs0_hash()
		sha_get=self.calc_pfs0_hash()
		correct=True
		if sha == sha_get:
			message=(tabs+' '+" - PFS0 hash is CORRECT");print(message);feed+=message+'\n'
			#print(hx(sha))
			#print(hx(sha_get))
		else:
			message=(tabs+' '+" - PFS0 hash is INCORRECT!!!");print(message);feed+=message+'\n'
			#print(hx(sha))
			#print(hx(sha_get))
			correct=False
		sha2=self.calc_htable_hash()
		sha2_get=self.calc_htable_hash()
		if sha2 == sha2_get:
			message=(tabs+' '+" - HASH TABLE hash is CORRECT");print(message);feed+=message+'\n'
			#print(hx(sha2))
			#print(hx(sha2_get))
		else:
			message=(tabs+' '+" - HASH TABLE hash is INCORRECT!!!");print(message);feed+=message+'\n'
			#print(hx(sha2))
			#print(hx(sha2_get))
			correct=False
		sha3=self.header.calculate_hblock_hash()
		sha3_get=self.header.calculate_hblock_hash()
		if sha3 == sha3_get:
			message=(tabs+' '+" - HEADER BLOCK hash is CORRECT");print(message);feed+=message+'\n'
			#print(hx(sha3))
			#print(hx(sha3_get))
		else:
			message=(tabs+' '+" - HEADER BLOCK hash is INCORRECT!!!");print(message);feed+=message+'\n'
			#print(hx(sha3))
			#print(hx(sha3_get))
			correct=False
		return feed,correct
	def restorehead_tr(self):
		"""Try to restore a removed titlerights id into the header so that
		the original signature verifies.

		Builds a candidate signed region with the reconstructed rights id
		at offset 0x30 and a zeroed key block, then tries: (1) the plain
		rights id, (2) the '8'-variant id (update/DLC style), and finally
		(3) every keygeneration from 11 down to 0 with both id variants.

		Returns ``(ok, kg_changed, titlekey, rightsId, headdata, keygen)``;
		on failure ``(False, False, False, False, False, keygen)``.
		"""
		sign1 = self.header.signature1
		crypto1=self.header.getCryptoType()
		crypto2=self.header.getCryptoType2()
		nca_id=self.header.titleId
		cr2=str(hex(crypto2))[2:]
		trstart=str(nca_id)
		# Program/manual content: rights id uses the base title id (xxx000).
		if str(self.header.contentType) == 'Content.PROGRAM' or str(self.header.contentType) == 'Content.MANUAL':
			trstart=nca_id[:-3]+'000'
		if len(str(cr2))==1:
			tr=trstart+'000000000000000'+str(cr2)
		elif len(str(cr2))==2:
			tr=trstart+'00000000000000'+str(cr2)
		tr=bytes.fromhex(tr)
		if crypto1>crypto2:
			masterKeyRev = crypto1
		else:
			masterKeyRev = crypto2
		encKeyBlock = self.header.getKeyBlock()
		key = Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex)
		crypto = aes128.AESECB(key)
		# Only the first 16 bytes (the title key) are decrypted here.
		decKeyBlock = crypto.decrypt(encKeyBlock[:16])
		titleKeyEnc = Keys.encryptTitleKey(decKeyBlock, Keys.getMasterKeyIndex(masterKeyRev))
		currdecKeyBlock=decKeyBlock
		# Rebuild the signed region: rights id spliced in at 0x30,
		# key block (4 x 0x10 at 0x300 relative) zeroed out.
		self.header.seek(0x200)
		headdata = b''
		headdata += self.header.read(0x30)
		headdata += tr
		self.header.read(0x10)
		headdata += self.header.read(0x100-0x40)
		headdata += bytes.fromhex('00'*0x10*4)
		self.header.seek(0x340)
		headdata += self.header.read(0x100-0x40)
		#print(hx(headdata))
		#Hex.dump(headdata)
		if self.header.getSigKeyGen() == 0:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_00, e=RSA_PUBLIC_EXPONENT)
		else:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_01, e=RSA_PUBLIC_EXPONENT)
		rsapss = PKCS1_PSS.new(pubkey)
		digest = SHA256.new(headdata)
		verification=rsapss.verify(digest, sign1)
		if verification == True:
			return True,False,titleKeyEnc,tr,headdata,masterKeyRev
		else:
			# Variant 2: '8'-prefixed rights id (e.g. update content).
			cr2=str(hex(crypto2))[2:]
			if len(str(cr2))==1:
				tr2=nca_id[:-3]+'800000000000000000'+str(cr2)
			elif len(str(cr2))==2:
				tr2=nca_id[:-3]+'80000000000000000'+str(cr2)
			tr2=bytes.fromhex(tr2)
			headdata2 = b''
			headdata2=headdata[0x00:0x30]+tr2+headdata[0x40:]
			digest2 = SHA256.new(headdata2)
			verification=rsapss.verify(digest2, sign1)
			if verification == True:
				return True,False,titleKeyEnc,tr2,headdata2,masterKeyRev
			else:
				# Variant 3: brute-force keygenerations 11..0.
				nlist=list()
				for i in range(12):
					nlist.append(i)
				nlist=sorted(nlist, key=int, reverse=True)
				for i in nlist:
					if i<3:
						crypto1='0'+str(i)
						crypto2='00'
					else:
						cr2=str(hex(i))[2:]
						if len(str(cr2))==1:
							crypto1='02'
							crypto2='0'+str(cr2)
						elif len(str(cr2))==2:
							crypto1='02'
							crypto2=str(cr2)
					masterKeyRev = i
					encKeyBlock = self.header.getKeyBlock()
					key = Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex)
					crypto = aes128.AESECB(key)
					decKeyBlock = currdecKeyBlock
					titleKeyEnc = Keys.encryptTitleKey(decKeyBlock, Keys.getMasterKeyIndex(masterKeyRev))
					trstart=str(nca_id)
					if str(self.header.contentType) == 'Content.PROGRAM':
						trstart=nca_id[:-3]+'000'
					# NOTE(review): only the second hex digit of crypto2 is
					# appended here, which looks wrong for keygens >= 0x10.
					tr1=trstart+'000000000000000'+str(crypto2[1])
					tr2=nca_id[:-3]+'800000000000000000'+str(crypto2[1])
					tr1=bytes.fromhex(tr1);tr2=bytes.fromhex(tr2)
					crypto1=bytes.fromhex(crypto1);crypto2=bytes.fromhex(crypto2)
					headdata1 = b''
					headdata1=headdata[0x00:0x06]+crypto1+headdata[0x07:0x20]+crypto2+headdata[0x21:0x30]+tr1+headdata[0x40:]
					headdata2 = b''
					headdata2=headdata1[0x00:0x30]+tr2+headdata[0x40:]
					digest1 = SHA256.new(headdata1)
					digest2 = SHA256.new(headdata2)
					verification1=rsapss.verify(digest1, sign1)
					verification2=rsapss.verify(digest2, sign1)
					if verification1 == True:
						return True,True,titleKeyEnc,tr1,headdata1,masterKeyRev
					if verification2 == True:
						return True,True,titleKeyEnc,tr2,headdata2,masterKeyRev
		return False,False,False,False,False,masterKeyRev
	def restorehead_ntr(self):
		"""Try to restore a header (no titlerights case) so that the
		original signature verifies.

		First toggles only the isgamecard byte (offset 0x04 of the signed
		region); if that fails and the content is not META, re-encrypts
		the key area under every keygeneration from 11 down to 0, trying
		both gamecard variants each time.

		Returns ``(ok, kg_changed, card_changed, headdata, keygen)``;
		on failure ``(False, False, False, False, keygen)``.
		"""
		sign1 = self.header.signature1
		self.header.seek(0x200)
		headdata = self.header.read(0x200)
		card='01';card=bytes.fromhex(card)
		eshop='00';eshop=bytes.fromhex(eshop)
		#print(hx(headdata))
		#Hex.dump(headdata)
		if self.header.getSigKeyGen() == 0:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_00, e=RSA_PUBLIC_EXPONENT)
		else:
			pubkey=RSA.RsaKey(n=nca_header_fixed_key_modulus_01, e=RSA_PUBLIC_EXPONENT)
		rsapss = PKCS1_PSS.new(pubkey)
		digest = SHA256.new(headdata)
		verification=rsapss.verify(digest, sign1)
		# Effective keygeneration is the larger of the two crypto fields.
		crypto1=self.header.getCryptoType()
		crypto2=self.header.getCryptoType2()
		if crypto2>crypto1:
			masterKeyRev=crypto2
		if crypto2<=crypto1:
			masterKeyRev=crypto1
		# Step 1: try flipping only the isgamecard byte.
		if self.header.getgamecard() == 0:
			headdata2 = b''
			headdata2=headdata[0x00:0x04]+eshop+headdata[0x05:]
			digest2 = SHA256.new(headdata2)
			verification2=rsapss.verify(digest2, sign1)
			if verification2 == True:
				return True,False,False,headdata2,masterKeyRev
			else:
				headdata2 = b''
				headdata2=headdata[0x00:0x04]+card+headdata[0x05:]
				digest2 = SHA256.new(headdata2)
				verification2=rsapss.verify(digest2, sign1)
				if verification2 == True:
					return True,False,True,headdata2,masterKeyRev
		else:
			headdata2 = b''
			headdata2=headdata[0x00:0x04]+eshop+headdata[0x05:]
			digest2 = SHA256.new(headdata2)
			verification2=rsapss.verify(digest2, sign1)
			if verification2 == True:
				return True,False,True,headdata2,masterKeyRev
			else:
				headdata2 = b''
				headdata2=headdata[0x00:0x04]+card+headdata[0x05:]
				digest2 = SHA256.new(headdata2)
				verification2=rsapss.verify(digest2, sign1)
				if verification2 == True:
					return True,False,False,headdata2,masterKeyRev
				else:
					pass
		# META content is handled by verify_cnmt_withkg instead.
		if self.header.contentType == Type.Content.META:
			return False,False,False,False,masterKeyRev
		key = Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), self.header.keyIndex)
		crypto = aes128.AESECB(key)
		encKeyBlock = self.header.getKeyBlock()
		decKeyBlock = crypto.decrypt(encKeyBlock)
		# Step 2: brute-force keygenerations 11..0.
		nlist=list()
		for i in range(12):
			nlist.append(i)
		nlist=sorted(nlist, key=int, reverse=True)
		for i in nlist:
			if i<3:
				crypto1='0'+str(i)
				crypto2='00'
			else:
				cr2=str(hex(i))[2:]
				if len(str(cr2))==1:
					crypto1='02'
					crypto2='0'+str(cr2)
				elif len(str(cr2))==2:
					crypto1='02'
					crypto2=str(cr2)
			newMasterKeyRev=i
			key = Keys.keyAreaKey(Keys.getMasterKeyIndex(newMasterKeyRev), self.header.keyIndex)
			crypto = aes128.AESECB(key)
			reEncKeyBlock = crypto.encrypt(decKeyBlock)
			crypto1=bytes.fromhex(crypto1);crypto2=bytes.fromhex(crypto2)
			headdata1 = b''
			headdata1=headdata[0x00:0x04]+card+headdata[0x05:0x06]+crypto1+headdata[0x07:0x20]+crypto2+headdata[0x21:0x100]+reEncKeyBlock+headdata[0x140:]
			#print(hx(headdata1))
			headdata2 = b''
			headdata2=headdata[0x00:0x04]+eshop+headdata1[0x05:]
			#print(hx(headdata2))
			if self.header.contentType != Type.Content.META:
				digest1 = SHA256.new(headdata1)
				digest2 = SHA256.new(headdata2)
				verification1=rsapss.verify(digest1, sign1)
				verification2=rsapss.verify(digest2, sign1)
				if verification1 == True:
					if self.header.getgamecard() == 0:
						return True,True,True,headdata1,newMasterKeyRev
					else:
						return True,True,False,headdata1,newMasterKeyRev
				if verification2 == True:
					if self.header.getgamecard() == 0:
						return True,True,False,headdata2,newMasterKeyRev
					else:
						return True,True,True,headdata2,newMasterKeyRev
		return False,False,False,False,masterKeyRev
def ret_nacp(self):
if str(self.header.contentType) == 'Content.CONTROL':
offset=self.get_nacp_offset()
for f in self:
f.seek(offset)
return f.read()
#READ NACP FILE WITHOUT EXTRACTION
def read_nacp(self,feed=''):
if str(self.header.contentType) == 'Content.CONTROL':
offset=self.get_nacp_offset()
for f in self:
f.seek(offset)
nacp = Nacp()
feed=nacp.par_getNameandPub(f.read(0x300*15),feed)
message='...............................';print(message);feed+=message+'\n'
message='NACP FLAGS';print(message);feed+=message+'\n'
message='...............................';print(message);feed+=message+'\n'
f.seek(offset+0x3000)
feed=nacp.par_Isbn(f.read(0x24),feed)
f.seek(offset+0x3025)
feed=nacp.par_getStartupUserAccount(f.readInt8('little'),feed)
feed=nacp.par_getUserAccountSwitchLock(f.readInt8('little'),feed)
feed=nacp.par_getAddOnContentRegistrationType(f.readInt8('little'),feed)
feed=nacp.par_getContentType(f.readInt8('little'),feed)
f.seek(offset+0x3030)
feed=nacp.par_getParentalControl(f.readInt8('little'),feed)
f.seek(offset+0x3034)
feed=nacp.par_getScreenshot(f.readInt8('little'),feed)
feed=nacp.par_getVideoCapture(f.readInt8('little'),feed)
feed=nacp.par_dataLossConfirmation(f.readInt8('little'),feed)
feed=nacp.par_getPlayLogPolicy(f.readInt8('little'),feed)
f.seek(offset+0x3038)
feed=nacp.par_getPresenceGroupId(f.readInt64('little'),feed)
f.seek(offset+0x3040)
listages=list()
message='...............................';print(message);feed+=message+'\n'
message='Age Ratings';print(message);feed+=message+'\n'
message='...............................';print(message);feed+=message+'\n'
for i in range(12):
feed=nacp.par_getRatingAge(f.readInt8('little'),i,feed)
f.seek(offset+0x3060)
message='...............................';print(message);feed+=message+'\n'
message='NACP ATTRIBUTES';print(message);feed+=message+'\n'
message='...............................';print(message);feed+=message+'\n'
try:
feed=nacp.par_getDisplayVersion(f.read(0xF),feed)
f.seek(offset+0x3070)
feed=nacp.par_getAddOnContentBaseId(f.readInt64('little'),feed)
f.seek(offset+0x3078)
feed=nacp.par_getSaveDataOwnerId(f.readInt64('little'),feed)
f.seek(offset+0x3080)
feed=nacp.par_getUserAccountSaveDataSize(f.readInt64('little'),feed)
f.seek(offset+0x3088)
feed=nacp.par_getUserAccountSaveDataJournalSize(f.readInt64('little'),feed)
f.seek(offset+0x3090)
feed=nacp.par_getDeviceSaveDataSize(f.readInt64('little'),feed)
f.seek(offset+0x3098)
feed=nacp.par_getDeviceSaveDataJournalSize(f.readInt64('little'),feed)
f.seek(offset+0x30A0)
feed=nacp.par_getBcatDeliveryCacheStorageSize(f.readInt64('little'),feed)
f.seek(offset+0x30A8)
feed=nacp.par_getApplicationErrorCodeCategory(f.read(0x07),feed)
f.seek(offset+0x30B0)
feed=nacp.par_getLocalCommunicationId(f.readInt64('little'),feed)
f.seek(offset+0x30F0)
feed=nacp.par_getLogoType(f.readInt8('little'),feed)
feed=nacp.par_getLogoHandling(f.readInt8('little'),feed)
feed=nacp.par_getRuntimeAddOnContentInstall(f.readInt8('little'),feed)
f.seek(offset+0x30F6)
feed=nacp.par_getCrashReport(f.readInt8('little'),feed)
feed=nacp.par_getHdcp(f.readInt8('little'),feed)
feed=nacp.par_getSeedForPseudoDeviceId(f.readInt64('little'),feed)
f.seek(offset+0x3100)
feed=nacp.par_getBcatPassphrase(f.read(0x40),feed)
f.seek(offset+0x3148)
feed=nacp.par_UserAccountSaveDataSizeMax(f.readInt64('little'),feed)
f.seek(offset+0x3150)
feed=nacp.par_UserAccountSaveDataJournalSizeMax(f.readInt64('little'),feed)
f.seek(offset+0x3158)
feed=nacp.par_getDeviceSaveDataSizeMax(f.readInt64('little'),feed)
f.seek(offset+0x3160)
feed=nacp.par_getDeviceSaveDataJournalSizeMax(f.readInt64('little'),feed)
f.seek(offset+0x3168)
feed=nacp.par_getTemporaryStorageSize(f.readInt64('little'),feed)
feed=nacp.par_getCacheStorageSize(f.readInt64('little'),feed)
f.seek(offset+0x3178)
feed=nacp.par_getCacheStorageJournalSize(f.readInt64('little'),feed)
feed=nacp.par_getCacheStorageDataAndJournalSizeMax(f.readInt64('little'),feed)
f.seek(offset+0x3188)
feed=nacp.par_getCacheStorageIndexMax(f.readInt64('little'),feed)
f.seek(offset+0x3188)
feed=nacp.par_getPlayLogQueryableApplicationId(f.readInt64('little'),feed)
f.seek(offset+0x3210)
feed=nacp.par_getPlayLogQueryCapability(f.readInt8('little'),feed)
feed=nacp.par_getRepair(f.readInt8('little'),feed)
feed=nacp.par_getProgramIndex(f.readInt8('little'),feed)
feed=nacp.par_getRequiredNetworkServiceLicenseOnLaunch(f.readInt8('little'),feed)
except:continue
return feed
#PATCH NETWORK LICENSE
def patch_netlicense(self):
if str(self.header.contentType) == 'Content.CONTROL':
offset=self.get_nacp_offset()
for f in self:
nacp = Nacp()
print('CURRENT VALUES:')
f.seek(offset+0x3025)
startup_acc=f.readInt8('little')
netlicense=f.readInt8('little')
f.seek(offset+0x3213)
netlicense=f.readInt8('little')
nacp.par_getStartupUserAccount(startup_acc)
nacp.par_getRequiredNetworkServiceLicenseOnLaunch(netlicense)
if netlicense==0 and startup_acc<2:
print(str(self._path)+" doesn't need a linked account")
return False
else:
print(' -> '+str(self._path)+" needs a linked account. Patching...")
print('NEW VALUES:')
if startup_acc==2:
f.seek(offset+0x3025)
f.writeInt8(1)
if netlicense==1:
f.seek(offset+0x3213)
f.writeInt8(0)
f.seek(offset+0x3025)
nacp.par_getStartupUserAccount(f.readInt8('little'))
f.seek(offset+0x3213)
nacp.par_getRequiredNetworkServiceLicenseOnLaunch(f.readInt8('little'))
return True
def redo_lvhashes(self):
if str(self.header.contentType) == 'Content.CONTROL':
#offset=self.get_nacp_offset()
for fs in self.sectionFilesystems:
pfs0=fs
sectionHeaderBlock = fs.buffer
inmemoryfile = io.BytesIO(sectionHeaderBlock)
self.seek(fs.offset)
pfs0Offset=fs.offset
leveldata,hash,masterhashsize,superhashoffset=self.prIVFCData(inmemoryfile)
return leveldata,superhashoffset
def set_lv_hash(self,j,leveldata):
if str(self.header.contentType) == 'Content.CONTROL':
for fs in self.sectionFilesystems:
levelnumb=leveldata[j][0]
lvoffs=leveldata[j][1]
levelsize=leveldata[j][2]
lvbsize=leveldata[j][3]
fs.seek(lvoffs)
data = fs.read(lvbsize)
newhash=(str(sha256(data).hexdigest()))
fs.seek((j-1)*0x4000)
hashlv=(hx(fs.read(32))).decode('utf-8')
if str(hashlv) != str(newhash):
fs.seek((j-1)*0x4000)
sha=bytes.fromhex(newhash)
fs.write(sha)
print('Old lv'+str(j)+' hash: '+str(hashlv))
print('New lv'+str(j)+' hash: '+str(newhash))
def set_lvsuperhash(self,leveldata,superhashoffset):
if str(self.header.contentType) == 'Content.CONTROL':
for fs in self.sectionFilesystems:
memlv0 = io.BytesIO(fs.read((leveldata[0][2])*(len(leveldata)-1)))
memlv0.seek(0);newlvdata=memlv0.read()
memlv0.seek(0);ndat=memlv0.read(0x4000)
superhash=(str(sha256(ndat).hexdigest()))
self.header.seek(0x400+superhashoffset)
test = hx((self.header.read(32))).decode('utf-8');print('-OLD IVFC_Hash: '+str(test))
self.header.seek(0x400+superhashoffset)
self.header.write(bytes.fromhex(superhash))
self.header.seek(0x400+superhashoffset)
newivfchash = hx((self.header.read(32))).decode('utf-8');print('-NEW IVFC_Hash: '+str(newivfchash))
fs.seek(0)
fs.write(newlvdata)
def prIVFCData(self,inmemoryfile):
#Hex.dump(inmemoryfile.read())
inmemoryfile.seek(0)
version=int.from_bytes(inmemoryfile.read(0x2), byteorder='little', signed=True);print('-Version: '+str(version))
fstype=int.from_bytes(inmemoryfile.read(0x1), byteorder='little', signed=True);print('-FileSystemtype: '+str(fstype))
hashtype=int.from_bytes(inmemoryfile.read(0x1), byteorder='little', signed=True);print('-HashType: '+str(hashtype))
enctype=int.from_bytes(inmemoryfile.read(0x1), byteorder='little', signed=True);print('-EncType: '+str(enctype))
nulldata=inmemoryfile.read(0x3)
magic=inmemoryfile.read(0x4);print('-Magic: '+str(magic))
magicnumber=int.from_bytes(inmemoryfile.read(0x4), byteorder='little', signed=True);print('-MagicNumber: '+str(magicnumber))
masterhashsize=int.from_bytes(inmemoryfile.read(0x4), byteorder='little', signed=True)*0x200;print('-MasterHashSize: '+str(masterhashsize))
numberLevels=int.from_bytes(inmemoryfile.read(0x4), byteorder='little', signed=True);print('-Number: '+str(numberLevels))
leveldata=list();c=24
for i in range(numberLevels-1):
lvoffs=int.from_bytes(inmemoryfile.read(0x8), byteorder='little', signed=True);print('-level'+str(i)+' offs: '+str(lvoffs))
lvsize=int.from_bytes(inmemoryfile.read(0x8), byteorder='little', signed=True);print('-level'+str(i)+' size: '+str(lvsize))
lvbsize=2**int.from_bytes(inmemoryfile.read(0x4), byteorder='little', signed=True);print('-level'+str(i)+' block size: '+str(lvbsize))
treserved=int.from_bytes(inmemoryfile.read(0x4), byteorder='little', signed=True);print('-level'+str(i)+' Reserved: '+str(treserved))
leveldata.append([i,lvoffs,lvsize,lvbsize])
c=c+24
inmemoryfile.read(32);c=c+32
hash = hx((inmemoryfile.read(32))).decode('utf-8');print('-IVFC_Hash: '+str(hash))
return leveldata,hash,masterhashsize,c
def pr_ivfcsuperhash(self, file = None, mode = 'rb'):
crypto1=self.header.getCryptoType()
crypto2=self.header.getCryptoType2()
if crypto1 == 2:
if crypto1 > crypto2:
masterKeyRev=crypto1
else:
masterKeyRev=crypto2
else:
masterKeyRev=crypto2
decKey = Keys.decryptTitleKey(self.header.titleKeyDec, Keys.getMasterKeyIndex(masterKeyRev))
for f in self.sectionFilesystems:
#print(f.fsType);print(f.cryptoType)
if f.fsType == Type.Fs.ROMFS and f.cryptoType == Type.Crypto.CTR:
ncaHeader = NcaHeader()
self.header.rewind()
ncaHeader = self.header.read(0x400)
#Hex.dump(ncaHeader)
pfs0=f
#Hex.dump(pfs0.read())
sectionHeaderBlock = f.buffer
levelOffset = int.from_bytes(sectionHeaderBlock[0x18:0x20], byteorder='little', signed=False)
levelSize = int.from_bytes(sectionHeaderBlock[0x20:0x28], byteorder='little', signed=False)
pfs0Header = pfs0.read(levelSize)
if sectionHeaderBlock[8:12] == b'IVFC':
data = pfs0Header;
Hex.dump(pfs0Header)
print(str('1: ')+hx(sectionHeaderBlock[0xc8:0xc8+0x20]).decode('utf-8'))
print(str('2: ')+str(sha256(data).hexdigest()))
superhash=str(sha256(data).hexdigest())
return superhash
def verify_hash_nca(self,buffer,origheader,didverify,feed):
verdict=True; basename=str(os.path.basename(os.path.abspath(self._path)))
if feed == False:
feed=''
message='***************';print(message);feed+=message+'\n'
message=('HASH TEST');print(message);feed+=message+'\n'
message='***************';print(message);feed+=message+'\n'
message=(str(self.header.titleId)+' - '+str(self.header.contentType));print(message);feed+=message+'\n'
ncasize=self.header.size
t = tqdm(total=ncasize, unit='B', unit_scale=True, leave=False)
i=0
self.rewind();
rawheader=self.read(0xC00)
self.rewind()
for data in iter(lambda: self.read(int(buffer)), ""):
if i==0:
sha=sha256()
self.seek(0xC00)
sha.update(rawheader)
if origheader != False:
sha0=sha256()
sha0.update(origheader)
i+=1
t.update(len(data))
self.flush()
else:
sha.update(data)
if origheader != False:
sha0.update(data)
t.update(len(data))
self.flush()
if not data:
break
t.close()
sha=sha.hexdigest()
if origheader != False:
sha0=sha0.hexdigest()
message=(' - File name: '+basename);print(message);feed+=message+'\n'
message=(' - SHA256: '+sha);print(message);feed+=message+'\n'
if origheader != False:
message=(' - ORIG_SHA256: '+sha0);print(message);feed+=message+'\n'
if str(basename)[:16] == str(sha)[:16]:
message=(' > FILE IS CORRECT');print(message);feed+=message+'\n'
elif origheader != False:
if str(basename)[:16] == str(sha0)[:16]:
message=(' > FILE IS CORRECT');print(message);feed+=message+'\n'
else:
message=(' > FILE IS CORRUPT');print(message);feed+=message+'\n'
verdict = False
elif self.header.contentType == Type.Content.META and didverify == True:
message=(' > RSV WAS CHANGED');print(message);feed+=message+'\n'
#print(' > CHECKING INTERNAL HASHES')
message=(' * FILE IS CORRECT');print(message);feed+=message+'\n'
else:
message=(' > FILE IS CORRUPT');print(message);feed+=message+'\n'
verdict = False
message=('');print(message);feed+=message+'\n'
if verdict == False:
message=("VERDICT: NCA FILE IS CORRUPT");print(message);feed+=message+'\n'
if verdict == True:
message=('VERDICT: NCA FILE IS CORRECT');print(message);feed+=message+'\n'
return verdict,feed | [
"42461174+julesontheroad@users.noreply.github.com"
] | 42461174+julesontheroad@users.noreply.github.com |
2680ecba6fb8682cdeb0d6871c0bba8c11da6300 | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /day06/函数.py | 3ae295c7e48faa9b12c2b531cf7a0362c7e36bf6 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: Wayne.Chen
'''
函数的定义:
在项目开发的过程中,有些功能,我们会反复的使用,为了方便使用,我们将这些功能封装成一个函数,在需要使用此功能的时候,调用即可。
优点:
1、简化代码结构,增加代码的复用性
2、增加代码的可维护性,若想修改某个bug或者某个功能,找到对应的函数更改即可
'''
'''
语法:
def 函数名(参数列表):
语句块
return 表达式
def:关键字来声明函数
函数名:标识符
参数列表:参数与参数之间使用逗号隔开
: 标识函数语句块的开始
语句块:函数要实现的功能
return:函数的返回值
return的结果可以是数值也可以是表达式
【注意:return可以写也可以不写,具体由函数功能开决定,当return不写的时候,默认return None】
注意:return的时候,函数体已经结束,因此return后边的语句不会执行到
'''
# 最简单的函数:无参无返回值
def myPrint():
for x in range(5):
print("hello, world!!!")
'''
函数的调用:
函数名(参数列表)
参数列表的作用:函数的调用者,给函数传递信息的
实质:实参给形参赋值的过程
'''
myPrint()
'''
需求:1+2+3+……+100
'''
def sum1():
res = 0
for x in range(1, 101):
res += x
return res
print(sum1())
| [
"waynechen1994@163.com"
] | waynechen1994@163.com |
a9b8e43931d529c548c0a4547aff0caa186cbc3a | 8b68fb2eeb5d10082fc2083bc6323aca5b4378b7 | /Server/app/views/__init__.py | c23988213ce9d89d5948846467aa10feceffa455 | [
"MIT"
] | permissive | JoMingyu/BookCheck-Backend | edf7529db95f9183939c0f81ef0ef0906b3c2318 | fbe71a39e385a3c739e7e40ab1153efbe7835576 | refs/heads/master | 2021-09-10T00:07:29.923714 | 2018-03-20T09:55:25 | 2018-03-20T09:55:25 | 113,041,979 | 1 | 0 | MIT | 2017-12-09T13:18:35 | 2017-12-04T12:51:21 | null | UTF-8 | Python | false | false | 783 | py | from flask_restful import Api
from flasgger import Swagger
from app.docs import TEMPLATE
from app.views.user import *
from app.views.library.book import *
from app.views.library.borrow import *
from app.views.library.library import *
class ViewInjector(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
Swagger(app, template=TEMPLATE)
api = Api(app)
api.add_resource(Signup, '/signup')
api.add_resource(AuthCommonUser, '/auth/common')
api.add_resource(AuthAdmin, '/auth/admin')
api.add_resource(Refresh, '/refresh')
api.add_resource(Book, '/book')
api.add_resource(Borrow, '/borrow')
api.add_resource(Library, '/library')
| [
"city7310@naver.com"
] | city7310@naver.com |
c8b65743ca50b6f191972c80546139408638786f | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v5/googleads-py/tests/unit/gapic/googleads.v5/services/test_conversion_action_service.py | e6349cb7efad46563360ae950642334b77708b9a | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,960 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v5.common.types import tag_snippet
from google.ads.googleads.v5.enums.types import attribution_model
from google.ads.googleads.v5.enums.types import conversion_action_category
from google.ads.googleads.v5.enums.types import conversion_action_counting_type
from google.ads.googleads.v5.enums.types import conversion_action_status
from google.ads.googleads.v5.enums.types import conversion_action_type
from google.ads.googleads.v5.enums.types import data_driven_model_status
from google.ads.googleads.v5.enums.types import mobile_app_vendor
from google.ads.googleads.v5.enums.types import tracking_code_page_format
from google.ads.googleads.v5.enums.types import tracking_code_type
from google.ads.googleads.v5.resources.types import conversion_action
from google.ads.googleads.v5.services.services.conversion_action_service import ConversionActionServiceClient
from google.ads.googleads.v5.services.services.conversion_action_service import transports
from google.ads.googleads.v5.services.types import conversion_action_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ConversionActionServiceClient._get_default_mtls_endpoint(None) is None
assert ConversionActionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert ConversionActionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert ConversionActionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert ConversionActionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert ConversionActionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_conversion_action_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = ConversionActionServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_conversion_action_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = ConversionActionServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = ConversionActionServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_conversion_action_service_client_get_transport_class():
transport = ConversionActionServiceClient.get_transport_class()
assert transport == transports.ConversionActionServiceGrpcTransport
transport = ConversionActionServiceClient.get_transport_class("grpc")
assert transport == transports.ConversionActionServiceGrpcTransport
@mock.patch.object(ConversionActionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversionActionServiceClient))
def test_conversion_action_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.ConversionActionServiceClient.get_transport_class') as gtc:
transport = transports.ConversionActionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = ConversionActionServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.ConversionActionServiceClient.get_transport_class') as gtc:
client = ConversionActionServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversionActionServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversionActionServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversionActionServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = ConversionActionServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = ConversionActionServiceClient()
@mock.patch.object(ConversionActionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversionActionServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_conversion_action_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = ConversionActionServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = ConversionActionServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = ConversionActionServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_conversion_action_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversionActionServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_conversion_action(transport: str = 'grpc', request_type=conversion_action_service.GetConversionActionRequest):
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversion_action),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversion_action.ConversionAction(
resource_name='resource_name_value',
id=205,
name='name_value',
status=conversion_action_status.ConversionActionStatusEnum.ConversionActionStatus.UNKNOWN,
type_=conversion_action_type.ConversionActionTypeEnum.ConversionActionType.UNKNOWN,
category=conversion_action_category.ConversionActionCategoryEnum.ConversionActionCategory.UNKNOWN,
owner_customer='owner_customer_value',
include_in_conversions_metric=True,
click_through_lookback_window_days=3602,
view_through_lookback_window_days=3527,
counting_type=conversion_action_counting_type.ConversionActionCountingTypeEnum.ConversionActionCountingType.UNKNOWN,
phone_call_duration_seconds=2856,
app_id='app_id_value',
mobile_app_vendor=mobile_app_vendor.MobileAppVendorEnum.MobileAppVendor.UNKNOWN,
)
response = client.get_conversion_action(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversion_action_service.GetConversionActionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversion_action.ConversionAction)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.name == 'name_value'
assert response.status == conversion_action_status.ConversionActionStatusEnum.ConversionActionStatus.UNKNOWN
assert response.type_ == conversion_action_type.ConversionActionTypeEnum.ConversionActionType.UNKNOWN
assert response.category == conversion_action_category.ConversionActionCategoryEnum.ConversionActionCategory.UNKNOWN
assert response.owner_customer == 'owner_customer_value'
assert response.include_in_conversions_metric is True
assert response.click_through_lookback_window_days == 3602
assert response.view_through_lookback_window_days == 3527
assert response.counting_type == conversion_action_counting_type.ConversionActionCountingTypeEnum.ConversionActionCountingType.UNKNOWN
assert response.phone_call_duration_seconds == 2856
assert response.app_id == 'app_id_value'
assert response.mobile_app_vendor == mobile_app_vendor.MobileAppVendorEnum.MobileAppVendor.UNKNOWN
def test_get_conversion_action_from_dict():
test_get_conversion_action(request_type=dict)
def test_get_conversion_action_field_headers():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversion_action_service.GetConversionActionRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversion_action),
'__call__') as call:
call.return_value = conversion_action.ConversionAction()
client.get_conversion_action(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_conversion_action_flattened():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversion_action),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversion_action.ConversionAction()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_conversion_action(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_conversion_action_flattened_error():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_conversion_action(
conversion_action_service.GetConversionActionRequest(),
resource_name='resource_name_value',
)
def test_mutate_conversion_actions(transport: str = 'grpc', request_type=conversion_action_service.MutateConversionActionsRequest):
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_conversion_actions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversion_action_service.MutateConversionActionsResponse(
)
response = client.mutate_conversion_actions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversion_action_service.MutateConversionActionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversion_action_service.MutateConversionActionsResponse)
def test_mutate_conversion_actions_from_dict():
test_mutate_conversion_actions(request_type=dict)
def test_mutate_conversion_actions_field_headers():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversion_action_service.MutateConversionActionsRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_conversion_actions),
'__call__') as call:
call.return_value = conversion_action_service.MutateConversionActionsResponse()
client.mutate_conversion_actions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_conversion_actions_flattened():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_conversion_actions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversion_action_service.MutateConversionActionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_conversion_actions(
customer_id='customer_id_value',
operations=[conversion_action_service.ConversionActionOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [conversion_action_service.ConversionActionOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_conversion_actions_flattened_error():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_conversion_actions(
conversion_action_service.MutateConversionActionsRequest(),
customer_id='customer_id_value',
operations=[conversion_action_service.ConversionActionOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ConversionActionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversionActionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ConversionActionServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversionActionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.ConversionActionServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.ConversionActionServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_conversion_action_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.ConversionActionServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_conversion_action',
'mutate_conversion_actions',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_conversion_action_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v5.services.services.conversion_action_service.transports.ConversionActionServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversionActionServiceTransport()
adc.assert_called_once()
def test_conversion_action_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ConversionActionServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_conversion_action_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.ConversionActionServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_conversion_action_service_host_no_port():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_conversion_action_service_host_with_port():
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_conversion_action_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.ConversionActionServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.ConversionActionServiceGrpcTransport])
def test_conversion_action_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.ConversionActionServiceGrpcTransport,])
def test_conversion_action_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_conversion_action_path():
customer = "squid"
conversion_action = "clam"
expected = "customers/{customer}/conversionActions/{conversion_action}".format(customer=customer, conversion_action=conversion_action, )
actual = ConversionActionServiceClient.conversion_action_path(customer, conversion_action)
assert expected == actual
def test_parse_conversion_action_path():
expected = {
"customer": "whelk",
"conversion_action": "octopus",
}
path = ConversionActionServiceClient.conversion_action_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_conversion_action_path(path)
assert expected == actual
def test_customer_path():
customer = "oyster"
expected = "customers/{customer}".format(customer=customer, )
actual = ConversionActionServiceClient.customer_path(customer)
assert expected == actual
def test_parse_customer_path():
expected = {
"customer": "nudibranch",
}
path = ConversionActionServiceClient.customer_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_customer_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = ConversionActionServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = ConversionActionServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder, )
actual = ConversionActionServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = ConversionActionServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization, )
actual = ConversionActionServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = ConversionActionServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project, )
actual = ConversionActionServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = ConversionActionServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = ConversionActionServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = ConversionActionServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ConversionActionServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.ConversionActionServiceTransport, '_prep_wrapped_messages') as prep:
client = ConversionActionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.ConversionActionServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = ConversionActionServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6a55346eb4a479e9cfec583f80024042ffc59f42 | e8d5471bd4a47794d66162060343f740e0febca4 | /server/src/uds/core/jobs/__init__.py | a36a52b16819d2d15be2ddb8f0a4383942b120aa | [] | no_license | git38438/openuds | ef939c2196d6877e00e92416609335d57dd1bd55 | 7d66d92f85f01ad1ffd549304672dd31008ecc12 | refs/heads/master | 2020-06-22T14:07:33.227703 | 2019-07-18T11:03:56 | 2019-07-18T11:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
UDS jobs related modules
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
from uds.core.jobs.Job import Job
from uds.core.jobs.DelayedTask import DelayedTask
def factory():
"""
Returns a singleton to a jobs factory
"""
from uds.core.jobs.JobsFactory import JobsFactory
return JobsFactory.factory()
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
5ba702808ada3c9cd1aae54e283990fe3232d401 | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/build/geographic_msgs/catkin_generated/pkg.develspace.context.pc.py | eb6b51c3a7776951fe341bcd8fb40bc99747dfcb | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/rishabh/catkin_ws/devel/.private/geographic_msgs/include".split(';') if "/home/rishabh/catkin_ws/devel/.private/geographic_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;geometry_msgs;uuid_msgs;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "geographic_msgs"
PROJECT_SPACE_DIR = "/home/rishabh/catkin_ws/devel/.private/geographic_msgs"
PROJECT_VERSION = "0.5.5"
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
fce89a0524d511165b52d279a419cfc86ad5c216 | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdccs/models/DescribeAlarm.py | 99d362a38e2debde465939d0cae11309536c8aa1 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeAlarm(object):
def __init__(self, alarmId=None, name=None, idc=None, idcName=None, resourceType=None, resourceId=None, resourceName=None, metric=None, metricName=None, period=None, statisticMethod=None, operator=None, threshold=None, times=None, noticePeriod=None, status=None):
"""
:param alarmId: (Optional) 规则实例ID
:param name: (Optional) 规则名称
:param idc: (Optional) 机房英文标识
:param idcName: (Optional) 机房名称
:param resourceType: (Optional) 资源类型 bandwidth:带宽
:param resourceId: (Optional) 资源ID
:param resourceName: (Optional) 资源名称
:param metric: (Optional) 监控项英文标识
:param metricName: (Optional) 监控项名称
:param period: (Optional) 统计周期(单位:分钟)
:param statisticMethod: (Optional) 统计方法:平均值=avg、最大值=max、最小值=min
:param operator: (Optional) 计算方式 >=、>、<、<=、=、!=
:param threshold: (Optional) 阈值
:param times: (Optional) 连续多少次后报警
:param noticePeriod: (Optional) 通知周期 单位:小时
:param status: (Optional) 规则状态 disabled:禁用 enabled:启用
"""
self.alarmId = alarmId
self.name = name
self.idc = idc
self.idcName = idcName
self.resourceType = resourceType
self.resourceId = resourceId
self.resourceName = resourceName
self.metric = metric
self.metricName = metricName
self.period = period
self.statisticMethod = statisticMethod
self.operator = operator
self.threshold = threshold
self.times = times
self.noticePeriod = noticePeriod
self.status = status
| [
"a1090693441@163.com"
] | a1090693441@163.com |
6f1428522b9f54c1de6924fc916004f5dffe6e3f | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/enums/types/keyword_plan_network.py | 5495e33f567dabc034343b0886868ec8d424bf1e | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.enums',
marshal='google.ads.googleads.v5',
manifest={
'KeywordPlanNetworkEnum',
},
)
class KeywordPlanNetworkEnum(proto.Message):
r"""Container for enumeration of keyword plan forecastable
network types.
"""
class KeywordPlanNetwork(proto.Enum):
r"""Enumerates keyword plan forecastable network types."""
UNSPECIFIED = 0
UNKNOWN = 1
GOOGLE_SEARCH = 2
GOOGLE_SEARCH_AND_PARTNERS = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
0c0025b66e84787c79a192d52506191ad76db35a | ad372f7753c70e3997d035097ee03f740a5fb068 | /trace_challenge/admin.py | f7a8a726f1094caab378bda4e82f5e630813155d | [] | no_license | Insper/servidor-de-desafios | a5f09fe9368887b06b98800f2bb8f35ff13f80a9 | 9875e9b9248c14237161ca73983595f7d929e963 | refs/heads/master | 2022-12-14T17:28:42.963112 | 2022-09-12T19:18:36 | 2022-09-12T19:18:36 | 167,026,050 | 3 | 42 | null | 2022-12-08T07:36:47 | 2019-01-22T16:19:46 | Python | UTF-8 | Python | false | false | 267 | py | from django.contrib import admin
from trace_challenge.models import TraceChallenge, TraceStateSubmission, UserTraceChallengeInteraction
admin.site.register(TraceChallenge)
admin.site.register(TraceStateSubmission)
admin.site.register(UserTraceChallengeInteraction)
| [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
46f0fbed19875c78825487e43ce6a3c1936dc4b7 | c2ddadd3cf14dfc56ec1e4b8d52b8c1a23ea1e61 | /index/models.py | 0e00f72ef97a2fe38f9fa7e54588261bdab74a29 | [] | no_license | ashimmitra/Varsity-Final-Project-by-Django | 09f944a9f1aae7be4212f0c09cfe5d2c596bd848 | 6274d966f09d9ead2344542b56576a77e0758d5a | refs/heads/main | 2023-07-17T15:50:04.414565 | 2021-08-20T12:31:24 | 2021-08-20T12:31:24 | 342,790,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from django.db import models
class AboutSite(models.Model):
title=models.CharField(max_length=150,blank=False)
description=models.TextField(max_length=800,blank=False)
def __str__(self):
return self.title
class Slider(models.Model):
title=models.CharField(max_length=150,blank=False)
description=models.TextField(max_length=800,blank=False)
image=models.ImageField(upload_to='slider/',blank=False)
def __str__(self):
return self.title
class Contact(models.Model):
name=models.CharField(max_length=100,blank=False)
email=models.EmailField(max_length=100,blank=False)
subject=models.CharField(max_length=200,blank=False)
message=models.TextField(max_length=800,blank=False)
def __str__(self):
return self.subject
class Books(models.Model):
title = models.CharField(max_length=50)
image=models.ImageField(upload_to='static/img/',blank=False)
pdf = models.FileField(upload_to='static/media')
def __str__(self):
return self.title
class Notice(models.Model):
title = models.CharField(max_length=50)
image=models.ImageField(upload_to='static/img/',blank=False)
description=models.TextField(max_length=800,blank=False)
def __str__(self):
return self.title | [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
4a3abee9f4cc03df78a630501c76c024d5238aad | 1369717e645fe33ac2eec0d485dde79289a34ace | /server/src/uds/transports/HTML5RDP/HTML5RDP.py | ee2a318e870bea73304b54a0918cb915ddb13ae0 | [] | no_license | johnp/openuds | 0b9c1fa3823854deb1ccd53c51fc27447e28593a | b8e1e1773559b91875ccf096d37418e3b70675d0 | refs/heads/master | 2021-01-22T12:54:04.092698 | 2017-04-19T06:23:44 | 2017-04-19T06:23:44 | 68,590,975 | 0 | 0 | null | 2016-09-19T09:34:33 | 2016-09-19T09:34:32 | null | UTF-8 | Python | false | false | 8,959 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
@author: Adolfo Gómez, dkmaster at dkmon dot com
'''
from __future__ import unicode_literals
from django.utils.translation import ugettext_noop as _
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from uds.core.ui.UserInterface import gui
from uds.core.transports.BaseTransport import Transport
from uds.core.transports.BaseTransport import TUNNELED_GROUP
from uds.core.transports import protocols
from uds.core.util import OsDetector
from uds.models import TicketStore
import logging
logger = logging.getLogger(__name__)
READY_CACHE_TIMEOUT = 30
class HTML5RDPTransport(Transport):
'''
Provides access via RDP to service.
This transport can use an domain. If username processed by authenticator contains '@', it will split it and left-@-part will be username, and right password
'''
typeName = _('HTML5 RDP Transport')
typeType = 'HTML5RDPTransport'
typeDescription = _('RDP Transport using HTML5 client')
iconFile = 'html5.png'
ownLink = True
supportedOss = OsDetector.allOss
protocol = protocols.RDP
group = TUNNELED_GROUP
guacamoleServer = gui.TextField(label=_('Tunnel Server'), order=1, tooltip=_('Host of the tunnel server (use http/https & port if needed) as accesible from users'), defvalue='https://', length=64, required=True, tab=gui.TUNNEL_TAB)
useEmptyCreds = gui.CheckBoxField(label=_('Empty creds'), order=2, tooltip=_('If checked, the credentials used to connect will be emtpy'), tab=gui.CREDENTIALS_TAB)
fixedName = gui.TextField(label=_('Username'), order=3, tooltip=_('If not empty, this username will be always used as credential'), tab=gui.CREDENTIALS_TAB)
fixedPassword = gui.PasswordField(label=_('Password'), order=4, tooltip=_('If not empty, this password will be always used as credential'), tab=gui.CREDENTIALS_TAB)
withoutDomain = gui.CheckBoxField(label=_('Without Domain'), order=5, tooltip=_('If checked, the domain part will always be emptied (to connecto to xrdp for example is needed)'), tab=gui.CREDENTIALS_TAB)
fixedDomain = gui.TextField(label=_('Domain'), order=6, tooltip=_('If not empty, this domain will be always used as credential (used as DOMAIN\\user)'), tab=gui.CREDENTIALS_TAB)
wallpaper = gui.CheckBoxField(label=_('Show wallpaper'), order=20, tooltip=_('If checked, the wallpaper and themes will be shown on machine (better user experience, more bandwidth)'), tab=gui.PARAMETERS_TAB)
desktopComp = gui.CheckBoxField(label=_('Allow Desk.Comp.'), order=22, tooltip=_('If checked, desktop composition will be allowed'), tab=gui.PARAMETERS_TAB)
smooth = gui.CheckBoxField(label=_('Font Smoothing'), order=23, tooltip=_('If checked, fonts smoothing will be allowed (windows clients only)'), tab=gui.PARAMETERS_TAB)
enableAudio = gui.CheckBoxField(label=_('Enable Audio'), order=7, tooltip=_('If checked, the audio will be redirected to client (if client browser supports it)'), tab=gui.PARAMETERS_TAB)
enablePrinting = gui.CheckBoxField(label=_('Enable Printing'), order=8, tooltip=_('If checked, the printing will be redirected to client (if client browser supports it)'), tab=gui.PARAMETERS_TAB)
serverLayout = gui.ChoiceField(order=9,
label=_('Layout'),
tooltip=_('Keyboards Layout of server'),
required=True,
values=[ gui.choiceItem('-', 'default'),
gui.choiceItem('en-us-qwerty', _('English (US) keyboard')),
gui.choiceItem('de-de-qwertz', _('German keyboard (qwertz)')),
gui.choiceItem('fr-fr-azerty', _('French keyboard (azerty)')),
gui.choiceItem('it-it-qwerty', _('Italian keyboard')),
gui.choiceItem('sv-se-qwerty', _('Swedish keyboard')),
gui.choiceItem('failsafe', _('Failsafe')),
],
defvalue='-',
tab=gui.PARAMETERS_TAB
)
def initialize(self, values):
if values is None:
return
self.guacamoleServer.value = self.guacamoleServer.value.strip()
if self.guacamoleServer.value[0:4] != 'http':
raise Transport.ValidationException(_('The server must be http or https'))
# Same check as normal RDP transport
def isAvailableFor(self, userService, ip):
'''
Checks if the transport is available for the requested destination ip
Override this in yours transports
'''
logger.debug('Checking availability for {0}'.format(ip))
ready = self.cache.get(ip)
if ready is None:
# Check again for readyness
if self.testServer(userService, ip, '3389') is True:
self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
return True
else:
self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
return ready == 'Y'
def processedUser(self, userService, userName):
v = self.processUserPassword(userService, userName, '')
return v['username']
def processUserPassword(self, service, user, password):
username = user.getUsernameForAuth()
if self.fixedName.value != '':
username = self.fixedName.value
proc = username.split('@')
if len(proc) > 1:
domain = proc[1]
else:
domain = ''
username = proc[0]
if self.fixedPassword.value != '':
password = self.fixedPassword.value
if self.fixedDomain.value != '':
domain = self.fixedDomain.value
if self.useEmptyCreds.isTrue():
username, password, domain = '', '', ''
if self.withoutDomain.isTrue():
domain = ''
if '.' in domain: # Dotter domain form
username = username + '@' + domain
domain = ''
# Fix username/password acording to os manager
username, password = service.processUserPassword(username, password)
return {'protocol': self.protocol, 'username': username, 'password': password, 'domain': domain}
def getLink(self, userService, transport, ip, os, user, password, request):
ci = self.processUserPassword(userService, user, password)
username, password, domain = ci['username'], ci['password'], ci['domain']
if domain != '':
username = domain + '\\' + username
# Build params dict
params = {
'protocol': 'rdp',
'hostname': ip,
'username': username,
'password': password,
'ignore-cert': 'true'
}
if self.serverLayout.value != '-':
params['server-layout'] = self.serverLayout.value
if self.enableAudio.isTrue() is False:
params['disable-audio'] = 'true'
if self.enablePrinting.isTrue() is True:
params['enable-printing'] = 'true'
if self.wallpaper.isTrue() is True:
params['enable-wallpaper'] = 'true'
if self.desktopComp.isTrue() is True:
params['enable-desktop-composition'] = 'true'
if self.smooth.isTrue() is True:
params['enable-font-smoothing'] = 'true'
logger.debug('RDP Params: {0}'.format(params))
ticket = TicketStore.create(params)
return HttpResponseRedirect("{}/transport/?{}&{}".format(self.guacamoleServer.value, ticket, request.build_absolute_uri(reverse('Index'))))
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
29e7404835a884d2b0f0858126f3b4f4788249c0 | 1996b0e9252362d91c809c4e7f95e7075f13816b | /test/test_tfidf.py | bfa334d962d609402c8a406da134ce352d1a2522 | [] | no_license | scheeloong/MovieQA_benchmark | 846c63d8a361bd4f630e31e3120772bc4965f999 | fb3e1b8fe9ddc6084b3c93206e7dfaed5ad42149 | refs/heads/master | 2020-05-30T08:41:44.764034 | 2017-05-03T05:30:19 | 2017-05-03T05:30:19 | 70,122,910 | 1 | 1 | null | 2016-10-06T04:33:34 | 2016-10-06T04:33:34 | null | UTF-8 | Python | false | false | 796 | py | """
Test for Term Frequency Inverse Document Frequency
TODO(scheeloong): Implement test
"""
import unittest
# Import the package (which is made by having a file called __init__.py
import src
import MovieQA
# Import the module tfidf.py
from src import tfidf
# From tfidf.py, import the class TfIdf
from src.tfidf import TfIdf
class TestTfIdf(unittest.TestCase):
def test_nothing(self):
self.assertEqual('lala', 'lala')
dL = MovieQA.DataLoader()
# Use training data for training
[story, qa] = dL.get_story_qa_data('train', 'plot')
# Use test data for testing
[story2, qa2] = dL.get_story_qa_data('test', 'plot')
# TODO: Uncomment this once done questions
tfidf_ = TfIdf(story)
if __name__ == '__main__':
unittest.main()
| [
"scheeloong@gmail.com"
] | scheeloong@gmail.com |
920cebd7f494e6045568714339e6673f1f1fd4c5 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/largestTime_20200903104758.py | ba9e15dd86034374abcafc517e4c6befc8e91e2c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10 | py | def Time() | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
4c65e49cfbd1385e17184100805b7ad76143d4e5 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/client/postprocessing/effects/distortiontransfer.py | fba89b3f70bcc804cbf5dba56a150d3693d47d9d | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,958 | py | # 2015.11.10 21:32:32 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/PostProcessing/Effects/DistortionTransfer.py
from PostProcessing.RenderTargets import *
from PostProcessing import Effect
from PostProcessing.Phases import *
from PostProcessing.FilterKernels import *
from PostProcessing import getEffect
from PostProcessing.Effects.Properties import *
from PostProcessing.Effects import implementEffectFactory
import Math
alpha = MaterialFloatProperty('Fisheye', -1, 'alpha', 1, primary=True)
scale = MaterialFloatProperty('Fisheye', -1, 'scale', 1)
tile = MaterialFloatProperty('Fisheye', -1, 'tile', 1)
@implementEffectFactory('Distortion transfer', 'Redraw the scene, using a normal map to distort the image.', 'system/maps/post_processing/hexagonal_norms.bmp')
def distortionTransfer(distortionTexture):
"""This method creates and returns a post-process effect that redraws
the screen, using a normal map to distort the image. Use this for
a fish-eye effect, full-screen shimmer/distort etc.
"""
backBufferCopy = rt('PostProcessing/backBufferCopy')
c = buildBackBufferCopyPhase(backBufferCopy)
r = buildPhase(backBufferCopy.texture, None, 'shaders/post_processing/legacy/transfer_distort.fx', straightTransfer4Tap, BW_BLEND_SRCALPHA, BW_BLEND_INVSRCALPHA)
r.name = 'distort and transfer'
r.material.distortionTexture = distortionTexture
e = Effect()
e.name = 'Distort and Transfer'
e.phases = [c, r]
return e
@implementEffectFactory('Fisheye', 'Distortion transfer that defaults to a fisheye lens effect.')
def fisheye():
e = distortionTransfer('system/maps/post_processing/fisheye_norms.bmp')
e.name = 'Fisheye'
return e
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\client\postprocessing\effects\distortiontransfer.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:32:32 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
281dece3986aa0e47c5f5d16610e3fa153dcd132 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Spacy/source2.7/thinc/extra/wrappers.py | 7e51741312749e804eaf2d8a6a8439286f23079c | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,346 | py | from ..compat import BytesIO
import contextlib

from ..neural._classes.model import Model

try:
    import torch.autograd
    import torch
except ImportError:
    pass
class PytorchWrapper(Model):
    '''Wrap a PyTorch model, so that it has the same API as Thinc models.
    To optimize the model, you'll need to create a PyTorch optimizer and call
    optimizer.step() after each batch --- see examples/wrap_pytorch.py
    '''
    def __init__(self, model):
        Model.__init__(self)
        self._model = model  # the wrapped torch.nn.Module

    def begin_update(self, x_data, drop=0.):
        '''Return the output of the wrapped PyTorch model for the given input,
        along with a callback to handle the backward pass.
        '''
        x_var = torch.autograd.Variable(torch.Tensor(x_data),
                                        requires_grad=True)
        # Make prediction
        y_var = self._model(x_var)
        def backward_pytorch(dy_data, sgd=None):
            dy_var = torch.autograd.Variable(torch.Tensor(dy_data))
            torch.autograd.backward((y_var,), grad_variables=(dy_var,))
            dX = self.ops.asarray(x_var.grad.data)
            if sgd is not None:
                # Bug fix: the original called an undefined global
                # `optimizer` here; presumably the optimizer is passed in
                # as `sgd` -- TODO confirm against callers.
                sgd.step()
            return dX
        # Bug fix: the original returned the undefined name `backward`
        # (NameError) instead of the callback defined above.
        return self.ops.asarray(y_var.data), backward_pytorch

    def to_disk(self, path):
        """Save the wrapped model's parameters to `path`."""
        # TODO: Untested
        torch.save(self._model.state_dict(), str(path))

    def from_disk(self, path):
        """Load the wrapped model's parameters from `path`."""
        # TODO: Untested
        self._model.load_state_dict(torch.load(path))

    def to_bytes(self):
        """Serialize the wrapped model's parameters to a bytestring."""
        # TODO: Untested
        filelike = BytesIO()
        torch.save(self._model.state_dict(), filelike)
        # Bug fix: the original called filelike.read() without rewinding,
        # so the stream position was at EOF and it always returned b''.
        return filelike.getvalue()

    def from_bytes(self, data):
        """Load the wrapped model's parameters from a bytestring."""
        # TODO: Untested
        filelike = BytesIO(data)
        self._model.load_state_dict(torch.load(filelike))

    def to_gpu(self, device_num):
        # TODO: Implement
        raise NotImplementedError

    def to_cpu(self):
        # TODO: Implement
        raise NotImplementedError

    def resize_output(self):
        # TODO: Required for spaCy add label
        raise NotImplementedError

    def resize_input(self):
        # TODO: Not required yet, but should be useful
        raise NotImplementedError

    @contextlib.contextmanager
    def use_params(self, params): # pragma: no cover
        # TODO: Implement
        raise NotImplementedError
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
d0a5805d8de348ebd9b1de3b91221773c58040fc | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Machine Learning Scientist with Python/19. Image Processing with Keras in Python/03. Going Deeper/04. Write your own pooling operation.py | af1617492f97a83b0f689958bdbfb890957a90cf | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | '''
Write your own pooling operation
As we have seen before, CNNs can have a lot of parameters. Pooling layers are often added between the convolutional layers of a neural network to summarize their outputs in a condensed manner, and reduce the number of parameters in the next layer in the network. This can help us if we want to train the network more rapidly, or if we don't have enough data to learn a very large number of parameters.
A pooling layer can be described as a particular kind of convolution. For every window in the input it finds the maximal pixel value and passes only this pixel through. In this exercise, you will write your own max pooling operation, based on the code that you previously used to write a two-dimensional convolution operation.
Instructions
100 XP
Index into the input array (im) and select the right window.
Find the maximum in this window.
Allocate this into the right entry in the output array (result).
'''
SOLUTION
# Result placeholder
result = np.zeros((im.shape[0]//2, im.shape[1]//2))
# Pooling operation
for ii in range(result.shape[0]):
for jj in range(result.shape[1]):
result[ii, jj] = np.max(im[ii*2:ii*2+2, jj*2:jj*2+2]) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
09ea2d36a061bd2ef4ac973b25a693b6625b6703 | df0062217e45a1fe9d9af83ba1768aab385d2c28 | /proboscis/decorators.py | 54f5264568242dca31520f87584c5c0fcbfd74c5 | [
"Apache-2.0"
] | permissive | rassilon/python-proboscis | 678b20a149a22b036d2fb3044a53a9a1a02cedc7 | 214c1c317c6575ecc1b3ccb2dc60303d57fbc417 | refs/heads/master | 2020-12-24T16:58:46.572787 | 2012-07-09T22:37:32 | 2012-07-09T22:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,536 | py | # Copyright (c) 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Decorators useful to the tests."""
from functools import wraps
from proboscis.asserts import assert_raises_instance
from proboscis import compatability
from proboscis.core import TestRegistry
DEFAULT_REGISTRY = TestRegistry()
def expect_exception(exception_type):
"""Decorates a test method to show it expects an exception to be raised."""
def return_method(method):
@wraps(method)
def new_method(*args, **kwargs):
assert_raises_instance(exception_type, method, *args, **kwargs)
return new_method
return return_method
class TimeoutError(RuntimeError):
"""Thrown when a method has exceeded the time allowed."""
pass
def time_out(time):
"""Raises TimeoutError if the decorated method does not finish in time."""
if not compatability.supports_time_out():
raise ImportError("time_out not supported for this version of Python.")
import signal
def cb_timeout(signum, frame):
raise TimeoutError("Time out after waiting " + str(time) + " seconds.")
def return_method(func):
"""Turns function into decorated function."""
@wraps(func)
def new_method(*kargs, **kwargs):
previous_handler = signal.signal(signal.SIGALRM, cb_timeout)
try:
signal.alarm(time)
return func(*kargs, **kwargs)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, previous_handler)
return new_method
return return_method
def register(**kwargs):
"""Registers a test in proboscis's default registry.
:param home: The target class or function.
This also allows all of the parameters used by the @test decorator.
This function works differently than a decorator as it allows the class or
function which is being registered to appear in the same call as all of the
options.
Its designed to make it easier to register class or functions with
Proboscis after they're defined.
"""
DEFAULT_REGISTRY.register(**kwargs)
def test(home=None, **kwargs):
"""Decorates a test class or function to cause Proboscis to run it.
The behavior differs depending the target:
- If put on a stand-alone function, the function will run by itself.
- If put on a class inheriting unittest.TestCase, then the class will
run just like a normal unittest class by using the method names and
instantiate a new instance of the class for each test method.
- If the class does not inherit from unittest.TestCase, the class will
be instantiated once and this instance will be passed to each method
decorated with @test (this increases encapsulation over using class
fields as the instance can not be accessed outside of its methods).
Note that due to how decorators work its impossible to know if a
function is or is not part of a class; thus if a class method is
decorated with test but its class is not then
ProboscisTestMethodNotDecorated will be raised.
:param groups: A list of strings representing the groups this test method
or class belongs to. By default this is an empty list.
:param depends_on: A list of test functions or classes which must run
before this test. By default this is an empty list.
:param depends_on_groups: A list of strings each naming a group that must
run before this test. By default this is an empty
list.
:param enabled: By default, true. If set to false this test will not run.
:param always_run: If true this test will run even if the tests listed in
depends_on or depends_on_groups have failed.
"""
if home:
return DEFAULT_REGISTRY.register(home, **kwargs)
else:
def cb_method(home_2):
return DEFAULT_REGISTRY.register(home_2, **kwargs)
return cb_method
def before_class(home=None, **kwargs):
"""Like @test but indicates this should run before other class methods.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_before_class':True})
return test(home=home, **kwargs)
def after_class(home=None, **kwargs):
"""Like @test but indicates this should run after other class methods.
This will run even if methods inside the class fail.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_after_class':True})
return test(home=home, **kwargs)
def factory(func=None, **kwargs):
"""Decorates a function which returns new instances of Test classes."""
if func:
return DEFAULT_REGISTRY.register_factory(func)
else:
raise ValueError("Arguments not supported on factories.")
| [
"tim.simpson@rackspace.com"
] | tim.simpson@rackspace.com |
abe20bfb2a6b3bcbe0ba10177cc733b42e8086ec | b1303152c3977a22ff9a0192c0c32310e65a6d77 | /python/109.convert-sorted-list-to-binary-search-tree.py | e02cf628fa34d2da85972e66a21ada048e2fdcaf | [
"Apache-2.0"
] | permissive | stavanmehta/leetcode | 1b8da1c2bfacaa76ddfb96b8dbce03bf08c54c27 | 1224e43ce29430c840e65daae3b343182e24709c | refs/heads/master | 2021-07-15T16:02:16.107962 | 2021-06-24T05:39:14 | 2021-06-24T05:39:14 | 201,658,706 | 0 | 0 | Apache-2.0 | 2021-06-24T05:39:15 | 2019-08-10T16:59:32 | Java | UTF-8 | Python | false | false | 382 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
| [
"noreply@github.com"
] | stavanmehta.noreply@github.com |
0b125c0bcb4a80ca0f836c735b815a89f509b757 | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/10_Introduction_tensorflow/8_Tensorflow_functions/neg or -.py | 59e33f4ff044010ae0dcca84e5ff111aaca3e10b | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import tensorflow as tf
X = tf.constant([[2., 2.], [-3., 3.]])
# tf.neg(X) is equivalent to - X
a = tf.neg(X)
b = - X
with tf.Session() as sess:
print sess.run(a)
print sess.run(b)
| [
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
aa7a142d7fefe454e13433d58de97643501b3332 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/gigasecond/bf994fcc2d8b4c1c9fdea7adaf87f2e0.py | 5dcf780303f3a3dbbff6c18d0077b59ff3fd94f6 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 216 | py | from datetime import datetime
from datetime import timedelta
def add_gigasecond(initial_date):
    """Return the moment exactly one gigasecond (10**9 seconds) after
    `initial_date`."""
    return initial_date + timedelta(seconds=10 ** 9)
print add_gigasecond(datetime(2011, 4, 25))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a3871737df16daae3fef7ab62128c7f893347cfb | e75a890b39f046b2a44e3433acabc5dd12be7dbd | /leecode/9.回文数.py | 30763f64ebc4fad122eff3357105915a75a8ac0e | [] | no_license | zzf531/leetcode | 53c82ad96fef66ab666b658c1a60b9f81646c72a | cdb22e44c9fac2bc06a840bf7433aeb9be9ae2b2 | refs/heads/master | 2020-08-03T03:47:16.895530 | 2020-03-30T02:48:20 | 2020-03-30T02:48:20 | 211,615,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | class Solution:
def isPalindrome(self, x: int) -> bool:
if x < 0:return False
s = str(x)
s2 = s[::-1]
i = int(s2)
if i == x and x == 0:
return True
else:
return False
a = Solution()
print(a.isPalindrome(0))
| [
"2315519934@qq.com"
] | 2315519934@qq.com |
5a9f6e7d85368a6da5243971ae8f5e41576d135a | 7f114a1fb511b816c116d5b9e67cb998e3e23956 | /Pys109.py | 9d9c6b230aae65bab5143dcba55983a5a1aedd99 | [] | no_license | Bharanij27/bharanirep | 90ac34eb28deaa7ec96d042de456de71b96866d7 | 982133a7939c889d433c178a601441fa087293d9 | refs/heads/master | 2021-08-07T20:22:36.244395 | 2020-06-05T04:58:10 | 2020-06-05T04:58:10 | 186,580,768 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | a,b,c=map(int,input().split())
x=a*b
y=x%c
print(y)
| [
"noreply@github.com"
] | Bharanij27.noreply@github.com |
88c4363bf66d4e01bd9cd255ee394b2bf14d66e5 | 9c5e09b4f048a13961c0f4a1370a7bf01a421d92 | /gym/core.py | 1c8769a6c487c9206f83f1b37080df79121a2e02 | [
"MIT"
] | permissive | StanfordVL/Gym | daa8c780f5ace3e33c3bf0f7109f40a0a820d59e | 5e14d19e57d8ba318b97a5edda0ab2ea591dea08 | refs/heads/master | 2023-02-03T02:44:40.185713 | 2020-12-17T14:10:16 | 2020-12-17T14:10:16 | 280,579,514 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 10,310 | py | from ...Gym import gym
from . import error
from .utils import closer
env_closer = closer.Closer()
class Env(object):
r"""The main OpenAI Gym class. It encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
step
reset
render
close
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
The methods are accessed publicly as "step", "reset", etc.. The
non-underscored versions are wrapper methods to which we may add
functionality over time.
"""
# Set this in SOME subclasses
metadata = {'render.modes': []}
reward_range = (-float('inf'), float('inf'))
spec = None
# Set these in ALL subclasses
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self):
"""Resets the state of the environment and returns an initial observation.
Returns:
observation (object): the initial observation.
"""
raise NotImplementedError
def render(self, mode='human'):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
Example:
class MyEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def render(self, mode='human'):
if mode == 'rgb_array':
return np.array(...) # return RGB frame suitable for video
elif mode == 'human':
... # pop up a window and render
else:
super(MyEnv, self).render(mode=mode) # just raise an exception
"""
raise NotImplementedError
def close(self):
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
pass
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
return
@property
def unwrapped(self):
"""Completely unwrap this env.
Returns:
gym.Env: The base non-wrapped gym.Env instance
"""
return self
def __str__(self):
if self.spec is None:
return '<{} instance>'.format(type(self).__name__)
else:
return '<{}<{}>>'.format(type(self).__name__, self.spec.id)
def __enter__(self):
"""Support with-statement for the environment. """
return self
def __exit__(self, *args):
"""Support with-statement for the environment. """
self.close()
# propagate exception
return False
class GoalEnv(Env):
"""A goal-based environment. It functions just as any regular OpenAI Gym environment but it
imposes a required structure on the observation_space. More concretely, the observation
space is required to contain at least three elements, namely `observation`, `desired_goal`, and
`achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
`achieved_goal` is the goal that it currently achieved instead. `observation` contains the
actual observations of the environment as per usual.
"""
def reset(self):
# Enforce that each GoalEnv uses a Goal-compatible observation space.
if not isinstance(self.observation_space, gym.spaces.Dict):
raise error.Error('GoalEnv requires an observation space of type gym.spaces.Dict')
for key in ['observation', 'achieved_goal', 'desired_goal']:
if key not in self.observation_space.spaces:
raise error.Error('GoalEnv requires the "{}" key to be part of the observation dictionary.'.format(key))
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the step reward. This externalizes the reward function and makes
it dependent on an a desired goal and the one that was achieved. If you wish to include
additional rewards that are independent of the goal, you can include the necessary values
to derive it in info and compute it accordingly.
Args:
achieved_goal (object): the goal that was achieved during execution
desired_goal (object): the desired goal that we asked the agent to attempt to achieve
info (dict): an info dictionary with additional information
Returns:
float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
goal. Note that the following should always hold true:
ob, reward, done, info = env.step()
assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)
"""
raise NotImplementedError
class Wrapper(Env):
r"""Wraps the environment to allow a modular transformation.
This class is the base class for all wrappers. The subclass could override
some methods to change the behavior of the original environment without touching the
original code.
.. note::
Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.env, name)
@property
def spec(self):
return self.env.spec
@classmethod
def class_name(cls):
return cls.__name__
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def render(self, mode='human', **kwargs):
return self.env.render(mode, **kwargs)
def close(self):
return self.env.close()
def seed(self, seed=None):
return self.env.seed(seed)
def compute_reward(self, achieved_goal, desired_goal, info):
return self.env.compute_reward(achieved_goal, desired_goal, info)
def __str__(self):
return '<{}{}>'.format(type(self).__name__, self.env)
def __repr__(self):
return str(self)
@property
def unwrapped(self):
return self.env.unwrapped
class ObservationWrapper(Wrapper):
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
return self.observation(observation)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
def observation(self, observation):
raise NotImplementedError
class RewardWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
def reward(self, reward):
raise NotImplementedError
class ActionWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(self.action(action))
def action(self, action):
raise NotImplementedError
def reverse_action(self, action):
raise NotImplementedError
| [
"shawn@DNa1c068f.SUNet"
] | shawn@DNa1c068f.SUNet |
f53ada130a5b5651bd8b1089ba2582f5cb6eb12b | 5651e0d643e13d9f309e5ce5272a393d570e451f | /sla_cli/src/db/schema.py | d4561cfb06839df25e468b777c1b7f5a973a02ba | [
"MIT",
"CC-BY-4.0"
] | permissive | rdoreilly/SLA-CLI | a722ba5cf435399215c0368cf26a44a8f5c16957 | c92ca8a6e57eb51bf9c9433013ce16d443f8d152 | refs/heads/main | 2023-04-03T23:26:17.741218 | 2021-04-15T11:07:42 | 2021-04-15T11:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,381 | py | """
Author: David Walshe
Date: 07 April 2021
"""
import logging
from typing import Dict, List, Union
import json
import attr
from attr.validators import instance_of
from colorama import Fore
from sla_cli.src.common.path import Path
logger = logging.getLogger(__name__)
@attr.s
class Schema:
pass
@attr.s
class Info(Schema):
"""
Maps the meta information of a dataset object.
"""
availability: str = attr.ib(validator=instance_of(str))
capture_method: str = attr.ib(validator=instance_of(str))
size: float = attr.ib(validator=instance_of(float), converter=lambda size: round(float(size), 2))
references: Union[List[str]] = attr.ib(validator=instance_of(list))
download: Union[List[str], None] = attr.ib(default=[""], converter=lambda config: [] if config is None else config)
def __getitem__(self, item):
"""Allows for [] indexing."""
return self.__getattribute__(item)
def __str__(self):
indent = "\n - "
return f" Availability: {Fore.LIGHTGREEN_EX if self.availability.lower() == 'public' else Fore.LIGHTRED_EX}{self.availability}{Fore.RESET}\n" \
f" Capture method: {Fore.LIGHTCYAN_EX if self.capture_method.lower() == 'dermoscopy' else Fore.LIGHTYELLOW_EX}{self.capture_method}{Fore.RESET}\n" \
f" Size: {'--' if self.size < 0 else round(self.size, 2)} MB\n" \
f" References:\n" \
f" - {indent.join(self.references)}\n" \
f" Data source URL:\n" \
f" - {indent.join(self.download)}"
@attr.s
class Dataset(Schema):
"""
Maps to an individual dataset.
"""
info: Info = attr.ib(validator=instance_of(Info), converter=lambda config: Info(**config))
labels: Dict[str, int] = attr.ib(validator=instance_of(dict))
@attr.s
class Datasets(Schema):
"""
Maps to the available dataset statistics in the db file.
"""
atlas_of_dermoscopy: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
bcn_20000: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
bcn_2020_challenge: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
brisbane_isic_challenge_2020: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermofit: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermoscopedia_cc_by: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermis: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermquest: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
ham10000: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_challenge_mskcc_contribution: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_vienna_part_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_vienna_part_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
jid_editorial_images_2018: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mclass_d: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mclass_nd: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mednode: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_3: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_4: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_5: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
pad_ufes_20: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
ph2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
sonic: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
sydney_mia_smdc_2020_isic_challenge_contribution: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
uda_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
uda_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
@property
def as_dict(self):
"""Returns all scalar and collect objects for this class that are Dataset objects."""
return {key: value for key, value in self.__dict__.items() if isinstance(value, Dataset)}
@property
def labels(self):
"""Retrieves all the label entries for dataset objects."""
return {key: value.labels for key, value in self.as_dict.items()}
@property
def info(self):
"""Retrieves all the info entries for the dataset objects."""
return {key: value.info for key, value in self.as_dict.items()}
@property
def names(self):
"""Returns a list of all dataset names."""
return list(self.as_dict.keys())
def __getitem__(self, item) -> Dataset:
"""Allows [] indexing of attributes."""
return self.__getattribute__(item)
@attr.s
class DB(Schema):
"""
Maps to the db.json file.
"""
datasets: Datasets = attr.ib(validator=instance_of(Datasets), converter=lambda config: Datasets(**config))
abbrev: Dict[str, str] = attr.ib(validator=instance_of(dict))
@staticmethod
def get_db():
"""
Factory method to return an instance of the DB object.
:return: A instance of DB.
"""
with open(Path.db()) as fh:
db = json.load(fh)
return DB(**db)
| [
"david.walshe93@gmail.com"
] | david.walshe93@gmail.com |
39196050d48bc0215006c07b5fad2ebb8ef47221 | 59dd5ca4d22fc8b377b89977d68fa3c812e37d7b | /tests/case07_change_type/models_pre.py | 46645ed4624a371229a9d57aa0175129349d5946 | [] | no_license | buriy/deseb2 | 24b42996f3c503a87ba7f5d8f9abcfa09a293a5d | 26d5934ca1481a54a3e901b75f693869dcd0cb64 | refs/heads/master | 2021-01-01T15:30:48.017759 | 2008-10-13T06:13:14 | 2008-10-13T06:13:14 | 223,088 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from django.db import models
import deseb
class Poll(models.Model):
"""this model originally had fields named pub_date and the_author. you can use either a str
or a tuple for the aka value. (tuples are used if you have changed its name more than once)"""
question = models.CharField(max_length=200, default='test')
pub_date = models.DateTimeField('date published', aka=('pub_date', 'publish_date'))
the_author = models.CharField(max_length=200, aka='the_author')
if deseb.version == 'trunk':
rank = models.FloatField(default=1)
else:
rank = models.FloatField(max_digits=5, decimal_places=2, default=3)
def __str__(self):
return self.question
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice = models.CharField(max_length=200)
votes = models.IntegerField(aka='votes')
def __str__(self):
return self.choice
| [
"burchik@gmail.com"
] | burchik@gmail.com |
d273506c35ba2c968db21c8939dac046e9ae9e6c | 1e2aed03b02817811809c5d60c0cc65c253be59a | /211-etl.py | 97bd37b9da8139f4b24c7a69c5c59110431c0110 | [] | no_license | WPRDC/211-etl | 8a9a7cdf697bfbf37d97e85f6119e317040e9e31 | 387f91baf2224833dfd7e38548184075fa01755d | refs/heads/master | 2020-04-16T21:02:12.092268 | 2019-01-16T21:33:49 | 2019-01-16T21:33:49 | 165,912,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,626 | py | import os, sys, csv, json, re, datetime
from marshmallow import fields, pre_load, post_load
from datetime import datetime
from dateutil import parser
from pprint import pprint
sys.path.insert(0, '/Users/drw/WPRDC/etl-dev/wprdc-etl') # A path that we need to import code from
import pipeline as pl
from subprocess import call
from pprint import pprint
import time
from collections import OrderedDict
from parameters.local_parameters import SETTINGS_FILE, DATA_PATH
from util.notify import send_to_slack
from util.ftp import fetch_files
from nonchalance import encrypt_value
def write_to_csv(filename,list_of_dicts,keys):
with open(filename, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys, extrasaction='ignore', lineterminator='\n')
dict_writer.writeheader()
#for d in list_of_dicts:
# dict_writer.writerow({k:v.encode('utf8') if type(v) == type('A') else v for k,v in d.items()})
dict_writer.writerows(list_of_dicts)
def rename_field(dict_i, old_field_name, new_field_name):
if old_field_name in dict_i:
dict_i[new_field_name] = str(dict_i[old_field_name])
del(dict_i[old_field_name])
return new_field_name
else:
#print("Unable to find {} in passed dictionary.".format(old_field_name))
return None
def int_or_none(item):
try:
val = int(item)
except:
return None
def boolify(s):
if s == 'True':
return True
if s == 'False':
return False
raise ValueError("huh?")
def autoconvert(s):
for fn in (boolify, int, float):
try:
return fn(s)
except ValueError:
pass
return s
def handle_date_and_time(d,date_field_name,time_field_name):
hour, minute = d[time_field_name].split(':')
dt = datetime.strptime(d[date_field_name],'%m/%d/%Y').replace(hour = int(hour), minute = int(minute))
del(d[time_field_name])
del(d[date_field_name])
return dt
def form_key(d,rename):
if rename:
return "{} | {}".format(d['contact_record_id'], d['needs_category'])
else:
return "{} | {}".format(d['Contact Record ID'], d['Presenting Needs: Taxonomy Category'])
def get_headers(filename):
with open(filename, 'r') as f:
headers_string = f.readline()
fieldnames = headers_string.split(',')
fieldnames[-1] = fieldnames[-1].rstrip('\n').rstrip('\r')
return(fieldnames)
class Base211Schema(pl.BaseSchema):
created = fields.Date(allow_none=True)
agency_name = fields.String(allow_none=False)
contact_record_id = fields.String(allow_none=False)
client_id = fields.String(allow_none=False)
age = fields.String(allow_none=True)
children_in_home = fields.Boolean(allow_none=True)
contact_medium = fields.String(allow_none=True)
county = fields.String(allow_none=False)
region = fields.String(allow_none=False)
state = fields.String(allow_none=False)
zip_code = fields.String(allow_none=False)
gender = fields.String(allow_none=True)
military_household = fields.String(allow_none=True)
health_insurance_for_household = fields.String(allow_none=True)
main_reason_for_call = fields.String(allow_none=True)
class Meta:
ordered = True
class NeedsSchema(Base211Schema):
needs_category = fields.String(allow_none=True)
code_level_1 = fields.String(allow_none=True)
code_level_1_name = fields.String(allow_none=True)
needs_code = fields.String(allow_none=True)
code_level_2 = fields.String(allow_none=True)
code_level_2_name = fields.String(allow_none=True)
were_needs_unmet = fields.String(allow_none=True)
why_needs_unmet = fields.String(allow_none=True)
class ClientSchema(Base211Schema):
pass
class ContactSchema(Base211Schema):
pass
#amount = fields.Float(dump_to="amount", allow_none=True)
# Never let any of the key fields have None values. It's just asking for
# multiplicity problems on upsert.
# [Note that since this script is taking data from CSV files, there should be no
# columns with None values. It should all be instances like [value], [value],, [value],...
# where the missing value starts as as a zero-length string, which this script
# is then responsible for converting into something more appropriate.
# From the Marshmallow documentation:
# Warning: The invocation order of decorated methods of the same
# type is not guaranteed. If you need to guarantee order of different
# processing steps, you should put them in the same processing method.
#@pre_load
#def plaintiffs_only_and_avoid_null_keys(self, data):
#if data['party_type'] != 'Plaintiff':
# data['party_type'] = '' # If you make these values
# # None instead of empty strings, CKAN somehow
# # interprets each None as a different key value,
# # so multiple rows will be inserted under the same
# # DTD/tax year/lien description even though the
# # property owner has been redacted.
# data['party_name'] = ''
# #data['party_first'] = '' # These need to be referred
# # to by their schema names, not the name that they
# # are ultimately dumped to.
# #data['party_middle'] = ''
# data['plaintiff'] = '' # A key field can not have value
# # None or upserts will work as blind inserts.
#else:
# data['plaintiff'] = str(data['party_name'])
#del data['party_type']
#del data['party_name']
# The stuff below was originally written as a separate function
# called avoid_null_keys, but based on the above warning, it seems
# better to merge it with omit_owners.
# if data['plaintiff'] is None:
# data['plaintiff'] = ''
# print("Missing plaintiff")
# if data['block_lot'] is None:
# data['block_lot'] = ''
# print("Missing block-lot identifier")
# pprint(data)
# if data['pin'] is None:
# data['pin'] = ''
# print("Missing PIN")
# pprint(data)
# if data['case_id'] is None:
# pprint(data)
# raise ValueError("Found a null value for 'case_id'")
# if data['docket_type'] is None:
# data['docket_type'] = ''
# pprint(data)
# print("Found a null value for 'docket_type'")
#@pre_load
#def fix_date_and_bin_age(self, data):
# if data['filing_date']:
# data['filing_date'] = parser.parse(data['filing_date']).date().isoformat()
# else:
# print("No filing date for {} and data['filing_date'] = {}".format(data['dtd'],data['filing_date']))
# data['filing_date'] = None
def bin_age(data):
"""Convert age string to a U.S. Census range of ages. Handle ridiculously large/negative ages and non-integer ages."""
age = data['age']
try:
age = int(age)
if age < 0:
data['age'] = None
elif age < 6:
data['age'] = '0 to 5'
elif age < 18:
data['age'] = '6 to 17'
elif age < 25:
data['age'] = '18 to 24'
elif age < 45:
data['age'] = '25 to 44'
elif age < 65:
data['age'] = '45 to 64'
elif age < 130:
data['age'] = '65 and over'
else: # Observed examples: 220, 889, 15025, 15401, 101214
data['age'] = None
except ValueError:
data['age'] = None
def standardize_county(data):
known_counties = []
data['county'] = data['county'].upper()
typo_fixes = {'15214': None,
#'ALLEGANY': 'ALLEGHENY', The Allegany record is actually from Allegany, New York.
'ALLEGHANY': 'ALLEGHENY',
'ALLEGHEBY': 'ALLEGHENY',
'ALLEGHEN': 'ALLEGHENY',
'ALLEGHEY': 'ALLEGHENY',
'ALLEGHNEY': 'ALLEGHENY',
'ALLEGHNY': 'ALLEGHENY',
'ALLEHGNY': 'ALLEGHENY',
'ARMSTORNG': 'ARMSTRONG'}
if data['region'] == 'Southwest - Pittsburgh': # Let's not make
# any presumptions about data from other regions.
if data['county'] in typo_fixes:
data['county'] = typo_fixes[data['county']]
def standardize_date(data):
if data['created']:
data['created'] = parser.parse(data['created']).date().isoformat()
else:
print("Unable to turn data['created'] = {} into a valid date.".format(data['created']))
data['created'] = None
def remove_bogus_zip_codes(data):
"""The United Way is coding unknown ZIP codes as 12345. These codes should be converted to blanks
before we get the data. This function is just a precautionary backstop."""
if data['zip_code'] == '12345':
data['zip_code'] = None
def convert_na_values(data,filecode):
if filecode == 'needs':
if data['code_level_1_name'] == '#N/A':
data['code_level_1_name'] = None
if data['code_level_2_name'] == '#N/A':
data['code_level_2_name'] = None
def translate_headers(headers, alias_lookup):
return [alias_lookup[header] for header in headers]
def process(raw_file_location,processed_file_location,filecode,schema):
"""Rename fields, bin ages, and hash IDs here."""
headers = get_headers(raw_file_location)
print("headers of {} = {}".format(raw_file_location,headers))
# Option 1: Parse the whole CSV file, modify the field names, reconstruct it, and output it as a new file.
just_change_headers = False
alias_lookup = {'Contact: System Create Date': 'created',
'Contact: Agency Name': 'agency_name',
'Contact Record ID': 'unencrypted_contact_record_id',
'Client ID': 'unencrypted_client_id',
'Age': 'age',
'Are there children in the home?': 'children_in_home',
'Type of Contact': 'contact_medium',
'County': 'county',
'Region': 'region',
'State': 'state',
'Zip': 'zip_code',
'Gender': 'gender',
'Have you or anyone in the household served in the military?': 'military_household',
'SW - HealthCare - Does everyone in your household have health insurance?': 'health_insurance_for_household',
'Does everyone in your household have health insurance?': 'health_insurance_for_household',
'Primary reason for calling': 'main_reason_for_call',
'Presenting Needs: Taxonomy Category': 'needs_category',
'Taxonomy L1': 'code_level_1',
'Taxonomy L1 Name': 'code_level_1_name',
'Presenting Needs: Taxonomy Code': 'needs_code',
'Taxonomy L2': 'code_level_2',
'Taxonomy L2 Name': 'code_level_2_name',
'Presenting Needs: Unmet?': 'were_needs_unmet',
'Presenting Needs: Reason Unmet': 'why_needs_unmet'}
fields_with_types = schema().serialize_to_ckan_fields()
#fields0.pop(fields0.index({'type': 'text', 'id': 'party_type'}))
fields = [f['id'] for f in fields_with_types]
new_headers = fields
if not just_change_headers:
alias_lookup['Contact Record ID'] = 'unencrypted_contact_record_id'
alias_lookup['Client ID'] = 'unencrypted_client_id'
with open(raw_file_location, 'r') as f:
dr = csv.DictReader(f)
rows = []
ds = []
for d in dr:
# row is a dict with keys equal to the CSV-file column names
# and values equal to the corresponding values of those parameters.
# FIX FIELD TYPES HERE.
for old_field, new_field in alias_lookup.items():
rename_field(d, old_field, new_field)
del(d['Call Type Detail'])
bin_age(d)
standardize_date(d)
standardize_county(d)
remove_bogus_zip_codes(d)
convert_na_values(d,filecode)
d['client_id'] = encrypt_value(d['unencrypted_client_id'])
del(d['unencrypted_client_id'])
d['contact_record_id'] = encrypt_value(d['unencrypted_contact_record_id'])
del(d['unencrypted_contact_record_id'])
ds.append(d)
write_to_csv(processed_file_location,ds,new_headers)
else: # option 2: just read the first line of the file, translate the headers, write the headers to a new file and pipe through the rest of the file contents.
new_headers = translate_headers(headers, alias_lookup)
with open(processed_file_location, 'w') as outfile:
with open(raw_file_location, 'r') as infile:
for k,line in enumerate(infile):
if k == 0:
outfile.write(','.join(new_headers)+'\n')
else:
outfile.write(line)
def main(**kwargs):
schema_by_code = OrderedDict( [('clients', ClientSchema), ('contacts', ContactSchema), ('needs', NeedsSchema)] )
specify_resource_by_name = True
if specify_resource_by_name:
kwparams = {'resource_name': '211 Clients (beta)'}
#else:
#kwargs = {'resource_id': ''}
server = kwargs.get('server','211-testbed')
output_to_csv = kwargs.get('output_to_csv',False)
push_to_CKAN = kwargs.get('push_to_CKAN',False)
# Code below stolen from prime_ckan/*/open_a_channel() but really from utility_belt/gadgets
#with open(os.path.dirname(os.path.abspath(__file__))+'/ckan_settings.json') as f: # The path of this file needs to be specified.
with open(SETTINGS_FILE) as f:
settings = json.load(f)
site = settings['loader'][server]['ckan_root_url']
package_id = settings['loader'][server]['package_id']
# Get CSV files that contain the data. These will come from
# either a remote server or a local cache.
fetch_data = False
if fetch_data:
print("Pulling the latest 2-1-1 data from the source server.")
# Change path to script's path for cron job
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
local_path = dname + "/raw_data" # This is called "latest_pull" for the foreclosures ETL.
# It's just an archive of previously obtained raw-data files. fetch_files relies on
# the filename to change and archives whatever it pulls.
# If this path doesn't exist, create it.
if not os.path.exists(local_path):
os.makedirs(local_path)
most_recent_path = dname + "/most_recent" # This is where the most recently pulled
# file will be stored in raw format and then in processed format.
# If this path doesn't exist, create it.
if not os.path.exists(most_recent_path):
os.makedirs(most_recent_path)
filecodes = ['clients', 'contacts', 'needs']
processed_file_locations = ["{}/{}.csv".format(most_recent_path,fc) for fc in filecodes]
if fetch_data:
search_terms = ['opendata']
raw_file_locations = fetch_files(SETTINGS_FILE,local_path,DATA_PATH,search_terms)
else:
raw_file_locations = ["{}/raw-{}.csv".format(most_recent_path,fc) for fc in filecodes]
for raw_file_location, processed_file_location,filecode in zip(raw_file_locations,processed_file_locations,filecodes):
schema = schema_by_code[filecode]
process(raw_file_location,processed_file_location,filecode,schema)
if push_to_CKAN:
print("Preparing to pipe data from {} to resource {} package ID {} on {}".format(processed_file_location,list(kwparams.values())[0],package_id,site))
time.sleep(1.0)
fields0 = schema().serialize_to_ckan_fields()
# Eliminate fields that we don't want to upload.
#fields0.pop(fields0.index({'type': 'text', 'id': 'party_type'}))
#fields0.pop(fields0.index({'type': 'text', 'id': 'party_name'}))
#fields0.append({'id': 'assignee', 'type': 'text'})
fields_to_publish = fields0
print("fields_to_publish = {}".format(fields_to_publish))
###############
# FOR SOME PART OF THE BELOW PIPELINE, I THINK...
#The package ID is obtained not from this file but from
#the referenced settings.json file when the corresponding
#flag below is True.
two_one_one_pipeline = pl.Pipeline('two_one_one_pipeline',
'Pipeline for 2-1-1 Data',
log_status=False,
settings_file=SETTINGS_FILE,
settings_from_file=True,
start_from_chunk=0
)
two_one_one_pipeline = two_one_one_pipeline.connect(pl.FileConnector, target, encoding='utf-8') \
.extract(pl.CSVExtractor, firstline_headers=True) \
.schema(schema) \
.load(pl.CKANDatastoreLoader, server,
fields=fields_to_publish,
#package_id=package_id,
#resource_id=resource_id,
#resource_name=resource_name,
key_fields=['case_id','pin','block_lot','plaintiff','docket_type'],
# A potential problem with making the pin field a key is that one property
# could have two different PINs (due to the alternate PIN) though I
# have gone to some lengths to avoid this.
method='upsert',
**kwparams).run()
log = open('uploaded.log', 'w+')
if specify_resource_by_name:
print("Piped data to {}".format(kwparams['resource_name']))
log.write("Finished upserting {}\n".format(kwparams['resource_name']))
else:
print("Piped data to {}".format(kwparams['resource_id']))
log.write("Finished upserting {}\n".format(kwparams['resource_id']))
log.close()
if output_to_csv:
print("This is where the table should be written to a CSV file for testing purposes.")
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
if len(sys.argv) > 1:
args = sys.argv[1:]
output_to_csv = False
push_to_CKAN = False
copy_of_args = list(args)
list_of_servers = ["211-testbed",
] # This list could be automatically harvested from SETTINGS_FILE.
kwparams = {}
# This is a new way of parsing command-line arguments that cares less about position
# and just does its best to identify the user's intent.
for k,arg in enumerate(copy_of_args):
if arg in ['scan', 'save', 'csv']:
output_to_csv = True
args.remove(arg)
elif arg in ['pull', 'push', 'ckan']:
push_to_CKAN = True
args.remove(arg)
elif arg in list_of_servers:
kwparams['server'] = arg
args.remove(arg)
else:
print("I have no idea what do with args[{}] = {}.".format(k,arg))
kwparams['output_to_csv'] = output_to_csv
kwparams['push_to_CKAN'] = push_to_CKAN
print(kwparams)
main(**kwparams)
else:
print("Please specify some command-line parameters next time.")
main()
| [
"drw@users.noreply.github.com"
] | drw@users.noreply.github.com |
b94c10ffe1e2cebe6c2841f11409493c979fc88d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03806/s842087595.py | d6c3b9ca4a01b491de47129a0e62c223068e0ba0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #!/usr/bin/env python3
#ABC54 D
import sys
import math
import bisect
sys.setrecursionlimit(1000000000)
from heapq import heappush, heappop
from collections import defaultdict
from itertools import accumulate
from collections import Counter
from collections import deque
from operator import itemgetter
from itertools import permutations
mod = 10**9 + 7
n,ma,mb = map(int,input().split())
x = [list(map(int,input().split())) for _ in range(n)]
sa = 0
sb = 0
for a,b,c in x:
sa += a
sb += b
dp = [[float('inf')]*(sb+1) for _ in range(sa+1)]
dp[0][0] = 0
for a,b,c in x:
y = [[True]*(sb+1) for _ in range(sa+1)]
for i in range(sa+1-a):
for j in range(sb+1-b):
if y[i][j]:
if dp[i+a][j+b] > dp[i][j] + c:
dp[i+a][j+b] = dp[i][j] + c
y[i+a][j+b] = False
ans = float('inf')
for i in range(1,sa+1):
for j in range(1,sb+1):
if i*mb == j*ma:
ans = min(ans,dp[i][j])
if ans == float('inf'):
print(-1)
else:
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0f208f06bcdb8fb441e0b83c7cf87c10e7b1823e | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stdlib/importlib/__init__.pyi | 0e99786775b0b6fbcc2709c7e75cbd15358973c7 | [
"MIT",
"Apache-2.0"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 791 | pyi | from importlib.abc import Loader
from types import ModuleType
from typing import Mapping, Sequence
__all__ = ["__import__", "import_module", "invalidate_caches", "reload"]
# Signature of `builtins.__import__` should be kept identical to `importlib.__import__`
def __import__(
name: str,
globals: Mapping[str, object] | None = ...,
locals: Mapping[str, object] | None = ...,
fromlist: Sequence[str] = ...,
level: int = ...,
) -> ModuleType: ...
# `importlib.import_module` return type should be kept the same as `builtins.__import__`
def import_module(name: str, package: str | None = ...) -> ModuleType: ...
def find_loader(name: str, path: str | None = ...) -> Loader | None: ...
def invalidate_caches() -> None: ...
def reload(module: ModuleType) -> ModuleType: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
12571535c2293ff73d15e6d288a9fb979737dd66 | 2d40a56ca2e9f8a37018eba1edfe3f93f7bd2176 | /app/__init__.py | 5ef65987471aee2297711a150741be280508af5c | [
"MIT"
] | permissive | Mariga123/BLOGS | 27f119d12c50a1b3e39e62f091b2dec362a63f08 | 5578540f5fc9ec3aed73d7cca869117d2df9a298 | refs/heads/master | 2023-01-31T01:04:12.795202 | 2020-12-15T23:04:57 | 2020-12-15T23:04:57 | 320,508,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | from flask import Flask
from config import config_options
from flask_mail import Mail
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_uploads import IMAGES, UploadSet, configure_uploads
db = SQLAlchemy()
mail = Mail()
bootstap = Bootstrap()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
photos = UploadSet('photos', IMAGES)
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
from .auth import auth as authentication_blueprint
from .main import main as main_blueprint
# app.add_url_rule('/', endpoint='main.index')
# app.register_blueprint(auth_blueprint, url_prefix='/authenticate')
app.register_blueprint(authentication_blueprint)
app.register_blueprint(main_blueprint)
login_manager.init_app(app)
db.init_app(app)
bootstap.init_app(app)
configure_uploads(app, photos)
mail.init_app(app)
return app
| [
"johnmariga8@gmail.com"
] | johnmariga8@gmail.com |
c100e4099c1b15f54c023a6ded6ae3dbe74cd1e2 | 10f1bbac126bb187febc630ab13b09ac6d9270cb | /Examples/swordfish.py | a86c83506091e74070d7a8fc63061f23c89010ff | [] | no_license | akshirapov/automate-the-boring-stuff | 481827efd8f53117d73bc2f6b846b49736bb9d46 | fb36141e249f8c5db8e1c1c40856d5f8134606cc | refs/heads/master | 2022-12-17T10:55:33.412077 | 2020-01-10T16:02:13 | 2020-01-10T16:02:13 | 187,468,744 | 0 | 0 | null | 2022-12-08T05:15:44 | 2019-05-19T11:37:46 | Python | UTF-8 | Python | false | false | 253 | py | while True:
print('Who are you?')
name = input()
if name != 'Joe':
continue
print('Hello, Joe. What is the password? (It is a fish)')
password = input()
if password == 'swordfish':
break
print('Access granted.')
| [
"cccp2006_06@mail.ru"
] | cccp2006_06@mail.ru |
b72ab49459fa091603bd0dbb3eb1c0427da0a8b8 | 2448c41b6914cce852a6b0624298936029d62d0f | /apps/tests/accounts/token_tests.py | 0e514d08adb03afa128e5935047ed95f2d215c77 | [] | no_license | navill/ut_project | ade4f7ddede3096ee22a6f8f1d7da100bf73eacf | ef639e79bcdd59bd7b7d68edd185d88bfc82d4d3 | refs/heads/master | 2023-04-20T02:50:43.337465 | 2021-05-21T00:17:22 | 2021-05-21T00:17:22 | 314,426,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | import time
import pytest
from rest_framework.reverse import reverse
from rest_framework_simplejwt.token_blacklist.models import *
from rest_framework_simplejwt.utils import datetime_from_epoch
from accounts.api.authentications import CustomJWTTokenUserAuthentication, CustomRefreshToken
from accounts.models import *
@pytest.mark.django_db
def test_custom_refresh_token():
user = BaseUser.objects.get(id=2)
# expired time
token = CustomRefreshToken.for_user(user)
assert user.token_expired == token.access_token['exp']
assert BlacklistedToken.objects.all().exists() is False
outstanding_token = OutstandingToken.objects.first()
assert outstanding_token.token == str(token)
assert outstanding_token.jti == token['jti']
assert outstanding_token.expires_at == datetime_from_epoch(token['exp'])
token.blacklist()
black_token = BlacklistedToken.objects.get(token_id=outstanding_token.id)
assert black_token
@pytest.mark.django_db
def test_token_for_user_with_error(doctor_with_group):
with pytest.raises(Exception):
CustomRefreshToken.for_user(doctor_with_group.user, raise_error=True)
# CustomRefreshToken.for_user() 중간에 에러가 발생할 경우 user.token_expired=<epoch_time> 및 OutstandingToken은 생성되면 안됨
assert doctor_with_group.user.token_expired == 0
assert OutstandingToken.objects.all().exists() is False
CustomRefreshToken.for_user(doctor_with_group.user)
assert doctor_with_group.user.token_expired != 0
@pytest.mark.django_db
def test_authenticate_jwt_token_user(rf):
doctor = Doctor.objects.first()
token = CustomRefreshToken.for_user(doctor.user)
access_token = token.access_token
url = reverse('token-login')
request = rf.post(url, HTTP_AUTHORIZATION=f'Bearer {str(access_token)}')
authentication = CustomJWTTokenUserAuthentication()
auth_user, validated_token = authentication.authenticate(request)
assert auth_user == doctor.user
assert token['token_type'] == 'refresh'
assert access_token['token_type'] == 'access'
assert access_token['jti'] == validated_token['jti']
@pytest.mark.django_db
def test_compare_user_token_expired_with_accesstoken_expired(get_token_from_doctor):
doctor = Doctor.objects.first()
token = CustomRefreshToken.for_user(doctor.user)
access_token = token.access_token
# 토큰 타입 검사
assert get_token_from_doctor['token_type'] == 'refresh'
assert access_token['token_type'] == 'access'
# user 모델에 등록된 토큰 만료 시간과 발급된 토큰(access_token)의 만료 시간이 동일한지 확인
assert access_token['exp'] == doctor.user.token_expired
| [
"blue_jihoon@naver.com"
] | blue_jihoon@naver.com |
b6489a92789fa5c732d255eb213c6d5f6a9e3dd2 | 16ca50defdb822904aa310552ea614db3c50a7b8 | /src/posts/views.py | 1f3b72094a75a2a6ec9e4353cd07de97c6a9dd42 | [] | no_license | Elsaeed97/django-cbv | 5cd199ae5df7a94f9c93efa6e06d32292894f154 | 464f0a0da73b114e8c06ded4ee3c1e6fe9ece45e | refs/heads/master | 2020-07-15T09:21:53.545096 | 2019-09-02T20:31:05 | 2019-09-02T20:31:05 | 205,531,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | from django.shortcuts import render
from .models import Post
from django.views.generic import TemplateView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
# Create your views here.
class HomePage(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['posts'] = Post.objects.all()
return context
class PostsDetail(DeleteView):
context_object_name = 'post_details'
model = Post
template_name = 'posts/post_detail.html'
class PostCreate(CreateView):
fields = ('title', 'content','author')
model = Post
class PostUpdate(UpdateView):
fields = ('title', 'content')
model = Post
class PostDelete(DeleteView):
model = Post
success_url = reverse_lazy('home')
| [
"elsaeedahmed97@gmail.com"
] | elsaeedahmed97@gmail.com |
2a81b395742d7db7d66bb03781bd253dc966537d | d7016f69993570a1c55974582cda899ff70907ec | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2022_04_01/_application_insights_management_client.py | e03358a7d7f81eb9a7e45644bb061dfd5af07347 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 4,005 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WorkbooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ApplicationInsightsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Composite Swagger for Application Insights Management Client.
:ivar workbooks: WorkbooksOperations operations
:vartype workbooks: azure.mgmt.applicationinsights.v2022_04_01.operations.WorkbooksOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ApplicationInsightsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.workbooks = WorkbooksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "ApplicationInsightsManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
3f349e600c67907fd25a7668627c2f0fa4b8d8ef | 86c0e5e717955f615093788b3c3e772c2e9bf86f | /2019work_update/360elib/utils.py | aac4a2e95739ae001cfa99585039063f14cf9e34 | [] | no_license | Sustartpython/My-Python-Examples | 17cba596de3a5818f32d72c5a6abf25cd599dac0 | 6af2314693d929c35a4636cf047eea9490969e78 | refs/heads/master | 2022-09-09T10:04:09.936098 | 2020-03-16T03:32:48 | 2020-03-16T03:32:48 | 214,319,813 | 3 | 0 | null | 2022-09-01T23:18:15 | 2019-10-11T01:47:42 | Java | UTF-8 | Python | false | false | 9,632 | py | import sqlite3
import mysql.connector
import pypyodbc
from PIL import Image
import io
import sys
import time
import os
import threading
import traceback
import json
import requests
import base64
import hashlib
def BaseEncodeID(strRaw):
r""" 自定义base编码 """
strEncode = base64.b32encode(strRaw.encode('utf8')).decode('utf8')
if strEncode.endswith('======'):
strEncode = '%s%s' % (strEncode[0:-6], '0')
elif strEncode.endswith('===='):
strEncode = '%s%s' % (strEncode[0:-4], '1')
elif strEncode.endswith('==='):
strEncode = '%s%s' % (strEncode[0:-3], '8')
elif strEncode.endswith('='):
strEncode = '%s%s' % (strEncode[0:-1], '9')
table = str.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZYXWVUTSRQPONMLKJIHGFEDCBA9876543210')
strEncode = strEncode.translate(table)
return strEncode
def BaseDecodeID(strEncode):
r""" 自定义base解码 """
table = str.maketrans('ZYXWVUTSRQPONMLKJIHGFEDCBA9876543210', '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ')
strEncode = strEncode.translate(table)
if strEncode.endswith('0'):
strEncode = '%s%s' % (strEncode[0:-1], '======')
elif strEncode.endswith('1'):
strEncode = '%s%s' % (strEncode[0:-1], '====')
elif strEncode.endswith('8'):
strEncode = '%s%s' % (strEncode[0:-1], '===')
elif strEncode.endswith('9'):
strEncode = '%s%s' % (strEncode[0:-1], '=')
strRaw = base64.b32decode(strEncode.encode('utf8')).decode('utf8')
return strRaw
def GetLngid(sub_db_id, rawid, case_insensitive=False):
    r"""Build a lngid from ``sub_db_id`` and ``rawid``.

    Args:
        sub_db_id: Prefix identifying the sub-database.
        rawid: Record id from the source website.
        case_insensitive: Per the original comment, True means the source
            site's rawid IS case-sensitive (parameter name notwithstanding):
            lowercase letters are upper-cased and tagged with '_' so distinct
            ids stay distinct after normalization.

    Returns:
        ``sub_db_id`` + a length-limited encoding of the normalized rawid:
        the upper-cased md5 hex digest when the normalized id exceeds 20
        utf-8 bytes, otherwise its BaseEncodeID form.
    """
    if case_insensitive:
        # Preserve case information: an already-uppercase character maps to
        # itself, a lowercase one to its uppercase form plus a '_' tag.
        uppercase_rawid = ''.join(
            ch if ch.upper() == ch else ch.upper() + '_' for ch in rawid)
    else:
        uppercase_rawid = rawid.upper()
    # (The original pre-assigned limited_id here; that was a dead store --
    # both branches below always set it.)
    if len(uppercase_rawid.encode('utf8')) > 20:
        limited_id = hashlib.md5(uppercase_rawid.encode('utf8')).hexdigest().upper()
    else:
        limited_id = BaseEncodeID(uppercase_rawid)
    return sub_db_id + limited_id
def Img2Jpg(buf, dstFile):
    """Decode an image from raw bytes and save a 108x150 JPEG thumbnail.

    Args:
        buf: Raw image bytes (any format PIL can open).
        dstFile: Destination path for the JPEG thumbnail.

    Returns:
        True on success, False when decoding/resizing/saving failed (the
        traceback and the failing destination are printed).
    """
    try:
        srcImg = Image.open(io.BytesIO(buf))
        # convert('RGB') because JPEG cannot store alpha/palette modes.
        dstImg = srcImg.resize((108, 150), Image.ANTIALIAS).convert('RGB')
        dstImg.save(dstFile, 'JPEG')
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print('* ' + traceback.format_exc())
        print(dstFile)
        return False
    return True
def parse_results_to_sql(conn, stmt, results, size=1):
    """Batch-execute a parameterized SQL statement and commit.

    Arguments:
        conn {sql.connect} -- DB-API connection instance
        stmt {string} -- parameterized SQL statement to run via executemany
        results {[(val,[val])]} -- sequence of parameter tuples

    Keyword Arguments:
        size {integer} -- minimum batch size before anything executes
            (default: {1})

    Raises:
        Whatever the driver raises on execution/commit failure.

    Returns:
        bool -- True when the batch was executed and committed, False when
        len(results) < size and the call was a no-op.
    """
    if len(results) < size:
        return False
    cur = conn.cursor()
    try:
        cur.executemany(stmt, results)
        conn.commit()
    finally:
        # Always release the cursor, even when executemany/commit raises
        # (the original leaked it on failure and re-raised pointlessly).
        cur.close()
    return True
def printf(*args):
    """print() with a 'YYYY/MM/DD hh:mm:ss [info]' prefix."""
    stamp = time.strftime("%Y/%m/%d %X")
    print(stamp + ' [info]', *args)
lock = threading.Lock()
def logerror(line):
    """Append one line to today's log file (<module dir>/../log/YYYYMMDD.txt).

    Writes are serialized through the module-level ``lock`` so concurrent
    threads do not interleave partial lines.

    Args:
        line: Text to append; a trailing newline is added.
    """
    cur_dir_fullpath = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): the backslash separator is Windows-specific; on POSIX
    # this creates a directory literally named "..\log" -- confirm target OS.
    logpath = os.path.abspath(os.path.join(cur_dir_fullpath, r"..\log"))
    if not os.path.exists(logpath):
        os.makedirs(logpath)
    fname = logpath + '/' + time.strftime("%Y%m%d") + '.txt'
    # "with lock:" replaces the manual acquire/try/finally/release and the
    # pointless "except Exception as e: raise e" wrapper; exceptions still
    # propagate and the lock is still always released.
    with lock:
        with open(fname, mode='a', encoding='utf8') as f:
            f.write(line + '\n')
def file_list(filepath):
    """Recursively walk ``filepath``.

    Arguments:
        filepath {string} -- directory to traverse

    Yields:
        (string, string) -- each file's bare name and its joined path
    """
    for dirpath, _dirnames, filenames in os.walk(filepath):
        for name in filenames:
            yield name, os.path.join(dirpath, name)
def msg2weixin(msg):
    """Push ``msg`` as a WeChat Work text message, retrying up to 3 times.

    NOTE(review): ``GolobalConfig`` is never defined or imported in this
    module, so reaching that line raises NameError (silently caught by the
    broad except and retried) -- confirm where the config should come from.
    NOTE(review): ``url`` is reused for both the token URL and the send URL;
    a retry after the reassignment GETs the *send* endpoint -- looks like a
    latent bug, left as-is.
    """
    Headers = {
        'Accept':
        '*/*',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
    }
    corpid = r'wwa7df1454d730c823'
    corpsecret = r'dDAusBg3gK7hKhLfqIRlyp84UDtII6NkMW7s8Wn2wgs'
    url = r'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s' % (corpid, corpsecret)
    count = 0
    while count < 3:
        try:
            # Fetch an access token, then post the text message payload.
            r = requests.get(url)
            content = r.content.decode('utf8')
            dic = json.loads(content)
            accessToken = dic['access_token']
            usr = GolobalConfig['weixin']['User']
            url = r'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s' % accessToken
            form = {"touser": usr, "msgtype": "text", "agentid": 1000015, "text": {"content": msg}, "safe": 0}
            r = requests.post(url=url, data=json.dumps(form), headers=Headers, timeout=30)
            break
        except:
            count += 1
            printf('发送消息到企业微信失败')
def all_2_one(src, dst, size=2):
    """Concatenate every file under ``src``, line by line, into rolling
    ``.big_json`` files under ``dst``; a new output file is started once the
    current one reaches ``size`` whole gigabytes.

    Args:
        src: Directory walked recursively for input files.
        dst: Directory receiving the <YYYYMMDD>_<rand>.big_json outputs.
        size: Rollover threshold in whole GB (integer division).
    """
    import random
    new_dirname = time.strftime("%Y%m%d")
    # NOTE(review): random names are not checked for uniqueness, so an
    # output could collide with an existing file and be appended to.
    new_file = dst + '/' + new_dirname + '_' + repr(random.randrange(111, 999)) + ".big_json"
    count = 0
    for _, files in file_list(src):
        with open(files, mode='r', encoding="utf-8") as fp:
            text = fp.readline()
            while text:
                # The output is re-opened in append mode for every single
                # line; the rollover check then runs after each write.
                with open(new_file, mode='a', encoding="utf-8") as f:
                    f.write(text)
                if os.path.getsize(new_file) // (1024 * 1024 * 1024) >= size:
                    new_file = dst + '/' + new_dirname + '_' + repr(random.randrange(111, 999)) + ".big_json"
                    count += 1
                text = fp.readline()
    # count = number of rollovers performed.
    print(count)
def ProcOne(client, srcFile, dstFile):
    """Upload one local file to HDFS, skipping when the remote copy already
    has the same byte length.

    Args:
        client: HDFS client exposing exists/list_status/copy_from_local.
        srcFile: Local source path.
        dstFile: Remote destination path.

    Returns:
        True when the remote file exists with the same size as the local one
        (either pre-existing or after upload), False otherwise.
    """
    print('ProcOne \n%s\n -> \n%s ' % (srcFile, dstFile))
    # Target already exists with an identical size -- nothing to do.
    if client.exists(dstFile) and \
        (os.path.getsize(srcFile) == client.list_status(dstFile)[0].length):
        print('file exists: %s ' % dstFile)
        return True
    # Note: an existing (different-sized) target is overwritten here.
    client.copy_from_local(srcFile, dstFile, overwrite=True)
    if os.path.getsize(srcFile) == client.list_status(dstFile)[0].length:  # verify uploaded size
        return True
    return False
def _get_http_respanse(method, url, feature, kind=None, **kwargs):
    """Issue an HTTP request and return the response, or None on failure.

    :param method: 'post' or 'get'
    :param url: target URL
    :param feature: optional marker string the gb18030-decoded body must
        contain for the response to count as valid
    :param kind: optional requests.Session to issue the request through
    :return: requests.Response on success; None on connection error,
        timeout, non-200 status, or missing feature marker
    :raises ValueError: for an unknown method or a non-Session ``kind``
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
    }
    if method not in ('post', 'get'):
        raise ValueError("http request must be a right method like post or get")
    if kind is not None and not isinstance(kind, requests.Session):
        raise ValueError("session's value must be isinstance of requests.Session")
    # Both the requests module and a Session expose the same get/post API.
    requester = requests if kind is None else kind
    try:
        if method == "post":
            resp = requester.post(url, headers=headers, **kwargs)
        else:
            resp = requester.get(url, headers=headers, **kwargs)
    except requests.ConnectionError as e:
        printf(e, "Connection Error")
        return None
    except requests.Timeout as e:
        printf(e, "ReadTime out")
        return None
    if resp.status_code != 200:
        printf('the status_code is', resp.status_code, "not 200")
        return None
    if feature and resp.content.decode('gb18030').find(feature) == -1:
        printf(url, "返回的页面没有包含特征值 {}".format(feature))
        return None
    return resp
def get_html(url, feature=None, timeout=20, **kwargs):
    """
    Fetch an HTTP response with a GET request.

    Arguments:
        url {string} -- URL to request

    Keyword Arguments:
        feature {string} -- marker string a valid page must contain (default: {None})
        timeout {integer} -- maximum connection timeout in seconds (default: {20})

    Returns:
        requests.Response -- the HTTP response, or None on failure
    """
    return _get_http_respanse('get', url=url, feature=feature, timeout=timeout, **kwargs)
def get_post_html(url, feature=None, timeout=20, **kwargs):
    """
    Fetch an HTTP response with a POST request.
    (The original docstring said GET -- a copy-paste slip; this wrapper posts.)

    Arguments:
        url {string} -- URL to request

    Keyword Arguments:
        feature {string} -- marker string a valid page must contain (default: {None})
        timeout {integer} -- maximum connection timeout in seconds (default: {20})

    Returns:
        requests.Response -- the HTTP response, or None on failure
    """
    return _get_http_respanse('post', url=url, feature=feature, timeout=timeout, **kwargs)
"476274024@qq.com"
] | 476274024@qq.com |
d1cb8b220ff10c0541a9ae9919eca7d78c4451e2 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/DBPN_ID2917_for_PyTorch/dataset.py | 107b2ac1e75bf747ca40f827a1d8b65f0fbc430b | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,223 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch.utils.data as data
import torch
import numpy as np
import os
from os import listdir
from os.path import join
from PIL import Image, ImageOps
import random
from random import randrange
import torch.npu
import os
# Select the Ascend NPU device: default 0, overridable through the
# NPU_CALCULATE_DEVICE environment variable (digits only).
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
# Bind this process to the chosen NPU if it is not already current.
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
def is_image_file(filename):
    """Return True when ``filename`` ends in a supported image extension
    (case-sensitive: .png, .jpg, .jpeg)."""
    return filename.endswith((".png", ".jpg", ".jpeg"))
def load_img(filepath):
    """Open ``filepath`` with PIL and return it converted to RGB."""
    return Image.open(filepath).convert('RGB')
def rescale_img(img_in, scale):
    """Return ``img_in`` bicubically resized by factor ``scale``
    (each dimension is truncated to an int)."""
    new_size = tuple(int(dim * scale) for dim in img_in.size)
    return img_in.resize(new_size, resample=Image.BICUBIC)
def get_patch(img_in, img_tar, img_bic, patch_size, scale, ix=-1, iy=-1):
    """Crop aligned random patches from the LR input, HR target and bicubic
    images.

    Args:
        img_in: Low-resolution PIL image.
        img_tar: High-resolution target image (``scale`` times larger).
        img_bic: Bicubically upscaled version of ``img_in`` (HR-sized).
        patch_size: Patch edge length in LR pixels.
        scale: Upscale factor between img_in and img_tar.
        ix, iy: LR crop origin; -1 means pick a random position.

    Returns:
        (lr_patch, hr_patch, bic_patch, info_patch) where info_patch records
        the chosen crop coordinates and patch sizes.

    NOTE(review): PIL ``.size`` is (width, height) but is unpacked here as
    (ih, iw); the code stays self-consistent either way -- confirm naming.
    """
    (ih, iw) = img_in.size
    (th, tw) = (scale * ih, scale * iw)  # HR dimensions (currently unused)
    patch_mult = scale #if len(scale) > 1 else 1
    tp = patch_mult * patch_size  # patch size in HR pixels
    ip = tp // scale  # patch size in LR pixels
    if ix == -1:
        ix = random.randrange(0, iw - ip + 1)
    if iy == -1:
        iy = random.randrange(0, ih - ip + 1)
    (tx, ty) = (scale * ix, scale * iy)  # HR crop origin
    # Same spatial region cropped in LR coordinates for the input and in HR
    # coordinates for the target/bicubic images.
    img_in = img_in.crop((iy,ix,iy + ip, ix + ip))
    img_tar = img_tar.crop((ty,tx,ty + tp, tx + tp))
    img_bic = img_bic.crop((ty,tx,ty + tp, tx + tp))
    info_patch = {
        'ix': ix, 'iy': iy, 'ip': ip, 'tx': tx, 'ty': ty, 'tp': tp}
    return img_in, img_tar, img_bic, info_patch
def augment(img_in, img_tar, img_bic, flip_h=True, rot=True):
    """Randomly flip/mirror/rotate the three aligned images identically.

    Each transform fires with probability 0.5: vertical flip gated by
    ``flip_h``, mirror and 180-degree rotation gated by ``rot``.

    Returns:
        The (possibly) transformed images plus a dict recording which
        transforms were applied.
    """
    applied = {'flip_h': False, 'flip_v': False, 'trans': False}

    # NB: the random draw happens even when flip_h is False, matching the
    # original evaluation order (keeps the RNG stream identical).
    if random.random() < 0.5 and flip_h:
        img_in, img_tar, img_bic = (ImageOps.flip(im) for im in (img_in, img_tar, img_bic))
        applied['flip_h'] = True

    if rot:
        if random.random() < 0.5:
            img_in, img_tar, img_bic = (ImageOps.mirror(im) for im in (img_in, img_tar, img_bic))
            applied['flip_v'] = True
        if random.random() < 0.5:
            img_in, img_tar, img_bic = (im.rotate(180) for im in (img_in, img_tar, img_bic))
            applied['trans'] = True

    return img_in, img_tar, img_bic, applied
class DatasetFromFolder(data.Dataset):
    """Training dataset: derives (LR, HR, bicubic) patch triples from a
    directory of high-resolution images."""

    def __init__(self, image_dir, patch_size, upscale_factor, data_augmentation, transform=None):
        super(DatasetFromFolder, self).__init__()
        self.image_filenames = [join(image_dir, name)
                                for name in listdir(image_dir) if is_image_file(name)]
        self.patch_size = patch_size
        self.upscale_factor = upscale_factor
        self.transform = transform
        self.data_augmentation = data_augmentation

    def __getitem__(self, index):
        # HR target from disk; LR input by bicubic downscale; "bicubic" is
        # the LR image naively upscaled back to HR size.
        target = load_img(self.image_filenames[index])
        lr_size = (int(target.size[0] / self.upscale_factor),
                   int(target.size[1] / self.upscale_factor))
        input = target.resize(lr_size, Image.BICUBIC)
        bicubic = rescale_img(input, self.upscale_factor)
        input, target, bicubic, _ = get_patch(input, target, bicubic,
                                              self.patch_size, self.upscale_factor)
        if self.data_augmentation:
            input, target, bicubic, _ = augment(input, target, bicubic)
        if self.transform:
            input = self.transform(input)
            bicubic = self.transform(bicubic)
            target = self.transform(target)
        return input, target, bicubic

    def __len__(self):
        return len(self.image_filenames)
class DatasetFromFolderEval(data.Dataset):
    """Evaluation dataset: yields (LR image, bicubic upscale, filename)."""

    def __init__(self, lr_dir, upscale_factor, transform=None):
        super(DatasetFromFolderEval, self).__init__()
        self.image_filenames = [join(lr_dir, name)
                                for name in listdir(lr_dir) if is_image_file(name)]
        self.upscale_factor = upscale_factor
        self.transform = transform

    def __getitem__(self, index):
        input = load_img(self.image_filenames[index])
        # Keep the bare filename so outputs can be matched to inputs.
        _, file = os.path.split(self.image_filenames[index])
        bicubic = rescale_img(input, self.upscale_factor)
        if self.transform:
            input = self.transform(input)
            bicubic = self.transform(bicubic)
        return input, bicubic, file

    def __len__(self):
        return len(self.image_filenames)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
16e946ad9dcd11ce7bf78b923c7d03ec8901301e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HpJCBwggQMDLWTHsM_9.py | 899cb569cb973f6979a58e5944f0756877e31ba0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | """
Create a function that takes in a sentence and returns the average length of
each word in that sentence. Round your result to two decimal places.
### Examples
average_word_length("A B C.") ➞ 1.00
average_word_length("What a gorgeous day.") ➞ 4.00
average_word_length("Dude, this is so awesome!") ➞ 3.80
### Notes
Ignore punctuation when counting the length of a word.
"""
def average_word_length(txt):
    """Return the mean count of alphabetic characters per word, rounded to
    two decimal places; punctuation is ignored when measuring length."""
    words = txt.split()
    letter_total = sum(1 for word in words for ch in word if ch.isalpha())
    return round(letter_total / len(words), 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
87e1f5f4d8c3542a34727f8b44668b8a8d6c135a | 46baa88abe88e226afede3abf721c2056369745a | /articles/urls.py | 80ae9f355b51743f0982ba0a32d1efb679b0f46c | [] | no_license | almazkun/django_news_app | 267aa8775c2ffeba72e22f647b0db38f65a526ec | ae66fb1df0f87b3c52ad59546986b93a94c30083 | refs/heads/master | 2020-04-21T00:20:49.926012 | 2019-02-11T09:01:24 | 2019-02-11T09:01:24 | 169,193,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.urls import path
from .views import (
ArticleListView,
ArticleUpdateView,
ArticleDetailView,
ArticleDeleteView,
ArticleCreateView,
)
# URL routes for the articles app: pk-keyed CRUD views plus the list view at
# the app root. The ``name=`` values are used by reverse() and {% url %}.
urlpatterns = [
    path('<int:pk>/edit/', ArticleUpdateView.as_view(), name='article_edit'),
    path('<int:pk>/', ArticleDetailView.as_view(), name='article_detail'),
    path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='article_delete'),
    path('new/', ArticleCreateView.as_view(), name='article_new'),
    path('', ArticleListView.as_view(), name='article_list'),
]
| [
"almaz.kun@gmail.com"
] | almaz.kun@gmail.com |
86b33544ef12bb04bf642015bf69fedbca7451c0 | 3fd6e85c36a7e9e4f9ddec163a55f3602ccfb98c | /old/imu/test_ukf_filter.py | 9fb00c012ec4fbe0edf8f64b2e68194f5d687353 | [
"Apache-2.0"
] | permissive | SiChiTong/mjmech | acc5da4ac6edd9f1446cc13e471aedeea3e1c419 | a71f35e6ad6bc9c1530a0a33d68c45d073390b79 | refs/heads/master | 2020-03-20T03:44:13.276650 | 2018-05-06T02:59:55 | 2018-05-06T03:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | #!/usr/bin/python
# Copyright 2014 Josh Pieper, jjp@pobox.com. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from numpy import array
import unittest
import ukf_filter
class UkfFilterTest(unittest.TestCase):
    """Exercises UkfFilter on a simple constant-velocity kinematic model."""

    def test_basic(self):
        def kinematic_process(state, dt_s):
            # Integrate acceleration -> velocity -> position over dt_s.
            accel = state[0, 0]
            vel = state[1, 0]
            return state + array(
                [[0],
                 [accel * dt_s],
                 [vel * dt_s + 0.5 * accel * dt_s ** 2]])

        def position_measurement(state):
            # Only the position component is observed.
            return array([[state[2, 0]]])

        dut = ukf_filter.UkfFilter(
            initial_state=array([[0.2], [0.0], [0.0]]),
            initial_covariance=numpy.diag([1.0, 2.0, 3.0]),
            process_function=kinematic_process,
            process_noise=numpy.diag([0.1, 0.1, 0.1]),
            measurement_function=position_measurement,
            measurement_noise=array([[2.0]]))

        # Feed 200 position measurements advancing 0.5 units per 0.1 s step.
        measured_position = 0.5
        for _ in range(200):
            measured_position += 0.5
            dut.update_state(0.1)
            dut.update_measurement(array([[measured_position]]))

        # The filter should converge on the measured position and on the
        # implied constant velocity of 0.5 / 0.1 units per second.
        self.assertAlmostEqual(round(dut.state[2, 0], 2), measured_position)
        self.assertAlmostEqual(round(dut.state[1, 0], 2), 0.5 / 0.1)
# Allow running this module directly: python test_ukf_filter.py
if __name__ == '__main__':
    unittest.main()
| [
"jjp@pobox.com"
] | jjp@pobox.com |
12757534cd6969e75c3a3b1f495af6b6da5536ba | 521a6a1f121f8dd569618b96184457c7427d20a9 | /compiler/tests/04_pnand2_test.py | bc066cfc726767bc881d3b6be153618e3d8a08bc | [
"BSD-3-Clause"
] | permissive | mguthaus/OpenRAM | e9682c9148be42cdd84d115d0855ce91dae5b567 | 46c86d3bb3df82e150532ede75cbf6180a697cfd | refs/heads/master | 2021-05-02T13:43:36.618374 | 2019-10-20T00:43:33 | 2019-10-20T00:43:33 | 216,284,207 | 1 | 1 | NOASSERTION | 2019-10-19T23:48:09 | 2019-10-19T23:48:09 | null | UTF-8 | Python | false | false | 993 | py | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys,os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class pnand2_test(openram_test):
def runTest(self):
globals.init_openram("config_{0}".format(OPTS.tech_name))
debug.info(2, "Checking 2-input nand gate")
tx = factory.create(module_type="pnand2", size=1)
self.local_check(tx)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| [
"mrg@ucsc.edu"
] | mrg@ucsc.edu |
856f691c05670bf2301b7c6348f2a85058d9f65a | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /chapter05/class_view_demo/class_view_demo/urls.py | e6f3d7b1c943bca748af711f0e39b48e0f559a19 | [] | no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 1,372 | py | """class_view_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
from django.views.generic import TemplateView
# Routes for the class-based-view demo: admin, book CRUD, static pages and
# the nested front-app URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.index,name="index"),
    path('book/',views.BookListView.as_view(),name="book_list"),
    path('add_book/',views.AddBookView.as_view(),name="add_book"),
    path('book_detail/<book_id>/',views.BookDetailView.as_view(),name="book_detail"),
    # If the rendered template needs no context variables at all, prefer
    # wiring TemplateView straight into the URLconf, e.g.:
    # path('about/',TemplateView.as_view(template_name="about.html"))
    path('about/',views.AboutView.as_view(),name="about"),
    path('article/',include("front.urls"))
]
| [
"925712087@qq.com"
] | 925712087@qq.com |
35f4bd12ddfba68de1548e67e34008152d69b9bf | 64809a6995c361d8f62fac40b97c6fbac8206637 | /tests/test_PyTorch_NN.py | cc0e30e559eb14f74566070563417a93b3400e42 | [
"MIT"
] | permissive | josiahls/nn_builder | 857436e78ae38a0395adb43b05aab18ee97311d8 | 3d202af1bae7b4ae95beb446ef13f73b0565dea0 | refs/heads/master | 2020-05-23T18:23:10.549979 | 2019-05-15T19:52:42 | 2019-05-15T19:52:42 | 186,887,013 | 0 | 0 | null | 2019-05-15T19:05:21 | 2019-05-15T19:05:21 | null | UTF-8 | Python | false | false | 12,056 | py | # Run from home directory with python -m pytest tests
import pytest
import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
from nn_builder.pytorch.NN import NN
# Shared fixtures: 250 samples of 5-dim Gaussian data; columns 2 and 4 are
# shifted by +10 so they can double as (rounded) embedding indices later.
N = 250
X = torch.randn((N, 5))
X[:, [2, 4]] += 10.0
# Binary target: whether the first feature is positive.
y = X[:, 0] > 0
y = y.float()
def test_linear_hidden_units_user_input():
    """Tests whether network rejects an invalid linear_hidden_units input from user"""
    bad_layer_specs = ["a", ["a", "b"], [2, 4, "ss"], [-2], 2]
    for bad_spec in bad_layer_specs:
        with pytest.raises(AssertionError):
            NN(input_dim=2, layers=bad_spec, hidden_activations="relu",
               output_activation="relu")
def test_input_dim_output_dim_user_input():
    """Tests whether network rejects an invalid input_dim from user"""
    bad_input_dims = [-1, "aa", ["dd"], [2], 0, 2.5, {2}]
    for bad_dim in bad_input_dims:
        with pytest.raises(AssertionError):
            NN(input_dim=bad_dim, layers=[2], hidden_activations="relu",
               output_activation="relu")
def test_activations_user_input():
    """Tests whether network rejects an invalid hidden_activations or output_activation from user"""
    inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
    for input_value in inputs_that_should_fail:
        # Each invalid value must be rejected for *both* parameters. The
        # original put both constructions under a single ``raises`` block,
        # so the output_activation case was never executed.
        with pytest.raises(AssertionError):
            NN(input_dim=2, layers=[2], hidden_activations=input_value,
               output_activation="relu")
        with pytest.raises(AssertionError):
            NN(input_dim=2, layers=[2], hidden_activations="relu",
               output_activation=input_value)
def test_initialiser_user_input():
    """Tests whether network rejects an invalid initialiser from user"""
    inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
    for input_value in inputs_that_should_fail:
        with pytest.raises(AssertionError):
            NN(input_dim=2, layers=[2], hidden_activations="relu",
               output_activation="relu", initialiser=input_value)
    # Sanity check: a valid initialiser must be accepted. In the original
    # this call sat unreachable inside the ``raises`` block above.
    NN(input_dim=2, layers=[2], hidden_activations="relu",
       output_activation="relu", initialiser="xavier")
def test_output_shape_correct():
    """Tests whether network returns output of the right shape"""
    # Three cases each: the original used range(1, 3)/range(4, 6) (length 2)
    # against three hidden-unit options, so zip silently dropped the third.
    input_dims = [x for x in range(1, 4)]
    output_dims = [x for x in range(4, 7)]
    linear_hidden_units_options = [ [2, 3, 4], [2, 9, 1], [55, 55, 55, 234, 15]]
    for input_dim, output_dim, linear_hidden_units in zip(input_dims, output_dims, linear_hidden_units_options):
        linear_hidden_units.append(output_dim)
        nn_instance = NN(input_dim=input_dim, layers=linear_hidden_units, hidden_activations="relu",
                         output_activation="relu", initialiser="xavier")
        data = torch.randn((25, input_dim))
        output = nn_instance.forward(data)
        # Batch dimension preserved; feature dimension equals the last layer.
        assert output.shape == (25, output_dim)
def test_output_activation():
    """Tests whether network outputs data that has gone through correct activation function"""
    RANDOM_ITERATIONS = 20
    for _ in range(RANDOM_ITERATIONS):
        data = torch.randn((1, 100))
        nn_instance = NN(input_dim=100, layers=[5, 5, 5],
                         hidden_activations="relu",
                         output_activation="relu", initialiser="xavier")
        out = nn_instance.forward(data)
        # ReLU output is non-negative everywhere.
        assert all(out.squeeze() >= 0)
        nn_instance = NN(input_dim=100, layers=[5, 5, 5],
                         hidden_activations="relu",
                         output_activation="sigmoid", initialiser="xavier")
        out = nn_instance.forward(data)
        # Sigmoid output lies in [0, 1].
        assert all(out.squeeze() >= 0)
        assert all(out.squeeze() <= 1)
        nn_instance = NN(input_dim=100, layers=[5, 5, 5],
                         hidden_activations="relu",
                         output_activation="softmax", initialiser="xavier")
        out = nn_instance.forward(data)
        # Softmax output is a probability distribution: in [0, 1], sums to 1.
        assert all(out.squeeze() >= 0)
        assert all(out.squeeze() <= 1)
        assert round(torch.sum(out.squeeze()).item(), 3) == 1.0
        nn_instance = NN(input_dim=100, layers=[5, 5, 5],
                         hidden_activations="relu",
                         )
        out = nn_instance.forward(data)
        # No output activation: values should not look activated.
        # NOTE(review): these negative checks are probabilistic -- a random
        # draw could in principle be all non-negative and flake.
        assert not all(out.squeeze() >= 0)
        assert not round(torch.sum(out.squeeze()).item(), 3) == 1.0
def test_linear_layers():
    """Tests whether create_hidden_layers method works correctly"""
    for input_dim, output_dim, hidden_units in zip( range(5, 8), range(9, 12), [[2, 9, 2], [3, 5, 6], [9, 12, 2]]):
        hidden_units.append(output_dim)
        nn_instance = NN(input_dim=input_dim, layers=hidden_units,
                         hidden_activations="relu",
                         output_activation="relu", initialiser="xavier", print_model_summary=False)
        for layer in nn_instance.hidden_layers:
            assert isinstance(layer, nn.Linear)
        # Each layer's in_features must chain from the previous layer's
        # out_features, starting at input_dim.
        assert nn_instance.hidden_layers[0].in_features == input_dim
        assert nn_instance.hidden_layers[0].out_features == hidden_units[0]
        assert nn_instance.hidden_layers[1].in_features == hidden_units[0]
        assert nn_instance.hidden_layers[1].out_features == hidden_units[1]
        assert nn_instance.hidden_layers[2].in_features == hidden_units[1]
        assert nn_instance.hidden_layers[2].out_features == hidden_units[2]
        assert len(nn_instance.hidden_layers) == 3
def test_embedding_layers():
    """Tests whether create_embedding_layers method works correctly"""
    for embedding_in_dim_1, embedding_out_dim_1, embedding_in_dim_2, embedding_out_dim_2 in zip(range(5, 8), range(3, 6), range(1, 4), range(24, 27)):
        nn_instance = NN(input_dim=5, layers=[5],
                         embedding_dimensions =[[embedding_in_dim_1, embedding_out_dim_1], [embedding_in_dim_2, embedding_out_dim_2]])
        for layer in nn_instance.embedding_layers:
            assert isinstance(layer, nn.Embedding)
        # One nn.Embedding per requested [vocab_size, embedding_dim] pair,
        # created with exactly those dimensions.
        assert len(nn_instance.embedding_layers) == 2
        assert nn_instance.embedding_layers[0].num_embeddings == embedding_in_dim_1
        assert nn_instance.embedding_layers[0].embedding_dim == embedding_out_dim_1
        assert nn_instance.embedding_layers[1].num_embeddings == embedding_in_dim_2
        assert nn_instance.embedding_layers[1].embedding_dim == embedding_out_dim_2
def test_non_integer_embeddings_rejected():
    """Tests whether an error is raised if user tries to provide non-integer data to be embedded"""
    # The module-level X holds floats, so forwarding it through columns that
    # are declared as embedding inputs must trip the input-type assertion.
    with pytest.raises(AssertionError):
        nn_instance = NN(input_dim=5, layers=[5],
                         columns_of_data_to_be_embedded=[2, 4],
                         embedding_dimensions=[[50, 3],
                                               [55, 4]])
        out = nn_instance.forward(X)
def test_incorporate_embeddings():
    """Tests the method incorporate_embeddings"""
    # Work on a copy: the original did ``X_new = X`` which only aliased the
    # module-level fixture, so the in-place torch.round leaked into every
    # other test that reads X.
    X_new = X.clone()
    X_new[:, [2, 4]] = torch.round(X_new[:, [2, 4]])
    nn_instance = NN(input_dim=5, layers=[5],
                     columns_of_data_to_be_embedded=[2, 4],
                     embedding_dimensions=[[50, 3],
                                           [55, 4]])
    out = nn_instance.incorporate_embeddings(X_new)
    # Two scalar columns are replaced by 3- and 4-dim embedding vectors.
    assert out.shape == (N, X_new.shape[1] + 3 + 4 - 2)
def test_embedding_network_can_solve_simple_problem():
    """Tests whether network can solve simple problem using embeddings"""
    # Integer features around 20 so they are valid indices for the [50, _]
    # and [55, _] embedding tables; target is a simple AND of thresholds.
    X = torch.randn(N, 2) * 5.0 + 20.0
    y = (X[:, 0] >= 20) * (X[:, 1] <= 20)
    X = X.long()
    nn_instance = NN(input_dim=2, layers=[5, 1],
                     columns_of_data_to_be_embedded=[0, 1],
                     embedding_dimensions=[[50, 3],
                                           [55, 3]])
    assert solves_simple_problem(X, y.float(), nn_instance)
def test_batch_norm_layers():
    """Tests whether batch_norm_layers method works correctly"""
    for input_dim, output_dim, hidden_units in zip( range(5, 8), range(9, 12), [[2, 9, 2], [3, 5, 6], [9, 12, 2]]):
        hidden_units.append(output_dim)
        nn_instance = NN(input_dim=input_dim, layers=hidden_units,
                         hidden_activations="relu", batch_norm=True,
                         output_activation="relu", initialiser="xavier", print_model_summary=False)
        for layer in nn_instance.batch_norm_layers:
            assert isinstance(layer, nn.BatchNorm1d)
        # One batch-norm per hidden layer (none after the output layer),
        # each sized to that layer's width.
        assert len(nn_instance.batch_norm_layers) == len(hidden_units) - 1
        assert nn_instance.batch_norm_layers[0].num_features == hidden_units[0]
        assert nn_instance.batch_norm_layers[1].num_features == hidden_units[1]
        assert nn_instance.batch_norm_layers[2].num_features == hidden_units[2]
def test_model_trains():
    """Tests whether a small range of networks can solve a simple task"""
    # NOTE(review): "None" here is the *string*, not the None object --
    # presumably the library treats it as "no output activation"; confirm.
    for output_activation in ["sigmoid", "None"]:
        nn_instance = NN(input_dim=X.shape[1], layers=[10, 10, 10, 1],
                         output_activation=output_activation, dropout=0.01, batch_norm=True)
        assert solves_simple_problem(X, y, nn_instance)
    # Two-class one-hot version of the same target for the softmax head.
    z = X[:, 0:1] > 0
    z = torch.cat([z ==1, z==0], dim=1).float()
    nn_instance = NN(input_dim=X.shape[1], layers=[10, 10, 10, 2],
                     output_activation="softmax", dropout=0.01, batch_norm=True)
    assert solves_simple_problem(X, z, nn_instance)
def solves_simple_problem(X, y, nn_instance):
    """Train nn_instance briefly on (X, y) with Adam; True when the final
    mean squared error drops below 0.1."""
    optimizer = optim.Adam(nn_instance.parameters(), lr=0.15)
    loss = None
    for _ in range(800):
        predictions = nn_instance.forward(X)
        loss = torch.sum((predictions.squeeze() - y) ** 2) / N
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return loss < 0.1
def test_dropout():
    """Tests whether dropout layer reads in probability correctly"""
    nn_instance = NN(input_dim=X.shape[1], layers=[10, 10, 1], dropout=0.9999)
    assert nn_instance.dropout_layer.p == 0.9999
    # Near-total dropout should make the task unlearnable...
    assert not solves_simple_problem(X, y, nn_instance)
    # ...while near-zero dropout should leave it learnable.
    nn_instance = NN(input_dim=X.shape[1], layers=[10, 10, 1], dropout=0.00001)
    assert solves_simple_problem(X, y, nn_instance)
def test_y_range_user_input():
    """Tests whether network rejects invalid y_range inputs"""
    # Reversed bounds, wrong arity, wrong container type, non-float element.
    invalid_y_range_inputs = [ (4, 1), (2, 4, 8), [2, 4], (np.array(2.0), 6.9)]
    for y_range_value in invalid_y_range_inputs:
        with pytest.raises(AssertionError):
            print(y_range_value)
            nn_instance = NN(input_dim=5, layers=[10, 10, 3],
                             y_range=y_range_value)
def test_y_range():
    """Tests whether setting a y range works correctly"""
    for _ in range(100):
        # Random lower bound biased negative, upper bound biased positive.
        val1 = random.random() - 3.0*random.random()
        val2 = random.random() + 2.0*random.random()
        lower_bound = min(val1, val2)
        upper_bound = max(val1, val2)
        nn_instance = NN(input_dim=5, layers=[10, 10, 3], y_range=(lower_bound, upper_bound))
        random_data = torch.randn(15, 5)
        out = nn_instance.forward(random_data)
        # Every one of the 15 x 3 outputs must fall strictly inside the range.
        assert torch.sum(out > lower_bound).item() == 3*15, "lower {} vs. {} ".format(lower_bound, out)
        assert torch.sum(out < upper_bound).item() == 3*15, "upper {} vs. {} ".format(upper_bound, out)
def test_deals_with_None_activation():
    """Tests whether is able to handle user inputting None as output activation"""
    nn_instance = NN(input_dim=5, layers=[10, 10, 3], output_activation=None)
    assert nn_instance
def test_check_input_data_into_forward_once():
    """Tests that check_input_data_into_forward_once method only runs once"""
    data_to_throw_error = torch.randn(N, 2)  # float data: invalid for embedding columns
    X = torch.randn(N, 2) * 5.0 + 20.0
    y = (X[:, 0] >= 20) * (X[:, 1] <= 20)
    X = X.long()
    nn_instance = NN(input_dim=2, layers=[5, 1],
                     columns_of_data_to_be_embedded=[0, 1],
                     embedding_dimensions=[[50, 3],
                                           [55, 3]])
    # First forward: the one-time input check fires and rejects float data.
    with pytest.raises(AssertionError):
        nn_instance.forward(data_to_throw_error)
    # Integer data passes the check but still fails deeper in torch
    # (NOTE(review): presumably an out-of-range embedding lookup -- confirm).
    with pytest.raises(RuntimeError):
        nn_instance.forward(X)
    # The check has now run once, so the previously rejected float data no
    # longer raises AssertionError on a subsequent forward.
    nn_instance.forward(data_to_throw_error)
| [
"p.christodoulou2@gmail.com"
] | p.christodoulou2@gmail.com |
449468d693eff6360da1441d3305fb079152cb99 | 5fb32bc4f1de0dfd2fa22bb92108b27386d77298 | /tools/workspace/gst-plugins-ugly/repository.bzl | 2d1a8de42c4912fa94012d10f973b2255d0feff6 | [
"Apache-2.0"
] | permissive | mjbots/bazel_deps | 5415e61324c6167cba7c3c0917cad387d9e5107a | 6c9ba1867b5d0ab5e59a7f1205adfd750a6c3610 | refs/heads/master | 2023-07-24T05:19:51.945623 | 2023-07-12T18:22:50 | 2023-07-12T18:22:50 | 139,143,430 | 96 | 38 | Apache-2.0 | 2021-01-05T13:02:09 | 2018-06-29T11:52:09 | Python | UTF-8 | Python | false | false | 1,109 | bzl | # -*- python -*-
# Copyright 2018 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def gst_plugins_ugly_repository(name):
    """Registers the gst-plugins-ugly 1.14.1 source tarball as repository @<name>.

    The archive is fetched from the upstream GStreamer mirror, verified
    against a pinned sha256, and built with this workspace's package.BUILD.
    """
    version = "1.14.1"
    base_url = "https://gstreamer.freedesktop.org/src/gst-plugins-ugly/"
    http_archive(
        name = name,
        urls = [base_url + "gst-plugins-ugly-" + version + ".tar.xz"],
        sha256 = "cff2430bb13f54ef81409a0b3d65ce409a376d4a7bab57a14a97d602539fe1d3",
        strip_prefix = "gst-plugins-ugly-" + version,
        build_file = Label("//tools/workspace/gst-plugins-ugly:package.BUILD"),
    )
| [
"jjp@pobox.com"
] | jjp@pobox.com |
bd32ed5d6d14f48b505ab51db2fcca8dee046b1f | 65b55130f41747ccb239219ae9010ab06b60d430 | /src/tweets/api/pagination.py | c8bb1934c835473c8b741cc39a8030efc01de161 | [] | no_license | amrebrahem22/TweetMe-App | d5c2f5fc20565356a88fdde357433ac54bc5dfac | cad027a34c84f9b2530759ec6b080a5f80a02ffc | refs/heads/master | 2020-11-24T19:12:27.526977 | 2020-03-24T21:44:30 | 2020-03-24T21:44:30 | 228,306,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from rest_framework import pagination
class TweetsPagination(pagination.PageNumberPagination):
    """DRF page-number pagination settings for tweet list endpoints."""
    page_size = 10  # default number of tweets per page
    page_size_query_param = 'page_size'  # clients may override via ?page_size=<n>
    max_page_size = 10000  # hard upper bound on a client-requested page size
"amrebrahem226@gmail.com"
] | amrebrahem226@gmail.com |
1ee7bc3cf79a503523f42dcbf8538f749df6872e | ae646229187ab11607e4889e1cf0e380b26fae5c | /test_joyce_code/data/buildSupplementDict.py | 00c80ec76f7df5ab019c7877bde0a4d4d5e6aed3 | [] | no_license | aschein/tensor_analysis | cb60caf56713cfb7191c46d3cc20c32ea591d382 | 155754be7fa8cfb97432997cb66aa37b1a7b582b | refs/heads/master | 2021-01-17T07:44:00.657311 | 2014-09-11T20:45:14 | 2014-09-11T20:45:14 | 34,183,143 | 1 | 2 | null | 2018-08-25T20:15:18 | 2015-04-18T21:19:08 | Python | UTF-8 | Python | false | false | 1,680 | py | '''
Scrape a list of supplements and herbs
'''
import urllib2
from bs4 import BeautifulSoup
import json
import string
def scrapeNIH():
"""
Function to scrape MedlinePlus Herbs & Supplements Page:
http://www.nlm.nih.gov/medlineplus/druginfo/herb_All.html
"""
supplements = []
PAGE_URL = "http://www.nlm.nih.gov/medlineplus/druginfo/herb_All.html"
soup = BeautifulSoup(urllib2.urlopen(PAGE_URL).read())
ulList = soup.find_all('ul', 'herbul')
for ul in ulList:
for li in ul.findAll('li'):
supplements.append(li.find('a').getText().lower())
print li.find('a').getText()
supplements = list(set(supplements))
return supplements
def scrapeRXList():
"""
Function to scrape rxlist for their classified supplements
"""
supplementDict = {}
PAGE_URLS = ["http://www.rxlist.com/supplements/alpha_"+i+".html" for i in string.lowercase]
for page in PAGE_URLS:
print "Scraping page:" + str(page)
soup = BeautifulSoup(urllib2.urlopen(page).read())
contentMaterial = soup.find_all('div', 'contentstyle')
for li in contentMaterial[0].findAll('li'):
txt = li.find('a').getText() + ' '
## try to encode it in ascii
txt = txt.encode('ascii', 'ignore').lower()
suppClass = str(txt)
if txt.find("("):
suppClass = txt[txt.rfind("(")+1:txt.find(")")]
txt = txt[:txt.find("(")].strip()
supplementDict[txt] = suppClass
## make sure all the values are keys themselves
vals = supplementDict.values()
valDict = zip(vals, vals)
supplementDict.update(valDict)
return supplementDict
def main():
    """Scrape the rxlist supplement classification and persist it as JSON."""
    with open('supplement.json', 'wb') as outfile:
        json.dump(scrapeRXList(), outfile)
if __name__ == "__main__":
main() | [
"robchen401@gmail.com"
] | robchen401@gmail.com |
fff06f4add7041f373fa5d4e1126cde49020c91b | 933f2a9f155b2a4f9746bf2020d1b828bfe49e81 | /面向对象编程/day1/__init__.py | fef6889ac4b4519d0d8df9ec966f87b8ff5b113e | [] | no_license | WuAlin0327/python3-notes | d65ffb2b87c8bb23d481ced100d17cda97aef698 | 1d0d66900f6c4b667b3b84b1063f24ee7823e1bb | refs/heads/master | 2020-03-26T04:49:34.937700 | 2018-12-31T11:12:58 | 2018-12-31T11:12:58 | 144,524,404 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | class LuffyStudent:
school = 'luffycity' #数据属性
    def __init__(self,name,sex,age):# __init__ gives each instance its own unique attributes
        self.Name = name
        self.Sex = sex
        self.Age = age
def learn(self):# 函数属性
print('is learing')
    def eat(self):
        """Announce that the student is eating."""
        print('is eating')
# the object produced afterwards
stu1 = LuffyStudent('wualin','man',29)#LuffyStudent.__init__(stu1,'wualin','man',20)
# instantiation steps once __init__ is defined:
'''
1.txt. 先产生一个空对象stu1
2. 触发LuffyStudent.__init__(stu1,'wualin','man',29)
'''
# (the bare string above is a tutorial note: step 1 creates an empty stu1
#  object, step 2 triggers LuffyStudent.__init__(stu1,'wualin','man',29))
#
# # read
# print(stu1.Name)
# print(stu1.Sex)
# print(stu1.Age)
# # update
# stu1.Name='520'
#
# # delete
# del stu1.Name
#
# # add
# stu1.class_name = 'python开发'
"1032298871@qq.com"
] | 1032298871@qq.com |
b16288bd80014d9349042292618ccd0a8980cd5e | bd0fe4df0e442b02add84ae12f932a0e5511b2f5 | /product/context_processors.py | 6f296e1c0f3b727473691d21b4b4eea8a67bc7b1 | [] | no_license | yeboahd24/Simple-Ecommerce | a1bdae28ec192f2f80ee1ef615dd614e3fd4aca7 | 0cabae9f968da7d176748b0cb4feb5b7e7b9e9ab | refs/heads/main | 2023-08-02T01:01:41.791169 | 2021-09-30T20:06:35 | 2021-09-30T20:06:35 | 374,789,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from product.models import Category
def menu_categories(request):
    """Context processor: expose every Category to templates as 'menu_categories'."""
    return {'menu_categories': Category.objects.all()}
"yeboahd24@gmail.com"
] | yeboahd24@gmail.com |
a3eafbc37dcd3a3bdd09cd7591251894aafe9927 | 1eefb6b82b8e8aac088da9d6e9ff40d235885b5c | /misc/local_occu_to_world_map.py | 8a71f32b768b3de9a9145541450815efe746f9af | [
"Apache-2.0"
] | permissive | danielchandg/ROAR | be513500ba9a44676ed75188933e45512c7f5bfc | a661fbf42cf72c2a8c24ec89a0fb84e77e6af561 | refs/heads/main | 2023-03-28T16:09:44.514515 | 2021-03-31T22:20:11 | 2021-03-31T22:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | from pathlib import Path
import numpy as np
import cv2
import glob
import os
from scipy import sparse
def load_meta_data(f_path: Path) -> np.ndarray:
    """Return the occupancy-map meta-data array stored as a .npy file at *f_path*.

    Raises AssertionError when the file does not exist.
    """
    if not f_path.exists():
        raise AssertionError(f"{f_path} does not exist")
    return np.load(f_path.as_posix())
def create_global_occu_map(meta_data: np.ndarray, local_occu_map_dir_path: Path, regex: str) -> np.ndarray:
    """Fuse per-frame local occupancy maps into one global boolean map.

    meta_data: [min_x, min_y, max_x, max_y, padding] world extent in map cells.
    local_occu_map_dir_path: directory holding the per-frame sparse .npz maps.
    regex: despite the name, a glob suffix appended to the directory path,
        e.g. "/03_*.npz".

    Each partial result is also displayed via visualize() as a side effect.
    """
    assert local_occu_map_dir_path.exists(), f"{local_occu_map_dir_path} does not exist"
    min_x, min_y, max_x, max_y, map_additiona_padding = meta_data
    # pad on both sides so positions slightly outside [min, max] still fit
    x_total = max_x - min_x + 2 * map_additiona_padding
    y_total = max_y - min_y + 2 * map_additiona_padding
    curr_map = np.zeros(shape=(x_total, y_total),
                        dtype=np.float16)
    # oldest file first so the on-screen animation replays in recording order
    file_paths = sorted(glob.glob((local_occu_map_dir_path.as_posix() + regex)), key=os.path.getmtime)
    for fpath in file_paths:
        # assumes each sparse map already has shape (x_total, y_total) -- TODO confirm
        data = sparse.load_npz(fpath).toarray()
        # data = np.load(fpath)
        curr_map = np.logical_or(data, curr_map)  # union of all frames (result becomes bool-typed)
        visualize(curr_map)
    return curr_map
def visualize(m: np.ndarray, wait_key=1):
    """Show *m* resized to 500x500 in an OpenCV window titled "map".

    wait_key: milliseconds passed to cv2.waitKey (0 blocks until a key press).
    """
    frame = cv2.resize(np.float32(m), dsize=(500, 500))
    cv2.imshow("map", frame)
    cv2.waitKey(wait_key)
if __name__ == "__main__":
    meta_data_folder_path = Path("../data/output/occupancy_map/")
    meta_data_file_path = meta_data_folder_path / "meta_data.npy"
    try:
        meta_data: np.ndarray = load_meta_data(meta_data_file_path)
        global_occu_map = create_global_occu_map(meta_data, meta_data_folder_path, regex="/03_*.npz")
        print("Press any key to exit")
        visualize(global_occu_map, wait_key=0)  # wait_key=0 blocks until a key press
    except Exception as e:
        # First-run bootstrap: on any failure (typically the missing meta
        # file) write a default [min_x, min_y, max_x, max_y, padding] record
        # so the next run can proceed.  NOTE(review): this also silently
        # swallows unrelated errors -- consider narrowing the except clause.
        meta_data = np.array([-550, -550, 550, 550, 40])
        np.save(meta_data_file_path.as_posix(), meta_data)
        print(f"Meta data {meta_data} Saved")
| [
"wuxiaohua1011@berkeley.edu"
] | wuxiaohua1011@berkeley.edu |
db1befcc09293bbdb4053444eb972c6ea37f890f | b87f66b13293782321e20c39aebc05defd8d4b48 | /convert/h52txt.py | a546ad8345b9bdc9034fc1027919ae1440a430dc | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | """
Convert 2D array HDF5 files to raw ASCII format.
Fernando Paolo <fpaolo@ucsd.edu>
January 1, 2010
"""
import numpy as np
import tables as tb
import os
import sys
# Python 2 script body: each CLI argument is an HDF5 file holding a 2-D
# array at /data (see module docstring above).
files = sys.argv[1:]
if len(files) < 1:
    # no input files given -- print usage and bail out
    print 'usage: python %s infiles.txt' % sys.argv[0]
    sys.exit()
print 'converting files: %d... ' % len(files)
for f in files:
    # read the whole /data array from the HDF5 file (PyTables 2.x API)
    h5f = tb.openFile(f, 'r')
    data = h5f.root.data.read()
    h5f.close()
    # write it next to the input as <name>.txt, '%f'-formatted columns
    np.savetxt(os.path.splitext(f)[0] + '.txt', data, fmt='%f')
print 'done!'
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
ab72ee0b427d9eb3f188b6eab9a7fa7f2fe882d9 | e15653ec81af4e6ee2e46e966bcef3e82ca40337 | /examples/fiv.py | adc1c5934881cac06dfedd23ee84c8a897afea31 | [] | no_license | ktdreyer/rhcephbugs | fab340619cf16cce2b45200c5a57e9a9087f82a2 | 72601a61393b3df4dd3ad4f2996cfba3b7f2b086 | refs/heads/master | 2023-04-27T10:00:58.049310 | 2023-04-13T20:57:30 | 2023-04-13T21:06:47 | 80,675,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import re
from rhcephbugs.fixed_in_version import FixedInVersion, Build
import bugzilla
# Pretend these values came from CI:
branch = 'ceph-2-rhel-7'
build = 'ceph-10.2.5-1.el7cp'
# Pretend this bug number came from a library that can parse CI messages for
# rhbz ID numbers:
ids = [1367539]
# NOTE(review): the partner (sandbox) instance is the active one here;
# switch to the commented line below to target production Bugzilla.
BZURL = 'partner-bugzilla.redhat.com'
# BZURL = 'bugzilla.redhat.com'
def get_distro(branch):
    """Map a dist-git branch name to its target distro family.

    :param branch: branch name, e.g. 'ceph-2-rhel-7' or 'ceph-2-xenial'.
    :returns: 'RHEL' or 'Ubuntu'.
    :raises RuntimeError: if the branch suffix matches neither distro.
    """
    # raw strings: '\d' in a plain literal is an invalid escape (W605)
    if re.search(r'-rhel-\d+$', branch):
        return 'RHEL'
    if re.search(r'-(?:ubuntu|trusty|xenial)$', branch):
        return 'Ubuntu'
    raise RuntimeError('unknown distro in branch %s' % branch)
def update_fiv(bzapi, ids, build):
    """Merge *build* into the Fixed In Version field of each bug in *ids*.

    :param bzapi: an authenticated python-bugzilla connection.
    :param ids: iterable of Bugzilla bug ID numbers.
    :param build: NVR build string, e.g. 'ceph-10.2.5-1.el7cp'.

    NOTE(review): the distro is derived from the module-level ``branch``
    variable rather than from an argument -- confirm that is intentional.
    """
    # fetch only the fields we read/write, to keep the query small
    bugs = bzapi.getbugs(ids, include_fields=['id', 'fixed_in'])
    for bug in bugs:
        url = 'https://%s/%d' % (BZURL, bug.id)
        fiv = FixedInVersion(bug.fixed_in)
        new = Build.factory(build, get_distro(branch))
        fiv.update(new)
        if bug.fixed_in == str(fiv):
            # no change needed; skip the write to avoid bug-mail noise
            print('%s Fixed In Version is already set to "%s"' % (url, fiv))
            continue
        print('%s changing Fixed In Version "%s" to "%s"' % (url, bug.fixed_in,
                                                             fiv))
        update = bzapi.build_update(fixed_in=str(fiv))
        bzapi.update_bugs(bug.id, update)
if __name__ == '__main__':
    bzapi = bugzilla.Bugzilla(BZURL)
    # python-bugzilla reads cached credentials (e.g. ~/.bugzillatoken);
    # without them every update call would fail, so exit early.
    if not bzapi.logged_in:
        raise SystemExit('Not logged into %s. See ~/.bugzillatoken.' % BZURL)
    update_fiv(bzapi, ids, build)
| [
"kdreyer@redhat.com"
] | kdreyer@redhat.com |
7981bbedcf212c97629525d6fabc949ce97fad7a | 10e1c07d665f9304d5ffd7033c64a164ea2a3ad9 | /Django_Backend/AuthUser/migrations/0002_auto_20181027_1939.py | a48988b7ec0e123dcbed5c9a9b1da32570d1e24a | [] | no_license | garvitkataria/AI_Hackathon_Server_Code | fba56605b25f4698110ebf92aa21809ebdcec462 | 1feee4122615bf0d1384889625a62db84c9ddb8b | refs/heads/master | 2023-01-11T11:26:00.619674 | 2019-09-29T06:09:07 | 2019-09-29T06:09:07 | 207,013,287 | 0 | 0 | null | 2022-12-31T03:04:05 | 2019-09-07T19:00:28 | Python | UTF-8 | Python | false | false | 358 | py | # Generated by Django 2.1.2 on 2018-10-27 19:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('AuthUser', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='is_faculty',
new_name='is_farmer',
),
]
| [
"garvit.k16@iiits.in"
] | garvit.k16@iiits.in |
cd452c8ca68141ccb24d86166024badd87b739b3 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/BLUECOAT-SG-AUTHENTICATION-MIB.py | f7db7ffd3aed15935a72a55e6fd0921536dcea27 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 16,245 | py | #
# PySNMP MIB module BLUECOAT-SG-AUTHENTICATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BLUECOAT-SG-AUTHENTICATION-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:22:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
blueCoatMgmt, = mibBuilder.importSymbols("BLUECOAT-MIB", "blueCoatMgmt")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Counter32, NotificationType, ModuleIdentity, Integer32, IpAddress, TimeTicks, Unsigned32, Bits, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Gauge32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "NotificationType", "ModuleIdentity", "Integer32", "IpAddress", "TimeTicks", "Unsigned32", "Bits", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Gauge32", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bluecoatSGAuthentication = ModuleIdentity((1, 3, 6, 1, 4, 1, 3417, 2, 15))
bluecoatSGAuthentication.setRevisions(('2014-08-06 03:00',))
if mibBuilder.loadTexts: bluecoatSGAuthentication.setLastUpdated('201408060300Z')
if mibBuilder.loadTexts: bluecoatSGAuthentication.setOrganization('Blue Coat Systems, Inc.')
class ToggleState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
schannelStats = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2))
lsaDomainControllerStats = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3))
schannelServerStats = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4))
authNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 15, 5))
authNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 15, 5, 0))
schannelStatsTable = MibTable((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1), )
if mibBuilder.loadTexts: schannelStatsTable.setStatus('current')
schannelStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1), ).setIndexNames((0, "BLUECOAT-SG-AUTHENTICATION-MIB", "schannelStatsIndex"))
if mibBuilder.loadTexts: schannelStatsEntry.setStatus('current')
schannelStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: schannelStatsIndex.setStatus('current')
domainName = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: domainName.setStatus('current')
domainStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: domainStatus.setStatus('current')
timeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 4), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: timeouts.setStatus('current')
transactions = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 5), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: transactions.setStatus('current')
currentWaiters = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 6), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: currentWaiters.setStatus('current')
maxWaiters = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 7), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxWaiters.setStatus('current')
resets = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 2, 1, 1, 8), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: resets.setStatus('current')
lsaDomainControllerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1), )
if mibBuilder.loadTexts: lsaDomainControllerStatsTable.setStatus('current')
lsaDomainControllerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1), ).setIndexNames((0, "BLUECOAT-SG-AUTHENTICATION-MIB", "lsaDomainControllerStatsIndex"))
if mibBuilder.loadTexts: lsaDomainControllerStatsEntry.setStatus('current')
lsaDomainControllerStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: lsaDomainControllerStatsIndex.setStatus('current')
domainControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: domainControllerName.setStatus('current')
address = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: address.setStatus('current')
siteName = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: siteName.setStatus('current')
avgLDAPPingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLDAPPingTime.setStatus('current')
lastLDAPPingTime = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lastLDAPPingTime.setStatus('current')
flags = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 3, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: flags.setStatus('current')
schannelServerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1), )
if mibBuilder.loadTexts: schannelServerStatsTable.setStatus('current')
schannelServerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1), ).setIndexNames((0, "BLUECOAT-SG-AUTHENTICATION-MIB", "schannelServerStatsIndex"))
if mibBuilder.loadTexts: schannelServerStatsEntry.setStatus('current')
schannelServerStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: schannelServerStatsIndex.setStatus('current')
serverName = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverName.setStatus('current')
connectionsInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: connectionsInUse.setStatus('current')
availableConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: availableConnections.setStatus('current')
averageTransactions = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: averageTransactions.setStatus('current')
authsByDomainLast1Minute = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 6), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: authsByDomainLast1Minute.setStatus('current')
authsByDomainLast3Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 7), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: authsByDomainLast3Minutes.setStatus('current')
authsByDomainLast5Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 8), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: authsByDomainLast5Minutes.setStatus('current')
authsByDomainLast15Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 9), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: authsByDomainLast15Minutes.setStatus('current')
authsByDomainLast60Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 10), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: authsByDomainLast60Minutes.setStatus('current')
failedAuthsByDomainLast1Minute = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 11), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: failedAuthsByDomainLast1Minute.setStatus('current')
failedAuthsByDomainLast3Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 12), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: failedAuthsByDomainLast3Minutes.setStatus('current')
failedAuthsByDomainLast5Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 13), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: failedAuthsByDomainLast5Minutes.setStatus('current')
failedAuthsByDomainLast15Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 14), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: failedAuthsByDomainLast15Minutes.setStatus('current')
failedAuthsByDomainLast60Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 15), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: failedAuthsByDomainLast60Minutes.setStatus('current')
avgLatencyPerDomainLast1Minute = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 16), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLatencyPerDomainLast1Minute.setStatus('current')
avgLatencyPerDomainLast3Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 17), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLatencyPerDomainLast3Minutes.setStatus('current')
avgLatencyPerDomainLast5Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 18), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLatencyPerDomainLast5Minutes.setStatus('current')
avgLatencyPerDomainLast15Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 19), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLatencyPerDomainLast15Minutes.setStatus('current')
avgLatencyPerDomainLast60Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 20), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: avgLatencyPerDomainLast60Minutes.setStatus('current')
maxLatencyPerDomainLast1Minute = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 21), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxLatencyPerDomainLast1Minute.setStatus('current')
maxLatencyPerDomainLast3Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 22), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxLatencyPerDomainLast3Minutes.setStatus('current')
maxLatencyPerDomainLast5Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 23), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxLatencyPerDomainLast5Minutes.setStatus('current')
maxLatencyPerDomainLast15Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 24), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxLatencyPerDomainLast15Minutes.setStatus('current')
maxLatencyPerDomainLast60Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 25), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxLatencyPerDomainLast60Minutes.setStatus('current')
minLatencyPerDomainLast1Minute = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 26), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: minLatencyPerDomainLast1Minute.setStatus('current')
minLatencyPerDomainLast3Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 27), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: minLatencyPerDomainLast3Minutes.setStatus('current')
minLatencyPerDomainLast5Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 28), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: minLatencyPerDomainLast5Minutes.setStatus('current')
minLatencyPerDomainLast15Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 29), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: minLatencyPerDomainLast15Minutes.setStatus('current')
minLatencyPerDomainLast60Minutes = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 15, 4, 1, 1, 30), Counter32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: minLatencyPerDomainLast60Minutes.setStatus('current')
schannelLatencyTrap = NotificationType((1, 3, 6, 1, 4, 1, 3417, 2, 15, 5, 0, 1)).setObjects(("BLUECOAT-SG-AUTHENTICATION-MIB", "domainName"), ("BLUECOAT-SG-AUTHENTICATION-MIB", "latencyType"), ("BLUECOAT-SG-AUTHENTICATION-MIB", "latencyValue"))
if mibBuilder.loadTexts: schannelLatencyTrap.setStatus('current')
mibBuilder.exportSymbols("BLUECOAT-SG-AUTHENTICATION-MIB", authsByDomainLast3Minutes=authsByDomainLast3Minutes, transactions=transactions, siteName=siteName, avgLatencyPerDomainLast15Minutes=avgLatencyPerDomainLast15Minutes, lsaDomainControllerStatsIndex=lsaDomainControllerStatsIndex, resets=resets, address=address, schannelStatsIndex=schannelStatsIndex, maxWaiters=maxWaiters, authsByDomainLast15Minutes=authsByDomainLast15Minutes, maxLatencyPerDomainLast1Minute=maxLatencyPerDomainLast1Minute, schannelStats=schannelStats, availableConnections=availableConnections, currentWaiters=currentWaiters, schannelLatencyTrap=schannelLatencyTrap, domainName=domainName, bluecoatSGAuthentication=bluecoatSGAuthentication, domainStatus=domainStatus, serverName=serverName, PYSNMP_MODULE_ID=bluecoatSGAuthentication, connectionsInUse=connectionsInUse, schannelStatsEntry=schannelStatsEntry, authsByDomainLast5Minutes=authsByDomainLast5Minutes, lsaDomainControllerStatsTable=lsaDomainControllerStatsTable, minLatencyPerDomainLast1Minute=minLatencyPerDomainLast1Minute, minLatencyPerDomainLast5Minutes=minLatencyPerDomainLast5Minutes, authNotificationsPrefix=authNotificationsPrefix, lsaDomainControllerStatsEntry=lsaDomainControllerStatsEntry, authsByDomainLast60Minutes=authsByDomainLast60Minutes, flags=flags, avgLatencyPerDomainLast5Minutes=avgLatencyPerDomainLast5Minutes, domainControllerName=domainControllerName, schannelServerStatsTable=schannelServerStatsTable, minLatencyPerDomainLast3Minutes=minLatencyPerDomainLast3Minutes, failedAuthsByDomainLast5Minutes=failedAuthsByDomainLast5Minutes, maxLatencyPerDomainLast3Minutes=maxLatencyPerDomainLast3Minutes, schannelStatsTable=schannelStatsTable, maxLatencyPerDomainLast15Minutes=maxLatencyPerDomainLast15Minutes, minLatencyPerDomainLast15Minutes=minLatencyPerDomainLast15Minutes, lsaDomainControllerStats=lsaDomainControllerStats, averageTransactions=averageTransactions, minLatencyPerDomainLast60Minutes=minLatencyPerDomainLast60Minutes, 
avgLatencyPerDomainLast60Minutes=avgLatencyPerDomainLast60Minutes, avgLatencyPerDomainLast1Minute=avgLatencyPerDomainLast1Minute, failedAuthsByDomainLast15Minutes=failedAuthsByDomainLast15Minutes, failedAuthsByDomainLast1Minute=failedAuthsByDomainLast1Minute, failedAuthsByDomainLast60Minutes=failedAuthsByDomainLast60Minutes, authNotifications=authNotifications, failedAuthsByDomainLast3Minutes=failedAuthsByDomainLast3Minutes, ToggleState=ToggleState, timeouts=timeouts, schannelServerStatsIndex=schannelServerStatsIndex, avgLatencyPerDomainLast3Minutes=avgLatencyPerDomainLast3Minutes, maxLatencyPerDomainLast5Minutes=maxLatencyPerDomainLast5Minutes, avgLDAPPingTime=avgLDAPPingTime, authsByDomainLast1Minute=authsByDomainLast1Minute, maxLatencyPerDomainLast60Minutes=maxLatencyPerDomainLast60Minutes, lastLDAPPingTime=lastLDAPPingTime, schannelServerStats=schannelServerStats, schannelServerStatsEntry=schannelServerStatsEntry)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
664956ece2799c6d70ff0cd93d28d24230d21daa | afe5c625d818a85598785b43089b65ebf950cb15 | /template_lib/examples/test_graphviz.py | 90c7ba54ab8c3b08038a44ab62f903b40e416cb1 | [] | no_license | weroks/Omni-GAN-DGP | 52039dafa041bd977debba69c1a1d30094e8bfcc | d85898d0634c63f3176a21e3e398aea0a88f2634 | refs/heads/main | 2023-08-20T06:33:18.307812 | 2021-10-28T14:17:00 | 2021-10-28T14:17:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,942 | py | import os
import sys
import unittest
import argparse
from template_lib.examples import test_bash
from template_lib import utils
class TestingGraphviz(unittest.TestCase):
def test_hello(self):
"""
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if 'PORT' not in os.environ:
os.environ['PORT'] = '6006'
if 'TIME_STR' not in os.environ:
os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
# func name
assert sys._getframe().f_code.co_name.startswith('test_')
command = sys._getframe().f_code.co_name[5:]
class_name = self.__class__.__name__[7:] \
if self.__class__.__name__.startswith('Testing') \
else self.__class__.__name__
outdir = f'results/{class_name}/{command}'
import shutil
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
from graphviz import Digraph
filename = os.path.join(outdir, 'hello')
g = Digraph('G', filename=filename, format='png')
g.edge('Hello', 'World')
g.view()
def test_process(self):
"""
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if 'PORT' not in os.environ:
os.environ['PORT'] = '6006'
if 'TIME_STR' not in os.environ:
os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
# func name
assert sys._getframe().f_code.co_name.startswith('test_')
command = sys._getframe().f_code.co_name[5:]
class_name = self.__class__.__name__[7:] \
if self.__class__.__name__.startswith('Testing') \
else self.__class__.__name__
outdir = f'results/{class_name}/{command}'
import shutil
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
from graphviz import Digraph
filename = os.path.join(outdir, 'hello')
from graphviz import Graph
g = Graph('G', filename=filename, format='png')
g.edge('run', 'intr')
g.edge('intr', 'runbl')
g.edge('runbl', 'run')
g.edge('run', 'kernel')
g.edge('kernel', 'zombie')
g.edge('kernel', 'sleep')
g.edge('kernel', 'runmem')
g.edge('sleep', 'swap')
g.edge('swap', 'runswap')
g.edge('runswap', 'new')
g.edge('runswap', 'runmem')
g.edge('new', 'runmem')
g.edge('sleep', 'runmem')
g.view()
def test_fsm(self):
"""
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if 'PORT' not in os.environ:
os.environ['PORT'] = '6006'
if 'TIME_STR' not in os.environ:
os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
# func name
assert sys._getframe().f_code.co_name.startswith('test_')
command = sys._getframe().f_code.co_name[5:]
class_name = self.__class__.__name__[7:] \
if self.__class__.__name__.startswith('Testing') \
else self.__class__.__name__
outdir = f'results/{class_name}/{command}'
import shutil
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
from graphviz import Digraph
filename = os.path.join(outdir, 'hello')
from graphviz import Digraph
f = Digraph('finite_state_machine', filename=filename, format='png')
f.attr(rankdir='LR', size='8,5')
f.attr('node', shape='doublecircle')
f.node('LR_0')
f.node('LR_3')
f.node('LR_4')
f.node('LR_8')
f.attr('node', shape='circle')
f.edge('LR_0', 'LR_2', label='SS(B)')
f.edge('LR_0', 'LR_1', label='SS(S)')
f.edge('LR_1', 'LR_3', label='S($end)')
f.edge('LR_2', 'LR_6', label='SS(b)')
f.edge('LR_2', 'LR_5', label='SS(a)')
f.edge('LR_2', 'LR_4', label='S(A)')
f.edge('LR_5', 'LR_7', label='S(b)')
f.edge('LR_5', 'LR_5', label='S(a)')
f.edge('LR_6', 'LR_6', label='S(b)')
f.edge('LR_6', 'LR_5', label='S(a)')
f.edge('LR_7', 'LR_8', label='S(b)')
f.edge('LR_7', 'LR_5', label='S(a)')
f.edge('LR_8', 'LR_6', label='S(b)')
f.edge('LR_8', 'LR_5', label='S(a)')
f.view()
def test_cluster(self):
"""
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if 'PORT' not in os.environ:
os.environ['PORT'] = '6006'
if 'TIME_STR' not in os.environ:
os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
# func name
assert sys._getframe().f_code.co_name.startswith('test_')
command = sys._getframe().f_code.co_name[5:]
class_name = self.__class__.__name__[7:] \
if self.__class__.__name__.startswith('Testing') \
else self.__class__.__name__
outdir = f'results/{class_name}/{command}'
import shutil
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
from graphviz import Digraph
filename = os.path.join(outdir, 'hello')
from graphviz import Digraph
g = Digraph('G', filename=filename, format='png')
# NOTE: the subgraph name needs to begin with 'cluster' (all lowercase)
# so that Graphviz recognizes it as a special cluster subgraph
with g.subgraph(name='cluster_0') as c:
c.attr(style='filled', color='lightgrey')
c.node_attr.update(style='filled', color='white')
c.edges([('a0', 'a1'), ('a1', 'a2'), ('a2', 'a3')])
c.attr(label='process #1')
with g.subgraph(name='cluster_1') as c:
c.attr(color='blue')
c.node_attr['style'] = 'filled'
c.edges([('b0', 'b1'), ('b1', 'b2'), ('b2', 'b3')])
c.attr(label='process #2')
g.edge('start', 'a0')
g.edge('start', 'b0')
g.edge('a1', 'b3')
g.edge('b2', 'a3')
g.edge('a3', 'a0')
g.edge('a3', 'end')
g.edge('b3', 'end')
g.node('start', shape='Mdiamond')
g.node('end', shape='Msquare')
g.view()
    def test_rank_same(self):
        """Render a digraph using rank='same' subgraphs to align node rows."""
        # Provide safe defaults for environment-driven configuration.
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name: derive the output directory from the test method / class names.
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
            if self.__class__.__name__.startswith('Testing') \
            else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import shutil
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)
        from graphviz import Digraph
        filename = os.path.join(outdir, 'hello')
        d = Digraph('G', filename=filename, format='png')
        # Nodes inside a rank='same' subgraph are laid out on the same rank.
        with d.subgraph() as s:
            s.attr(rank='same')
            s.node('A')
            s.node('X')
        d.node('C')
        with d.subgraph() as s:
            s.attr(rank='same')
            s.node('B')
            s.node('D')
            s.node('Y')
        # Two-letter strings are shorthand for single edges, e.g. 'AB' == A->B.
        d.edges(['AB', 'AC', 'CD', 'XY'])
        # NOTE(review): view() opens an interactive viewer -- environment-dependent.
        d.view()
| [
"zhoupengcv@sjtu.edu.cn"
] | zhoupengcv@sjtu.edu.cn |
4b3e7223a2519962a38b27c71678023ccb425d4a | d02508f5ebbbdb4ba939ba830a8e8d9abc69774a | /Implementation/beautifulTriplets.py | 6d68d2dc8232b8c956171e12e015bb5b1dc96efe | [] | no_license | sameersaini/hackerank | e30c6270aaa0e288fa8b25392819509849cdabad | 3e66f89e02ade703715237722eda2fa2b135bb79 | refs/heads/master | 2021-06-12T09:24:15.266218 | 2019-10-18T02:22:00 | 2019-10-18T02:22:00 | 31,360,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!/bin/python3
import os
# Complete the beautifulTriplets function below.
def beautifulTriplets(d, arr):
    """Count triplets (i < j < k) with arr[j]-arr[i] == d and arr[k]-arr[j] == d.

    Args:
        d: common difference of the triplet (problem guarantees d > 0).
        arr: the input sequence of integers.

    Returns:
        int: number of beautiful triplets.

    Replaces the original O(n^2) repeated list-membership scans with an
    O(n) value-count lookup, and fixes under-counting when arr contains
    duplicate values (the existence check counted each value once).
    """
    from collections import Counter  # local import: module only imports os

    counts = Counter(arr)
    # Every occurrence of a middle element m pairs each (m - d) with each
    # (m + d), so the product of their counts is the triplet count through m.
    return sum(counts[m - d] * counts[m + d] for m in arr)
if __name__ == '__main__':
    # HackerRank harness: first input line is "n d", second line the array;
    # the answer goes to the file named by the OUTPUT_PATH env variable.
    with open(os.environ['OUTPUT_PATH'], 'w') as out_file:
        header = input().split()
        n, d = int(header[0]), int(header[1])
        arr = [int(token) for token in input().rstrip().split()]
        answer = beautifulTriplets(d, arr)
        out_file.write(str(answer) + '\n')
"sameersaini40@gmail.com"
] | sameersaini40@gmail.com |
bda379d481f9d5aa07a6d4fcb8d7ab28d72843c6 | a2b20597759990445081057d35d113434cfcf970 | /stubs/typeshed/typeshed/stdlib/multiprocessing/queues.pyi | 7ba17dcfbe0583d4395d2ede1a5f005137d3083b | [
"MIT",
"Apache-2.0"
] | permissive | facebook/pyre-check | 34059599c02b65605c574f13555229f3b931fd4e | fe8ccedc572cc1faa1fd01e9138f65e982875002 | refs/heads/main | 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 | MIT | 2023-09-13T17:02:32 | 2017-11-10T17:31:36 | OCaml | UTF-8 | Python | false | false | 1,238 | pyi | import queue
import sys
from typing import Any, Generic, TypeVar
if sys.version_info >= (3, 9):
from types import GenericAlias
__all__ = ["Queue", "SimpleQueue", "JoinableQueue"]
_T = TypeVar("_T")
class Queue(queue.Queue[_T]):
    # Stub for multiprocessing.Queue: queue.Queue's interface plus the
    # process-queue-specific close/join_thread/cancel_join_thread methods.
    # FIXME: `ctx` is a circular dependency and it's not actually optional.
    # It's marked as such to be able to use the generic Queue in __init__.pyi.
    def __init__(self, maxsize: int = 0, *, ctx: Any = ...) -> None: ...
    def get(self, block: bool = True, timeout: float | None = None) -> _T: ...
    def put(self, obj: _T, block: bool = True, timeout: float | None = None) -> None: ...
    def put_nowait(self, item: _T) -> None: ...
    def get_nowait(self) -> _T: ...
    def close(self) -> None: ...
    def join_thread(self) -> None: ...
    def cancel_join_thread(self) -> None: ...
# task_done()/join() are already declared on the queue.Queue base stub.
class JoinableQueue(Queue[_T]): ...
class SimpleQueue(Generic[_T]):
    # Stub for multiprocessing.SimpleQueue; close() and __class_getitem__
    # exist only on Python 3.9+.
    def __init__(self, *, ctx: Any = ...) -> None: ...
    if sys.version_info >= (3, 9):
        def close(self) -> None: ...
    def empty(self) -> bool: ...
    def get(self) -> _T: ...
    def put(self, item: _T) -> None: ...
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, item: Any) -> GenericAlias: ...
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ad4f84c5b22032880bc78e204e2fddfdeba69aec | bec2ccc5f19575518649932fb3f2853adf54c11e | /blog/static_file/static_file/static_file/myapp/templatetags/myfilter.py | 6f40debee780c4c407a83b4bc88233fa4d5bc915 | [] | no_license | liuxinqiqi/djangosite | 08831c63c5fa5a4c8a14dd4bf8beed62138eb58a | 9a1b425cbdb73feb34d7fb1f60c3f2923e262d64 | refs/heads/master | 2022-12-13T11:00:07.039593 | 2017-08-12T08:40:03 | 2017-08-12T08:40:03 | 100,082,409 | 0 | 0 | null | 2022-12-08T00:43:01 | 2017-08-12T01:55:14 | JavaScript | UTF-8 | Python | false | false | 388 | py | # coding=utf-8
from django import template
register = template.Library()
# 定义一个将日期中的月份转换为大写的过滤器,如8转换为八
# @register.filter
def month_to_upper(key):
    """Return the Chinese numeral for the month of a date-like value."""
    numerals = ('一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '十一', '十二')
    # `key.month` is 1-based, the tuple is 0-based.
    return numerals[key.month - 1]
# 注册过滤器
register.filter('month_to_upper', month_to_upper)
| [
"XinQi_Liu@outlook.com"
] | XinQi_Liu@outlook.com |
3e8113768b6f7f970999769e54013464cfb82d4d | 42a0760a051935b2e765d57c445235221a28f49e | /problemSets/top75/383.py | 46c7213de37ce6088699552420409617f18ed531 | [] | no_license | Th3Lourde/l33tcode | 3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3 | eb6b11f97a022b66716cb3890cc56c58f62e8aa4 | refs/heads/master | 2022-12-22T19:05:04.384645 | 2022-12-18T19:38:46 | 2022-12-18T19:38:46 | 232,450,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | class Solution:
def canConstruct(self, ransomNote, magazine):
d = {}
for chr in magazine:
if chr in d:
d[chr] += 1
else:
d[chr] = 1
for chr in ransomNote:
if chr in d:
if d[chr] > 0:
d[chr] -= 1
else:
return False
else:
return False
return True
| [
"th3lourde@Eli.local"
] | th3lourde@Eli.local |
078be585911c8382c5c3cf1f080eb2f8af4f1d8c | 06ffc855b6a739b384f78241a3a368b4f76b6885 | /onsim/folder_structure.py | 6cb502268f3f07eab3fdda3d23808b0ecb8c8a1b | [] | no_license | hasanmoudud/darcoda | c719a7244832cc2698478bd41d088ca4e55ba165 | 413ad9475920baa256562f7c94bfa4bf0c6091d6 | refs/heads/master | 2021-01-17T07:28:38.554719 | 2015-03-29T02:48:47 | 2015-03-29T02:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | ../bin/folder_structure.py | [
"psteger@phys.ethz.ch"
] | psteger@phys.ethz.ch |
6e8233795f475c68be39cde2527a85c241ecb3fa | b9b15de6abaf44d14f94cacbc7a0df4c66ea7c83 | /43.py | 51e58b3b564b7f82849a4241918883925cabe040 | [] | no_license | humachine/pe | 96ded174431031e4ca7c9c83401495148257b903 | 01d8b33174f4e100838d040c1bd401e066bb768a | refs/heads/master | 2020-04-06T06:23:54.672102 | 2015-07-28T22:40:10 | 2015-07-28T22:40:10 | 38,980,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import itertools
# Candidate digit pool for a 5-digit divisibility search (looks like a
# partial Project Euler #43-style experiment -- TODO confirm intent).
N=['0','1','3','4','6']
a=list(itertools.permutations(N))
total=0  # NOTE(review): never read or updated afterwards -- dead variable
for i in a:
    j=''.join(i)
    # Reject candidates with a leading zero.
    if j[0]=='0':
        continue
    # 4th digit must be even.
    if int(j[3]) % 2 !=0:
        continue
    # Digits 3..5 must form a multiple of 3.
    if int(j[2:5]) % 3 !=0:
        continue
    # Last digit followed by "57" must form a multiple of 3.
    if int(j[-1]+'57') % 3 !=0:
        continue
    print j  # NOTE(review): Python 2 print statement; will not run on Python 3
| [
"swarun@gmail.com"
] | swarun@gmail.com |
9d2f30bb06a792619fb6a794b828b783ff70cbed | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2397/60618/320984.py | 9a61980142d5804459be4101d887d5bb42b2f36c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | a=int(input())
b=int(input())
# NOTE(review): hard-coded answer table keyed on the judge's sample inputs
# (a competitive-programming output hack, per the trailing commented print).
# `b` only disambiguates a==3: the (3, 19) pair prints 17, any other a==3
# falls through to the plain elif below and prints 32.
if a==3 and b==19:
    print(17)
elif a==7:
    print(15)
elif a==12:
    print(15)
elif a==3:
    print(32)
elif a==1:
    print(4)
elif a==15:
    print(704)
elif a==32:
    print(10)
else:
    # Fallback: echo the first input unchanged.
    print(a)
#print(17)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
c9ac7a3e3c7954ce57d886098b7c8e74f60df27b | 3d9eb7e24090adff31862a3e6614217d76ff60f7 | /testing/sample.py | 3ce9262b13f8890eafc45edb9d1db51e909e5cf8 | [] | no_license | asvetlov/articles | 4ec342d3346b77897b4d2ecf3c1eb170859edabb | e9a8d35e755221a0b22c8a99b8680e5ef8baa80f | refs/heads/master | 2016-09-08T01:59:11.800396 | 2015-06-03T08:46:54 | 2015-06-03T08:46:54 | 19,406,486 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from datetime import datetime, timedelta
class Billing(object):
    """Remembers its creation time; the record may be shown for five seconds."""

    # Class attribute so tests can substitute a fake clock per instance.
    now = datetime.now

    def __init__(self):
        # Timestamp taken through the (possibly overridden) clock.
        self.timestamp = self.now()

    def can_show(self):
        """Return True while less than five seconds have elapsed."""
        elapsed = self.now() - self.timestamp
        return elapsed < timedelta(seconds=5)
#### Test
import unittest
import mocker
class TestBillling(unittest.TestCase):
    # NOTE(review): class name has a triple 'l' ("Billling") -- likely a typo,
    # kept as-is since unittest discovers it regardless.
    # Uses the third-party `mocker` library to script Billing.now's return
    # values, simulating the passage of time without sleeping.
    def setUp(self):
        self.mocker = mocker.Mocker()
    def tearDown(self):
        self.mocker = None
    def test_can_show(self):
        billing = Billing()
        now = self.mocker.mock()
        stamp = billing.timestamp
        # Replace the instance clock with the scripted mock.
        billing.now = now
        # mocker setup
        with self.mocker.order():
            # first call - just now
            now()
            self.mocker.result(stamp)
            # after 4 seconds
            now()
            self.mocker.result(stamp + timedelta(seconds=4))
            # after next 4 seconds
            now()
            self.mocker.result(stamp + timedelta(seconds=8))
        # test replay
        with self.mocker:
            # first call
            self.assertEqual(True, billing.can_show())
            # second call
            self.assertEqual(True, billing.can_show())
            # third call: 8 seconds elapsed, past the 5-second window
            self.assertEqual(False, billing.can_show())
unittest.main()
| [
"andrew.svetlov@gmail.com"
] | andrew.svetlov@gmail.com |
3c47065803c2c70d16235f16edd5bb25405c0b57 | 1e12a6f1957dc47c50845a39d626ea9a1a541268 | /backend/articles/urls.py | 82913153c2c86d190d1335b0ed5e486cb3d0834f | [] | no_license | sungguenja/fincat-findog | 6e7d276bcd8853300916987f70b0d159ba5cff4d | c62d17f64f4f1e8d86a982feb4842d3729b587c5 | refs/heads/master | 2023-01-03T02:11:56.612927 | 2020-10-24T16:47:48 | 2020-10-24T16:47:48 | 306,927,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from django.urls import path
from . import views
# URL namespace: reverse these routes as 'articles:<name>'.
app_name = 'articles'
urlpatterns = [
    path('city/', views.city_list, name='city'),
    path('borough/', views.borough_list, name='borough'),
    # Species list is scoped to one animal's primary key.
    path('species/<int:animal_pk>/', views.species_list, name='species'),
    path('animal/', views.animal_list, name='animal'),
    path('myarticles/', views.article_list, name="article_list"),
    path('search_api/',views.search_api, name="search_api"),
]
| [
"59605197+sungguenja@users.noreply.github.com"
] | 59605197+sungguenja@users.noreply.github.com |
933927f9173118bff7796bb05e32536c956cf44d | b0bd3342c244ebf30ae5ab29daa078f2b39010f7 | /EmbedModel.py | e4d22900e04f20f54fcf83aa983ee4d2d26e26bb | [] | no_license | naiqili/itime_learning | 30a8af7f1234277162ccdd4c69cd9f9a4a7ab412 | d9b191bb32a7e49cb99443d7dccea5bb392aee90 | refs/heads/master | 2021-06-19T04:54:06.239320 | 2017-06-26T13:35:39 | 2017-06-26T13:35:39 | 92,792,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | import tensorflow as tf
import numpy as np
class EmbedModel():
    """TF1 graph model scoring candidate items for a user.

    Combines a frozen user-item score tensor (`uif`), frozen user/item
    embedding and context matrices, and frozen item feature vectors, all
    loaded from .npy files.  build_model() mixes a relevance score with a
    diversity score over already-selected items.
    """
    def __init__(self, conf):
        # conf must provide uif_path, embedPath, featMatDir plus the size
        # fields read in add_variables (user_size, item_size, embed_size,
        # feat_size, z_size, recAlgos, ...).
        self.conf = conf
        self.uif_mat = np.load(conf.uif_path)
        self.embed_user_mat = np.load("%sembed_user.npy" % conf.embedPath)
        self.embed_item_mat = np.load("%sembed_item.npy" % conf.embedPath)
        self.context_user_mat = np.load("%scontext_user.npy" % conf.embedPath)
        self.context_item_mat = np.load("%scontext_item.npy" % conf.embedPath)
        self.feat_mat = np.load("%sfeat_mat.npy" % conf.featMatDir)
    def add_variables(self, reuse=False):
        """Create the frozen data variables, trainable weights and placeholders."""
        conf = self.conf
        # All pre-computed matrices are registered as non-trainable variables.
        with tf.variable_scope('Fixed', reuse=reuse):
            self.uif = tf.get_variable('uif',
                                       [conf.user_size,
                                        conf.item_size,
                                        len(conf.recAlgos)],
                                       initializer=tf.constant_initializer(self.uif_mat),
                                       trainable=False)
            self.embed_user = tf.get_variable('embed_user',
                                              [conf.user_size,
                                               conf.embed_size],
                                              initializer=tf.constant_initializer(self.embed_user_mat),
                                              trainable=False)
            self.embed_item = tf.get_variable('embed_item',
                                              [conf.item_size,
                                               conf.embed_size],
                                              initializer=tf.constant_initializer(self.embed_item_mat),
                                              trainable=False)
            self.context_user = tf.get_variable('context_user',
                                                [conf.user_size,
                                                 conf.embed_size],
                                                initializer=tf.constant_initializer(self.context_user_mat),
                                                trainable=False)
            self.context_item = tf.get_variable('context_item',
                                                [conf.item_size,
                                                 conf.embed_size],
                                                initializer=tf.constant_initializer(self.context_item_mat),
                                                trainable=False)
            self.feat_embed = tf.get_variable('feat',
                                              [conf.item_size,
                                               conf.feat_size],
                                              initializer=tf.constant_initializer(self.feat_mat),
                                              trainable=False)
        # Optional dropout on the embedding matrices (only active in training).
        if self.conf.drop_embed:
            self.embed_user = tf.contrib.layers.dropout(self.embed_user, self.conf.keep_prob, is_training=self.conf.is_training)
            self.embed_item = tf.contrib.layers.dropout(self.embed_item, self.conf.keep_prob, is_training=self.conf.is_training)
            self.context_user = tf.contrib.layers.dropout(self.context_user, self.conf.keep_prob, is_training=self.conf.is_training)
            self.context_item = tf.contrib.layers.dropout(self.context_item, self.conf.keep_prob, is_training=self.conf.is_training)
        # Joint representations: embedding ++ context (++ item features).
        self.item_joint_embed = tf.concat([self.embed_item, self.context_item], 1)
        self.user_joint_embed = tf.concat([self.embed_user, self.context_user], 1)
        self.item_feat_joint_embed = tf.concat([self.item_joint_embed, self.feat_embed], 1)
        # Trainable weights for the relevance and diversity scorers.
        with tf.variable_scope('Weights', reuse=reuse):
            self.v1 = tf.get_variable('v1',
                                      [len(conf.recAlgos), 1])
            self.v2 = tf.get_variable('v2',
                                      [conf.z_size, 1])
            self.W_z = tf.get_variable('W_z',
                                       [conf.z_size,
                                        2*conf.embed_size+conf.feat_size,
                                        2*conf.embed_size+conf.feat_size])
            self.W_rel = tf.get_variable('W_rel',
                                         [2*conf.embed_size,
                                          2*conf.embed_size])
        # Placeholders: already-selected item ids, full candidate ids, the
        # ground-truth item id, and the active user id.
        self.ph_selected_items = tf.placeholder(tf.int32, shape=(None,))
        self.ph_all_items = tf.placeholder(tf.int32, shape=(None,))
        self.ph_groundtruth = tf.placeholder(tf.int32, shape=[])
        self.ph_user = tf.placeholder(tf.int32, shape=[])
    def build_model(self):
        """Assemble the scoring graph, prediction op, loss and train op."""
        # Per-user recommendation-algorithm scores for every item.
        uif_u = self.uif[self.ph_user]
        if self.conf.drop_matrix:
            uif_u = tf.contrib.layers.dropout(uif_u, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
        # Relevance = algorithm-score mixture + bilinear user/item term.
        rel_score1 = tf.matmul(uif_u, self.v1)
        user_embed_u = tf.expand_dims(tf.nn.embedding_lookup(self.user_joint_embed, self.ph_user), 1)
        rel_score2 = tf.matmul(tf.matmul(self.item_joint_embed, self.W_rel), user_embed_u)
        rel_score = rel_score1 + rel_score2
        def fn_i0(): # (choices, score_sum) when i = 0
            # Nothing selected yet: all items are candidates, relevance only.
            return (self.ph_all_items, tf.squeeze(rel_score))
        def fn_not_i0(): # (choices, score_sum) when i != 0
            # Add a diversity score against the already-selected items.
            selected_items = self.ph_selected_items
            iur = self.item_feat_joint_embed
            if self.conf.drop_matrix:
                iur = tf.contrib.layers.dropout(iur, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
            se = tf.nn.embedding_lookup(iur, selected_items)
            se = tf.transpose(se)
            # see test/einsum_test.py
            iur_w = tf.einsum('nu,zud->znd', iur, self.W_z)
            iur_w_se = tf.einsum('znu,uk->znk', iur_w, se)
            # Max over selected items, then squash through tanh.
            mp_iur_w_se = tf.reduce_max(iur_w_se, axis=2) # z x n
            mp_iur_w_se = tf.transpose(mp_iur_w_se) # n x z
            mp_iur_w_se = tf.tanh(mp_iur_w_se)
            div_score = tf.matmul(mp_iur_w_se, self.v2) # n x 1
            score_sum = tf.squeeze(rel_score + div_score) # vec of n
            # Candidates = all items minus the already-selected set.
            choices = tf.reshape(tf.sparse_tensor_to_dense(tf.sets.set_difference([self.ph_all_items], [selected_items])), [-1]) # vec of remaining choices
            return (choices, score_sum)
        i = tf.shape(self.ph_selected_items)[0]
        # Branch on whether anything has been selected yet.
        choices, score_sum = tf.cond(tf.equal(i, 0),
                                     lambda: fn_i0(),
                                     lambda: fn_not_i0())
        # Prediction: the highest-scoring remaining candidate.
        eff_score = tf.gather(score_sum, choices, validate_indices=False) # vec of choices
        _argmax = tf.argmax(eff_score, axis=0)
        _pred = tf.gather(choices, _argmax, validate_indices=False)
        # Cross-entropy over ALL item scores against the ground-truth id.
        _loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=score_sum, labels=self.ph_groundtruth)
        self.loss = _loss
        self.pred = _pred
        self.loss_summary = tf.summary.scalar('Loss', self.loss)
        if self.conf.is_training:
            self.train_op = tf.train.AdamOptimizer(self.conf.lr).minimize(self.loss)
| [
"naiqil@student.unimelb.edu.au"
] | naiqil@student.unimelb.edu.au |
e29b0eccefaf4aa55fcb3248506c534cda082e6c | 0a0bf0c955e98ffebf0bee81496291e984366887 | /maxinai/letters/letters_service_adv.py | b5cdf1e213ec9d84db3b0976a28c42d43b42af98 | [] | no_license | MaxinAI/school-of-ai | 11ee65c935638b8bb9f396f25c943bd6e8e7fc0f | 3c8f11ae6cb61df186d4dfa30fa5aba774bfbeba | refs/heads/master | 2023-01-22T17:24:33.208956 | 2023-01-20T14:49:40 | 2023-01-20T14:49:40 | 212,200,415 | 52 | 77 | null | 2020-04-10T07:15:06 | 2019-10-01T21:11:52 | Jupyter Notebook | UTF-8 | Python | false | false | 3,667 | py | """
Created on Nov 15, 2017
Service for model interface
@author: Levan Tsinadze
"""
import logging
import numpy as np
import PIL
import torch
from flask import Flask, json, render_template, request
from maxinai.letters.image_reader import request_file
from maxinai.letters.service_config import configure
from torch import nn, no_grad
from torchvision import transforms
logger = logging.getLogger(__name__)
tfms = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
_PREDICTION_KEY = 'prediction'
# Initializes web container
app = Flask(__name__)
# NOTE(review): applying torch.inference_mode() as a *class* decorator only
# wraps the constructor call, not the methods; forward() already runs under
# @no_grad(), so this decorator appears misplaced -- confirm before changing.
@torch.inference_mode()
class ModelWrapper(object):
    """Model wrapper for inference"""
    def __init__(self, model: nn.Module, trfms: transforms):
        # eval() returns the same module switched to inference mode.
        self.model = model.eval()
        self.trfms = trfms
    @no_grad()
    def forward(self, *imgs: PIL.Image) -> np.ndarray:
        # Transform each PIL image and batch them along a new first axis.
        itns = torch.stack([self.trfms(x) for x in imgs])
        otns = self.model(itns)
        results = otns.cpu().data.numpy()
        return results
    def __call__(self, *args, **kwargs):
        # Make the wrapper directly callable like a model.
        return self.forward(*args, **kwargs)
def init_wrapper():
    """
    Load model from disk and initialize model wrapper
    Returns:
        wrapper: model wrapper
    """
    # `flags` is the module-level configuration parsed in the __main__ block;
    # the whole module is deserialized onto CPU.
    net = torch.load(flags.model_path, map_location='cpu')
    net.eval()  # inference mode: disables dropout / batch-norm updates
    wrapper = ModelWrapper(net, tfms)
    return wrapper
def recognize_image(image_data):
    """
    Recognizes from binary image
    Args:
        image_data: binary image
    Returns:
        response_json: JSON string with the predicted class label
    """
    # Decode the raw request payload into an image (project helper).
    img = request_file(flags, image_data)
    predictions = model(img)
    # Index of the highest-scoring class across the flattened scores.
    predictions = np.argmax(predictions)
    response_dict = {'geoletters': 'true',
                     _PREDICTION_KEY: class_names[predictions]}
    response_json = json.dumps(response_dict)
    return response_json
@app.route('/', methods=['GET', 'POST'])
def cnn_recognize():
    """Web method for recognition
    Returns:
        resp - recognition response
    """
    if request.method == 'POST':
        # POST body carries the raw image bytes to classify.
        resp = recognize_image(request.data)
    elif request.method == 'GET':
        # GET serves the upload page.
        resp = render_template('index.html')
    return resp
@app.route('/upload', methods=['GET', 'POST'])
def cnn_upload():
    """Recognizes uploaded images
    Returns:
        resp - recognition response (JSON string)
    """
    # Same recognition path as '/', but without the HTML landing page.
    return recognize_image(request.data)
def read_labels(flags):
    """Read the label dictionary from ``flags.label_path``.

    Args:
        flags: configuration parameters; only ``label_path`` is read.

    Returns:
        dict: labels parsed from the JSON file, or an empty dict when no
        label file is configured.
    """
    labels_file = flags.label_path
    if labels_file is not None:
        with open(labels_file, 'r') as fp:
            model_labels = json.load(fp)
        # Lazy %-formatting: the original passed the dict as a stray
        # positional argument with no placeholder, which makes the logging
        # module raise a formatting error whenever DEBUG is enabled.
        logger.debug('model_labels - %s', model_labels)
    else:
        model_labels = {}
    return model_labels
def load_labels(flags):
    """Reads labels JSON file
    Args:
        flags - configuration parameters
    Returns:
        tuple of -
            labels_json - labels JSON with indices
            class_names - class labels
    """
    labels_json = read_labels(flags)
    # JSON keys arrive as strings; convert to int indices so the argmax
    # result from the model can be used for lookup directly.
    class_names = {
        int(idx): class_name for idx, class_name in labels_json.items()}
    logger.debug(class_names)
    return labels_json, class_names
if __name__ == "__main__":
    # Parse configuration, set log verbosity, load the model and labels,
    # then start the Flask development server.
    flags = configure()
    logging.basicConfig(
        level=logging.DEBUG if flags.verbose else logging.INFO)
    model = init_wrapper()
    _, class_names = load_labels(flags)
    # Prefer the label count when labels were provided; otherwise keep the
    # configured number of classes.
    flags.num_classes = len(class_names) if len(
        class_names) > 0 else flags.num_classes
    # threaded=True lets the dev server handle concurrent requests.
    app.run(host=flags.host, port=flags.port, threaded=True)
| [
"levantsinadze@gmail.com"
] | levantsinadze@gmail.com |
907cf454e454edb4d7c50fb9a5aaab80dda21d54 | c3c7398ec14865ea34c7f03aa5e012ddb19f0d5b | /app/forms.py | 63ce71ce50793d33f24d2f80063d2d67c44f9eda | [] | no_license | mzm5466/blog | 0e022f0ce85a0079cb72ffd9f472c7684f94d9fb | 13625fe7028a0df11a30d7de32751e34d681de00 | refs/heads/master | 2021-01-23T16:51:58.296591 | 2018-11-17T06:05:50 | 2018-11-17T06:05:50 | 102,748,039 | 0 | 0 | null | 2018-11-12T23:28:57 | 2017-09-07T14:36:32 | JavaScript | UTF-8 | Python | false | false | 195 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
from django.forms import ModelForm
from app.models import Moment
class MomentForm(ModelForm):
    # ModelForm that auto-generates one form field per Moment model field.
    class Meta:
        model=Moment
        fields='__all__'
"you@example.com"
] | you@example.com |
b730e53bfea1689a32bc9d152957504af28beb0c | e24a007cba8cc63dbc29699e2651fbf27b3e7644 | /알고리즘4일차_0206/당근.py | 259c6bd059c2a66f2002ab4aa9f53f073c8bdd9a | [] | no_license | doyeon-kim-93/algorithm | 14df56481a727651a772cbaed7c7dec90fe38b14 | a706e55a6c5933f4901de5832cb0066cdb4665c3 | refs/heads/master | 2023-04-26T17:40:39.598842 | 2021-05-18T14:27:56 | 2021-05-18T14:27:56 | 241,492,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | T = int(input())
# For each test case: split the carrot array into a non-empty prefix/suffix
# pair minimizing |sum(prefix) - sum(suffix)|; print the 1-based split point
# and the minimal difference (first split wins ties, as before).
for tc in range(1, T + 1):
    N = int(input())
    carrot = list(map(int, input().split()))
    total = sum(carrot)
    # Running prefix sum replaces the original O(N^2) re-summation of both
    # halves at every split point; the suffix sum is always total - prefix.
    prefix = 0
    best_diff = None
    best_idx = 0
    for i in range(N - 1):
        prefix += carrot[i]
        diff = abs(prefix - (total - prefix))
        # Strict '<' keeps the earliest index on ties, matching result.index().
        if best_diff is None or diff < best_diff:
            best_diff = diff
            best_idx = i
    print(best_idx + 1, best_diff)
"kdymay93@gmail.com"
] | kdymay93@gmail.com |
761730fd57eab2fb14f011bc79f7b6a26af7c9d4 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/DP/SubarrayProductLessThanK.py | f0441c2ed62e7ca890c7f6b1bd76bfb2d20a4f91 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,645 | py | """
Your are given an array of positive integers nums.
Count and print the number of (contiguous) subarrays where the product of all the elements in the subarray is less than k.
Example 1:
Input: nums = [10, 5, 2, 6], k = 100
Output: 8
Explanation: The 8 subarrays that have product less than 100 are: [10], [5], [2], [6], [10, 5], [5, 2], [2, 6], [5, 2, 6].
Note that [10, 5, 2] is not included as the product of 100 is not strictly less than k.
Note:
0 < nums.length <= 50000.
0 < nums[i] < 1000.
0 <= k < 10^6.
思路 Dp,处理下 1 即可。 不考虑 0,nums[i] 不会为 0。
beat 19%
测试地址:
https://leetcode.com/problems/subarray-product-less-than-k/description/
可剪枝优化。
"""
c.. Solution o..
___ numSubarrayProductLessThanK nums, k
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
dp # list
result = 0
start = 0
___ i __ r..(l..(nums)):
__ nums[i] < k:
result += 1
dp = [nums[i]]
start = i
______
___ i __ r..(start+1, l..(nums)):
__ nums[i] __ 1 a.. nums[i] < k:
dp.a.. 1)
result += l..(dp)
c_
new # list
__ nums[i] < k:
result += 1
new.a.. nums[i])
___ j __ dp:
__ j * nums[i] < k:
result += 1
new.a.. j * nums[i])
dp = new
r_ result
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
6c4889f32d0f490fc3cecc4bc39dd6ac40054ac6 | 7b2dc269c3766deadb13415284d9848409d850c5 | /tests/test_load_arc.py | 86bb68bebcee5e99f44c5ef43586c441676d39cc | [] | no_license | Joaggi/demande | 8c3f32125cdf6377c9bd8a5b33bf162f8a5ec5cc | 289b8237d8e872e067dd4f6ab2297affe3903f4e | refs/heads/main | 2023-04-18T20:47:21.769183 | 2023-03-08T21:36:08 | 2023-03-08T21:36:08 | 611,455,062 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import pytest
from neuraldensityestimation.load_arc import load_arc
import matplotlib.pylab as plt
def test_load_arc():
X_train, X_train_density, X_test, X_test_density = load_arc(1000, 1000, 2)
plt.axes(frameon = 0)
plt.grid()
plt.scatter(X_test[:,0], X_test[:,1], c = X_test_density , alpha = .2, s = 3, linewidths= 0.0000001)
plt.colorbar()
plt.title('arc dataset')
plt.savefig('reports/arc_dataset.png',dpi = 300)
plt.show()
| [
"joaggi@gmail.com"
] | joaggi@gmail.com |
ad2247d3109cfc819cd987f6f07106d9d8927c6e | d68cb993f5011ac2f6fe6be298a14ba370d4a661 | /cleanrl/experiments/docker/aws/setup.py | 2cc53b8fb7517ca8775cc0a3d909a4b6c795e725 | [
"MIT"
] | permissive | lydia99992/cleanrl | b6cb196a11730e89068a179d27ec99ccc85e9be1 | 418bfc01fe69712c5b617d49d810a1df7f4f0c14 | refs/heads/master | 2022-10-21T08:30:00.561062 | 2020-06-15T18:18:16 | 2020-06-15T18:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # pip install boto3
import boto3
import re
# One-off provisioning script for AWS Batch: creates the job queue and
# registers the container job definition used by the experiments.
client = boto3.client('batch')
print("creating job queue")
response = client.create_job_queue(
    jobQueueName='cleanrl',
    state='ENABLED',
    priority=100,
    computeEnvironmentOrder=[
        {
            'order': 100,
            'computeEnvironment': 'cleanrl'
        }
    ]
)
print(response)
print("job queue created \n=============================")
# print("creating on demand job queue")
# response = client.create_job_queue(
#     jobQueueName='cleanrl_ondemand',
#     state='ENABLED',
#     priority=101,
#     computeEnvironmentOrder=[
#         {
#             'order': 100,
#             'computeEnvironment': 'cleanrl_ondemand'
#         }
#     ]
# )
# print(response)
# print("on demand job queue created \n=============================")
print("creating job definition")
# Default container resources; individual jobs may override at submit time.
response = client.register_job_definition(
    jobDefinitionName='cleanrl',
    type='container',
    containerProperties={
        'image': 'vwxyzjn/cleanrl_shared_memory:latest',
        'vcpus': 1,
        'memory': 1000,
    },
    retryStrategy={
        'attempts': 3
    },
    timeout={
        'attemptDurationSeconds': 1800
    }
)
print(response)
print("job definition created \n=============================")
| [
"costa.huang@outlook.com"
] | costa.huang@outlook.com |
8f44127439c751b32545cc6501eb7ad41de5abf3 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/string/string_template_missing.py | 35e360476f9a07060b26218ba1e06bb57963651a | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 300 | py | # -*- coding: utf-8 -*-
import string
# Demonstrate the difference between Template.substitute(), which raises on
# a missing key, and safe_substitute(), which leaves the placeholder as-is.
values = {'var': 'foo'}
t = string.Template("$var is here but $missing is not provided")
try:
    substituted = t.substitute(values)
except KeyError as missing_key:
    print('ERROR:', str(missing_key))
else:
    print('substitute() :', substituted)
print('safe_substitute():', t.safe_substitute(values))
| [
"350840291@qq.com"
] | 350840291@qq.com |
e08743e4fc371d5d885083bc88c8b5d9c32be2b2 | e916a80eba284b399f9bff3a1f4c676502946059 | /binary_tree_diameter.py | c12f5b5818703dae60f54443b7d824dbf603b0a7 | [] | no_license | valmsmith39a/u-data-structures-algorithms | 109e7d9345bbf19bfd5896bb72afb0020f67c39f | 26c2ce76f46fe43f8ea40314b69b41784c461c40 | refs/heads/master | 2020-12-12T20:25:18.490231 | 2020-05-24T23:22:50 | 2020-05-24T23:22:50 | 234,222,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py |
from queue import Queue
def diameter_of_binary_tree(root):
    # Public entry point: the helper returns (height, diameter); expose only
    # the diameter.
    return diameter_of_binary_tree_func(root)[1]
def diameter_of_binary_tree_func(root):
    """Return (height, diameter) of the subtree rooted at ``root``.

    Height counts nodes on the longest root-to-leaf path; the diameter is
    the length (in edges) of the longest path between any two nodes.
    """
    if root is None:
        return 0, 0
    lh, ld = diameter_of_binary_tree_func(root.left)
    rh, rd = diameter_of_binary_tree_func(root.right)
    # A path through this node joins its two deepest arms.
    through_here = lh + rh
    return 1 + max(lh, rh), max(ld, rd, through_here)
class BinaryTreeNode:
    """A binary tree node holding ``data`` plus optional left/right children."""

    def __init__(self, data):
        self.data = data
        # Children are attached later by the tree builder.
        self.left = None
        self.right = None
def convert_arr_to_binary_tree(arr):
    """
    Takes arr representing level-order traversal of Binary Tree
    """
    # Assumes the array lists TWO child slots (None when absent) for every
    # non-None node already placed -- a malformed/short array would raise
    # IndexError on arr[index] below; TODO confirm callers guarantee this.
    index = 0
    length = len(arr)
    # -1 at the front is treated as an explicit "empty tree" sentinel.
    if length <= 0 or arr[0] == -1:
        return None
    root = BinaryTreeNode(arr[index])
    index += 1
    queue = Queue()
    queue.put(root)
    # BFS: consume two array entries (left, right) per dequeued node.
    while not queue.empty():
        current_node = queue.get()
        left_child = arr[index]
        index += 1
        if left_child is not None:
            left_node = BinaryTreeNode(left_child)
            current_node.left = left_node
            queue.put(left_node)
        right_child = arr[index]
        index += 1
        if right_child is not None:
            right_node = BinaryTreeNode(right_child)
            current_node.right = right_node
            queue.put(right_node)
    return root
def test_function(test_case):
    # test_case is a [level_order_array, expected_diameter] pair; prints the
    # computed diameter and Pass/Fail against the expectation.
    arr = test_case[0]
    solution = test_case[1]
    root = convert_arr_to_binary_tree(arr)
    output = diameter_of_binary_tree(root)
    print(output)
    if output == solution:
        print("Pass")
    else:
        print("Fail")
# Ad-hoc checks: each array is a level-order encoding (None = absent child)
# paired with the expected diameter.
arr = [1, 2, 3, 4, 5, None, None, None, None, None, None]
solution = 3
test_case = [arr, solution]
test_function(test_case)
arr = [1, 2, 3, 4, None, 5, None, None, None, None, None]
solution = 4
test_case = [arr, solution]
test_function(test_case)
arr = [1, 2, 3, None, None, 4, 5, 6, None, 7, 8, 9, 10,
       None, None, None, None, None, None, 11, None, None, None]
solution = 6
test_case = [arr, solution]
test_function(test_case)
| [
"valmsmith39a@gmail.com"
] | valmsmith39a@gmail.com |
e86658d7feac073ee6a8dd2fa49c068b8e6e1086 | a617b546d29b144b6e951cefbfa41a72e9b38ddc | /data/add_stop.py | ccb700eca2a71c9224b5c36950d4d52886071166 | [] | no_license | thangbk2209/natural_language_understanding | 5a5840662b2deb3361a44f83861b75d157d7f587 | 62f59c733996dd75c532d103f2dd1167d9a59c55 | refs/heads/master | 2020-03-25T22:26:47.462682 | 2018-10-18T02:31:29 | 2018-10-18T02:31:29 | 144,223,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from nltk.tokenize import sent_tokenize, word_tokenize
# Copy the corpus, appending a terminating " ." to every line that is not a
# question; question lines (containing '?') pass through unchanged.
# Both handles live in one `with` so the output file is flushed and closed
# (the original left `data_file` open, risking truncated output on exit).
with open('text_classifier_ver7_fix.txt', 'w', encoding="utf8") as data_file, \
        open('text_classifier_ver7.txt', encoding='utf-8') as corpus_file:
    for line in corpus_file:
        if '?' in line:
            data_file.write(line)
        else:
            data_file.write(line.rstrip('\n') + ' .' + '\n')
"thangbk2209@gmail.com"
] | thangbk2209@gmail.com |
9b2da78b192e59efaa38b4e27b1e24f0b2594f54 | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/network/azure-mgmt-network/generated_samples/configuration_policy_group_put.py | ab6749bf9e7cf800fcf920523b217ca684a8f8cf | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 2,111 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python configuration_policy_group_put.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Sample: create/update a VPN server configuration policy group.

    Authenticates with DefaultAzureCredential and issues a long-running
    create-or-update, then prints the finished resource.
    """
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(
        credential=credential,
        subscription_id="subid",
    )

    # Request payload: default policy group with two membership rules.
    policy_group = {
        "properties": {
            "isDefault": True,
            "policyMembers": [
                {"attributeType": "RadiusAzureGroupId", "attributeValue": "6ad1bd08", "name": "policy1"},
                {"attributeType": "CertificateGroupId", "attributeValue": "red.com", "name": "policy2"},
            ],
            "priority": 0,
        }
    }

    poller = client.configuration_policy_groups.begin_create_or_update(
        resource_group_name="rg1",
        vpn_server_configuration_name="vpnServerConfiguration1",
        configuration_policy_group_name="policyGroup1",
        vpn_server_configuration_policy_group_parameters=policy_group,
    )
    response = poller.result()
    print(response)


# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/ConfigurationPolicyGroupPut.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | rdomenzain.noreply@github.com |
a7d07d20476888591eaba5d53db73c0733db2002 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/__init__.py | dbc467573af1172194c53285d70e835a00dbab30 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 2,987 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from collections import namedtuple
from six.moves.urllib_parse import urlparse
from .challenge_auth_policy import ChallengeAuthPolicy, ChallengeAuthPolicyBase
from .client_base import KeyVaultClientBase
from .http_challenge import HttpChallenge
from . import http_challenge_cache as HttpChallengeCache
__all__ = [
"ChallengeAuthPolicy",
"ChallengeAuthPolicyBase",
"HttpChallenge",
"HttpChallengeCache",
"KeyVaultClientBase",
]
# Parsed Key Vault object identifier: base vault URL, object collection
# ("keys"/"secrets"/"certificates"/...), object name and optional version.
_VaultId = namedtuple("VaultId", ["vault_url", "collection", "name", "version"])


def parse_vault_id(url):
    """Parse a Key Vault object URL into a ``VaultId`` namedtuple.

    Expected shape: ``https://<vault>/<collection>/<name>[/<version>]``;
    ``version`` is ``None`` when the URL has only two path segments.

    :raises ValueError: if *url* cannot be parsed, lacks a scheme/host, or
        does not have exactly two or three path segments.
    """
    try:
        parsed_uri = urlparse(url)
    except Exception:  # pylint: disable=broad-except
        # BUG FIX: error message previously read "is not not a valid url".
        raise ValueError("'{}' is not a valid url".format(url))
    if not (parsed_uri.scheme and parsed_uri.hostname):
        raise ValueError("'{}' is not a valid url".format(url))

    # Drop empty segments produced by leading/trailing/double slashes.
    path = list(filter(None, parsed_uri.path.split("/")))

    if len(path) < 2 or len(path) > 3:
        raise ValueError("'{}' is not a valid vault url".format(url))

    return _VaultId(
        vault_url="{}://{}".format(parsed_uri.scheme, parsed_uri.hostname),
        collection=path[0],
        name=path[1],
        version=path[2] if len(path) == 3 else None,
    )
# Location of a backup inside blob storage: the container URL plus the
# folder (virtual directory) name within that container.
BackupLocation = namedtuple("BackupLocation", ["container_url", "folder_name"])


def parse_folder_url(folder_url):
    # type: (str) -> BackupLocation
    """Parse the blob container URL and folder name from a backup's blob storage URL.

    For example, https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313 parses to
    (container_url="https://<account>.blob.core.windows.net/backup", folder_name="mhsm-account-2020090117323313").

    :raises ValueError: if *folder_url* is not a usable blob folder URL.
    """
    try:
        parsed = urlparse(folder_url)

        # the first segment of the path is the container name
        stripped_path = parsed.path.strip("/")
        container = stripped_path.split("/")[0]

        # the rest of the path is the folder name
        folder_name = stripped_path[len(container) + 1 :]

        # this intentionally discards any SAS token in the URL--methods require the SAS token as a separate parameter
        container_url = "{}://{}/{}".format(parsed.scheme, parsed.netloc, container)

        return BackupLocation(container_url, folder_name)
    # BUG FIX: was a bare "except:", which also trapped SystemExit and
    # KeyboardInterrupt; Exception still covers every parsing failure.
    except Exception:  # pylint:disable=broad-except
        raise ValueError(
            '"folder_url" should be the URL of a blob holding a Key Vault backup, for example '
            '"https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313"'
        )
# Optionally expose the async variants. Importing them is best-effort:
# SyntaxError/ImportError are swallowed (presumably the async modules use
# syntax or dependencies unavailable on older interpreters — e.g. Python 2),
# in which case only the synchronous API remains in __all__.
try:
    # pylint:disable=unused-import
    from .async_challenge_auth_policy import AsyncChallengeAuthPolicy
    from .async_client_base import AsyncKeyVaultClientBase

    __all__.extend(["AsyncChallengeAuthPolicy", "AsyncKeyVaultClientBase"])
except (SyntaxError, ImportError):
    pass
| [
"noreply@github.com"
] | hivyas.noreply@github.com |
421356549a8bf9d1cd5079fe809a2e1e3314f3ec | 4626631c5e68a13ed4dde041212da39d344d74d9 | /examples/scripts/get-managed-sans.py | 561948b1d35765367c528692004cd57cd6306cb1 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
import re
if sys.version_info < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA (declining support data collection) if it
    has not been accepted yet, so that login can proceed.

    Errors are printed and swallowed — the script continues either way.

    :param con: appliance connection exposing ``get_eula_status()`` and
        ``set_eula()``.
    """
    # See if we need to accept the EULA before we try to log in.
    # BUG FIX: a redundant, unguarded con.get_eula_status() call (result
    # discarded) used to precede the try block; it doubled the request and
    # could raise outside the handler.
    try:
        if con.get_eula_status() is True:
            print('EULA display needed')
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credentials.

    NOTE: failures are swallowed (only a message is printed) to keep the
    script's original best-effort behavior; callers are not notified.

    :param con: appliance connection exposing ``login()``.
    :param credential: dict with ``userName``/``password`` keys.
    """
    # Login with given credentials.
    try:
        con.login(credential)
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt (Ctrl-C) during login.
    except Exception:
        print('Login failed')
def get_managed_sans(fcs):
    """Fetch the appliance's Managed SAN resources and pretty-print them."""
    pprint(fcs.get_managed_sans())
def main():
    """Parse CLI options, connect to the appliance and list Managed SANs.

    NOTE(review): ``argparse`` (and a second ``sys``) are imported inside the
    ``__main__`` guard below, so main() only works when this file is run as a
    script — confirm before importing main() from another module.
    """
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
Display or list the available Managed SAN resources in the appliance
Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
Proxy (host:port format''')

    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}

    con = hpov.connection(args.host)
    fcs = hpov.fcsans(con)

    # Optional proxy is given as "host:port".
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)

    login(con, credential)
    acceptEULA(con)

    get_managed_sans(fcs)

if __name__ == '__main__':
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
fc81cd518d3585626eb7564962acc4f1ac8cb8b3 | 4a216ef92a3acca38e8705a67642f1bf2037b571 | /benAnadolu_con/makale/migrations/0005_auto_20210813_2255.py | 57daf3967650a65a2b7d519ab893b8b5cdbc4fd3 | [] | no_license | US3B3/Django-Books-Template | be2bfe53bfc01633a1e087d5852e76720905e406 | 82f4923174d36ffd3f34728c318f1e5ac74973da | refs/heads/main | 2023-08-15T04:57:06.742671 | 2021-09-19T02:58:17 | 2021-09-19T02:58:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Generated by Django 3.2.3 on 2021-08-13 22:55
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional rich-text ``aciklama``
    (description) field and an image ``resim`` field to the ``Kategori``
    model."""

    dependencies = [
        ('makale', '0004_auto_20210811_1600'),
    ]

    operations = [
        migrations.AddField(
            model_name='kategori',
            name='aciklama',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='kategori',
            name='resim',
            # Existing rows fall back to 'varsayilan.jpg' ("default" image).
            field=models.ImageField(default='varsayilan.jpg', upload_to='kategori/%Y/%m/%d/'),
        ),
    ]
| [
"="
] | = |
8f54c3fa3599e855b9119cf3cb6e475466c83ce9 | 50d331aec35c1429e0d9b68822623ee9a45b251f | /IPTVPlayer/iptvdm/busyboxdownloader.py | cb2ed44c023f836678bf24c6d483ddaac26cacb0 | [] | no_license | openmb/iptvplayer | cd00c693adcac426214cc45d7ae5c97b9d7cbe91 | bbc3f5b6f445f83639cd1ebb5992dc737bc9023d | refs/heads/master | 2021-01-17T09:58:09.202306 | 2017-03-26T18:19:10 | 2017-03-26T18:19:10 | 83,997,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | # -*- coding: utf-8 -*-
#
# IPTV download manager API
#
# $Id$
#
#
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG, printExc, iptv_system, eConnectCallback, E2PrioFix
from Plugins.Extensions.IPTVPlayer.iptvdm.basedownloader import BaseDownloader
from Plugins.Extensions.IPTVPlayer.iptvdm.wgetdownloader import WgetDownloader
from Plugins.Extensions.IPTVPlayer.iptvdm.iptvdh import DMHelper
###################################################
###################################################
# FOREIGN import
###################################################
from Tools.BoundFunction import boundFunction
from enigma import eConsoleAppContainer
###################################################
###################################################
# One instance of this class can be used only for
# one download
###################################################
class BuxyboxWgetDownloader(WgetDownloader):
    """Downloader backend driving the BusyBox ``wget`` applet through an
    enigma2 eConsoleAppContainer.  One instance handles exactly one download
    (see the module comment above).

    NOTE(review): the "Buxybox" spelling of the class name is preserved —
    external code may reference the class by this exact name.
    """

    def __init__(self):
        printDBG('BuxyboxWgetDownloader.__init__ ----------------------------------')
        WgetDownloader.__init__(self)
        # Handle of the asynchronous availability probe started by
        # isWorkingCorrectly(); killed in _terminate() if still running.
        self.iptv_sys = None

    def __del__(self):
        printDBG("BuxyboxWgetDownloader.__del__ ----------------------------------")

    def getName(self):
        # Human-readable backend name.
        return "busybox wget"

    def isWorkingCorrectly(self, callBackFun):
        """Asynchronously probe for a usable wget; result is delivered to
        *callBackFun* via _checkWorkingCallBack."""
        # Running "wget" with no arguments makes BusyBox print its usage text.
        self.iptv_sys = iptv_system( "wget 2>&1 ", boundFunction(self._checkWorkingCallBack, callBackFun) )

    def _checkWorkingCallBack(self, callBackFun, code, data):
        # wget is considered available when its usage banner is present in
        # the captured output; otherwise the raw output is the failure reason.
        reason = ''
        sts = True
        if 'Usage: wget' not in data:
            sts = False
            reason = data
        self.iptv_sys = None
        callBackFun(sts, reason)

    def start(self, url, filePath, params = {}, info_from=None, retries=0):
        '''
        Override of BaseDownloader.start: launch the BusyBox wget process.
        ``params`` is stored but otherwise unused here (the mutable default
        is never mutated).
        '''
        self.url = url
        self.filePath = filePath
        self.downloaderParams = params
        self.fileExtension = '' # should be implemented in future
        self.outData = ''
        self.contentType = 'unknown'

        if None == info_from:
            info_from = WgetDownloader.INFO.FROM_FILE
        self.infoFrom = info_from

        # NOTE(review): url/filePath are interpolated into a shell command
        # with plain double quotes — values containing '"' or shell
        # metacharacters would break or inject; confirm inputs are trusted.
        cmd = 'wget ' + '"' + self.url + '" -O "' + self.filePath + '" > /dev/null'
        printDBG("Download cmd[%s]" % cmd)

        self.console = eConsoleAppContainer()
        # _cmdFinished fires when the wget process exits.
        self.console_appClosed_conn = eConnectCallback(self.console.appClosed, self._cmdFinished)
        self.console.execute( E2PrioFix( cmd ) )

        self.wgetStatus = self.WGET_STS.CONNECTING
        self.status = DMHelper.STS.DOWNLOADING

        self.onStart()
        return BaseDownloader.CODE_OK

    def _terminate(self):
        """Kill the availability probe (if any) and interrupt a running
        download; returns CODE_NOT_DOWNLOADING when nothing is active."""
        printDBG("BuxyboxWgetDownloader._terminate")

        if None != self.iptv_sys:
            self.iptv_sys.kill()
            self.iptv_sys = None

        if DMHelper.STS.DOWNLOADING == self.status:
            if self.console:
                self.console.sendCtrlC() # kill # produce zombies
            self._cmdFinished(-1, True)
            return BaseDownloader.CODE_OK
        return BaseDownloader.CODE_NOT_DOWNLOADING

    def _cmdFinished(self, code, terminated=False):
        """appClosed callback: classify the final download state from the
        on-disk file size versus the expected remote size."""
        printDBG("BuxyboxWgetDownloader._cmdFinished code[%r] terminated[%r]" % (code, terminated))

        # break circular references
        self.console_appClosed_conn = None
        self.console = None

        self.wgetStatus = self.WGET_STS.ENDED

        # When finished updateStatistic based on file size on disk
        BaseDownloader.updateStatistic(self)

        if terminated:
            self.status = DMHelper.STS.INTERRUPTED
        elif 0 >= self.localFileSize:
            self.status = DMHelper.STS.ERROR
        elif self.remoteFileSize > 0 and self.remoteFileSize > self.localFileSize:
            # Remote size known and larger than what we received -> partial.
            self.status = DMHelper.STS.INTERRUPTED
        else:
            self.status = DMHelper.STS.DOWNLOADED

        if not terminated:
            self.onFinish()
| [
"samsamsam@o2.pl"
] | samsamsam@o2.pl |
ecaf3bcf4fede2cf5d43624547555f1737b0269c | a41e1498e3c080f47abd8e8e57157548df3ebbf1 | /pandas/tests/indexes/ranges/test_join.py | 682b5c8def9ff0e00b533610c1d45a093e7d7a8d | [
"BSD-3-Clause"
] | permissive | pandas-dev/pandas | e7e639454a298bebc272622e66faa9829ea393bb | c7325d7e7e77ecb4a4e57b48bc25265277c75712 | refs/heads/main | 2023-09-01T12:42:07.927176 | 2023-09-01T11:14:10 | 2023-09-01T11:14:10 | 858,127 | 36,166 | 18,728 | BSD-3-Clause | 2023-09-14T21:18:41 | 2010-08-24T01:37:33 | Python | UTF-8 | Python | false | false | 6,268 | py | import numpy as np
from pandas import (
Index,
RangeIndex,
)
import pandas._testing as tm
class TestJoin:
    """Join behavior of RangeIndex against Index[int64] and RangeIndex
    counterparts, for every ``how`` flavor, checking both the joined index
    and the returned left/right indexers."""

    def test_join_outer(self):
        # join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))

        res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
        noidx_res = index.join(other, how="outer")
        tm.assert_index_equal(res, noidx_res)

        eres = Index(
            [0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
        )
        elidx = np.array(
            [0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1],
            dtype=np.intp,
        )
        eridx = np.array(
            [-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
            dtype=np.intp,
        )

        # An outer join of RangeIndex with arbitrary ints cannot stay a
        # RangeIndex.
        assert isinstance(res, Index) and res.dtype == np.dtype(np.int64)
        assert not isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres, exact=True)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)

        # join with RangeIndex
        other = RangeIndex(25, 14, -1)

        res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
        noidx_res = index.join(other, how="outer")
        tm.assert_index_equal(res, noidx_res)

        assert isinstance(res, Index) and res.dtype == np.int64
        assert not isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)

    def test_join_inner(self):
        # Join with non-RangeIndex
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))

        res, lidx, ridx = index.join(other, how="inner", return_indexers=True)

        # no guarantee of sortedness, so sort for comparison purposes
        ind = res.argsort()
        res = res.take(ind)
        lidx = lidx.take(ind)
        ridx = ridx.take(ind)

        eres = Index([16, 18])
        elidx = np.array([8, 9], dtype=np.intp)
        eridx = np.array([9, 7], dtype=np.intp)

        assert isinstance(res, Index) and res.dtype == np.int64
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)

        # Join two RangeIndex
        other = RangeIndex(25, 14, -1)

        res, lidx, ridx = index.join(other, how="inner", return_indexers=True)

        # RangeIndex-with-RangeIndex inner join can preserve RangeIndex.
        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres, exact="equiv")
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)

    def test_join_left(self):
        # Join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))

        res, lidx, ridx = index.join(other, how="left", return_indexers=True)
        eres = index
        eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp)

        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        # left join keeps the caller unchanged, so no left indexer is needed
        assert lidx is None
        tm.assert_numpy_array_equal(ridx, eridx)

        # Join with RangeIndex
        # BUG FIX: this section previously rebuilt ``other`` as an Index,
        # duplicating the case above instead of exercising a RangeIndex
        # counterpart (compare test_join_outer/test_join_inner/test_join_right).
        other = RangeIndex(25, 14, -1)

        res, lidx, ridx = index.join(other, how="left", return_indexers=True)

        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        assert lidx is None
        tm.assert_numpy_array_equal(ridx, eridx)

    def test_join_right(self):
        # Join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))

        res, lidx, ridx = index.join(other, how="right", return_indexers=True)
        eres = other
        elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp)

        # NOTE(review): these isinstance/dtype asserts check ``other`` rather
        # than ``res``; since eres is other they are equivalent here, but
        # ``res`` may have been intended — left as-is.
        assert isinstance(other, Index) and other.dtype == np.int64
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        # right join keeps ``other`` unchanged, so no right indexer is needed
        assert ridx is None

        # Join with RangeIndex
        other = RangeIndex(25, 14, -1)

        res, lidx, ridx = index.join(other, how="right", return_indexers=True)
        eres = other

        assert isinstance(other, RangeIndex)
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        assert ridx is None

    def test_join_non_int_index(self):
        # Joining against object dtype falls back to object-dtype results.
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index([3, 6, 7, 8, 10], dtype=object)

        outer = index.join(other, how="outer")
        outer2 = other.join(index, how="outer")
        expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
        tm.assert_index_equal(outer, outer2)
        tm.assert_index_equal(outer, expected)

        inner = index.join(other, how="inner")
        inner2 = other.join(index, how="inner")
        expected = Index([6, 8, 10])
        tm.assert_index_equal(inner, inner2)
        tm.assert_index_equal(inner, expected)

        left = index.join(other, how="left")
        tm.assert_index_equal(left, index.astype(object))

        left2 = other.join(index, how="left")
        tm.assert_index_equal(left2, other)

        right = index.join(other, how="right")
        tm.assert_index_equal(right, other)

        right2 = other.join(index, how="right")
        tm.assert_index_equal(right2, index.astype(object))

    def test_join_non_unique(self):
        # Duplicates in ``other`` are expanded in the joined result.
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index([4, 4, 3, 3])

        res, lidx, ridx = index.join(other, return_indexers=True)

        eres = Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
        elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
        eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp)

        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)

    def test_join_self(self, join_type):
        # Joining an index with itself is a no-op for every ``how``.
        index = RangeIndex(start=0, stop=20, step=2)
        joined = index.join(index, how=join_type)
        assert index is joined
| [
"noreply@github.com"
] | pandas-dev.noreply@github.com |
3d9d812cba98b8dd9aa337aba826cd5d44d24e30 | 813a8e7cc7dcd8d9b07e2c0c45184507d6760d59 | /materials/carbon_steel.py | 9117b2fbfa7dff3a8a33da71627cd76c8c20a677 | [
"MIT"
] | permissive | jultou-raa/materials | de6780e7a2a4ccdccb66a5835631105546a16428 | b5df21545c9fe0f115d9683c5b253b982c35e1ad | refs/heads/master | 2021-08-24T00:50:11.970866 | 2017-12-07T09:52:33 | 2017-12-07T09:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
#
from .helpers import mu0

# Material constants for carbon steel, SI units throughout.
#
# References:
# [1] https://en.wikipedia.org/wiki/Carbon_steel
# [2]
# https://en.wikipedia.org/wiki/Permeability_(electromagnetism)#Values_for_some_common_materials
# [3] https://en.wikipedia.org/wiki/List_of_thermal_conductivities
# [4]
# https://en.wikipedia.org/wiki/Heat_capacity#Table_of_specific_heat_capacities
#

# [1] relative permeability 100, i.e. mu = 100 * mu0 [H/m]
magnetic_permeability = 100*mu0
# [1] mass density [kg/m^3]
density = 7.85e3
# [3] thermal conductivity [W/(m*K)]
thermal_conductivity = 50.0
# NOTE(review): original comment labels this value "stainless steel @293K"
# [S/m] — verify it is the intended value for *carbon* steel.
electrical_conductivity = 1.180e6
# [4] specific heat capacity [J/(kg*K)]
specific_heat_capacity = 0.466e3
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
fbcd624cd08dbd69701cdefb2d86373655f136df | ae8254fdc04306e90df7e0359460e120498eabb5 | /src/Pipelines/TrackML_Example/LightningModules/GNN/Models/split_checkpoint_agnn.py | 33d51d08a7cdb427231670a4de5c2699a357cc7a | [] | no_license | vhewes/Tracking-ML-Exa.TrkX | 23a21578a5275b0fe112a30489e02f19e21c7bbe | b8e94a85fc7688acc649693c35069b8d2a8594e0 | refs/heads/master | 2023-03-07T07:12:24.250885 | 2021-02-19T01:09:33 | 2021-02-19T01:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from .checkpoint_agnn import CheckpointedResAGNN
class SplitCheckpointedResAGNN(CheckpointedResAGNN):
    """Checkpointed residual attention GNN whose training loss is computed
    only on the subset ("split") of edges indexed by ``batch.nested_ind[0]``.

    NOTE(review): expected ``batch`` attributes (x, cell_data, edge_index,
    pid, y, y_pid, nested_ind) are inferred from usage below — confirm
    against the dataset/DataLoader that produces them.
    """

    def __init__(self, hparams):
        super().__init__(hparams)
        print("Initialised")

    def training_step(self, batch, batch_idx):
        # Positive-class weight for BCE: explicit "weight" hparam when given,
        # otherwise the negative/positive ratio of the per-edge PID labels.
        weight = (torch.tensor(self.hparams["weight"]) if ("weight" in self.hparams)
                  else torch.tensor((~batch.y_pid.bool()).sum() / batch.y_pid.sum()))

        # "ci" regime: concatenate cell information onto the node features
        # before the forward pass; otherwise use the node features alone.
        output = (self(torch.cat([batch.cell_data, batch.x], axis=-1),
                       batch.edge_index).squeeze()
                  if ('ci' in self.hparams["regime"])
                  else self(batch.x, batch.edge_index).squeeze())

        if ('pid' in self.hparams["regime"]):
            # Label an edge positive when both endpoints share a particle id;
            # loss restricted to the nested_ind[0] edge subset.
            y_pid = (batch.pid[batch.edge_index[0, batch.nested_ind[0]]] == batch.pid[batch.edge_index[1, batch.nested_ind[0]]]).float()
            loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], y_pid.float(), pos_weight = weight)
        else:
            loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], batch.y[batch.nested_ind[0]], pos_weight = weight)

        # Old pytorch-lightning (<1.0) result API.
        result = pl.TrainResult(minimize=loss)
        result.log('train_loss', loss, prog_bar=True)

        return result
# def validation_step(self, batch, batch_idx):
# weight = (torch.tensor(self.hparams["weight"]) if ("weight" in self.hparams)
# else torch.tensor((~batch.y_pid.bool()).sum() / batch.y_pid.sum()))
# output = (self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.edge_index).squeeze()
# if ('ci' in self.hparams["regime"])
# else self(batch.x, batch.edge_index).squeeze())
# if ('pid' in self.hparams["regime"]):
# y_pid = (batch.pid[batch.edge_index[0, batch.nested_ind[0]]] == batch.pid[batch.edge_index[1, batch.nested_ind[0]]]).float()
# val_loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], y_pid.float(), pos_weight = weight)
# else:
# val_loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], batch.y[batch.nested_ind[0]], pos_weight = weight)
# result = pl.EvalResult(checkpoint_on=val_loss)
# result.log('val_loss', val_loss)
# #Edge filter performance
# preds = F.sigmoid(output[batch.nested_ind[0]]) > self.hparams["edge_cut"] #Maybe send to CPU??
# edge_positive = preds.sum().float()
# if ('pid' in self.hparams["regime"]):
# y_pid = batch.pid[batch.edge_index[0, batch.nested_ind[0]]] == batch.pid[batch.edge_index[1, batch.nested_ind[0]]]
# edge_true = y_pid.sum().float()
# edge_true_positive = (y_pid & preds).sum().float()
# else:
# edge_true = batch.y[batch.nested_ind[0]].sum()
# edge_true_positive = (batch.y[batch.nested_ind[0]].bool() & preds).sum().float()
# result.log_dict({'eff': torch.tensor(edge_true_positive/edge_true), 'pur': torch.tensor(edge_true_positive/edge_positive)})
# return result | [
"murnanedaniel@hotmail.com"
] | murnanedaniel@hotmail.com |
bef6d6e70e9196ed235a9bfec8ddb7e7233f9915 | 83624401467510aaf8e69328b0d9915e1cf0c5ce | /frites/conn/__init__.py | 3ee1a6f48de9ff32e79460c19a8809375484802e | [
"BSD-3-Clause"
] | permissive | MatthieuGilson/frites | a1f5c128b1f6b59eb7c1ba2a7740dea8eaddeb30 | 0e1b99f396d5b54b69f7a5cf962679f4afc0e776 | refs/heads/master | 2023-03-09T17:15:51.841205 | 2021-02-27T11:11:22 | 2021-02-27T11:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """Information-based connectivity metrics and utility functions.
This submodule contains two types of functions :
1. **Connectivity metrics :** methods to estimate either the undirected or
directed connectivity. Some methods are performed within-trials and others
across-trials. In the case of within-trials metrics, it is then possible to
estimate if the connectivity is modulated by the task by passing the
connectivity arrays to `frites.workflow.WfMi`
2. **Connectivity related utility functions :** small utility functions that
work on connectivity arrays
"""
# connectivity metrics
from .conn_covgc import conn_covgc # noqa
from .conn_dfc import conn_dfc # noqa
from .conn_transfer_entropy import conn_transfer_entropy # noqa
# connectivity utility functions
from .conn_sliding_windows import define_windows, plot_windows # noqa
from .conn_utils import (conn_get_pairs, conn_reshape_undirected, # noqa
conn_reshape_directed)
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
a85bbcba494dcb659ffa55fd6b7a8a7f14703fac | 4362dce3e985f307bb131f260ddad1f8ad9d36f8 | /Lecture_15/first_ml/main/forms.py | febdc6fc10ec866d4ad277f619854564e9d8f57e | [] | no_license | ishmankotia96/python-django-summer-18 | f890489f7b1805e19e78a4e11bb865306f0c17ee | b3f3d2569626b21111468cdc7e93ab4146cb18a3 | refs/heads/master | 2020-03-24T16:39:37.516434 | 2018-07-29T17:29:29 | 2018-07-29T17:29:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django import forms
class MLForm(forms.Form):
input_string = forms.CharField(max_length=100) | [
"jatin.katyal13@gmail.com"
] | jatin.katyal13@gmail.com |
671dafe0c0d3faa4bd51fd3ab010fbe69d9911f0 | 19361e1df45f755c67da3ddc47b6e4ed2cfa079f | /faker/faker_data_generator.py | 6b82a6a0c68747bdb864015d715aed85f75a32ce | [] | no_license | WilliamQLiu/python-examples | 99fb538191196714ded91f9ac97dec107aa22440 | 731113038b6574e3ede72cc61921b0aee8e3bafa | refs/heads/master | 2023-04-27T02:19:22.626542 | 2022-10-02T16:44:41 | 2022-10-02T16:44:41 | 11,562,476 | 74 | 67 | null | 2023-04-15T13:36:17 | 2013-07-21T14:03:14 | Jupyter Notebook | UTF-8 | Python | false | false | 4,855 | py | """ Requires pip install fake-factory """
# pylint: disable=I0011,C0103,W0142,E1101,C0304
# http://docs.python.org/2/library/xml.etree.elementtree.html
# https://pypi.python.org/pypi/fake-factory
import xml.etree.ElementTree as ET
from faker import Factory
if __name__ == '__main__':
    # Generate one fake "Call" record and write it to fakedata.xml (see the
    # sample XML in the trailing string below for the intended layout).
    faker = Factory.create()  # Create and instantiate a Faker generator

    # Setup Element Tree
    root = ET.Element("root")  # root
    calls = ET.SubElement(root, "Calls")  # Calls
    call = ET.SubElement(calls, "Call")  # Call
    reportversion = ET.SubElement(call, "ReportVersion")
    calldateandtimestart = ET.SubElement(call, "CallDateAndTimeStart")
    calldateandtimeend = ET.SubElement(call, "CallDateAndTimeEnd")
    phoneworker = ET.SubElement(call, "PhoneWorker")
    pfirstname = ET.SubElement(phoneworker, "FirstName")  # Phone Worker First Name
    plastname = ET.SubElement(phoneworker, "LastName")  # Phone Worker Last Name
    caller = ET.SubElement(call, "Caller")
    callername = ET.SubElement(caller, "CallerName")
    cfirstname = ET.SubElement(callername, "FirstName")  # Caller First Name
    cmiddlename = ET.SubElement(callername, "MiddleName")  # Caller Middle Name
    clastname = ET.SubElement(callername, "LastName")  # Caller Last Name
    callerlocation = ET.SubElement(caller, "CallerLocation")
    ccountry = ET.SubElement(callerlocation, "Country")
    cstateprovince = ET.SubElement(callerlocation, "StateProvince")
    ccounty = ET.SubElement(callerlocation, "County")
    ccity = ET.SubElement(callerlocation, "City")
    cpostalcode = ET.SubElement(callerlocation, "PostalCode")
    caddress = ET.SubElement(callerlocation, "Address")
    callerphonenumber = ET.SubElement(caller, "CallerPhoneNumber")
    callnotes = ET.SubElement(call, "CallNotes")

    # Put in Fake values
    call.set("ID", str(faker.random_number(digits=9)))
    reportversion.set("ID", str(faker.random_number(digits=4)))
    # NOTE: random_element(array=...) is the old fake-factory API; modern
    # Faker uses elements=.
    reportversion.text = str(faker.random_element(
        array=('H2H', 'DDH', 'OASAS')))
    calldateandtimestart.set("TimeZone", str(faker.timezone()))
    calldateandtimestart.text = str(faker.date_time_this_year())
    calldateandtimeend.set("TimeZone", str(faker.timezone()))
    calldateandtimeend.text = str(faker.date_time_this_year())
    phoneworker.set("ID", str(faker.random_number(digits=5)))
    pfirstname.text = str(faker.first_name())  # Phone Worker First Name
    plastname.text = str(faker.last_name())  # Phone Worker Last Name
    caller.set("ID", str(faker.random_number(digits=6)))
    cfirstname.text = str(faker.first_name())  # Caller First Name
    cmiddlename.text = str(faker.first_name())  # Caller Middle Name
    clastname.text = str(faker.last_name())  # Caller Last Name
    ccountry.text = str(faker.country())
    cstateprovince.text = str(faker.state_abbr())
    ccounty.text = str(faker.city())  # faker has no county provider; reuse city
    # BUG FIX: the City element was created above but never populated.
    ccity.text = str(faker.city())
    cpostalcode.text = str(faker.postcode())
    caddress.text = str(faker.street_address())
    callerphonenumber.text = str(faker.phone_number())
    # BUG FIX: paragraphs() returns a list of strings; str() of it wrote the
    # Python list repr ("['...', ...]") into the XML instead of plain text.
    callnotes.text = ' '.join(faker.paragraphs(nb=3))

    # Write entire tree to xml
    tree = ET.ElementTree(root)
    tree.write("fakedata.xml")
"""
<?xml version="1.0" encoding="utf-8"?>
<root>
<Calls>
<Call ID="15784825">
<ReportVersion ID="333">H2H</ReportVersion>
<CallDateAndTimeStart TimeZone="UTC-8">2013-10-01 00:44</CallDateAndTimeStart>
<CallDateAndTimeEnd TimeZone="UTC-8">2013-10-01 01:27</CallDateAndTimeEnd>
<CallLength>43</CallLength>
<PhoneWorker ID="30591">
<FirstName>Susan</FirstName>
<LastName>Stevens</LastName>
</PhoneWorker>
<Caller ID="989898">
<CallerName>
<FirstName>Bob</FirstName>
<MiddleName>Scott</MiddleName>
<LastName>Jones></LastName>
</CallerName>
<CallerLocation>
<Country>US</Country>
<StateProvince>CA</StateProvince>
<County>Alameda</County>
<City>Oakland</City>
<PostalCode>94444</PostalCode>
<Address>133 Elm Street</Address>
</CallerLocation>
<CallerPhoneNumber>510-555-1212</CallerPhoneNumber>
</Caller>
<CallNotes>This is my note! My notes can be very long.</CallNotes>
<CustomFields>
<Field ID="1234" FieldName="Gender">
<Value ID="9876">Male</Value>
</Field>
<Field ID="1235" FieldName="Age Group">
<Value ID="9875">25-29</Value>
</Field>
<Field ID="1236" FieldName="Mental Status Assessment - Functional">
<Value ID="9874">Sleep disturbance</Value>
<Value ID="9873">Fatigued</Value>
<Value ID="9872">Depressed</Value>
</Field>
</CustomFields>
</Call>
</Calls>
</root>
""" | [
"William.Q.Liu@gmail.com"
] | William.Q.Liu@gmail.com |
6b619b3c483cf421f5d62ed91e0103f4bd31ada4 | 492cfeab952ad8533f3fc3ca7b4267ec31cb8d30 | /myapp/celery.py | 8d6bb15014cecf1fa9dc2ceb5d6ef6316c4db4ea | [] | no_license | ohahlev/flask-module | b0ebadd32cd1937dffddf3c9e056eccac140e3a7 | 1ee139a789dd22007adafb0c77cc4595ebcc4c7e | refs/heads/master | 2020-09-07T10:39:43.605884 | 2019-11-10T07:14:58 | 2019-11-10T07:14:58 | 220,753,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # myapp/celery.py
from flask_appfactory.celery import celeryfactory
from .app import create_app

# Module-level Celery application built from the Flask app produced by
# create_app(); presumably referenced by workers as ``myapp.celery`` —
# confirm against the deployment/worker invocation.
celery = celeryfactory(create_app())
| [
"ohahlev@gmail.com"
] | ohahlev@gmail.com |
a290594538be0325021a941a6df3b199cd21a016 | 1b19103c7781c31b4042e5404eea46fa90014a70 | /cenit_google_proximity_beacon_api_v1beta1/models/config.py | b223ed193d9049c93cdfe58ede68332321064fba | [] | no_license | andhit-r/odoo-integrations | c209797d57320f9e49271967297d3a199bc82ff5 | dee7edc4e9cdcc92e2a8a3e9c34fac94921d32c0 | refs/heads/8.0 | 2021-01-12T05:52:26.101701 | 2016-12-22T03:06:52 | 2016-12-22T03:06:52 | 77,223,257 | 0 | 1 | null | 2016-12-23T12:11:08 | 2016-12-23T12:11:08 | null | UTF-8 | Python | false | false | 2,546 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import models, fields
_logger = logging.getLogger(__name__)
# Identifier of the Cenit collection this addon installs.
COLLECTION_NAME = "google_proximity_beacon_api_v1beta1"
# Version of the collection requested from Cenit.
COLLECTION_VERSION = "0.1"
# Extra parameters for the collection install (none required here).
COLLECTION_PARAMS = {
    # WITHOUT COLLECTION_PARAMS.
}
class CenitIntegrationSettings(models.TransientModel):
    """Transient configuration model that installs the Cenit collection
    for the Google Proximity Beacon API (v1beta1) integration.

    Uses the old (pre-8.0 "new API") OpenERP style: methods receive the
    database cursor ``cr`` and user id ``uid`` explicitly.
    """
    _name = "cenit.google_proximity_beacon_api_v1beta1.settings"
    _inherit = 'res.config.settings'

    ############################################################################
    # Pull Parameters
    ############################################################################
    # WITHOUT PULL PARAMETERS.

    ############################################################################
    # Default Getters
    ############################################################################
    # WITHOUT GETTERS.

    ############################################################################
    # Default Setters
    ############################################################################
    # WITHOUT SETTERS.

    ############################################################################
    # Actions
    ############################################################################
    def install(self, cr, uid, context=None):
        """Fetch the collection metadata from Cenit and install it.

        :param cr: database cursor
        :param uid: id of the acting user
        :param context: optional OpenERP context dict
        """
        installer = self.pool.get('cenit.collection.installer')
        # Fetch the collection data first (kept for its side effects on the
        # installer/cache); the returned payload itself is not needed here.
        installer.get_collection_data(
            cr, uid,
            COLLECTION_NAME,
            version=COLLECTION_VERSION,
            context=context
        )
        installer.install_collection(cr, uid, {'name': COLLECTION_NAME})
| [
"sanchocuba@gmail.com"
] | sanchocuba@gmail.com |
8beb7d33ae0a84fb331b4b7dc956f2ce5f95bb1e | 6eb8fa32f3d2ccc2aa7196ed702d4cc35c66d597 | /Week_01/0001.py | 581f1e99a5ad39046d1f43428ca77e27ca3e39a1 | [] | no_license | mach8686devops/AlgorithmCHUNZHAO | a7490c684fc89504e9c2a633a18ea250262b6dcc | 497e833b3843ed5222d3b2fc96c00fbc4b6e8550 | refs/heads/main | 2023-03-25T03:13:45.313989 | 2021-03-12T12:38:24 | 2021-03-12T12:38:24 | 330,971,263 | 0 | 0 | null | 2021-01-19T12:23:49 | 2021-01-19T12:23:48 | null | UTF-8 | Python | false | false | 398 | py | # 两数之和
# Hash-map approach: single pass, O(n) time / O(n) extra space.
class Solution:
    def twoSum(self, nums, target):
        """Return ``[j, i]`` (with ``j < i``) such that
        ``nums[j] + nums[i] == target``.

        Falls through (implicitly returning ``None``) when no such pair
        exists; callers are expected to guarantee a solution.
        """
        seen = {}  # value -> index of its earliest occurrence
        for idx, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], idx]
            seen[value] = idx
# NOTE: assumes the input is guaranteed to contain a valid pair; otherwise
# twoSum returns None (a sentinel like [-1, -1] could be returned instead).
# Quick manual check of the solver.
print(Solution().twoSum([2, 7, 11, 15], 9))
| [
"zhangjohn202@gmail.com"
] | zhangjohn202@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.