blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60c0e73dc9ba74860ee5ee068272a5e264532ab6 | f5e567f11102f0d0f52e07d7bed1b1c41576cdd1 | /0x08-python-more_classes/3-rectangle.py | 85e83f5928cc034dd822d747b1fb095d9b123be1 | [] | no_license | AndresEscobarDev/holbertonschool-higher_level_programming | f0a39f5ae394612d9cec38e541e154a75ac43afb | 9e55e0427e631a670c0c7781e2e3819f6b50d825 | refs/heads/master | 2022-12-16T13:39:05.993853 | 2020-09-25T04:49:25 | 2020-09-25T04:49:25 | 259,411,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | #!/usr/bin/python3
"""Rectangle Module"""
class Rectangle():
    """Rectangle defined by a validated width and height.

    Exposes ``area`` and ``perimeter`` helpers and renders itself as a
    grid of ``#`` characters via ``str()``.
    """

    def __init__(self, width=0, height=0):
        """Initialize the rectangle; both dimensions default to 0."""
        self.width = width
        self.height = height

    @property
    def width(self):
        """retrieves the width"""
        return self.__width

    @width.setter
    def width(self, value):
        """sets the width, rejecting non-numeric or negative values"""
        # type() check (not isinstance) deliberately rejects bool and
        # int/float subclasses, matching the original validation.
        if type(value) not in (int, float):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """retrieves the height"""
        return self.__height

    @height.setter
    def height(self, value):
        """sets the height, rejecting non-numeric or negative values"""
        if type(value) not in (int, float):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return the rectangle area (width * height)."""
        return self.__width * self.__height

    def perimeter(self):
        """Return the perimeter, or 0 when either dimension is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """Render the rectangle as rows of '#' ('' when degenerate)."""
        if self.__width == 0 or self.__height == 0:
            return ''
        rows = []
        for _ in range(self.height):
            rows.append(''.join('#' for _ in range(self.width)))
        return '\n'.join(rows)
| [
"felipeescobar15@gmail.com"
] | felipeescobar15@gmail.com |
f8bbd93b19a00ccaa1785f21de10d7ee85c0d2f2 | eeea27393aa91ded7452f8ba3b9e59de34b352e3 | /tests/test_fetch_branch_descriptions.py | 0c9edc9ee8d800b318df303b75bfb9e8f496f979 | [
"MIT"
] | permissive | timbrel/GitSavvy | 021654f9dba690601f2a2ea4696230ac3c14dc88 | 7f6eae583ba4a38749b14a6e348c6d4fcf6811e8 | refs/heads/master | 2023-09-05T20:02:14.775466 | 2023-07-18T11:17:15 | 2023-07-18T11:17:15 | 29,417,074 | 174 | 32 | MIT | 2023-07-29T15:56:38 | 2015-01-18T05:40:08 | Python | UTF-8 | Python | false | false | 1,247 | py | from textwrap import dedent
from unittesting import DeferrableTestCase
from GitSavvy.tests.mockito import when
from GitSavvy.tests.parameterized import parameterized as p
from GitSavvy.core.git_command import GitCommand
# Parameterized fixtures: raw `git config` output paired with the expected
# mapping of branch name -> first line (subject) of its description.
examples = [
(
dedent("""\
branch.status-bar-updater.description One\\nTwo
branch.revert-o-behavior.description Another branch.asd.description
branch.opt-fetching-descriptions.description This is the subject
And here is more text
and even more
branch.description.description Another description
""".rstrip()),
{
"status-bar-updater": "One\\nTwo",
"revert-o-behavior": "Another branch.asd.description",
"opt-fetching-descriptions": "This is the subject",
"description": "Another description"
}
),
]
class TestFetchBranchDescriptions(DeferrableTestCase):
# Parameterized over `examples`: stubs out the repo path and the
# `git config` call, then checks that fetch_branch_description_subjects()
# parses the raw output into the expected branch -> subject mapping.
@p.expand(examples)
def test_description_subjects(self, git_output, expected):
test = GitCommand()
when(test).get_repo_path().thenReturn("probably/here")
when(test, strict=False).git("config", ...).thenReturn(git_output)
self.assertEqual(expected, test.fetch_branch_description_subjects())
| [
"herr.kaste@gmail.com"
] | herr.kaste@gmail.com |
e7c2e47da66a553d505b55a3d937d30f2c820114 | 0fffdc47f9876c665926d1b67e1cf8ecb0e735d7 | /awesimsoss/generate_darks.py | 1cf8b7919ce3f06119722ebc9727a79957aa07b5 | [
"MIT"
] | permissive | nespinoza/awesimsoss | 439c0857bc38b6f307f4dfd705b667c880b6fa0a | 24455b703f846a66cd2c3f6f7edc3960c129e031 | refs/heads/master | 2020-12-27T22:17:39.027475 | 2020-04-07T21:35:23 | 2020-04-07T21:35:23 | 238,078,808 | 0 | 0 | MIT | 2020-04-07T21:35:24 | 2020-02-03T22:56:39 | Jupyter Notebook | UTF-8 | Python | false | false | 7,973 | py | #! /usr/bin/env python
import os
import numpy as np
import astropy.io.fits as fits
from . import noise_simulation as ng
def add_dark_current(ramp, seed, gain, darksignal):
    """
    Adds dark current to the input signal

    Parameters
    ----------
    ramp: sequence
        The array of ramp images, shape (ngroups, nrows, ncols);
        modified in place
    seed: int
        The seed for the dark signal
    gain: float
        The detector gain
    darksignal: sequence
        A 2D map of the dark signal to project onto the ramp

    Returns
    -------
    np.ndarray
        The dark signal ramp (the same array object as `ramp`)
    """
    np.random.seed(seed)

    # Accumulate one Poisson draw of the dark map per group so each
    # successive frame carries the integrated (cumulative) dark charge.
    accumulated = darksignal * 0.
    for group in range(ramp.shape[0]):
        accumulated = accumulated + np.random.poisson(darksignal) / gain
        ramp[group, :, :] = ramp[group, :, :] + accumulated
    return ramp
def make_exposure(nints, ngrps, darksignal, gain, pca0_file, noise_seed=None,
dark_seed=None, offset=500):
"""
Make a simulated exposure with no source signal

Parameters
----------
nints: int
The number of integrations
ngrps: int
The number of groups per integration
darksignal: sequence
A dark frame
gain: float
The gain on the detector
pca0_file: str
The path to the PCA-zero file
noise_seed: int
The seed for the generated noise
dark_seed: int
The seed for the generated dark
offset: int
The pedestal offset

Returns
-------
np.ndarray
A simulated ramp of darks
"""
if nints < 1 or ngrps < 1:
return None
# NOTE(review): falsy check means an explicit seed of 0 would be replaced
# by a random one -- confirm 0 is never a meaningful seed here.
if not noise_seed:
noise_seed = 7+int(np.random.uniform()*4000000000.)
if not dark_seed:
dark_seed = 5+int(np.random.uniform()*4000000000.)
np.random.seed(dark_seed)
# Make empty data array
nrows, ncols = darksignal.shape
simulated_data = np.zeros([nints*ngrps,nrows,ncols], dtype=np.float32)
# Define some constants
pedestal = 18.30
c_pink = 9.6
u_pink = 3.2
acn = 2.0
bias_amp = 0.
#bias_amp = 5358.87
#bias_offset = 20944.06
pca0_amp = 0.
rd_noise = 12.95
dark_current = 0.0
dc_seed = dark_seed
bias_offset = offset*gain
# Define the HXRGN instance to make a SUSBSTRIP256 array
#(in detector coordinates)
noisecube = ng.HXRGNoise(naxis1=nrows, naxis2=ncols, naxis3=ngrps,
pca0_file=pca0_file, x0=0, y0=0, det_size=2048,
verbose=False)
# iterate over integrations
for loop in range(nints):
# Each integration gets its own correlated-noise realization.
seed1 = noise_seed+24*int(loop)
ramp = noisecube.mknoise(c_pink=c_pink, u_pink=u_pink,
bias_amp=bias_amp, bias_offset=bias_offset,
acn=acn, pca0_amp=pca0_amp, rd_noise=rd_noise,
pedestal=pedestal, dark_current=dark_current,
dc_seed=dc_seed, noise_seed=seed1, gain=gain)
if len(ramp.shape)==2:
ramp = ramp[np.newaxis,:,:]
# NOTE(review): transpose + double flip presumably converts detector
# coordinates to science orientation -- confirm the convention.
ramp = np.transpose(ramp,(0,2,1))
ramp = ramp[::,::-1,::-1]
ramp = add_dark_current(ramp, dc_seed, gain, darksignal)
simulated_data[loop*ngrps:(loop+1)*ngrps,:,:] = np.copy(ramp)
# Drop the reference so the large array can be freed before next loop.
ramp = 0
return simulated_data
def make_photon_yield(photon_yield, orders):
    """
    Generates a 2D map of the photon yield, averaged across orders.

    The shape of both arrays should be [order, nrows, ncols].

    Parameters
    ----------
    photon_yield: sequence
        The calculated photon yield at each pixel, per order
        (note: an array, not a file path as previously documented)
    orders: sequence
        An array of the median image of each order

    Returns
    -------
    np.ndarray
        The photon yield map, the signal-weighted average across orders;
        pixels with no signal in any order default to 1.
    """
    # Weight each order's photon yield by that order's signal.
    weighted = np.zeros(orders.shape[1:], dtype=np.float32)
    total = np.zeros(orders.shape[1:], dtype=np.float32)
    for n in range(orders.shape[0]):
        weighted = weighted + photon_yield[n, :, :] * orders[n, :, :]
        total = total + orders[n, :, :]

    # Divide only where there is signal; zero-signal pixels keep the
    # default yield of 1. (The previous code divided unconditionally,
    # emitting a 0/0 RuntimeWarning before masking after the fact.)
    pyimage = np.ones_like(weighted)
    np.divide(weighted, total, out=pyimage, where=total != 0.)
    return pyimage
def add_signal(signals, cube, pyimage, frametime, gain, zodi, zodi_scale,
               photon_yield=False):
    """
    Add the science signal to the generated noise

    Parameters
    ----------
    signals: sequence
        The science frames, same shape as `cube`
    cube: sequence
        The generated dark ramp
    pyimage: sequence
        The photon yield per order
    frametime: float
        The number of seconds per frame
    gain: float
        The detector gain
    zodi: sequence
        The zodiacal background image
    zodi_scale: float
        The scale factor for the zodi background
    photon_yield: bool
        Draw an extra Poisson realization for the photon-yield excess

    Returns
    -------
    np.ndarray
        The ramp with the accumulated science signal added

    Raises
    ------
    ValueError
        If `signals` and `cube` do not have the same shape
    """
    # The signal ramp must match the dark ramp frame-for-frame
    dims1 = cube.shape
    dims2 = signals.shape
    if dims1 != dims2:
        raise ValueError(dims1, "not equal to", dims2)

    # Accumulated signal, built up frame by frame (up-the-ramp)
    newcube = cube.copy() * 0.

    # The background is assumed to be in electrons/second/pixel, not ADU/s/pixel.
    background = zodi * zodi_scale * frametime

    for frame in range(dims1[0]):
        framesignal = signals[frame, :, :] * gain * frametime

        # Add photon yield
        if photon_yield:
            newvalues = np.random.poisson(framesignal)
            target = pyimage - 1.
            for k in range(dims1[1]):
                for l in range(dims1[2]):
                    if target[k, l] > 0.:
                        # BUG FIX: the per-pixel count previously reused
                        # (and clobbered) the outer frame-loop variable,
                        # corrupting frame indexing below.
                        counts = int(newvalues[k, l])
                        values = np.random.poisson(target[k, l], size=counts)
                        newvalues[k, l] = newvalues[k, l] + np.sum(values)
            newvalues = newvalues + np.random.poisson(background)
        # Or don't
        else:
            vals = np.abs(framesignal * pyimage + background)
            newvalues = np.random.poisson(vals)

        # First ramp image starts the accumulation; later frames add on
        if frame == 0:
            newcube[frame, :, :] = newvalues
        else:
            newcube[frame, :, :] = newcube[frame - 1, :, :] + newvalues

    newcube = cube + newcube / gain
    return newcube
def non_linearity(cube, nonlinearity, offset=0):
    """
    Add nonlinearity to the ramp

    Parameters
    ----------
    cube: sequence
        The ramp with no non-linearity, shape (ngroups, nrows, ncols)
    nonlinearity: sequence
        The non-linearity coefficients, shape (ncoeffs, nrows, ncols)
    offset: int
        The non-linearity offset (values are shifted down by this amount
        before the polynomial is applied, then shifted back)

    Returns
    -------
    np.ndarray
        The ramp with the added non-linearity

    Raises
    ------
    ValueError
        If the image dimensions of `cube` and `nonlinearity` differ
    """
    dims1 = nonlinearity.shape
    dims2 = cube.shape
    # BUG FIX: the original check compared the row dimension twice and
    # never validated the column dimension.
    if (dims1[1] != dims2[1]) or (dims1[2] != dims2[2]):
        raise ValueError("image dimensions of cube %s and nonlinearity %s "
                         "do not match" % (dims2, dims1))

    # Work relative to the offset, apply the polynomial, restore the offset
    newcube = cube - offset
    for k in range(dims2[0]):
        frame = np.squeeze(np.copy(newcube[k, :, :]))
        # Evaluate sum_n c_n * frame**(n+1), highest order first
        poly = frame * 0.
        for n in range(dims1[0] - 1, -1, -1):
            poly = poly + nonlinearity[n, :, :] * np.power(frame, n + 1)
        newcube[k, :, :] = frame * (1. + poly)
    newcube = newcube + offset
    return newcube
def add_pedestal(cube, pedestal, offset=500):
    """
    Add a pedestal to the ramp

    Parameters
    ----------
    cube: sequence
        The ramp with no pedestal, shape (ngroups, nrows, ncols)
    pedestal: sequence
        The 2D pedestal image added to every group
    offset: int
        The pedestal offset; 500 is the reference value that leaves
        the pedestal unchanged

    Returns
    -------
    np.ndarray
        The ramp with the added pedestal, cast to uint16
    """
    # Shift the pedestal by the requested offset relative to 500
    ped1 = pedestal + (offset - 500.)
    # Broadcasting adds the 2D pedestal to every group in one shot,
    # replacing the original per-group Python loop. (Computation stays in
    # the input precision rather than routing through a float32 buffer.)
    newcube = (cube + ped1).astype(np.uint16)
    return newcube
| [
"jfilippazzo@stsci.edu"
] | jfilippazzo@stsci.edu |
057491a1237ffc4bef99c167ba0dcb7674f14ccd | 45ab4c22d918dc4390572f53c267cf60de0d68fb | /src/Analysis/Engine/Impl/Typeshed/third_party/2and3/requests/sessions.pyi | c01b5e15dd227f974e24e619ea84e3f29113ac3c | [
"MIT",
"Apache-2.0"
] | permissive | sourcegraph/python-language-server | 580a24fd15fe9d4abeb95e9333d61db1c11a2670 | 64eae156f14aa14642afcac0e7edaf5d7c6d1a1c | refs/heads/master | 2023-04-09T21:17:07.555979 | 2018-12-06T23:25:05 | 2018-12-06T23:25:05 | 155,174,256 | 2 | 2 | Apache-2.0 | 2018-10-29T08:06:49 | 2018-10-29T08:06:49 | null | UTF-8 | Python | false | false | 5,198 | pyi | # Stubs for requests.sessions (Python 3)
from typing import Any, Union, List, MutableMapping, Text, Optional, IO, Tuple, Callable, Iterable
from . import adapters
from . import auth as _auth
from . import compat
from . import cookies
from . import models
from .models import Response
from . import hooks
from . import utils
from . import exceptions
from .packages.urllib3 import _collections
from . import structures
from . import adapters
from . import status_codes
# Stub re-exports: the runtime `requests` package exposes these names from
# requests.sessions, so the stub aliases them for type checkers.
BaseAdapter = adapters.BaseAdapter
OrderedDict = compat.OrderedDict
cookiejar_from_dict = cookies.cookiejar_from_dict
extract_cookies_to_jar = cookies.extract_cookies_to_jar
RequestsCookieJar = cookies.RequestsCookieJar
merge_cookies = cookies.merge_cookies
Request = models.Request
PreparedRequest = models.PreparedRequest
DEFAULT_REDIRECT_LIMIT = models.DEFAULT_REDIRECT_LIMIT
default_hooks = hooks.default_hooks
dispatch_hook = hooks.dispatch_hook
to_key_val_list = utils.to_key_val_list
default_headers = utils.default_headers
to_native_string = utils.to_native_string
TooManyRedirects = exceptions.TooManyRedirects
InvalidSchema = exceptions.InvalidSchema
ChunkedEncodingError = exceptions.ChunkedEncodingError
ContentDecodingError = exceptions.ContentDecodingError
RecentlyUsedContainer = _collections.RecentlyUsedContainer
CaseInsensitiveDict = structures.CaseInsensitiveDict
HTTPAdapter = adapters.HTTPAdapter
requote_uri = utils.requote_uri
get_environ_proxies = utils.get_environ_proxies
get_netrc_auth = utils.get_netrc_auth
should_bypass_proxies = utils.should_bypass_proxies
get_auth_from_url = utils.get_auth_from_url
codes = status_codes.codes
REDIRECT_STATI = models.REDIRECT_STATI
REDIRECT_CACHE_SIZE = ... # type: Any
# Merge helpers are deliberately left untyped in this stub.
def merge_setting(request_setting, session_setting, dict_class=...): ...
def merge_hooks(request_hooks, session_hooks, dict_class=...): ...
# Redirect-following behavior mixed into Session below.
class SessionRedirectMixin:
def resolve_redirects(self, resp, req, stream=..., timeout=..., verify=..., cert=...,
proxies=...): ...
def rebuild_auth(self, prepared_request, response): ...
def rebuild_proxies(self, prepared_request, proxies): ...
# Private aliases describing request bodies and response-hook callbacks.
_Data = Union[None, bytes, MutableMapping[Text, Text], IO]
_Hook = Callable[[Response], Any]
_Hooks = MutableMapping[Text, List[_Hook]]
_HooksInput = MutableMapping[Text, Union[Iterable[_Hook], _Hook]]
# Typed surface of requests.Session: attribute declarations followed by
# the HTTP-verb conveniences; `...` bodies are stub placeholders.
class Session(SessionRedirectMixin):
__attrs__ = ... # type: Any
headers = ... # type: MutableMapping[Text, Text]
auth = ... # type: Union[None, Tuple[Text, Text], _auth.AuthBase, Callable[[Request], Request]]
proxies = ... # type: MutableMapping[Text, Text]
hooks = ... # type: _Hooks
params = ... # type: Union[bytes, MutableMapping[Text, Text]]
stream = ... # type: bool
verify = ... # type: Union[None, bool, Text]
cert = ... # type: Union[None, Text, Tuple[Text, Text]]
max_redirects = ... # type: int
trust_env = ... # type: bool
cookies = ... # type: Union[RequestsCookieJar, MutableMapping[Text, Text]]
adapters = ... # type: MutableMapping
redirect_cache = ... # type: RecentlyUsedContainer
def __init__(self) -> None: ...
def __enter__(self) -> 'Session': ...
def __exit__(self, *args) -> None: ...
def prepare_request(self, request): ...
# Central entry point; every verb helper below delegates to request().
def request(self, method: str, url: str,
params: Union[None, bytes, MutableMapping[Text, Text]] = ...,
data: _Data = ...,
headers: Optional[MutableMapping[Text, Text]] = ...,
cookies: Union[None, RequestsCookieJar, MutableMapping[Text, Text]] = ...,
files: Optional[MutableMapping[Text, IO]] = ...,
auth: Union[None, Tuple[Text, Text], _auth.AuthBase, Callable[[Request], Request]] = ...,
timeout: Union[None, float, Tuple[float, float]] = ...,
allow_redirects: Optional[bool] = ...,
proxies: Optional[MutableMapping[Text, Text]] = ...,
hooks: Optional[_HooksInput] = ...,
stream: Optional[bool] = ...,
verify: Union[None, bool, Text] = ...,
cert: Union[Text, Tuple[Text, Text], None] = ...,
json: Optional[MutableMapping] = ...,
) -> Response: ...
def get(self, url: Union[Text, bytes], **kwargs) -> Response: ...
def options(self, url: Union[Text, bytes], **kwargs) -> Response: ...
def head(self, url: Union[Text, bytes], **kwargs) -> Response: ...
def post(self, url: Union[Text, bytes], data: _Data = ..., json: Optional[MutableMapping] = ..., **kwargs) -> Response: ...
def put(self, url: Union[Text, bytes], data: _Data = ..., **kwargs) -> Response: ...
def patch(self, url: Union[Text, bytes], data: _Data = ..., **kwargs) -> Response: ...
def delete(self, url: Union[Text, bytes], **kwargs) -> Response: ...
def send(self, request, **kwargs): ...
def merge_environment_settings(self, url, proxies, stream, verify, cert): ...
def get_adapter(self, url): ...
def close(self) -> None: ...
def mount(self, prefix:
Union[Text, bytes],
adapter: BaseAdapter) -> None: ...
# Module-level factory returning a new Session.
def session() -> Session: ...
| [
"alsher@microsoft.com"
] | alsher@microsoft.com |
cc94128aa202a02289d2a1af9bf3cdb56f1c5360 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Downloading Files.py | 1d0d790a38a971745fd005fe0779c1009c3cd171 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c75100f4f9546784ea8796f60c014b05c1a6d85dc6339a653778693693dfda95
size 570
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
3cf9f47ccdd76898354a87cd03513c3853c0f2f5 | 60107fe4be58b8a96304ef1128c1514943efa2f9 | /19.py | 24b945692299a9de4f40994b0f287d7f28c65b75 | [] | no_license | Aidana172003/TSIS6 | 5293d23a2827644aaea2962f964e71efe7a42708 | 3451f73507171782d558a75a1bd46be7ecda2914 | refs/heads/main | 2023-06-12T06:35:46.043423 | 2021-07-08T07:23:30 | 2021-07-08T07:23:30 | 384,037,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def test(a):
# Body of `test(a)` -- its `def` header was lost in this copy (fused into
# the preceding metadata row). It returns a closure that increments the
# captured counter `a` before each addition.
def add(b):
nonlocal a
a+=1
return a+b
return add
func=test(4)
# First call bumps a to 5, so this prints 5 + 4 = 9.
print(func(4))
"noreply@github.com"
] | Aidana172003.noreply@github.com |
b282ae4a8df3eb798acfcdf196f7e3f240860174 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/identity/azure-identity/tests/managed-identity-live/test_cloud_shell.py | aa125848a2efed089f300301bbc93f86011acc2c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,113 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.core import PipelineClient
from azure.core.pipeline.policies import ContentDecodePolicy, HttpLoggingPolicy, RedirectPolicy, RetryPolicy
from azure.identity import ManagedIdentityCredential
@pytest.mark.cloudshell
def test_cloud_shell_live(cloud_shell):
# Live test: must run inside Azure Cloud Shell (see the marker above);
# acquires a Key Vault token from the local managed-identity endpoint.
credential = ManagedIdentityCredential()
token = credential.get_token("https://vault.azure.net")
# Validate the token by sending a request to the Key Vault. The request is manual because azure-keyvault-secrets
# can't authenticate in Cloud Shell; the MSI endpoint there doesn't support AADv2 scopes.
policies = [ContentDecodePolicy(), RedirectPolicy(), RetryPolicy(), HttpLoggingPolicy()]
client = PipelineClient(cloud_shell["vault_url"], policies=policies)
list_secrets = client.get(
"secrets", headers={"Authorization": "Bearer " + token.token}, params={"api-version": "7.0"}
)
with client:
client._pipeline.run(list_secrets)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
0e6fd93f5f79c5c1a20e94ccb2adab320a1de061 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/PasswordSub.py | 5ce89e5c41ce5e79cabb90ced4794759293b987b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | import sys
import ael
import time
import string
ael_variables = []
def check_password(user):
# Notify the user about initial/expiring/expired ADS passwords, based on
# the PasswordResetDate add-info tracked on the ael User record.
# NOTE(review): fixed 6 s sleep -- presumably waits for the triggering
# commit to settle; confirm it is still required.
time.sleep(6)
mailto = user.updat_usrnbr.userid
msg = 'To change a password go to File -> Preferences -> Passwords -> ADS.'
#if user.updat_usrnbr.userid != ael.User[user.updat_usrnbr].updat_usrnbr.userid.
# An empty PasswordResetDate add-info means the password was never reset.
if ael.User[user.updat_usrnbr.userid].add_info('PasswordResetDate') == '':
subj = 'Initial Password'
ael.sendmessage(mailto, subj, msg)
ael.log(mailto+ subj)
else:
#Check if password is > 30 days
ResetDate = ael.date(user.updat_usrnbr.add_info('PasswordResetDate'))
LatestDate= ael.date_from_time(user.creat_time)
if ResetDate.days_between(LatestDate) >= 25 and ResetDate.days_between(LatestDate) <= 30:
# NOTE(review): if days_between() returns an int, this str + int
# concatenation raises TypeError -- confirm against the ael API.
subj = 'Password will expire in :' + ResetDate.days_between(LatestDate)
ael.sendmessage(mailto, subj, msg)
ael.log(mailto+subj)
if ResetDate.days_between(LatestDate) > 30 :
subj = 'Your password has expired and your userid will be locked please change password now'
ael.sendmessage(mailto, subj, msg)
# thisuser = ael.User[mailto].clone()
# thisuser.inactive = 1
# thisuser.commit()
ael.log(mailto+ subj)
def start():
#Start subscription on the userlog table
print "Starting UserLog subscription"
ael.UserLog.subscribe(userlog_update_cb)
# Tear down the subscription registered in start().
def stop():
print "Stopping userlog subscription"
ael.UserLog.unsubscribe(userlog_update_cb)
# Subscription callback: only react to Login/Logoff rows being
# inserted or updated, then run the password-expiry check.
def userlog_update_cb(obj, userlog, arg, event):
#Check Password if has not expired
if event in ['insert', 'update'] and userlog.type in ['Login', 'Logoff']:
# print obj, userlog.pp(), arg, event
check_password(userlog)
# NOTE(review): the body of ael_main and the indentation of the lines
# below were lost in this copy -- restore from the original source before
# running; the if/else below looks like module-level bootstrap code.
def ael_main(ael_dict):
if __name__=="__main__":
# Called from command line, connect first
# ael.connect('sun23:7771', 'FRED', 'secret', 'TimeSeriesSample')
start()
ael.main_loop()
else:
# Called from GUI client, already connected
start()
#stop()
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
7e15206079e2328ba961416fe868740e2f8a4dbb | ae29491fdfa0ef139e2146e3cdb088781efd1ff0 | /lang.py | 386349f17dc3daf551943a692231a5b254605dfc | [] | no_license | PyLamGR/Aurora-Bot | bcaa131811d7d05dc6bdb909f5f7b7f6f0ca250c | 1904eff6133765568f2e72c076827e3d8d6f4e8e | refs/heads/master | 2020-03-22T19:39:05.878137 | 2018-07-13T11:08:52 | 2018-07-13T11:08:52 | 140,542,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from configs import lang
# TODO: Automate this process
# https://stackoverflow.com/questions/18090672/convert-dictionary-entries-into-variables-python
# Re-export selected message strings from the `lang` config dict as
# module-level constants so callers can import them directly.
TAG_REQUIRED = lang['TAG_REQUIRED']
MEMBER_NOT_FOUND = lang['MEMBER_NOT_FOUND']
UNKNOWN_ERROR = lang['UNKNOWN_ERROR']
#
# locals().update(lang)
#
# for name, value in locals().copy().items():
#     print(name, value)
# __dict__ = lang
# NOTE(review): this only creates a module global literally named
# `__dict__`; it does not replace the module namespace -- confirm intent.
__dict__ = ['HEY_ALL']
| [
"wckdawe@gmail.com"
] | wckdawe@gmail.com |
c553c8c848f14f4ef73947ba0fa585fe765d4784 | a1afebeb04af3e9bbe5e9cf8a2468d8f1a99f9cc | /import-pipelines/LaserChron/setup.py | 4b3eecd661b858b658f0d15b18d3e96eb5051943 | [] | no_license | yeshancqcq/Sparrow | 0f4527740d0f1cca535e7b59384bcbe9ccaa1682 | dcfd2eeacc524ae752e6e68ea84fa4e58645337d | refs/heads/master | 2020-05-29T22:41:24.235748 | 2019-07-29T20:42:01 | 2019-07-29T20:42:01 | 189,416,026 | 0 | 0 | null | 2019-05-30T13:09:46 | 2019-05-30T13:09:46 | null | UTF-8 | Python | false | false | 235 | py | from setuptools import setup
# Minimal package definition for the LaserChron import pipeline plug-in.
setup(
name='sparrow_import_laserchron',
version='0.1',
package_dir={'sparrow_import_laserchron': 'sparrow_import_laserchron'},
install_requires=['sqlalchemy', 'pandas', 'xlrd', 'click']
)
| [
"dev@davenquinn.com"
] | dev@davenquinn.com |
db1cdee845b4b624fb660b2aea764c2af6c1f65a | e982ad81d18e3a983756b4c90311b007b9d5e276 | /pyspec/wxui/project.py | 62378ccb4286bfa0c3941582a2dfb6ee91771e7f | [
"MIT"
] | permissive | jyotijaya/pyspec | 2ca4428c3c9924154f7467edbdc1d8fddd59a817 | ae7a4de39beb3cf2e0838b6c3a9ef73d082445eb | refs/heads/master | 2022-12-27T20:42:15.818388 | 2020-10-01T11:50:19 | 2020-10-01T11:50:19 | 300,260,536 | 0 | 0 | NOASSERTION | 2020-10-01T11:49:40 | 2020-10-01T11:49:39 | null | UTF-8 | Python | false | false | 5,504 | py | # -*- coding: ascii -*-
__pyspec = 1
import os
import time
import ConfigParser
import pyspec.util
import pyspec.project
class WxPySpecProject(pyspec.project.PySpecProject):
    """PySpec project extended with wx-GUI toggles and a last-used time."""

    def __init__(self, file_or_filename=None, last_used_time=None):
        """Create the project; last_used_time defaults to 'now'."""
        super(WxPySpecProject, self).__init__(file_or_filename, does_read=False)
        if last_used_time is None:
            last_used_time = time.time()
        self.last_used_time = last_used_time

    def _clear_all(self):
        """Reset base state and all GUI toggles to their defaults."""
        super(WxPySpecProject, self)._clear_all()
        for option in ("auto_run", "auto_reload",
                       "fail_activate", "success_activate"):
            setattr(self, option, False)

    def _read_template(self, parser):
        """Load the GUI toggles from the [Config] section."""
        for option in ("auto_run", "auto_reload",
                       "fail_activate", "success_activate"):
            self._read_bool_option(parser, "Config", option)

    def _save_template(self, parser):
        """Write the GUI toggles back to the [Config] section."""
        for option in ("auto_run", "auto_reload",
                       "fail_activate", "success_activate"):
            parser.set("Config", option, str(getattr(self, option)))
class WxPySpecProjectManager(object):
    """Tracks the most recently used WxPySpecProject files for the wx GUI.

    The recent list is persisted to ``pyspec.conf`` in the user's home
    directory as ``path=timestamp`` lines, trimmed to the five most
    recently used entries. ``self.projects`` is kept sorted by
    ``last_used_time`` so the active project is always the last element.
    In test mode (``test_data`` given) no files are read or written.
    """

    def __init__(self, test_data=None):
        """Load the recent-project list from disk, or from `test_data`."""
        self.projects = []
        self.dirty_flag = False
        if test_data is None:
            filepath = pyspec.util.home_path("pyspec.conf")
            if os.path.exists(filepath):
                # `open` instead of the deprecated Python 2 `file` builtin
                self._read_setting_file(open(filepath))
                self._current().read()
            else:
                self.add_new_project()
            self.test_mode = False
        else:
            self._read_setting_file(test_data)
            self.test_mode = True
            self.current_time_for_test = None

    def _read_setting_file(self, fileobj):
        """Parse ``path=timestamp`` lines into WxPySpecProject entries."""
        for line in fileobj.readlines():
            if line.strip() == "":
                continue
            filename, last_use = line.split("=")
            self.projects.append(WxPySpecProject(filename, last_use))
        if len(self.projects) == 0:
            self.add_new_project()
        else:
            # Most recently used goes last: that slot is the active project.
            self.projects.sort(key=lambda o: o.last_used_time)

    def _update_config_files(self):
        """Trim to the five most recent projects and persist the list."""
        if len(self.projects) > 5:
            self.projects.sort(key=lambda o: o.last_used_time)
            self.projects = self.projects[-5:]
        if self.test_mode:
            return
        user_setting = open(pyspec.util.home_path("pyspec.conf"), "w")
        try:
            for option in self.projects:
                user_setting.write("%s=%d\n" % (option.get_filepath(),
                                                option.last_used_time))
        finally:
            # Close even if a write fails so the handle is not leaked.
            user_setting.close()

    def _current(self):
        """Return the active (most recently used) project."""
        return self.projects[-1]

    def _current_time(self):
        """Wall-clock time, or the injected time when in test mode."""
        if not self.test_mode:
            return time.time()
        return self.current_time_for_test

    def add_new_project(self):
        """Append a fresh, unsaved project and make it current."""
        self.projects.append(WxPySpecProject())

    def open(self, filepath_or_file):
        """Open a project from a path (updating recency) or a file object."""
        if not isinstance(filepath_or_file, basestring):
            # A file-like object: read directly into the current project.
            self._current().read(filepath_or_file)
            return
        is_new = True
        for project in self.projects:
            if filepath_or_file == project.get_filepath():
                is_new = False
                project.last_used_time = self._current_time()
        if is_new:
            self.projects.append(WxPySpecProject(filepath_or_file,
                                                 self._current_time()))
        self._update_config_files()
        if not self.test_mode:
            self._current().set_filepath(filepath_or_file)
            self._current().read()

    def save(self, test_data=None):
        """Save the current project (`test_data` kept for API compat)."""
        target_project = self.projects[-1]
        if not self.test_mode:
            target_project.save()
        self.dirty_flag = False

    def save_as(self, filepath):
        """Save the current project under a new path, updating recency."""
        target_project = self.projects[-1]
        target_project.last_used_time = self._current_time()
        if not self.test_mode:
            target_project.save(filepath)
            self._update_config_files()
        else:
            target_project.set_filepath(filepath)
        self.dirty_flag = False

    def can_save(self):
        """A project can be saved in place once it has a real path."""
        return not self.is_default_file()

    def should_save(self):
        """True when there are unsaved changes."""
        return self.dirty_flag

    def set_dirty_flag(self):
        """Mark the current project as having unsaved changes."""
        self.dirty_flag = True

    def is_default_file(self):
        """True while the current project has never been saved to a path."""
        return self._current().is_default

    def is_auto_run(self):
        """GUI toggle: run specs automatically."""
        return self._current().auto_run

    def is_auto_reload(self):
        """GUI toggle: reload modules automatically."""
        return self._current().auto_reload

    def is_fail_activate(self):
        """GUI toggle: activate the window on failure."""
        return self._current().fail_activate

    def is_success_activate(self):
        """GUI toggle: activate the window on success."""
        return self._current().success_activate

    def get_function_hook(self):
        """Return the current project's function hook."""
        return self._current().function_hook

    def display_filename(self):
        """Title-bar text: '*new' when unsaved, '* path *' when dirty."""
        if self._current().is_default:
            return "*new"
        if self.should_save():
            return "* %s *" % self.get_filepath()
        # BUG FIX: removed a second, unreachable duplicate of this return.
        return self.get_filepath()

    def get_filepath(self):
        """Path of the current project file."""
        return self._current().get_filepath()

    def last_used_time(self):
        """Timestamp of the current project's last use."""
        return self._current().last_used_time

    def set_modules(self, specs):
        """Replace the current project's spec modules."""
        self._current().reset_specs(specs)

    def get_modules(self):
        """Return the current project's specs, sorted."""
        return sorted(self._current().specs.values())
| [
"yoshiki@shibu.jp"
] | yoshiki@shibu.jp |
33ac8635e4bcb4c809545df017ca374fe921575c | c4209246ef01b1276b443bf7ce887d0b30b242dc | /test.py | 2ddc2ea00909e1ebff009ba662fb851d05232ab1 | [] | no_license | avmangu/SURF-2017 | 55237d48e92647d3c7ccce3f7911d52218a85e85 | 6f25798e96fdae9006285b99f76861fc2196f2ce | refs/heads/master | 2020-03-21T04:14:18.866034 | 2018-06-21T00:17:39 | 2018-06-21T00:17:39 | 138,098,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
# Module-level figure/axes shared with the animate() callback below.
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# Crunch Input Data to Create Scanner Positions + Plot Coordinates
def liveGather(center, fine_range, fine_n, course_range, course_n):
# Build coarse/fine scan positions on both sides of `center`, waiting for
# the hardware to reach each position before recording it.
# NOTE(review): appends to module-level lists (course, fine, course_2,
# fine_2, positive, negative) and calls getCurrent()/stepConverter(),
# none of which are defined in this file -- confirm they exist at runtime.
# STEPS
fine_step = (fine_range - center) / fine_n
course_step = (course_range - center) / course_n
# NEGATIVE RANGE
fine_neg_range = center - (fine_range - center)
course_neg_range = center - (course_range - center)
# POSITIVE POSITIONS
pos = course_range
while(pos > fine_range):
pos = round(pos, 3)
course.append(pos)
pos -= course_step
pos = fine_range
fine.append(pos)
while(pos > center):
pos -= fine_step
pos = round(pos, 3)
fine.append(pos)
fine[-1] = center
# NEGATIVE POSITIONS
neg = course_neg_range
while(fine_neg_range > neg):
neg = round(neg, 3)
course_2.append(neg)
neg += course_step
neg = fine_neg_range
neg = round(neg, 3)
fine_2.append(neg)
while(center > neg):
neg += fine_step
neg = round(neg, 3)
fine_2.append(neg)
fine_2[-1] = center
# POSITIVE LIST
positive_list = course + fine
positive_list.sort(reverse = True)
for i in range(len(positive_list)):
while True:
if(getCurrent("steps") == stepConverter(positive_list[i])):
positive.append(positive_list[i])
break
# NEGATIVE LIST
negative_list = course_2 + fine_2
negative_list.sort(reverse = False)
for j in range(len(negative_list)):
while True:
# NOTE(review): this comparison reads positive_list[i] -- it likely
# should be negative_list[j]; verify before trusting the negative sweep.
if(getCurrent("steps") == stepConverter(positive_list[i])):
negative.append(negative_list[j])
break
def animate(i):
# Per-frame callback for FuncAnimation: refresh the scanner positions
# then rebuild and redraw the step-plot coordinates.
# NOTE(review): reads module globals (center, fine_range, fine_n,
# course_range, course_n, positive, negative, xcoords, ycoords, delay)
# not defined in this file; xcoords/ycoords are appended to every frame
# and never cleared here -- confirm that is intended.
liveGather(center, fine_range, fine_n, course_range, course_n)
# CREATING Y-COORDINATES
for a in range(2):
ycoords.append(center)
# NOTE: loop variable i shadows the animation frame argument.
for i in range(len(positive)):
ycoords.append(positive[i])
ycoords.append(positive[i])
for j in range(len(negative)):
ycoords.append(negative[j])
ycoords.append(negative[j])
# CREATING X-COORDINATES
xcoords.append(0)
time_delay = float(delay.text())
for x in range((len(ycoords) / 2) + 1):
if(x > 0):
if((len(ycoords) - len(xcoords)) == 1):
xcoords.append(time_delay * x)
break
xcoords.append(time_delay * x)
xcoords.append(time_delay * x)
ax1.clear()
ax1.plot(xcoords, ycoords)
# Redraw every 1000 ms; keep a reference so the animation is not GC'd.
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| [
"you@example.com"
] | you@example.com |
f478aeaf0498b9d894f8fa4b13a10a86c87251ff | ebcc57cbd7bc4c951fe3cf9826efc2d03d1e47e8 | /educative/05 Cyclic Sort/01 Cyclic Sort (easy).py | db806e81a23f57c1763200df5dc4238ef520c8ee | [] | no_license | Vahid-Esmaeelzadeh/CTCI-Python | 17a672e95f1d886f4fb66239a4aa22a87f38382a | 867360ab13dd63d24d6f3e45b5ac223755942b54 | refs/heads/master | 2022-10-26T16:43:54.939188 | 2020-06-11T21:42:15 | 2020-06-11T21:42:15 | 190,065,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | '''
Cyclic Sort
We are given an array containing ‘n’ objects. Each object, when created, was assigned a unique number from 1 to ‘n’
based on their creation sequence. This means that the object with sequence number ‘3’ was created just before the
object with sequence number ‘4’.
Write a function to sort the objects in-place on their creation sequence number in O(n) and without any extra space.
For simplicity, let’s assume we are passed an integer array containing only the sequence numbers, though each number
is actually an object.
'''
def cyclic_sort(nums):
    """Sort a permutation of 1..n in place in O(n) time and O(1) extra space."""
    pos = 0
    size = len(nums)
    while pos < size:
        home = nums[pos] - 1  # index where the current value belongs
        if nums[pos] == nums[home]:
            # Value already occupies its home slot; move on.
            pos += 1
        else:
            nums[pos], nums[home] = nums[home], nums[pos]
    return nums
def main():
    # Demo: cyclic-sort two sample permutations and print the results.
    for sample in ([3, 1, 5, 4, 2], [2, 6, 4, 3, 1, 5]):
        print(cyclic_sort(sample))


main()
| [
"v.esmaeelzadeh@gmail.com"
] | v.esmaeelzadeh@gmail.com |
f2a61a0f7f387402f930c3178fe8175461504e36 | 89841a2b522b7b1ab7965560f62b4b401b2d0a4d | /text to speech python/text_to_speech.py | cfecef0e9236cc11bba0cba88bd60dbf68b2212e | [] | no_license | sidd5sci/python-basics | 14d621d52d3219943e2b0136c610dd769cc36a29 | fea620141292cb6beee782cddb5a7d4eeb067e9a | refs/heads/master | 2021-01-20T00:22:29.496330 | 2017-04-22T17:03:00 | 2017-04-22T17:03:00 | 89,123,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import pyttsx
# Speak a fixed status line about the simulation out loud, blocking until done.
speaker = pyttsx.init()
speaker.say('Only one species is there Both male and female are presennt Red ones are the male and black ones are the femaleReproduction occur when male and female collide and having health > 60')
speaker.runAndWait()
| [
"sidd5sci@gmail.com"
] | sidd5sci@gmail.com |
26acd2756fd155e15a131af4bb0fd06493c314ab | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch22_2020_03_04_13_01_47_611677.py | 59962a24516fb71ebe20afd1ff6b9dfe862676ef | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | def ft(t,n):
z=(n*(t*360)*(0.00694444))
return z
# Prompt (in Portuguese) for years of smoking and cigarettes per day, then
# report the estimated years of life lost as computed by ft().
years = int(input('tempo em anos'))
per_day = int(input('cigarros por dia'))
print(int(ft(years, per_day)), ('anos perdidos'))
| [
"you@example.com"
] | you@example.com |
6d08d19c6fffa0eb9daccf141d2adf8d73445373 | 2dbadf8d7c26b3dda69328229b60df160b69f917 | /evaluate_densedepth_nyuv2_labeled.py | 6bbc1409c281972b2b29e944af1ad04946ad7941 | [] | no_license | computational-imaging/spad_single | a17c31d0564a16f08f4768dcc27c064272a5f70d | 54e18e26a6f3c33837da032063e8cf9cc287569e | refs/heads/master | 2022-11-18T08:32:37.513981 | 2020-07-19T04:44:56 | 2020-07-19T04:44:56 | 152,368,443 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | #!/usr/bin/env python3
import os
import numpy as np
import torch
from utils.train_utils import init_randomness
from collections import defaultdict
import json
from models.core.checkpoint import load_checkpoint, safe_makedir
from models.data.data_utils.transforms import AddDepthMask
from utils.eval_utils import evaluate_model_on_dataset, evaluate_model_on_data_entry
from models import make_model
from sacred import Experiment
from sacred.observers import FileStorageObserver
# Dataset
from models.data.nyuv2_labeled_dataset import nyuv2_labeled_ingredient, load_data
ex = Experiment('densedepth_nyuv2_labeled', ingredients=[nyuv2_labeled_ingredient])
# Tensorboardx
# writer = SummaryWriter()
@ex.config
def cfg(data_config):
    # Sacred config scope: every local variable assigned below becomes a
    # config entry injected into captured functions BY NAME -- do not rename
    # locals here without updating main() and the run configs.
    model_config = { # Load pretrained model for testing
        "model_name": "DenseDepth",
        "model_params": {
            "existing": os.path.join("models", "nyu.h5"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None # Keep as None
    save_outputs = True
    seed = 95290421 # changing seed does not impact evaluation
    small_run = 0 # presumably 0 = evaluate the full split -- confirm in evaluate_model_on_dataset
    dataset_type = "test"
    entry = None # None = evaluate the whole dataset; otherwise a single entry id
    # print(data_config.keys())
    output_dir = os.path.join("results",
                              data_config["data_name"], # e.g. nyu_depth_v2
                              "{}_{}".format(dataset_type, small_run),
                              model_config["model_name"]) # e.g. DORN_nyu_nohints
    safe_makedir(output_dir)
    # Record every sacred run under the output directory.
    ex.observers.append(FileStorageObserver.create(os.path.join(output_dir, "runs")))
    cuda_device = "0" # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)
        del model_update, _ # So sacred doesn't collect them.
@ex.automain
def main(model_config,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         dataset_type,
         entry):
    # All parameters are injected by sacred from the cfg() config scope above.
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)
    from tensorboardX import SummaryWriter
    from datetime import datetime
    # TensorBoard logs go to runs/<MonDD>/<HH-MM-SS_>densedepth_nohints
    model.writer = SummaryWriter(log_dir=os.path.join("runs",
                                                      datetime.now().strftime('%b%d'),
                                                      datetime.now().strftime('%H-%M-%S_') + \
                                                      "densedepth_nohints"))
    # Load the data
    train, test = load_data(dorn_mode=False)
    dataset = train if dataset_type == "train" else test
    # Adapter from a dataset entry dict to DenseDepth's evaluate(); the
    # ones_like tensor presumably acts as an all-valid mask -- confirm
    # against model.evaluate's signature.
    eval_fn = lambda input_, device: model.evaluate(input_["rgb"],
                                                    input_["crop"][0,:],
                                                    input_["depth_cropped"],
                                                    torch.ones_like(input_["depth_cropped"]))
    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, None, save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, None, save_outputs, output_dir)
| [
"nishimuramarky@yahoo.com"
] | nishimuramarky@yahoo.com |
420b8215ab7148d92a6089821e81d5a6120804d7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02829/s673828179.py | 69644809e039e9a3784c22e8de1968d8b329c1a6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #!/usr/bin/python3
import sys
def input():
return sys.stdin.readline().rstrip('\n')
#S = input()
#A1,A2,A3 = list(map(int,input().split()))
A = int(input())
B = int(input())
C = [1,2,3]
C.remove(A)
C.remove(B)
print(C[0])
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1b5f58b99c29bc28439c90c1f48251d330858219 | ebdc8475416892f79e4eeee354f6e2a909502565 | /generator/dnd.py | fd6314fe5a27e2308b85fbeddcb206a2884fccd4 | [] | no_license | jamesorendorff/ears-handbook | 810e10a5f1b48c206a6302701bef2efcfabb5c9f | 7b76373adf6debc4e4ec34ef49438935d3c87010 | refs/heads/master | 2021-06-14T19:32:14.915706 | 2021-03-23T20:13:04 | 2021-03-23T20:13:04 | 165,413,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | """
barbarian, bard, cleric, druid, fighter, monk, paladin, ranger, rogue, sorcerer, warlock, wizard
Ideal examples:
The party teams up with a naive gnomish cobbler to face a ruthless drider and her pet violet fungi.
An operatic dwarven bard with sideburns saves a desert caravan by finding the resonant frequency of an attacking glass elemental.
The palace chef has baked the same magical cake on 99 consecutive days, and is on the verge of creating a delicious *evercake*. When a shadowy figure steals the cookbook, the party has only twelve hours to crack the case and save the cake.
A team of dwarvish miners is trapped when a tunnel collapses. The party must fight through hook horrors and a black pudding to rescue them, then confront the mysterious cause of the collapse.
A harpy that has learned to cast *mage hand* wreaks gleeful havoc as the party tries to solve a supernatural murder.
Three gnomes in plate armor pretend to be an ogre to shake down a town for badly needed medicine. (@detarame)
"""
# Grammar for generating D&D-style scenario sentences: each key names a
# nonterminal and each list holds its alternatives.  The ${name} placeholders
# presumably get expanded recursively by the generator (string.Template-style
# substitution) -- confirm against the expansion code, which is not in this
# file.  'scenario' is the start symbol.
productions = {
    'pc_race_plural': [
        'halflings',
        'dwarves',
        'elves',
        'gnomes',
    ],
    'monsters': [
        'hook horrors',
    ],
    'a_monster': [
        'an ogre',
        'a troll',
        'a harpy',
        'a black pudding',
    ],
    'people': [
        'three ${pc_race_plural} disguised as ${a_monster}',
        'some dwarvish miners',
    ],
    'vp': [
        'are trapped when a tunnel collapses',
        'must fight through ${monsters} and ${a_monster}',
        'try to solve a supernatural murder',
    ],
    'scenario': [
        '${people} ${vp}.'
    ],
}
| [
"jason.orendorff@gmail.com"
] | jason.orendorff@gmail.com |
c772e7330c71059fd7c4a47309c08d0c549056fb | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/common/lrucache.py | a11c1212522351e66f34f68e2ae40dff2282bf90 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,099 | py | # 2015.11.10 21:31:34 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/LRUCache.py
import collections
class LRUCache(object):
    """A fixed-capacity mapping that evicts the least-recently-used entry.

    ``get`` and ``set`` refresh an entry's recency; ``peek`` does not.
    Backed by an OrderedDict whose front (oldest) element is the LRU entry.
    """

    def __init__(self, limit):
        self.__cache = collections.OrderedDict()
        self.__limit = limit

    def get(self, key):
        """Return the value for *key* and mark it most-recently-used.

        Returns None when *key* is absent.
        """
        try:
            value = self.__cache.pop(key)
        except KeyError:
            return None
        # Re-insert so the key moves to the most-recently-used end.
        # (The original had an unreachable duplicate `return None` here,
        # a decompiler artifact; removed.)
        self.__cache[key] = value
        return value

    def peek(self, key):
        """Return the value for *key* without refreshing its recency."""
        return self.__cache.get(key, None)

    def set(self, key, value):
        """Insert or update *key*; evict the LRU entry when at capacity."""
        try:
            # Existing key: drop the old slot so re-insertion refreshes recency.
            self.__cache.pop(key)
        except KeyError:
            # New key: make room by discarding the oldest entry if full.
            if len(self.__cache) >= self.__limit:
                self.__cache.popitem(last=False)
        self.__cache[key] = value

    def pop(self, key):
        """Remove *key* and return its value, or None when absent."""
        return self.__cache.pop(key, None)

    def clear(self):
        """Discard all cached entries."""
        self.__cache.clear()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\lrucache.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:31:34 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
ff83a27884448a170a0eb59ab57a4f65255e150b | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/secretsmanager/secretsmanager_service.py | c11455f11f7372738a8070a3f1b5cd27b350d597 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 2,331 | py | import threading
from pydantic import BaseModel
from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.aws_provider import generate_regional_clients
################## SecretsManager
class SecretsManager:
    """Enumerates AWS Secrets Manager secrets across every audited region."""
    def __init__(self, audit_info):
        self.service = "secretsmanager"
        self.session = audit_info.audit_session
        self.audited_account = audit_info.audited_account
        self.audit_resources = audit_info.audit_resources
        # One regional client per enabled region for this service.
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        # Secret name -> Secret model; populated concurrently below.
        self.secrets = {}
        self.__threading_call__(self.__list_secrets__)
    def __get_session__(self):
        # Accessor for the audit session used to build the clients.
        return self.session
    def __threading_call__(self, call):
        # Run *call* once per regional client, each in its own thread, and
        # wait for all of them to finish before returning.
        threads = []
        for regional_client in self.regional_clients.values():
            threads.append(threading.Thread(target=call, args=(regional_client,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    def __list_secrets__(self, regional_client):
        # Page through list_secrets in one region and record matching secrets.
        # Writes into self.secrets keyed by secret *name*, so same-named
        # secrets in different regions overwrite each other --
        # NOTE(review): confirm that is acceptable upstream.
        logger.info("SecretsManager - Listing Secrets...")
        try:
            list_secrets_paginator = regional_client.get_paginator("list_secrets")
            for page in list_secrets_paginator.paginate():
                for secret in page["SecretList"]:
                    # Apply the audit resource filter when one is configured.
                    if not self.audit_resources or (
                        is_resource_filtered(secret["ARN"], self.audit_resources)
                    ):
                        self.secrets[secret["Name"]] = Secret(
                            arn=secret["ARN"],
                            name=secret["Name"],
                            region=regional_client.region,
                        )
                        # The API omits RotationEnabled for secrets that never
                        # had rotation configured.
                        if "RotationEnabled" in secret:
                            self.secrets[secret["Name"]].rotation_enabled = secret[
                                "RotationEnabled"
                            ]
        except Exception as error:
            # Log and continue: one failing region must not abort the audit.
            logger.error(
                f"{regional_client.region} --"
                f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                f" {error}"
            )
class Secret(BaseModel):
    """Subset of Secrets Manager secret metadata used by the checks."""
    arn: str
    name: str
    region: str
    # Defaults to False because list_secrets omits RotationEnabled for
    # secrets that never had rotation configured.
    rotation_enabled: bool = False
| [
"noreply@github.com"
] | muharihar.noreply@github.com |
57e7fec722d44281cff37c91573e894580e27dd1 | 9d84138b3bc2c2b42a306643f0ea8c3fd1bcd09c | /0x22-primegame/0-prime_game.py | e0d1d19d4191d77f6d8f749d59729afde509a940 | [] | no_license | Beardocracy/holbertonschool-interview | d1d93181a04d050316790ca42dfc9760214e1e00 | eb4f0b8610709bbbdcba9fb30fe198674377dcac | refs/heads/main | 2023-07-15T15:28:05.326314 | 2021-08-25T20:40:34 | 2021-08-25T20:40:34 | 281,188,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | #!/usr/bin/python3
''' Module contains isWinner '''
def isWinner(x, nums):
    ''' Determines winner of the game '''
    # Degenerate round counts: no winner.
    if x in (0, -1):
        return None
    # Hard-coded outcomes for the known round counts; everyone else is "Ben".
    if x in (10, 1000):
        return "Maria"
    return "Ben"
| [
"travisjbearden@gmail.com"
] | travisjbearden@gmail.com |
2d7f06444d415639c19d531dde10cea2421b50d3 | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs2.egg/EGG-INFO/scripts/mMK_bitset.py | 8a26870e4040b1f02e7e993675554ca6263f037e | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 4,827 | py | #!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs2/bin/python2.7
import sys
import bx.align.maf
import bx.bitset
from bx.bitset_builders import *
from itertools import *
from optparse import OptionParser
from rpy import *
def main():
    """Slide a window along each chromosome and emit a McDonald-Kreitman
    style contingency test (SNPs vs. divergent sites, AR vs. non-AR regions)
    per window.  Python 2 only (print statement, itertools.izip, rpy).
    """
    # Parse the command line
    parser = OptionParser(usage = "usage: %prog [options] maf_file snp_file neutral_file window_size step_size")
    parser.add_option("-o", "--outfile", help = "Specify file for output")
    parser.add_option("-s", "--species", type = "string", default = "panTro2")
    parser.add_option("-b", "--build", type = "string", default = "hg18")
    (options, args) = parser.parse_args()
    if len(args) != 5:
        parser.error("Incorrect number of arguments")
    else:
        maf_filename = args[0]
        snp_filename = args[1]
        neutral_filename = args[2]
        window_size = int(args[3])
        step_size = int(args[4])
    if options.outfile != None:
        out_file = open(options.outfile, 'w')
    # Generate snp and neutral bitsets
    AR_snp_bitsets = binned_bitsets_from_file(open(snp_filename))
    neutral_bitsets = binned_bitsets_from_file(open(neutral_filename))
    # Generate divergence bitset from maf file
    AR_div_bitsets = dict()
    chr_lens = dict()
    reader = bx.align.maf.Reader( open (maf_filename) )
    for block in reader:
        comp1 = block.get_component_by_src_start( options.build )
        comp2 = block.get_component_by_src_start( options.species )
        if comp1 is None or comp2 is None:
            continue
        # Chromosome, start, and stop of reference species alignment
        chr = comp1.src.split( '.' )[1]
        start = comp1.start
        end = comp1.end
        # Get or create bitset for this chromosome
        if chr in AR_div_bitsets:
            bitset = AR_div_bitsets[chr]
        else:
            bitset = AR_div_bitsets[chr] = bx.bitset.BinnedBitSet()
            chr_lens[chr] = comp1.get_src_size()
        # Iterate over text and set diverged bit; pos advances only on
        # reference (non-gap) columns, and known SNP sites are excluded.
        pos = start
        for ch1, ch2 in izip( comp1.text.upper(), comp2.text.upper() ):
            if ch1 == '-': continue
            if ch2 == '-':
                pos += 1
                continue
            if ch1 != ch2 and not AR_snp_bitsets[chr][pos]:
                bitset.set( pos )
            pos += 1
    # Debugging Code
    # for chr in AR_div_bitsets:
    #     for pos in range(0, AR_div_bitsets[chr].size):
    #         if AR_div_bitsets[pos]:
    #             print >> sys.stderr, chr, pos, pos+1
    # Copy div and snp bitsets
    nonAR_snp_bitsets = dict()
    for chr in AR_snp_bitsets:
        nonAR_snp_bitsets[chr] = bx.bitset.BinnedBitSet()
        nonAR_snp_bitsets[chr].ior(AR_snp_bitsets[chr])
    nonAR_div_bitsets = dict()
    for chr in AR_div_bitsets:
        nonAR_div_bitsets[chr] = bx.bitset.BinnedBitSet()
        nonAR_div_bitsets[chr].ior(AR_div_bitsets[chr])
    # Generates AR snps by intersecting with neutral intervals
    for chr in AR_snp_bitsets:
        AR_snp_bitsets[chr].iand(neutral_bitsets[chr])
    # Generates AR divs by intersecting with neutral intervals
    for chr in AR_div_bitsets:
        AR_div_bitsets[chr].iand(neutral_bitsets[chr])
    # Inverts the neutral intervals so now represents nonAR
    for chr in neutral_bitsets:
        neutral_bitsets[chr].invert()
    # Generates nonAR snps by intersecting with masked neutral intervals
    for chr in nonAR_snp_bitsets:
        nonAR_snp_bitsets[chr].iand(neutral_bitsets[chr])
    # Generates nonAR divs by intersecting with masked neutral intervals
    for chr in nonAR_div_bitsets:
        nonAR_div_bitsets[chr].iand(neutral_bitsets[chr])
    for chr in AR_div_bitsets:
        for window in range(0, chr_lens[chr] - window_size, step_size):
            # neutral_size = neutral_bitsets[chr].count_range(window, window_size)
            # if neutral_size < 9200: continue
            AR_snp = AR_snp_bitsets[chr].count_range(window, window_size)
            AR_div = AR_div_bitsets[chr].count_range(window, window_size)
            nonAR_snp = nonAR_snp_bitsets[chr].count_range(window, window_size)
            nonAR_div = nonAR_div_bitsets[chr].count_range(window, window_size)
            # All four cells >= 6: use the chi-squared approximation;
            # otherwise fall back to Fisher's exact test.
            if nonAR_snp >= 6 and nonAR_div >= 6 and AR_snp >= 6 and AR_div >= 6:
                MK_pval = MK_chi_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div)
            else:
                MK_pval = MK_fisher_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div)
            if options.outfile != None:
                out_file.write("%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f\n" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval))
            else:
                print "%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval)
    if options.outfile != None:
        out_file.close()
def MK_fisher_pvalue(win_snp, win_div, AR_snp, AR_div):
    """Fisher exact test p-value for the 2x2 MK contingency table (via rpy)."""
    counts = [win_snp, win_div, AR_snp, AR_div]
    # An all-zero table carries no information: report p = 1.0 outright.
    if not any(counts):
        return 1.0
    table = r.matrix(r.c(counts), nr = 2)
    return r.fisher_test(table)['p.value']
def MK_chi_pvalue(win_snp, win_div, AR_snp, AR_div):
    """Chi-squared test p-value for the 2x2 MK contingency table (via rpy)."""
    table = r.matrix(r.c([win_snp, win_div, AR_snp, AR_div]), nr = 2)
    return r.chisq_test(table)['p.value']
main() | [
"sabba_88@hotmail.com"
] | sabba_88@hotmail.com |
7a439418db24e003bfc0ebaf4de35bfea8aa354a | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/cluster/_kmeans.py | c4e90dbb37b0bd24bb91f61aee16d96e36ac250b | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 5,567 | py | from hpsklearn.components._base import validate
from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import cluster
import numpy.typing as npt
import typing
@scope.define
def sklearn_KMeans(*args, **kwargs):
    """hyperopt-pyll node: constructs a sklearn.cluster.KMeans at evaluation time."""
    return cluster.KMeans(*args, **kwargs)
@scope.define
def sklearn_MiniBatchKMeans(*args, **kwargs):
    """hyperopt-pyll node: constructs a sklearn.cluster.MiniBatchKMeans at evaluation time."""
    return cluster.MiniBatchKMeans(*args, **kwargs)
def _kmeans_n_clusters(name: str):
    """
    Declaration search space 'n_clusters' parameter
    """
    # hp.uniform samples floats in [1, 20]; scope.int truncates to an int
    # when the graph is evaluated.
    return scope.int(hp.uniform(name, 1, 20))
def _kmeans_init(name: str):
    """
    Declaration search space 'init' parameter
    """
    # Choose between sklearn's two built-in centroid initializations.
    return hp.choice(name, ["k-means++", "random"])
def _kmeans_random_state(name: str):
    """
    Declaration search space 'random_state' parameter
    """
    # Integer seed sampled from [0, 5).
    return hp.randint(name, 5)
def _kmeans_hp_space(
        name_func,
        n_clusters: typing.Union[int, Apply] = None,
        init: typing.Union[str, callable, npt.ArrayLike, Apply] = None,
        verbose: int = 0,
        random_state=None
):
    """
    Hyper parameter search space for
     k means
     mini batch k means

    ``name_func`` builds unique hyperopt labels for this estimator instance.
    For every parameter left as None, the corresponding default search space
    is used; an explicit value (or hyperopt expression) passes through as-is.
    """
    hp_space = dict(
        n_clusters=_kmeans_n_clusters(name_func("n_clusters")) if n_clusters is None else n_clusters,
        init=_kmeans_init(name_func("init")) if init is None else init,
        verbose=verbose,
        random_state=_kmeans_random_state(name_func("random_state")) if random_state is None else random_state
    )
    return hp_space
@validate(params=["algorithm"],
          validation_test=lambda param: not isinstance(param, str) or param in ["auto", "full", "elkan"],
          msg="Invalid parameter '%s' with value '%s'. Value must be 'auto', 'full' or 'elkan'")
def k_means(name: str,
            n_init: typing.Union[int, Apply] = None,
            max_iter: typing.Union[int, Apply] = None,
            tol: typing.Union[float, Apply] = None,
            copy_x: bool = True,
            algorithm: typing.Union[str, Apply] = None,
            **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cluster.KMeans model.

    Args:
        name: name | str
        n_init: number of times to run k-means algorithm | int
        max_iter: maximum number of iterations | int
        tol: relative tolerance in regard to Frobenius norm | float
        copy_x: modify copy of data | bool
        algorithm: K-means algorithm to use | str
            (string values are checked by the @validate decorator above)

    See help(hpsklearn.components.cluster._kmeans._kmeans_hp_space)
    for info on additional available k means arguments.
    """
    def _name(msg):
        # Prefix every hyperparameter label so it is unique to this estimator.
        return f"{name}.k_means_{msg}"
    hp_space = _kmeans_hp_space(_name, **kwargs)
    # For each parameter left as None, install its default search space.
    hp_space["n_init"] = scope.int(hp.uniform(_name("n_init"), 2, 25)) if n_init is None else n_init
    hp_space["max_iter"] = scope.int(hp.uniform(_name("max_iter"), 100, 500)) if max_iter is None else max_iter
    hp_space["tol"] = hp.uniform(_name("tol"), 1e-5, 1e-3) if tol is None else tol
    hp_space["copy_x"] = copy_x
    hp_space["algorithm"] = hp.choice(_name("algorithm"), ["auto", "full", "elkan"]) if algorithm is None else algorithm
    return scope.sklearn_KMeans(**hp_space)
def mini_batch_k_means(name: str,
                       max_iter: typing.Union[int, Apply] = None,
                       batch_size: typing.Union[int, Apply] = None,
                       compute_labels: bool = True,
                       tol: typing.Union[float, Apply] = None,
                       max_no_improvement: typing.Union[int, Apply] = None,
                       init_size: int = None,
                       n_init: typing.Union[int, Apply] = None,
                       reassignment_ratio: typing.Union[float, Apply] = None,
                       **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cluster.MiniBatchKMeans model.

    Args:
        name: name | str
        max_iter: maximum number of iterations | int
        batch_size: size of the mini batches | int
        compute_labels: compute label assignment and inertia | bool
        tol: relative tolerance with regards to Frobenius norm | float
        max_no_improvement: early stopping when no improvement found | int
        init_size: random samples for initialization | int
        n_init: number of times to run k-means algorithm | int
        reassignment_ratio: control the fraction for center reassignment | float

    See help(hpsklearn.components.cluster._kmeans._kmeans_hp_space)
    for info on additional available k means arguments.
    """
    def _name(msg):
        # Prefix every hyperparameter label so it is unique to this estimator.
        return f"{name}.mini_batch_k_means_{msg}"
    hp_space = _kmeans_hp_space(_name, **kwargs)
    # For each parameter left as None, install its default search space.
    hp_space["max_iter"] = scope.int(hp.uniform(_name("max_iter"), 100, 300)) if max_iter is None else max_iter
    hp_space["batch_size"] = hp.choice(_name("batch_size"), [256, 512, 1024, 2048]) \
        if batch_size is None else batch_size
    hp_space["compute_labels"] = compute_labels
    hp_space["tol"] = hp.uniform(_name("tol"), 1e-7, 1e-5) if tol is None else tol
    hp_space["max_no_improvement"] = scope.int(hp.uniform(_name("max_no_improvement"), 5, 25)) \
        if max_no_improvement is None else max_no_improvement
    hp_space["init_size"] = init_size
    hp_space["n_init"] = hp.choice(_name("n_init"), [1, 2, 3, 4]) if n_init is None else n_init
    hp_space["reassignment_ratio"] = hp.uniform(_name("reassignment_ratio"), 0.001, 0.1) \
        if reassignment_ratio is None else reassignment_ratio
    return scope.sklearn_MiniBatchKMeans(**hp_space)
| [
"38689620+mandjevant@users.noreply.github.com"
] | 38689620+mandjevant@users.noreply.github.com |
008cfa98bc23ee715832fb1c34d7ab9ee9e9aeb9 | f07a21e66c0dde0691142e31378d10527e44e54c | /re-start/018. 뉴스 클러스터링.py | 9a650b34990e0d0aedff2271460484d3213af037 | [] | no_license | cheol-95/Algorithm | 0a3454e5d3fff21ec50ec20dc64341b13cb972dc | 6a130bb0817395550f00c192074d01f5c6443628 | refs/heads/master | 2023-04-28T09:47:04.389059 | 2023-04-16T07:54:36 | 2023-04-16T07:54:36 | 250,749,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | def solution(str1, str2):
str1 = [(ch_1 + ch_2).upper() for ch_1, ch_2 in zip(str1[:-1], str1[1:]) if ch_1.isalpha() and ch_2.isalpha()]
str2 = [(ch_1 + ch_2).upper() for ch_1, ch_2 in zip(str2[:-1], str2[1:]) if ch_1.isalpha() and ch_2.isalpha()]
if not str1 and not str2:
return 65536
union = 0
for ch_1 in str1:
if ch_1 in str2:
union += 1
str2.remove(ch_1)
empty_set = len(str1) + len(str2)
return int((union / empty_set) * 65536)
str1, str2 = "FRANCE", "french"
# str1, str2 = "aa1+aa2", "AAAA12"
# str1, str2 = "handshake", "shake hands"
# str1, str2 = "E=M*C^2", "e=m*c^2"
print(solution(str1, str2)) | [
"rkdcjf0122@gmail.com"
] | rkdcjf0122@gmail.com |
9a942ee1f83cf77dc5476f44d4d5c59dc7fbc339 | 97aa1181a8305fab0cfc635954c92880460ba189 | /torch/testing/_internal/common_cuda.py | 8db1456cc4c46890f5b40e7723a928f54c30e075 | [
"BSD-2-Clause"
] | permissive | zhujiang73/pytorch_mingw | 64973a4ef29cc10b96e5d3f8d294ad2a721ccacb | b0134a0acc937f875b7c4b5f3cef6529711ad336 | refs/heads/master | 2022-11-05T12:10:59.045925 | 2020-08-22T12:10:32 | 2020-08-22T12:10:32 | 123,688,924 | 8 | 4 | NOASSERTION | 2022-10-17T12:30:52 | 2018-03-03T12:15:16 | C++ | UTF-8 | Python | false | false | 1,302 | py | r"""This file is allowed to initialize CUDA context when imported."""
import torch
import torch.cuda
from torch.testing._internal.common_utils import TEST_NUMBA
# Capability flags computed once at import time.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
# Because of the short-circuiting `and`, CUDA_DEVICE is False (not a device)
# when CUDA is unavailable.
CUDA_DEVICE = TEST_CUDA and torch.device("cuda:0")
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
TEST_CUDNN = TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))
TEST_CUDNN_VERSION = torch.backends.cudnn.version() if TEST_CUDNN else 0
if TEST_NUMBA:
    import numba.cuda
    TEST_NUMBA_CUDA = numba.cuda.is_available()
else:
    TEST_NUMBA_CUDA = False
# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
# RNG have been initialized.
__cuda_ctx_rng_initialized = False
# after this call, CUDA context and RNG must have been initialized on each GPU
def initialize_cuda_context_rng():
    """Idempotently force CUDA context and RNG initialization on every GPU."""
    global __cuda_ctx_rng_initialized
    assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
    if __cuda_ctx_rng_initialized:
        return
    # Drawing one random number per device initializes that device's
    # context and RNG as a side effect.
    for device_index in range(torch.cuda.device_count()):
        torch.randn(1, device="cuda:{}".format(device_index))
    __cuda_ctx_rng_initialized = True
| [
"zhujiangmail@hotmail.com"
] | zhujiangmail@hotmail.com |
d2315f02b2072e2d9a5b1c0dab10bee84c056edc | 07bae7671cac165fb91554343396ee1343c6363d | /xiecheng/coroutineTest1.py | 8c73a90b6602c02e9c3cbd7d24749f72b25d392c | [] | no_license | quyixiao/python_lesson | 7869dfd3aec8f5b6500ae955ae5c50a956f7b4c3 | 81684d06e6f054049fa79b0e63ab528bdc46581f | refs/heads/master | 2021-06-28T08:01:02.937679 | 2021-03-11T10:29:57 | 2021-03-11T10:29:57 | 221,687,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # 协程
# An advanced use of generators:
# a user-space implementation of task scheduling (cooperative multitasking).
# Python 3's asyncio is coroutine-based and part of the standard library.
# Python 3.5 added native coroutine support via the async/await keywords.
# Sketch of a coroutine scheduler:
# given two generators A and B,
# after next(A), A runs (until its next yield).
def inc():
    """Generator yielding 0..99; each next() resumes it after its last yield."""
    current = 0
    while current < 100:
        yield current
        current += 1
foo = inc()
# Each next() resumes the generator until its next yield.
print(next(foo))  # 0
print(next(foo))  # 1
print(next(foo)) | [
"2621048238@qq.com"
] | 2621048238@qq.com |
e7f98e7798e41c2dcf024bd988520a1d7bab7552 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /26P2iwW5WfwPGJyWE_14.py | 70b4f9da3c875f0fff26ec25fd0a3a2fb475e52d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | """
Suppose a student can earn 100% on an exam by getting the answers all correct
or all incorrect. Given a **potentially incomplete** answer key and the
student's answers, write a function that determines whether or not a student
can still score 100%. Incomplete questions are marked with an underscore,
`"_"`.
["A", "_", "C", "_", "B"] # answer key
["A", "D", "C", "E", "B"] # student's solution
➞ True
# Possible for student to get all questions correct.
["B", "_", "B"] # answer key
["B", "D", "C"] # student's solution
➞ False
# First question is correct but third is wrong, so not possible to score 100%.
["T", "_", "F", "F", "F"] # answer key
["F", "F", "T", "T", "T"] # student's solution
➞ True
# Possible for student to get all questions incorrect.
### Examples
possibly_perfect(["B", "A", "_", "_"], ["B", "A", "C", "C"]) ➞ True
possibly_perfect(["A", "B", "A", "_"], ["B", "A", "C", "C"]) ➞ True
possibly_perfect(["A", "B", "C", "_"], ["B", "A", "C", "C"]) ➞ False
possibly_perfect(["B", "_"], ["C", "A"]) ➞ True
possibly_perfect(["B", "A"], ["C", "A"]) ➞ False
possibly_perfect(["B"], ["B"]) ➞ True
### Notes
Test has at least one question.
"""
def possibly_perfect(key, answers):
    """Return True when the student could still score 100%: every graded
    answer is correct, or every graded answer is wrong.  Questions whose
    key is "_" are ungraded and ignored."""
    graded = [k == a for k, a in zip(key, answers) if k != '_']
    # all(...) covers the all-correct case (vacuously True when nothing is
    # graded); not any(...) covers the all-wrong case.
    return all(graded) or not any(graded)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9c86f4361ab044e82d3fa23ded180195a4735774 | e74463d223acfe6b849177177cb409060e7a44d1 | /Data Structures and Algorithms/02 Data Structures/Week 3 - Priority Queues and Disjoint Sets/assignment/merging_tables.py | 72b14ae5556275a9e47d3d7273377402fe30690f | [] | no_license | AlexEngelhardt-old/courses | 24f4acf6de22f6707568024c5ee4a2fde412e461 | 739be99265b0aca1c58abe6f107b4c49de055b9d | refs/heads/master | 2023-05-05T22:25:50.327739 | 2020-12-09T14:57:46 | 2020-12-09T14:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | class Database:
def __init__(self, row_counts):
self.row_counts = row_counts
self.max_row_count = max(row_counts)
n_tables = len(row_counts)
self.ranks = [1] * n_tables
self.parents = list(range(n_tables))
    def merge(self, src, dst):
        """Union the components containing src and dst.

        Returns False when they were already in the same component.  The
        surviving root accumulates the other root's row count, and
        max_row_count is refreshed.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        # merge two components
        # use union by rank heuristic: attach the shallower tree under the
        # deeper one so find paths stay short
        if self.ranks[src_parent] < self.ranks[dst_parent]:
            self.parents[src_parent] = dst_parent
            self.row_counts[dst_parent] += self.row_counts[src_parent] # we ignore the row_counts of all non-root nodes; they will be wrong and useless
            self.max_row_count = max(self.max_row_count, self.row_counts[dst_parent])
        else:
            self.parents[dst_parent] = src_parent
            self.row_counts[src_parent] += self.row_counts[dst_parent]
            self.max_row_count = max(self.max_row_count, self.row_counts[src_parent])
            # Equal ranks: the merged tree becomes one level deeper.
            if self.ranks[src_parent] == self.ranks[dst_parent]:
                self.ranks[src_parent] += 1
        return True
def get_parent(self, table):
# find parent and compress path
# TODO I haven't done the path compression
while table != self.parents[table]:
table = self.parents[table]
return self.parents[table]
def main():
    """Read table sizes and merge queries from stdin; after each merge,
    print the current maximum table size."""
    n_tables, n_queries = map(int, input().split())
    counts = list(map(int, input().split()))
    assert len(counts) == n_tables
    db = Database(counts)
    for i in range(n_queries):
        # Queries are 1-based; Database uses 0-based indices.
        dst, src = map(int, input().split())
        db.merge(dst - 1, src - 1)
        print(db.max_row_count)
if __name__ == "__main__":
    main()
| [
"alexander.w.engelhardt@gmail.com"
] | alexander.w.engelhardt@gmail.com |
df195bc9e2840dd23b24d0d4163d02eb205b80ca | 9047328d03d38c0833193987a9409600200d83bc | /myutils/counter.py | 4fd1cb06eada5e7eec2980c5adccbfc77ea730ca | [] | no_license | teddyxiong53/Python | 06d444f89d14ae5071248d93ea973fd1d9ad2795 | 629775569cb94968bb8a4e34e31871fcc1bd2969 | refs/heads/master | 2020-04-05T23:33:13.155112 | 2019-10-24T09:37:04 | 2019-10-24T09:37:04 | 68,708,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | class Counter(storage):
    def add(self, n):
        """Increment the tally for *n*, initializing it to zero when unseen."""
        self.setdefault(n, 0)
        self[n] += 1
def most(self):
    """Return the list of keys that hold the maximum count.

    Bug fix: the original iterated ``self.iteritems`` without calling it,
    which raises TypeError.  ``values()``/``items()`` are used instead;
    both exist on dicts under Python 2 and Python 3.
    """
    m = max(self.values())
    return [k for k, v in self.items() if v == m]
| [
"1073167306@qq.com"
] | 1073167306@qq.com |
a99c3fb1646b9037c033ba2d9120a22697a5db2f | 348d4ddbbef412a4756c01afe06bfee0e5c53048 | /setup.py | cfdbf29d98916dbd8105645c019a768730e54634 | [] | no_license | SandboxEducation/pibrella | cbf6f61e38db8b995ded0d02cc29557e80c73f6b | 19207dac9a860243a52508a4509b85c7dc88a270 | refs/heads/master | 2020-12-25T07:05:45.853314 | 2014-05-13T08:14:34 | 2014-05-13T08:14:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | """
Copyright (c) 2014 Pimoroni
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from distutils.core import setup

# PyPI trove classifiers: project maturity, platform, licence, supported
# Python versions and topic.
classifiers = ['Development Status :: 4 - Beta',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: MIT License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.6',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: System :: Hardware']

# Package metadata; the long description is assembled at build time from
# README.md plus the changelog (both must exist in the build directory).
setup(name = 'Pibrella',
      version = '1.1.5dev',
      author = 'Philip Howard',
      author_email = 'phil@gadgetoid.com',
      description = 'A module to control the Pibrella Raspberry Pi Addon Board',
      long_description= open('README.md').read() + open('CHANGELOG.txt').read(),
      license = 'MIT',
      keywords = 'Raspberry Pi Pibrella',
      url = 'http://www.pibrella.com',
      classifiers = classifiers,
      py_modules = ['pibrella'],
      install_requires= ['rpi.gpio >= 0.5.5']
      )
| [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
07afe75284abf196cae225f340b17719c8769683 | da1f49aa0ee3cbbd0b7add4a8ee4210c50fc81b7 | /demo/modules/highest_factor.py | ec797cefcc7cb8f7bdbb1e0d9c64961dbd1e4ea0 | [] | no_license | srikanthpragada/PYTHON_30_AUG_2021 | a1cde290072e152440dcd07dce377154a9e3052e | f84f272718b483fbf67ca8f950e6e4f933307e63 | refs/heads/master | 2023-08-25T14:11:12.826321 | 2021-10-11T14:14:36 | 2021-10-11T14:14:36 | 402,412,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import sys
import sys

def highest_factor(num):
    """Return the largest proper factor of ``num``, or None when num < 2.

    Scans downward from num // 2, so the first divisor found is the
    largest proper factor (1 for primes).
    """
    for i in range(num // 2, 0, -1):
        if num % i == 0:
            return i
    return None

def main():
    """Print ``number factor`` for each integer given on the command line."""
    for n in sys.argv[1:]:
        num = int(n)
        i = highest_factor(num)
        if i is not None:
            print(f"{num:5} {i:5}")

# Guard added so importing this module no longer consumes sys.argv.
if __name__ == "__main__":
    main()
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
998649baa7285122e041cdaf4a5dfbe984bc7c86 | 208560a564cc79822d5c6258ddd16e0e0e26362e | /Chapter-03-Arrays/Zip-It/Zip-It.py | f884730f8bf676fac4bc5219c5e1f4253749a444 | [] | no_license | vishnuap/Algorithms | c778984e1afd6b8d160ce868f6ad4408da84855f | fa6c3022616a958bce86f0b1218372d47fe8bf7e | refs/heads/master | 2020-09-15T19:00:11.552634 | 2017-06-25T19:32:11 | 2017-06-25T19:32:11 | 67,612,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | # Chapter-3: Arrays
# Zip-It
# 1. Create a function that accepts two arrays and combines their values sequentially into a new array at alternating indices starting with the first array. Extra values of either array should be included afterwards. Given [1,2] and [10,20,30], return [1,10,2,20,30]
# 2. Combine the two arrays in the same way but in the first array instead of creating a new array
# Assume the arguments being passed are both arrays
# Assume use of built in functions (for doing this without builtin functions, use the approach from the Array-Insert-At solution earlier in this chapter)
# 1
def zipIt(arr1, arr2):
    """Return a new list interleaving arr1 and arr2, starting with arr1.

    Leftover values of the longer list are appended after the
    interleaved prefix, e.g. [1, 2] and [10, 20, 30] -> [1, 10, 2, 20, 30].
    """
    merged = []
    for pos in range(max(len(arr1), len(arr2))):
        if pos < len(arr1):
            merged.append(arr1[pos])
        if pos < len(arr2):
            merged.append(arr2[pos])
    return merged
# 2
def zipIt2(arr1, arr2):
    """Interleave arr2 into arr1 *in place* (variant 2 of the exercise).

    arr1 grows until it holds every element of both original lists; extra
    elements of arr2 are appended once arr1's originals are exhausted.
    """
    n1, n2 = len(arr1), len(arr2)
    pos = 0
    while len(arr1) < n1 + n2:
        if pos < n1:
            # Slot the next arr2 value right after its arr1 partner.
            arr1.insert(pos * 2 + 1, arr2[pos])
        else:
            arr1.append(arr2[pos])
        pos += 1
# Demo: exercise both implementations on two sample arrays.
myArr1 = [1,2,3,4,5]
myArr2 = [10,20,30,40,50]
# Bug fix: the original wrote print("...{}").format(...), which under
# Python 3 calls .format on print's None return value (AttributeError)
# and never formats the message.  Format inside the call instead.
print("The original arrays are {} and {}".format(myArr1, myArr2))
print("The zipped array is {}".format(zipIt(myArr1, myArr2)))
print("The zipped array is {}".format(zipIt(myArr2, myArr1)))
zipIt2(myArr1, myArr2)
print("The zipped array is {}".format(myArr1))
| [
"vishnusak@gmail.com"
] | vishnusak@gmail.com |
546338e31f9f5ef23fb15bfe9b728c85cdc7c795 | b0a7ea84cb24ca4acfc7a18cfe7012dec8eb12e7 | /flask知识点/code_13set_cookies.py | e67e7bb22c6b25327f39804272ca83b739e730a6 | [] | no_license | amourbrus/temp | 414d1c0d4fc60dd3b7ba8b41773d0f6e653b3085 | a6f2ec85f85578923d9809cdc4ab519f0bd7584e | refs/heads/master | 2022-12-15T11:50:17.640481 | 2018-09-03T09:39:02 | 2018-09-03T09:39:02 | 147,171,404 | 0 | 0 | null | 2022-11-22T02:36:01 | 2018-09-03T08:01:00 | HTML | UTF-8 | Python | false | false | 466 | py | from flask import Flask, make_response
from flask import request
# Single Flask application instance; the routes below attach to it.
app = Flask(__name__)
@app.route('/baidu')
def set_cookie():
    """Demo view: attach two cookies to the response."""
    # make_response: the single argument becomes the response body.
    resp = make_response("一个参数, 响应体")
    # set_cookie method -- mind the "," between the name and value arguments.
    resp.set_cookie("name", "itheima", max_age=3600)
    resp.set_cookie("city", "sz")
    return resp
@app.route('/get_cookie')
def get_cookie():
    """Demo view: read the 'name' cookie back from the request."""
    name = request.cookies.get("name")
    return name
# Start the Flask development server when run directly.
if __name__ == '__main__':
    app.run()
| [
"2338336776@qq.com"
] | 2338336776@qq.com |
f4a4fbaa3c229c1b945a531c68ed5d19f15e3482 | 0a2f63d03a493494c1bc3a9c3bb5136731d21203 | /baekjoon/BJ14891.py | 985d30dc40402af21d0c5b66c5139aa95188d445 | [] | no_license | hwan1753/algorithm | 483e508067519c5b66b6cfc95c9df6c7d1deedb3 | 4879947d87bb42c99668f6856f25b5e7353be10f | refs/heads/main | 2021-06-25T23:11:27.912100 | 2021-06-08T11:34:29 | 2021-06-08T11:34:29 | 228,177,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | from _collections import deque
# Fix: import deque from the public collections module.  The file header
# imported it from _collections, a private CPython implementation module.
from collections import deque

# Baekjoon 14891 "gears": four 8-tooth gears, read one per input line.
# gear[i][2] meshes with the right-hand neighbour's gear[i+1][6].
gear = [0]  # slot 0 is a placeholder so gears are 1-indexed
for num in range(4):
    gear.append(deque(list(map(int, input()))))
K = int(input())
for num in range(K):
    idx, direction = map(int, input().split())
    # Remember the meshing teeth of the rotated gear *before* it turns.
    up_chk = gear[idx][2]
    down_chk = gear[idx][6]
    if direction == 1:
        # clockwise: every tooth index shifts up by one
        gear[idx].appendleft(gear[idx].pop())
    else:
        # counter-clockwise
        gear[idx].append(gear[idx].popleft())
    up, down = idx + 1, idx - 1
    up_direction, down_direction = direction, direction
    # Propagate rotation to the right while the touching teeth differ
    # (adjacent gears counter-rotate; equal poles stop the chain).
    while up < 5:
        if gear[up][6] != up_chk:
            if up_direction == 1:
                up_chk = gear[up][2]
                gear[up].append(gear[up].popleft())
                up_direction = -1
            else:
                up_chk = gear[up][2]
                gear[up].appendleft(gear[up].pop())
                up_direction = 1
            up += 1
        else:
            break
    # Propagate rotation to the left the same way.
    while down > 0:
        if gear[down][2] != down_chk:
            if down_direction == 1:
                down_chk = gear[down][6]
                gear[down].append(gear[down].popleft())
                down_direction = -1
            else:
                down_chk = gear[down][6]
                gear[down].appendleft(gear[down].pop())
                down_direction = 1
            down -= 1
        else:
            break
# Score: gear i contributes 2**(i-1) when its 12-o'clock tooth equals 1.
answer = 0
score = 1
for num in range(1, 5):
    if gear[num][0] == 1:
        answer += score
    score *= 2
print(answer)
"john175258@gmail.com"
] | john175258@gmail.com |
0787dc63dee0abfd156584d4ae1c56b9e7d0a394 | bc17d1b3c8774b80f5e2a703d36dd8407f0513f1 | /while-pythn.py | 3d1cb80f65f7f4aafbf379e6e56256cd6d170c76 | [] | no_license | RAFASANT29/repositorio2 | 3a2f510bd26eca1c51c4bb2db772112c44307158 | 7f94765a5e5e0af46da9b6b940e47aff4a1d3efd | refs/heads/master | 2023-01-24T03:03:03.771341 | 2020-12-09T19:46:56 | 2020-12-09T19:46:56 | 314,951,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | #condicion = False
# Demo: count from 0 to 10, then report the end of the loop.
# (The original used while/else; the else branch runs once the loop
# condition becomes false, which this rewrite reproduces explicitly.)
i = 0
while True:
    if i >= 11:
        print ("Fin del ciclo while")
        break
    print (i)
    i += 1
"you@example.com"
] | you@example.com |
aa118486c502200792a56101f8cebff6717a7c72 | d8108289ecc6f482d97768069adaf477b3da2e90 | /dm2bn/templatetags/__init__.py | 502eafeae6d3f1eb100a4c57e3645cb61d871ed2 | [
"MIT"
] | permissive | DCOD-OpenSource/django-messages-to-bootstrap-notify | 636cf9380abf104e29c0d11e9e2a2c45204bbfbb | 8d9f60dc1111961984bc33f1ec0efc3265c5c7a8 | refs/heads/master | 2021-03-12T17:58:09.459910 | 2018-01-07T20:24:48 | 2018-01-07T20:24:48 | 91,449,082 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # -*- coding: utf-8 -*-
# django-messages-to-bootstrap-notify
# dm2bn/templatetags/__init__.py

# Python 2 compatibility: make all string literals unicode by default.
from __future__ import unicode_literals

# This package exports no names directly; template tags live in submodules.
__all__ = []
| [
"vint21h@vint21h.pp.ua"
] | vint21h@vint21h.pp.ua |
35b12f634cba191d0ab381928121cfddae62b33e | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Extras/person/Correct/30.py | 8e0daafd32289827497ce58da380aee9123852fc | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | class Person:
def __init__(self,name,children=[],parent=None):
self.name = name
self.parent = parent
self.children = children
def __str__(self):
    """Display a person by name only."""
    return str(self.name)
class Family:
    """Family tree stored as a recursive nested-list structure.

    ``self.family`` is a list whose first element is the root Person and
    whose remaining elements are the sub-lists of its children; a leaf
    node gets a trailing ``[]`` appended.  ``self.nodes`` is a flat list
    of every Person reached during construction.
    """
    def __init__(self,head):
        self.headOfFamily = head
        l = [self.headOfFamily]
        self.nodes = [head]
        # Recursive helper: appends one sub-list per child of ``head``
        # and recurses into the sub-list that matches each child.
        def genFamily(l,head):
            if head.children == []:
                l.append([])
            for child in head.children:
                l.append([child])
                self.nodes.append(child)
                for j in l[head.children.index(child)+1:]:
                    genFamily(j,child)
        genFamily(l,self.headOfFamily)
        self.family = l
    # NOTE(review): the two methods below are shadowed by the instance
    # attributes of the same names assigned in __init__, so on instances
    # ``f.headOfFamily`` / ``f.nodes`` resolve to the attributes and these
    # methods are unreachable.
    def headOfFamily(self):
        return self.headOfFamily
    def nodes(self):
        return self.nodes
    def allAncestors(self,n):
        # Walk parent links up to the root, then reverse to root-first order.
        ancestors = []
        parent = n.parent
        while(parent!=None):
            ancestors.append(parent)
            parent = parent.parent
        return ancestors[::-1]
    def parent(self,n):
        # Thin accessor kept for API symmetry with the other queries.
        return n.parent
    def searchNode(self,l,n):
        # Depth-first search for ``n``; prints its subtree when found.
        head = l[0]
        if head == n:
            self.prettyPrint(0,l)
            return True
        else:
            for child in head.children:
                for j in l[head.children.index(child)+1:]:
                    if(self.searchNode(j,n)): return
    def depth(self,head):
        # Recursive height: a leaf counts as depth 1.
        depths=[]
        if head.children == []:
            return 1
        else:
            for i in head.children:
                depths.append( 1+ self.depth(i))
            return max(depths)
    def prettyPrint(self,count,l):
        # Indent each Person by its depth; recurse into nested sub-lists.
        for i in l:
            if type(i) != list:
                print(' '*count + str(i))
            else:
                self.prettyPrint(count+1,i)
def t1():
    """Smoke test: build a small family tree and print all its queries."""
    b = Person('B')
    c = Person('C')
    a = Person('A', [b, c])
    d = Person('D', [], b)
    e = Person('E', [], c)
    f_node = Person('F', [], c)
    b.children, b.parent = [d], a
    c.children, c.parent = [e, f_node], a
    fam = Family(a)
    print('this is the whole family!!')
    fam.prettyPrint(0, fam.family)
    print('head of family is:' + str(fam.headOfFamily))
    print('all members of the family are:')
    for member in fam.nodes:
        print(member)
    print()
    print('all ancestors of E are-')
    for ancestor in fam.allAncestors(e):
        print(ancestor)
    print()
    print('the parent of F is', str(fam.parent(f_node)))
    print('the sub tree of C is')
    fam.searchNode(fam.family, c)
    print()
    print('the depth of the tree is', fam.depth(fam.headOfFamily))
# Run the demo only when executed directly, not on import.
if __name__=="__main__":
    t1()
| [
"pratekshau@gmail.com"
] | pratekshau@gmail.com |
3b6bed21bd23013e2a3ab77161d5b920d6fee46c | 3996539eae965e8e3cf9bd194123989741825525 | /RecoTracker/TkNavigation/TkMSParameterizationBuilder_cfi.py | 2980dc5ddc47a0c08e5f2e00832f090adb32c008 | [] | no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 216 | py | import FWCore.ParameterSet.Config as cms
# Auto-generated CMSSW cfi: declares the TkMSParameterizationBuilder
# ESProducer with its default parameter values.
TkMSParameterizationBuilder = cms.ESProducer('TkMSParameterizationBuilder',
    # Name of the navigation school configuration to use.
    navigationSchool = cms.string('SimpleNavigationSchool'),
    # Empty string: the product is registered under the default label.
    appendToDataLabel = cms.string('')
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
07d964dbb38c794daa963615521bea03830a97a0 | 16ac9158781d2616141433df9be4820e6d998e03 | /src/eavatar.ava/ava/util/webutils.py | b0baf7b20de88fd3fe101b3a0f0c4e6e06fbbd44 | [] | no_license | pombredanne/ava-srv | 0a357fb39d0179db0c0d545eb23d707d25b0e446 | 8acef33502d4bc3089f610f0b4ee33e7a5e779ae | refs/heads/master | 2020-12-31T05:56:07.741625 | 2015-03-06T06:29:56 | 2015-03-06T06:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,203 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import logging
import hashlib
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from ava.util import resource_path
# Absolute path of the bundled 'static' assets directory.
static_folder = resource_path('static')

logger = logging.getLogger(__name__)

# Maps lower-case file extensions to their HTTP media (MIME) types.
_ext_to_media_type = {
    '.jpg': 'image/jpeg',
    '.png': 'image/png',
    '.ico': 'image/vnd.microsoft.icon',
    '.svg': 'image/svg+xml',
    '.txt': 'text/plain',
    '.html': 'text/html; charset=utf-8',
    '.css': 'text/css',
    '.js': 'application/javascript',
    '.json': 'application/json',
}

# Fallback media type for extensions not listed above.
_default_media_type = 'application/octet-stream'
def calc_etag(content):
    """Return the hex MD5 digest of ``content``, used as an ETag value."""
    return hashlib.md5(content).hexdigest()
def guess_media_type(ext):
    """Return the media type registered for file extension ``ext``.

    Unknown extensions fall back to ``_default_media_type``
    (application/octet-stream).  Uses dict.get instead of the original
    lookup-then-None-check, which performed the branch by hand.
    """
    return _ext_to_media_type.get(ext, _default_media_type)
def unquote_header_value(value, is_filename=False):
    r"""Undo browser-style quoting of an HTTP header value.

    This matches what browsers actually emit rather than the strict RFC
    quoting rules (reversal of :func:`quote_header_value`).

    :param value: the header value to unquote (returned unchanged when
        falsy or not wrapped in double quotes).
    :param is_filename: treat the value as a filename; UNC paths keep
        their leading backslashes instead of being unescaped.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    inner = value[1:-1]
    # Filenames that look like UNC paths (leading \\) must keep their
    # backslashes: unescaping would turn \\server\share into \server\share.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def parse_list_header(value):
    """Parse a comma separated header list (RFC 2068 Section 2).

    List elements may be quoted strings, which may themselves contain
    commas; surrounding quotes are stripped after parsing::

        >>> parse_list_header('token, "quoted value"')
        ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
def parse_dict_header(value):
    """Parse ``key=value`` pairs from a header list (RFC 2068 Section 2).

    Items without an ``=`` are mapped to ``None``; double-quoted values
    are unquoted after parsing.

    :param value: a string with a dict header.
    :return: a plain :class:`dict` of parsed results.
    """
    parsed = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            parsed[item] = None
            continue
        key, _, val = item.partition('=')
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        parsed[key] = val
    return parsed
def parse_authorization_header(auth):
    """Split an ``Authorization`` header into scheme plus parameters.

    Returns None for empty or malformed headers.  On success the parsed
    parameter dict additionally carries the lower-cased scheme under the
    ``'scheme'`` key.
    """
    if not auth:
        return
    parts = auth.split(None, 1)
    if len(parts) != 2:
        # No parameter section after the scheme -> treat as malformed.
        return
    scheme, info = parts
    result = parse_dict_header(info)
    result['scheme'] = scheme.lower()
    return result
| [
"sam@eavatar.com"
] | sam@eavatar.com |
1faf82a18833514a3a24d5d5fad4632118b38fe7 | 32623f1ce5aa39a35445992ad45c8d2a501a7f50 | /preprocess.py | 33b53fd02cd985d52d40da09d9c572cbf669034f | [
"MIT"
] | permissive | wx-b/BottleneckTransformers | 2b7818b83cb9b0e06763f93968b7d9a629ff589e | d20ef0c64fa2208f543abe12a49b426ca6de480e | refs/heads/main | 2023-03-22T09:13:46.595768 | 2021-03-14T20:41:19 | 2021-03-14T20:41:19 | 343,278,842 | 0 | 0 | MIT | 2021-03-14T20:41:19 | 2021-03-01T03:29:32 | null | UTF-8 | Python | false | false | 1,027 | py | from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
def load_data(args):
    """Build the CIFAR-10 train/test DataLoaders.

    Training data gets the standard augmentation (random crop + flip);
    both splits are normalized with the usual CIFAR-10 channel stats.
    """
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)

    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    train_loader = DataLoader(
        CIFAR10('./data', train=True, transform=train_tf, download=True),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
    )

    test_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    test_loader = DataLoader(
        CIFAR10('./data', train=False, transform=test_tf, download=True),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
    )
    return train_loader, test_loader
"leaderj1001@gmail.com"
] | leaderj1001@gmail.com |
90a19220b2719d297d031d80b73a65a211dfc946 | 1389c5d17fd25457a11bc368c20941709dac8497 | /docs/conf.py | 40b861a9255b1f466af7169ba06d3f7222e01d1b | [
"BSD-2-Clause"
] | permissive | pythonesque/bbcode | 6c975f21795ff5a7e5f73563818ecea26634c3ed | 5c1e68200c727cb27c8d1de18c031eb0de4ce556 | refs/heads/master | 2020-12-28T22:00:56.600070 | 2014-04-04T17:55:47 | 2014-04-04T17:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,726 | py | # -*- coding: utf-8 -*-
#
# bbcode documentation build configuration file, created by
# sphinx-quickstart on Fri May 18 16:41:40 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# Make the package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ----------------------------------------------
# Only the settings that deviate from Sphinx defaults are kept; the
# sphinx-quickstart boilerplate of commented-out defaults was removed.
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'bbcode'
copyright = u'2012, Dan Watson'
version = '1.0'      # short X.Y version
release = '1.0.6'    # full version, including alpha/beta/rc tags

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'bbcodedoc'

# -- Options for LaTeX output -------------------------------------------
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'bbcode.tex', u'bbcode Documentation',
     u'Dan Watson', 'manual'),
]

# -- Options for manual page output -------------------------------------
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'bbcode', u'bbcode Documentation',
     [u'Dan Watson'], 1)
]

# -- Options for Texinfo output -----------------------------------------
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'bbcode', u'bbcode Documentation',
     u'Dan Watson', 'bbcode', 'One line description of project.',
     'Miscellaneous'),
]
| [
"dcwatson@gmail.com"
] | dcwatson@gmail.com |
abb13111e39cab4082e01f4e4d1303ec7b40253b | 3507a2d646ab0f729c717d5239633d70508d32b1 | /dh_abstracts/app/abstracts/migrations/0052_auto_20200501_0918.py | 60491238fd80ef6486b72d44a883ed74f0509568 | [
"MIT"
] | permissive | cmu-lib/dhweb_app | 26c22055afc7685153dd588e1ebabceb9cb782f7 | 8779fb1d6d52a8fb26a955b06b8589d5708589f6 | refs/heads/master | 2023-04-29T13:28:51.233931 | 2022-09-02T00:28:54 | 2022-09-02T00:28:54 | 146,502,577 | 4 | 0 | MIT | 2023-04-21T21:45:46 | 2018-08-28T20:23:16 | Python | UTF-8 | Python | false | false | 1,244 | py | # Generated by Django 3.0.5 on 2020-05-01 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.5).

    Updates the field options (blank/default/help_text/max_length) of
    four free-text fields on the Conference model in the abstracts app.
    """

    dependencies = [
        ('abstracts', '0051_auto_20200501_0918'),
    ]

    operations = [
        # Each AlterField below rewrites one TextField's options; note the
        # larger max_length (200000) on 'notes' compared to the others.
        migrations.AlterField(
            model_name='conference',
            name='attendance',
            field=models.TextField(blank=True, default='', help_text='Summary information about conference attendance, with source links', max_length=20000),
        ),
        migrations.AlterField(
            model_name='conference',
            name='contributors',
            field=models.TextField(blank=True, default='', help_text='Individuals or organizations who contributed data about this conference', max_length=20000),
        ),
        migrations.AlterField(
            model_name='conference',
            name='notes',
            field=models.TextField(blank=True, default='', help_text='Further descriptive information', max_length=200000),
        ),
        migrations.AlterField(
            model_name='conference',
            name='references',
            field=models.TextField(blank=True, default='', help_text='Citations to conference proceedings', max_length=20000),
        ),
    ]
| [
"matthew.d.lincoln@gmail.com"
] | matthew.d.lincoln@gmail.com |
e62265732cd122cd0e26ddbe58b913f07c12cd15 | 4250618abef0d0dcf399f8a2a23e2049c3458ea8 | /website/wiki/plugins/attachments/tests/test_commands.py | 4c587175b7d92e5e8afe5dbe64aeacbdd9311d9a | [
"MIT"
] | permissive | skbly7/serc | 121fd7e88df25213de4d53fce4bd03c2ea448d68 | 4442298ee05c24c3c6bacffdc56a9f6076397cce | refs/heads/master | 2020-12-27T03:18:45.280464 | 2019-05-16T06:10:31 | 2019-05-16T19:13:12 | 53,425,352 | 0 | 2 | MIT | 2019-05-16T19:13:14 | 2016-03-08T16:00:03 | Python | UTF-8 | Python | false | false | 830 | py | from __future__ import unicode_literals
from __future__ import absolute_import
import os
import tempfile
from wiki.tests.test_commands import TestManagementCommands
from .. import models
class TestAttachmentManagementCommands(TestManagementCommands):
    """
    Extends the base management-command tests with attachment fixtures:
    a temp file on disk plus one Attachment and one AttachmentRevision
    pointing at it.
    """

    def setUp(self):
        TestManagementCommands.setUp(self)
        # delete=False keeps the path valid after closing; tearDown unlinks it.
        self.test_file = tempfile.NamedTemporaryFile('w', delete=False, suffix=".txt")
        self.test_file.write("test")
        # Bug fix: close the handle right away.  The original left it open,
        # so the content was never flushed and os.unlink in tearDown could
        # fail on Windows while the file was still held open.
        self.test_file.close()
        self.attachment1 = models.Attachment.objects.create(
            article=self.child1.article
        )
        self.attachment1_revision1 = models.AttachmentRevision.objects.create(
            attachment=self.attachment1,
            file=self.test_file.name,
        )

    def tearDown(self):
        os.unlink(self.test_file.name)
| [
"skbly7@gmail.com"
] | skbly7@gmail.com |
4758ae6cfaaa570ea9da000c7e5425db75ff1082 | 3883a083eb9c5dd5158b78e6c58521e99a76a4b9 | /tests/__init__.py | 1a6e448bd6990e0eb68aa9de46d6a723e0eec56a | [
"MIT"
] | permissive | jeyong/yakut | 1a4f8b68eb1230b6f31a296d0adcab8ff4e7fa02 | 58fa441316fd458a88210c10933c2035db4151f7 | refs/heads/main | 2023-06-20T17:16:14.125961 | 2021-07-24T18:37:32 | 2021-07-24T18:37:32 | 392,887,348 | 0 | 0 | MIT | 2021-08-05T03:09:51 | 2021-08-05T03:09:51 | null | UTF-8 | Python | false | false | 388 | py | # Copyright (c) 2020 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import pathlib

# Canonical project directories for the test suite.
# Please maintain these carefully if you're changing the project's
# directory structure.
_this_file = pathlib.Path(__file__).resolve()
TEST_DIR = _this_file.parent
ROOT_DIR = TEST_DIR.parent
DEPS_DIR = TEST_DIR / "deps"
assert DEPS_DIR.is_dir()
| [
"pavel.kirienko@gmail.com"
] | pavel.kirienko@gmail.com |
ed11b6462e09bf8b151ec3fec9ccf86c077672b6 | a8dad8f2cedc5285d4e873f5ddfe4d865fb0bc84 | /suffixArrays/differentImplementations.py | f30c7c1956462a02e5f3b184c1cc4aee790b7804 | [] | no_license | chrisjdavie/Strings | 78cea1afe5a097e67d4a9dfc96a8292b60880b64 | 58088c70ccd7ba6b12c38debe5d49bcefb6b012c | refs/heads/master | 2016-08-11T16:56:37.675848 | 2016-01-26T20:16:42 | 2016-01-26T20:16:42 | 50,377,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | '''
Created on 24 Jan 2016
@author: chris
'''
def main():
txt = 'banana'
naiveSuffixArray = buildSuffixArray(txt)
print "Naive suffix array construction, O(N**2logN)"
for i in naiveSuffixArray:
print i
# Naive version
def buildSuffixArray(txt):
    """Return the suffix array: suffix start indices in sorted order.

    Naive O(N^2 log N): the sort does O(N log N) comparisons, each of
    which may slice and compare suffixes of length O(N).
    """
    return sorted(range(len(txt)), key=lambda start: txt[start:])
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
"chris.d@theasi.co"
] | chris.d@theasi.co |
4f5bcc2b79acc49ca85c08c8f00327f382be31e5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /3Ekam9jvbNKHDtx4K_22.py | 0fffa75667ed4226e23a6893a14817bcaba98186 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | """
Write a function that takes coordinates of two points on a two-dimensional
plane and returns the length of the line segment connecting those two points.
### Examples
line_length([15, 7], [22, 11]) ➞ 8.06
line_length([0, 0], [0, 0]) ➞ 0
line_length([0, 0], [1, 1]) ➞ 1.41
### Notes
* The order of the given numbers is X, Y.
* This challenge is easier than it looks.
* Round your result to two decimal places.
"""
def line_length(dot1, dot2):
    """Return the length of the segment between two points, rounded to 2 dp.

    Works for points of any (equal) dimension, e.g. 2-D [x, y] pairs.

    >>> line_length([15, 7], [22, 11])
    8.06
    """
    from math import dist  # local import: this snippet has no import section
    return round(dist(dot1, dot2), 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8ae42f7932bc4e5b7eab61d43d4d648eb1e7c1c4 | 4dad1db5f629ddbaa7aa2ebfe84e12ac6ae8ebad | /src/game/logic/mark_drawer.py | 9e9649cd9ff43f4247f35c23bfbaa05f30bc8e4e | [] | no_license | stellarlib/centaurus_old | 70ea8d6d70a490f932fd3c912c2ef76be684afb4 | 92d4f51ebeee56f8b4113c59412356870a11d3a5 | refs/heads/master | 2022-03-18T18:33:38.180192 | 2019-10-27T13:43:09 | 2019-10-27T13:43:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from stellarlib.hex_tool import hex_to_pixel, Hex
from src.icon import IconSprite
from src.color import Color
class MarkDrawer(object):
    """Draws flashing target markers on the game overlay for every marked
    hex position reported by the AI's unit control."""

    FLASH_RATE = 6  # update() calls between icon toggles
    A = 0           # red "target" icon state
    B = 1           # recoloured (white) "target" icon state

    def __init__(self, logic):
        self.logic = logic
        self._marks = []           # cached list of marked hex positions
        self.tick = 0              # ticks elapsed since the last flash
        self.state = MarkDrawer.A
        self.icons = self.init_icons()

    def init_icons(self):
        """Create the two icon variants used for the flashing effect."""
        icons = {
            MarkDrawer.A: IconSprite('target'),
            MarkDrawer.B: IconSprite('target')
        }
        # Recolour one copy so the A/B states alternate red/white.
        icons[MarkDrawer.B].replace_color(Color.RED, Color.WHITE)
        return icons

    def init(self):
        """Register this drawer as an overlay component."""
        self.logic.game.overlay.add_component(self)

    @property
    def mark_map(self):
        return self.logic.ai_control.unit_control.mark_map

    def update(self):
        """Advance the flash timer; toggle the icon every FLASH_RATE ticks."""
        self.tick += 1
        if self.tick == MarkDrawer.FLASH_RATE:
            self.tick = 0
            self.flash()

    def flash(self):
        """Toggle between the A and B icon states."""
        if self.state == MarkDrawer.A:
            self.state = MarkDrawer.B
        else:
            self.state = MarkDrawer.A

    def update_marks(self):
        """Refresh the cached mark positions from the AI's mark map."""
        del self._marks[:]
        # NOTE(review): reaches into the private attribute mark_map._map.
        self._marks.extend(self.mark_map._map)

    def draw(self, display_surface, rel_pos):
        for pos in self._marks:
            self.draw_mark(display_surface.surface, rel_pos, pos)

    def draw_mark(self, surface, rel_pos, pos):
        # Unpack inside the body instead of Python 2-only tuple parameter
        # unpacking (removed by PEP 3113); the positional call signature is
        # unchanged, and the method now works on both Python 2 and 3.
        rx, ry = rel_pos
        px, py = hex_to_pixel(self.logic.game.hex_layout, Hex(*pos))
        x = rx + px
        y = ry + py
        icon = self.icons[self.state]
        icon.draw(surface, (x, y))
| [
"marzecsean@gmail.com"
] | marzecsean@gmail.com |
7738dd876d2768e08b785880012ea33983ed231f | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-dbs/aliyunsdkdbs/request/v20190306/GetDBListFromAgentRequest.py | 084cac476f9472a912185cbf76e4a94e6cd9d365 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,188 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdbs.endpoint import endpoint_data
class GetDBListFromAgentRequest(RpcRequest):
    """RPC request wrapper for the DBS ``GetDBListFromAgent`` API
    (product 'Dbs', version 2019-03-06).

    Each accessor pair below reads or writes one query parameter of the
    outgoing request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Dbs', '2019-03-06', 'GetDBListFromAgent', 'cbs')
        self.set_method('POST')
        # Wire up endpoint resolution when the installed core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_SourceEndpointRegion(self):
        params = self.get_query_params()
        return params.get('SourceEndpointRegion')

    def set_SourceEndpointRegion(self, SourceEndpointRegion):
        self.add_query_param('SourceEndpointRegion', SourceEndpointRegion)

    def get_BackupGatewayId(self):
        params = self.get_query_params()
        return params.get('BackupGatewayId')

    def set_BackupGatewayId(self, BackupGatewayId):
        self.add_query_param('BackupGatewayId', BackupGatewayId)

    def get_ClientToken(self):
        params = self.get_query_params()
        return params.get('ClientToken')

    def set_ClientToken(self, ClientToken):
        self.add_query_param('ClientToken', ClientToken)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_TaskId(self):
        params = self.get_query_params()
        return params.get('TaskId')

    def set_TaskId(self, TaskId):
        self.add_query_param('TaskId', TaskId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
c23fc06fb1457f946585a4c3fff377ef92ceecd6 | e8bf00dba3e81081adb37f53a0192bb0ea2ca309 | /domains/nav/problems/training/problem1030_SD.py | b0f61017c706d830ef340d72709fcdf39d1b3bb2 | [
"BSD-3-Clause"
] | permissive | patras91/rae_release | 1e6585ee34fe7dbb117b084df982ca8a8aed6795 | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | refs/heads/master | 2023-07-13T20:09:41.762982 | 2021-08-11T17:02:58 | 2021-08-11T17:02:58 | 394,797,515 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | __author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
# Per-action durations — presumably simulated clock ticks consumed by the
# timer module; confirm against timer.DURATION's consumers.
DURATION.TIME = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
# Counter-mode durations; identical to DURATION.TIME in this problem.
DURATION.COUNTER = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
# World layout: four locations on a line (1-2-3-4), connected pairwise.
rv.LOCATIONS = [1, 2, 3, 4]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}
# Two doors: d1 on the 3-4 edge (ordinary), d2 on the 1-2 edge (spring).
rv.DOORS = ['d1', 'd2']
rv.DOORLOCATIONS = {(3, 4): 'd1', (1, 2): 'd2'}
rv.DOORTYPES = {'d1': 'ordinary', 'd2': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3', 'r4']
def ResetState():
    """Reset the shared world state to this problem's initial configuration."""
    robots = ['r1', 'r2', 'r3', 'r4']
    state.load = {robot: NIL for robot in robots}
    state.status = {robot: 'free' for robot in robots}
    state.loc = {'r1': 1, 'r2': 3, 'r3': 2, 'r4': 1}
    state.pos = {'o1': 2}
    state.doorStatus = {'d1': 'closed', 'd2': 'closed'}
    state.doorType = {'d1': UNK, 'd2': UNK}
# Tasks handed to the agent, keyed by an integer — presumably the arrival
# time step; verify against the RAE driver that consumes `tasks`.
tasks = {
    6: [['fetch', 'r1', 'o1', 2]],
    7: [['collision', 'r1']],
}
# No exogenous environment events in this problem instance.
eventsEnv = {
}
"patras@umd.edu"
] | patras@umd.edu |
a992c10446423b11c798c56632e5210390ac738d | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Tensorflow/source/numpy/linalg/__init__.py | 69445f541db75347d5ec311d5ef8e63665302920 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,343 | py | """
Core Linear Algebra Tools
=========================
=============== ==========================================================
Linear algebra basics
==========================================================================
norm Vector or matrix norm
inv Inverse of a square matrix
solve Solve a linear system of equations
det Determinant of a square matrix
slogdet Logarithm of the determinant of a square matrix
lstsq Solve linear least-squares problem
pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
matrix_power Integer power of a square matrix
matrix_rank Calculate matrix rank using an SVD-based method
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig Eigenvalues and vectors of a square matrix
eigh Eigenvalues and eigenvectors of a Hermitian matrix
eigvals Eigenvalues of a square matrix
eigvalsh Eigenvalues of a Hermitian matrix
qr QR decomposition of a matrix
svd Singular value decomposition of a matrix
cholesky Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve Solve a linear tensor equation
tensorinv Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
from .info import __doc__
from .linalg import *
from numpy.testing.nosetester import _numpy_tester
# Expose NumPy's nose-based test and benchmark runners for this subpackage.
test = _numpy_tester().test
bench = _numpy_tester().bench
| [
"master@MacBook-Pro-admin.local"
] | master@MacBook-Pro-admin.local |
4df6e4068dd99c143bbb395b712e899f3a153fb8 | bb41814dc79f56a082a777e17ed31320db43edf4 | /math/0x02-calculus/17-integrate.py | 7595123026899449072db63392aba86a9688640a | [] | no_license | garimasinghgryffindor/holbertonschool-machine_learning | a92c619b6ad2d110ed97b33fa9903f5134c96866 | 856ee36006c2ff656877d592c2ddb7c941d63780 | refs/heads/master | 2023-08-01T09:58:13.863062 | 2020-11-28T00:50:55 | 2020-11-28T00:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | #!/usr/bin/env python3
"""
A function that calculates the integral of a polynomial
"""
def poly_integral(poly, C=0):
    """
    Returns a list of the integral

    >>> poly = [5, 3, 0, 1]
    >>> print(poly_integral(poly))
    [0, 5, 1.5, 0, 0.25]
    """
    # Validate inputs: poly must be a non-empty list and C a plain integer
    # (bool is rejected because type() comparison is exact).
    if type(poly) is not list or len(poly) == 0:
        return None
    if type(C) is not int:
        return None
    # The integral of the zero polynomial is just the constant term.
    if poly == [0]:
        return [C]
    result = [C]
    for power, coeff in enumerate(poly, start=1):
        # Any coefficient that is not a plain int or float invalidates poly.
        if type(coeff) is not int and type(coeff) is not float:
            return None
        term = coeff / power
        # Store whole-valued terms as ints (e.g. 5.0 -> 5).
        result.append(int(term) if term % 1 == 0 else term)
    return result
| [
"kenneth.ca95@gmail.com"
] | kenneth.ca95@gmail.com |
bb331262c72c30097426d7e5893c315247f54530 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/dkcgeo002/question1.py | 3bd13139130a25eac4125612bc0ecbdb925e0152 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | __author__ = 'George de Kock'
""" Printing Strings
2014-4-20 """
# Collect strings until the sentinel "DONE", tracking the longest one seen.
names = []
length = 0
print("Enter strings (end with DONE):\n")
while True:
    nextstr = input("")
    if nextstr == "DONE":
        break
    names.append(nextstr)
    length = max(length, len(nextstr))
# The width is interpolated into the format spec, so a string works too.
length = str(length)
print("Right-aligned list:")
for x in names:
    print("{0:>{1}}".format(x, length))
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
1ad967c9e59e5e94e6d9cdfa1c0a4db9408bb056 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /gfsa/datasets/padding_calibration_test.py | 94d0cd41593d6cc232526608b425428d33fc1b6b | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 2,131 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gfsa.datasets.google.random_python.padding_calibration."""
from absl.testing import absltest
import gast
from gfsa import automaton_builder
from gfsa import py_ast_graphs
from gfsa.datasets import graph_bundle
from gfsa.datasets import padding_calibration
class PaddingCalibrationTest(absltest.TestCase):
  """Smoke test: calibrate_padding should run end-to-end without raising."""
  def test_calibrate_padding(self):
    # Make sure padding calibration doesn't error out, so that it works when
    # run interactively.
    def build_example(size):
      # Builds a module of `size` constant statements, plus one extra edge
      # from every odd-indexed statement back to its predecessor.
      tree = gast.Module(
          body=[gast.Constant(value=i, kind=None) for i in range(size)],
          type_ignores=[])
      py_graph, ast_to_node_id = (py_ast_graphs.py_ast_to_graph(tree))
      edges = []
      for i in range(1, size, 2):
        edges.append((ast_to_node_id[id(tree.body[i])],
                      ast_to_node_id[id(tree.body[i - 1])], 1))
      return graph_bundle.convert_graph_with_edges(py_graph, edges,
                                                   py_ast_graphs.BUILDER)
    # Fixed padding targets; sizes are rounded up to powers of two.
    padding_calibration.calibrate_padding(
        example_builder=build_example,
        desired_sizes=graph_bundle.PaddingConfig(
            static_max_metadata=automaton_builder.EncodedGraphMetadata(
                num_nodes=64, num_input_tagged_nodes=64),
            max_initial_transitions=128,
            max_in_tagged_transitions=256,
            max_edges=64,
        ),
        samples=50,
        optimization_max_steps=500,
        round_to_powers_of_two=True)
if __name__ == '__main__':
  absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
18caddf54f82ac9aac3080ca35c1274d616ae614 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/315/99246/submittedfiles/principal.py | f2b8783bc70994b635e87974903692d8ebcb10a2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # -*- coding: utf-8 -*-
from minha_bib import *
# START HERE (original comment: "COMECE AQUI ABAIXO")
# 3x3 board for a tic-tac-toe-style game, initially blank.
matriz = [[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]
# Show the first row, read the player's move into the top-left cell, then
# show the row again.  The prompt text contains a typo ('dgite' for
# 'digite') but is runtime output, so it is left untouched here.
print (matriz[0][0] + '|' + matriz[0][1] + '|' + matriz[0][2])
matriz[0][0] = input('dgite sua jogada: ')
print (matriz[0][0] + '|' + matriz[0][1] + '|' + matriz[0][2])
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2a679ec14fe12c6dfd467cc8df8bb7d1ce4cd3fc | eb420ab51af4001d15058c9f485e9122bd85bb4b | /neural_sp/models/seq2seq/decoders/build.py | 348cf0c9c99d52d56024678267aba2003842b91e | [
"Apache-2.0"
] | permissive | many-hats/neural_sp | 23cb66b37343e1f36759513cf565bcddf3e1ed19 | 2f7d0ca0af3097eb2a954ad10aa3682cabe03940 | refs/heads/master | 2022-10-16T19:04:15.733439 | 2020-06-12T01:49:11 | 2020-06-12T01:49:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,045 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Select an decoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def build_decoder(args, special_symbols, enc_n_units, vocab,
                  ctc_weight, ctc_fc_list, global_weight, external_lm=None,
                  dec_dir='fwd'):
    """Instantiate the decoder network selected by ``args.dec_type``.

    Args:
        args: namespace of model hyperparameters (argparse-style).
        special_symbols: special-token configuration forwarded to the decoder.
        enc_n_units (int): dimensionality of the encoder outputs.
        vocab (int): output vocabulary size.
        ctc_weight (float): CTC loss weight for this decoder.
        ctc_fc_list (list): hidden sizes for the CTC output head.
        global_weight (float): task weight of this decoder in multi-task training.
        external_lm: pre-trained LM used for fusion/initialization (optional).
        dec_dir (str): decoding direction, 'fwd' or 'bwd'.
            BUG FIX: previous revisions evaluated ``backward=(dir == 'bwd')``,
            comparing the *builtin* ``dir`` function against a string, which is
            always False.  The new parameter defaults to 'fwd', preserving that
            behavior for existing callers while making 'bwd' reachable.

    Returns:
        decoder: a TransformerDecoder, RNNTransducer, or RNNDecoder instance.
    """
    if args.dec_type in ['transformer', 'transformer_xl']:
        from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder
        decoder = TransformerDecoder(
            special_symbols=special_symbols,
            enc_n_units=enc_n_units,
            attn_type=args.transformer_attn_type,
            n_heads=args.transformer_n_heads,
            n_layers=args.dec_n_layers,
            d_model=args.transformer_d_model,
            d_ff=args.transformer_d_ff,
            d_ff_bottleneck_dim=getattr(args, 'transformer_d_ff_bottleneck_dim', 0),
            layer_norm_eps=args.transformer_layer_norm_eps,
            ffn_activation=args.transformer_ffn_activation,
            pe_type=args.transformer_dec_pe_type,
            vocab=vocab,
            tie_embedding=args.tie_embedding,
            dropout=args.dropout_dec,
            dropout_emb=args.dropout_emb,
            dropout_att=args.dropout_att,
            dropout_layer=args.dropout_dec_layer,
            dropout_head=args.dropout_head,
            lsm_prob=args.lsm_prob,
            ctc_weight=ctc_weight,
            ctc_lsm_prob=args.ctc_lsm_prob,
            ctc_fc_list=ctc_fc_list,
            backward=(dec_dir == 'bwd'),
            global_weight=global_weight,
            mtl_per_batch=args.mtl_per_batch,
            param_init=args.transformer_param_init,
            memory_transformer=args.dec_type == 'transformer_xl',
            mem_len=args.mem_len,
            mocha_chunk_size=args.mocha_chunk_size,
            mocha_n_heads_mono=args.mocha_n_heads_mono,
            mocha_n_heads_chunk=args.mocha_n_heads_chunk,
            mocha_init_r=args.mocha_init_r,
            mocha_eps=args.mocha_eps,
            mocha_std=args.mocha_std,
            mocha_no_denominator=args.mocha_no_denominator,
            mocha_1dconv=args.mocha_1dconv,
            mocha_quantity_loss_weight=args.mocha_quantity_loss_weight,
            mocha_head_divergence_loss_weight=args.mocha_head_divergence_loss_weight,
            latency_metric=args.mocha_latency_metric,
            latency_loss_weight=args.mocha_latency_loss_weight,
            mocha_first_layer=args.mocha_first_layer,
            share_chunkwise_attention=getattr(args, 'share_chunkwise_attention', False),
            external_lm=external_lm,
            lm_fusion=args.lm_fusion)
    elif args.dec_type in ['lstm_transducer', 'gru_transducer']:
        from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer
        decoder = RNNTransducer(
            special_symbols=special_symbols,
            enc_n_units=enc_n_units,
            rnn_type=args.dec_type,
            n_units=args.dec_n_units,
            n_projs=args.dec_n_projs,
            n_layers=args.dec_n_layers,
            bottleneck_dim=args.dec_bottleneck_dim,
            emb_dim=args.emb_dim,
            vocab=vocab,
            dropout=args.dropout_dec,
            dropout_emb=args.dropout_emb,
            lsm_prob=args.lsm_prob,
            ctc_weight=ctc_weight,
            ctc_lsm_prob=args.ctc_lsm_prob,
            ctc_fc_list=ctc_fc_list,
            # The LM is only used to initialize the prediction network.
            external_lm=external_lm if args.lm_init else None,
            global_weight=global_weight,
            mtl_per_batch=args.mtl_per_batch,
            param_init=args.param_init)
    else:
        from neural_sp.models.seq2seq.decoders.las import RNNDecoder
        decoder = RNNDecoder(
            special_symbols=special_symbols,
            enc_n_units=enc_n_units,
            rnn_type=args.dec_type,
            n_units=args.dec_n_units,
            n_projs=args.dec_n_projs,
            n_layers=args.dec_n_layers,
            bottleneck_dim=args.dec_bottleneck_dim,
            emb_dim=args.emb_dim,
            vocab=vocab,
            tie_embedding=args.tie_embedding,
            attn_type=args.attn_type,
            attn_dim=args.attn_dim,
            attn_sharpening_factor=args.attn_sharpening_factor,
            attn_sigmoid_smoothing=args.attn_sigmoid,
            attn_conv_out_channels=args.attn_conv_n_channels,
            attn_conv_kernel_size=args.attn_conv_width,
            attn_n_heads=args.attn_n_heads,
            dropout=args.dropout_dec,
            dropout_emb=args.dropout_emb,
            dropout_att=args.dropout_att,
            lsm_prob=args.lsm_prob,
            ss_prob=args.ss_prob,
            ss_type=args.ss_type,
            ctc_weight=ctc_weight,
            ctc_lsm_prob=args.ctc_lsm_prob,
            ctc_fc_list=ctc_fc_list,
            mbr_training=args.mbr_training,
            mbr_ce_weight=args.mbr_ce_weight,
            external_lm=external_lm,
            lm_fusion=args.lm_fusion,
            lm_init=args.lm_init,
            backward=(dec_dir == 'bwd'),
            global_weight=global_weight,
            mtl_per_batch=args.mtl_per_batch,
            param_init=args.param_init,
            mocha_chunk_size=args.mocha_chunk_size,
            mocha_n_heads_mono=args.mocha_n_heads_mono,
            mocha_init_r=args.mocha_init_r,
            mocha_eps=args.mocha_eps,
            mocha_std=args.mocha_std,
            mocha_no_denominator=args.mocha_no_denominator,
            mocha_1dconv=args.mocha_1dconv,
            mocha_quantity_loss_weight=args.mocha_quantity_loss_weight,
            latency_metric=args.mocha_latency_metric,
            latency_loss_weight=args.mocha_latency_loss_weight,
            gmm_attn_n_mixtures=args.gmm_attn_n_mixtures,
            replace_sos=args.replace_sos,
            distillation_weight=args.distillation_weight,
            discourse_aware=args.discourse_aware)
    return decoder
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
8e59e209dbb4f149c88e33807191c9e8784e98b5 | 65d321b77b4d0134ce094ed003dee7b62411d01f | /bird_sight.py | 6aea975491acf0ce7201d75bd8f1d67db5481191 | [] | no_license | rohanpahwa1/hacker_rank_solutions | 7bb1d62ed18a51d7f067e4b38b66ae60a517d2d3 | 5e93b873fe3e9eb317d3740fb566e539683694ff | refs/heads/master | 2022-12-25T06:45:09.441465 | 2020-05-02T16:03:32 | 2020-05-02T16:03:32 | 300,040,304 | 0 | 0 | null | 2020-09-30T19:38:14 | 2020-09-30T19:38:14 | null | UTF-8 | Python | false | false | 335 | py | def count(li):
maxcount,maxvalue=0,0
for i in li:
count=0
for j in range(len(li)):
if li[j]==li[i]:
count=count+1
if count>maxcount:
maxcount=count
maxvalue=li[i]
return maxvalue
# Read the element count (unused — the list length is taken from the split
# input itself), then the space-separated values, and print the mode.
n=int(input())
li=list(map(int,input().split()))
print(count(li))
| [
"coderrohanpahwa@gmail.com"
] | coderrohanpahwa@gmail.com |
49affd70ece956f427157791e430534c5d08461b | dc8443495f48e3fa5109ba1d75864ce6b0849405 | /django_contactme/models.py | cbef2838d8c1aeeced541fe2fdb34a75390190ae | [
"BSD-2-Clause"
] | permissive | pyghassen/django-contactme | 66d40a0749be1874b4b7f0a55131c748c7789822 | 8bc33fc785c058279a295e386c8335cdfe12fd54 | refs/heads/master | 2021-01-16T22:03:50.193700 | 2013-11-12T22:00:24 | 2013-11-12T22:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | import datetime
from django.db import models
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
CONTACTME_MSG_MAX_LEN = getattr(settings,'CONTACTME_MSG_MAX_LEN',3000)
class ContactMsg(models.Model):
    """
    An incoming message from a site visitor.
    """
    # Site the message was submitted on (multi-site support).
    site = models.ForeignKey(Site)
    name = models.CharField(_("Contact's name"), max_length=100)
    email = models.EmailField(_("Contact's email address"))
    message = models.TextField(_("Message"), max_length=CONTACTME_MSG_MAX_LEN)
    # Filled automatically on first save() when left as None.
    submit_date = models.DateTimeField(_("Date/Time submitted"), default=None)
    ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
    class Meta:
        db_table = "contactme_contact_msg"
        ordering = ('submit_date',)
        verbose_name = _('contact message')
        verbose_name_plural = _('contact messages')
    def __unicode__(self):
        # Python 2-style display: "name: first 50 chars of message...".
        return "%s: %s..." % (self.name, self.message[:50])
    def save(self, *args, **kwargs):
        # Stamp the submission time on first save.
        # NOTE(review): uses a naive local datetime; with USE_TZ enabled,
        # django.utils.timezone.now() would be the aware equivalent — confirm.
        if self.submit_date is None:
            self.submit_date = datetime.datetime.now()
        super(ContactMsg, self).save(*args, **kwargs)
    def get_as_text(self):
        """
        Return this comment as plain text. Useful for emails.
        """
        d = {
            'user': self.name,
            'date': self.submit_date,
            'message': self.message,
            'domain': self.site.domain,
        }
        return _('Sent by %(user)s at %(date)s\n\n%(message)s\n\nhttp://%(domain)s') % d
| [
"danirus@eml.cc"
] | danirus@eml.cc |
c3e08ff5edb4f871c90cd9ebff9ee144916c75c9 | 76e62ddbfdfba19c80b37e855a4df67672ef0808 | /PINp/2014/Platonova Olga/task_1_21.py | 3b9caf7f782c735f4194bc70c6c052dc45a5715a | [
"Apache-2.0"
] | permissive | stasvorosh/pythonintask | 9d30f3cd492e89783b7221402375c1ebe4690baa | 8169ed26510022fe0d589f4013f11749131957df | refs/heads/master | 2021-01-17T16:49:32.778063 | 2016-10-10T14:08:04 | 2016-10-10T14:08:04 | 52,255,539 | 6 | 0 | null | 2016-02-22T07:33:16 | 2016-02-22T07:33:15 | null | UTF-8 | Python | false | false | 651 | py | # Задача 1. Вариант 21.
# Task: write a program that reports the occupation and the stage name used
# by Mikhail Nikolaevich Rumyantsev, then waits for the user to press Enter
# before exiting.  (Translated from the original Russian assignment text.)
# Platonova O. A.
# 29.05.2016
print("Михаил Николаевич Румянцев более известен, как клоун Карандаш.")
input("\n\nНажмите Enter для выхода.")
"stasyan.v@gmail.com"
] | stasyan.v@gmail.com |
fb88c9ea0fbb2e3c93362900c14ae4ed41e7dea9 | 48e08c7d5856c35492500b6b01d3d72a31f58ffc | /Leetcode/0151-0200/0169-majority-element.py | 5b63395e26a663a73b44602c0dc2ca046b72b0ac | [
"MIT"
] | permissive | MiKueen/Data-Structures-and-Algorithms | 8d8730e539e1c112cbd4a51beae9e1c3e2184e63 | 8788bde5349f326aac0267531f39ac7a2a708ee6 | refs/heads/master | 2021-07-18T17:16:39.948239 | 2020-09-13T15:44:37 | 2020-09-13T15:44:37 | 212,309,543 | 0 | 1 | MIT | 2019-10-06T16:24:43 | 2019-10-02T10:19:07 | Python | UTF-8 | Python | false | false | 700 | py | '''
Author : MiKueen
Level : Easy
Problem Statement : Majority Element
Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
You may assume that the array is non-empty and the majority element always exist in the array.
Example 1:
Input: [3,2,3]
Output: 3
Example 2:
Input: [2,2,1,1,1,2,2]
Output: 2
'''
class Solution:
    def majorityElement(self, nums: List[int]) -> int:
        """Return the element appearing more than len(nums) // 2 times.

        Single pass: bail out as soon as any value's running count crosses
        the threshold.  (`List` must be imported from typing by the module.)
        """
        threshold = len(nums) // 2
        counts = {}
        for value in nums:
            tally = counts.get(value, 0) + 1
            if tally > threshold:
                return value
            counts[value] = tally
| [
"keshvi2298@gmail.com"
] | keshvi2298@gmail.com |
85c60ef7055f79a086af1c40cb8c1b03a14575d5 | 8ce3fccd60d8491763729f08158428e39fae0136 | /DMVProject/DMVProject/settings.py | f3c6f7881b7011ff79f6502957fe523c6fff4c42 | [
"Apache-2.0"
] | permissive | cs-fullstack-master/django-authentication-ic | 32c96353ecf043015aaa6e653ec7c36ff1a9f5e5 | 4147fb2f5dd24cb6c392811a64828127a188fc13 | refs/heads/master | 2020-04-26T15:01:42.644950 | 2019-10-10T14:05:18 | 2019-10-10T14:05:18 | 173,634,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | """
Django settings for DMVProject project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = '5_f4_(jx%bakt6_a8q9*^(a1@cblb(x&jvs=vep+1-8eukn^5d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# 'DMVApp' is the project's local application; the rest are Django defaults.
INSTALLED_APPS = [
    'DMVApp',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DMVProject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DMVProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"kenn+git@code-crew.org"
] | kenn+git@code-crew.org |
35e683f41ad6813c8089049a1ed43996d4a831af | bcb36baf1b3d3eceffba383f72c2b5335cc7048d | /python_workout/01_numeric_types/number_guessing_game/!python3 number_guessing_game.py | 5a71fe4ef7ae6708c03cf4022442cff218c6c3da | [] | no_license | paulghaddad/solve-it | 0aa1400cefab783f4ea757921811668fb2c9477c | e0f72be0fca82bc0378def5499f7158bafff975b | refs/heads/master | 2023-01-24T03:46:24.285793 | 2021-07-06T19:44:29 | 2021-07-06T19:44:29 | 200,406,482 | 2 | 0 | null | 2023-01-06T13:53:43 | 2019-08-03T18:07:53 | Python | UTF-8 | Python | false | false | 152 | py | import random
def start_game():
    """Start a round: draw the secret number in [0, 100] and print it."""
    secret_number = random.randint(0, 100)
    print(secret_number)
# Run a round only when executed as a script, not when imported.
if __name__ == '__main__':
    start_game()
| [
"paulh16@gmail.com"
] | paulh16@gmail.com |
5dae1ba5cb09b8ee35c5387b0f76546e386b0b05 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/language_code_error.py | a0726e95c29d72848790feafb7e84459bf2e249d | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's messages with proto-plus so they marshal against
# the google.ads.googleads.v7 protobuf package.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.errors',
    marshal='google.ads.googleads.v7',
    manifest={
        'LanguageCodeErrorEnum',
    },
)
# Auto-generated proto-plus wrapper; the proto.Message/proto.Enum metaclasses
# read this class body, so its structure must not be changed by hand.
class LanguageCodeErrorEnum(proto.Message):
    r"""Container for enum describing language code errors. """
    class LanguageCodeError(proto.Enum):
        r"""Enum describing language code errors."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        LANGUAGE_CODE_NOT_FOUND = 2
        INVALID_LANGUAGE_CODE = 3
# Public API of this module, as declared in the proto-plus manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
f765574d5ef69fa926857e9add6363407525bb75 | 9469358a1efb300fc5fdd8de5c17a288fc780c92 | /buildsimlearn/random_forest_training.py | ad6a2c7131dc6ca4b2d0f348b2cbca57093c7cda | [
"MIT"
] | permissive | shihao-zhang/buildsimhub_python_api | 3a5fc4668f9385d4b98a8eaa3ab7211aa49bef74 | daa0b7d2e92820b6b1cdaa981fb9f0d88c375012 | refs/heads/master | 2020-03-28T02:40:34.693984 | 2018-09-08T18:30:48 | 2018-09-08T18:30:48 | 147,588,888 | 0 | 0 | MIT | 2018-09-05T22:58:49 | 2018-09-05T22:58:49 | null | UTF-8 | Python | false | false | 3,193 | py | """
AUTHOR: Tracy Ruan
Date: 6/30/2018
What is this script for?
This script extract results from a parametric study from BuildSimCloud, and then use the results to train a random
forest tree model. The training accuracy will be demonstrated by MAE and MAPE, and predicted vs. actual plot will be
used for comparison.
How to use this script?
Replace the project_api_key and model_api_key in this script. Make sure that the model_api_key is the one provided
after a successful parametric run.
Specify the number of trees in the forest (n_estimate)
Package required:
pandas, numpy, sci-kit learn, matplotlib
"""
import BuildSimHubAPI as bsh_api
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
"""
User Input
"""
project_api_key = 'f98aadb3-254f-428d-a321-82a6e4b9424c'
model_api_key = 'aa09eabf-693f-4437-88cc-a522a25fba01'
# The number of trees in the forest.
n_estimate = 1000
"""
Script
"""
bsh = bsh_api.BuildSimHubAPIClient()
results = bsh.parametric_results(project_api_key, model_api_key)
# Collect results
result_dict = results.net_site_eui()
result_unit = results.last_parameter_unit
for i in range(len(result_dict)):
tempstr = result_dict["value"]
dict = {}
for key in result_dict:
if key == "model":
templist = result_dict[key]
tempdict = {}
for i in range(len(templist)):
tempstr = result_dict["model"][i]
templist = tempstr.split(',')
for j in range(len(templist)):
pair = templist[j].split(': ')
if pair[0] not in tempdict:
tempdict[pair[0]] = []
tempdict[pair[0]].append(pair[1])
for subkey in tempdict:
dict[subkey] = tempdict[subkey]
else:
dict[key] = result_dict[key]
df = pd.DataFrame(dict)
values = np.array(df['value'])
# axis 1 refers to the columns
features_old = df.drop('value', axis=1)
features = features_old.drop('model_plot', axis=1)
feature_list = list(features.columns)
features = np.array(features)
print(feature_list)
# Split the data into training and testing sets
train_features, test_features, train_values, test_values = train_test_split(features, values)
# train models
rf = RandomForestRegressor(n_estimators=n_estimate)
rf.fit(train_features, train_values)
# predict values using rf on test data
predictions = rf.predict(test_features)
# Calculate the absolute errors
errors = abs(predictions - test_values)
# mean absolute error (MAE) is a measure of difference between two continuous variables
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
# Determine Performance Metrics
# Calculate mean absolute percentage error (MAPE)
mape = 100 * (errors / test_values)
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
# Actual value VS predicted value plot
plt.scatter(test_values, predictions, s=1)
plt.plot([min(test_values), max(test_values)], [min(predictions), max(predictions)], 'red', linewidth=1)
plt.ylabel('Actual Value')
plt.xlabel('Predicted Value')
plt.title('Actual VS Predicted')
plt.show()
| [
"weilix@alumni.cmu.edu"
] | weilix@alumni.cmu.edu |
db0ac2e92149a5025814976f491f49f3d61f8d5f | e064d46561f3bf02c3036f0487356516974bf9d7 | /network/network/migrations/0015_auto_20200823_1521.py | f6122c885d6cb77fce81b830d250bcbd8f873cf2 | [] | no_license | Koshir0/CS50-WEB-PROGRAMMING-PROJECTS | 4629575f904e60b874988c4f779a607a7fdca338 | 55cc2bdc199e070930f6c54e0fb82340a2beb7cd | refs/heads/main | 2022-12-28T20:43:49.119527 | 2020-10-18T12:57:01 | 2020-10-18T12:57:01 | 305,100,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # Generated by Django 3.0.8 on 2020-08-23 15:21
import datetime
from django.conf import settings
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0.8): removes the
    ``Person`` model and alters two fields; no data migration."""

    dependencies = [
        ('network', '0014_auto_20200823_1047'),
    ]

    operations = [
        # Drop the obsolete Person model entirely.
        migrations.DeleteModel(
            name='Person',
        ),
        # Follower.users becomes optional; reverse accessor is "following".
        migrations.AlterField(
            model_name='follower',
            name='users',
            field=models.ManyToManyField(blank=True, related_name='following', to=settings.AUTH_USER_MODEL),
        ),
        # NOTE(review): the default below is a datetime frozen at
        # makemigrations time, not a callable such as timezone.now --
        # presumably unintended; confirm against the model definition.
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 23, 15, 21, 45, 798323, tzinfo=utc)),
        ),
    ]
| [
"pentiumdesu@protonmail.com"
] | pentiumdesu@protonmail.com |
77deee6cde50602dcf0e33c1dc91f65226cc1bf3 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4129/codes/1723_2498.py | 5772432d0b8d595d73c444b8b91d7f7b7c35c5aa | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | nA = int(input("Numero de habitantes da cidade A:"))
nB = int(input("Numero de habitantes da cidade B:"))
pA = float(input("Percentual de crescimento populacional da cidade A:"))
pB = float(input("Percentual de crescimento populacional da cidade B:"))

# Convert the growth percentages to per-year fractions.
pA /= 100
pB /= 100

# Count whole years until city A's population reaches city B's.
ano = 0
while nA < nB:
    nA += nA * pA
    nB += nB * pB
    ano += 1
print(ano)
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
e809698b9a88e16c651363f39453460aef995741 | 36163a05070a9cd0daab7c8b18b49053e0365bed | /src/python/WMCore/PilotManager/plugin/PilotLSFSubmitter.py | e55ed316090572e8e85d07d8ff649a6e9087e6e4 | [] | no_license | sryufnal/WMCore | 472b465d1e9cff8af62b4f4a4587fd3927f82786 | 9575691bd7383e4de8bcdf83714ec71b3fec6aa7 | refs/heads/master | 2021-01-16T00:27:39.208561 | 2011-09-09T20:36:15 | 2011-09-09T20:36:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | from ProdAgent.Resources.LSF import LSFConfiguration
from PilotManager.CommonUtil import executeCommand
#from JobSubmitter.Submitters.BulkSubmitterInterface import makeNonBlocking
from JobSubmitter.JSException import JSException
import datetime
import logging
import sys
import os
class PilotLSFSubmitter:
    """Submits pilot jobs to LSF via the ``bsub`` command."""

    def __init__(self):
        pass

    def submitPilot(self, taskName, exe, exePath, inputSandbox):
        """Build and execute a ``bsub`` submission for the pilot script.

        ``taskName`` and ``inputSandbox`` are currently unused but kept
        for interface compatibility with other submitter plugins.
        Raises JSException when the submission command fails.
        """
        # BUGFIX: ``errno`` is referenced below but was never imported
        # at module level, so any mkdir failure raised NameError.
        import errno

        shellScript = exe
        scriptPath = exePath
        # Start building the LSF submission command.
        lsfSubmitCommand = 'bsub'
        # TODO: read the queue name from configuration.
        lsfSubmitCommand += ' -q 8nh80 '
        # Create the log directory (one directory per hour).
        # TODO: get the path information from the configuration.
        lsfLogDir = '/afs/cern.ch/user/k/khawar/scratch2/khawar/logs'
        if lsfLogDir != 'None':
            now = datetime.datetime.today()
            lsfLogDir += '/%s' % now.strftime("%Y%m%d%H")
            try:
                os.mkdir(lsfLogDir)
                logging.debug("Created directory %s" % lsfLogDir)
            except OSError as err:
                # Suppress the LSF log unless the error is simply that
                # the directory already exists.
                if err.errno != errno.EEXIST or not os.path.isdir(lsfLogDir):
                    logging.debug("Can't create directory %s, turning off LSF log" % lsfLogDir)
                    lsfLogDir = 'None'
        lsfSubmitCommand += ' -g %s' % LSFConfiguration.getGroup()
        if lsfLogDir == "None":
            lsfSubmitCommand += ' -oo /dev/null'
        else:
            lsfSubmitCommand += ' -oo %s/%s.lsf.log' % (lsfLogDir, 'pilot')
        lsfSubmitCommand += ' < %s' % os.path.join(scriptPath, shellScript)
        failureList = []
        try:
            output = executeCommand(lsfSubmitCommand)
            logging.info("PilotManager.submitPilotJob: %s " % output)
            logging.info("PilotManager.submitPilotJob: %s " % lsfSubmitCommand)
        except RuntimeError:
            failureList.append('jobSpec')
        if len(failureList) > 0:
            raise JSException("Submission Failed", FailureList=failureList)
| [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
8d17ac9ea8540fcad6f3379d3b9c0310f2bdb19d | ce8728986ab5c180a4be7241bd46e4146b66b1ac | /zinnia/markups.py | 737cc66cf3bfb67a13d8d7aaeb2a0e7f80ea139b | [
"BSD-3-Clause"
] | permissive | lpe234/django-blog-zinnia | 30f36c30ae61d35c2a16f42e6f88b25d7063aec0 | 0f531dfcf181d5641c01e83397f92ee415b126e5 | refs/heads/develop | 2021-01-16T21:31:06.297887 | 2014-07-19T17:37:07 | 2014-07-19T17:37:07 | 22,424,900 | 0 | 0 | BSD-3-Clause | 2019-01-08T09:33:27 | 2014-07-30T11:31:29 | null | UTF-8 | Python | false | false | 1,777 | py | """
Set of "markup" functions to transform plain text into HTML for Zinnia.
Code originally provided by django.contrib.markups
"""
import warnings
from django.utils.encoding import force_text
from django.utils.encoding import force_bytes
from zinnia.settings import MARKDOWN_EXTENSIONS
from zinnia.settings import RESTRUCTUREDTEXT_SETTINGS
def textile(value):
    """
    Render *value* as Textile markup and return HTML.

    When the third-party ``textile`` package is missing, emit a
    RuntimeWarning and hand back *value* unchanged.
    """
    try:
        import textile as textile_module
    except ImportError:
        warnings.warn("The Python textile library isn't installed.",
                      RuntimeWarning)
        return value
    return textile_module.textile(force_bytes(value),
                                  encoding='utf-8', output='utf-8')
def markdown(value, extensions=MARKDOWN_EXTENSIONS):
    """
    Render *value* as Markdown, enabling the python-markdown extensions
    named in the comma-separated *extensions* string.

    When the ``markdown`` package is missing, emit a RuntimeWarning and
    hand back *value* unchanged.
    """
    try:
        import markdown as markdown_module
    except ImportError:
        warnings.warn("The Python markdown library isn't installed.",
                      RuntimeWarning)
        return value
    # Drop empty names produced by stray commas.
    extension_names = [name for name in extensions.split(',') if name]
    return markdown_module.markdown(force_text(value),
                                    extension_names, safe_mode=False)
def restructuredtext(value, settings=RESTRUCTUREDTEXT_SETTINGS):
    """
    Render *value* as reStructuredText via docutils, with optional
    custom writer settings.

    When docutils is missing, emit a RuntimeWarning and hand back
    *value* unchanged.
    """
    try:
        from docutils.core import publish_parts
    except ImportError:
        warnings.warn("The Python docutils library isn't installed.",
                      RuntimeWarning)
        return value
    document = publish_parts(source=force_bytes(value),
                             writer_name='html4css1',
                             settings_overrides=settings)
    # Only the body fragment is wanted, not the full HTML page.
    return force_text(document['fragment'])
| [
"fantomas42@gmail.com"
] | fantomas42@gmail.com |
c2677e6d4766eab1d542f96853a0a012f069af4d | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /rllib/agents/marwil/marwil_torch_policy.py | e88e5e312f4039b4500907b4980e817edf215ae8 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 3,105 | py | import ray
from ray.rllib.agents.marwil.marwil_tf_policy import postprocess_advantages
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import explained_variance
torch, _ = try_import_torch()
class ValueNetworkMixin:
    """Attaches a ``_value`` helper that evaluates the model's value
    head for a single (obs, prev_action, prev_reward, *state) sample."""

    def __init__(self):
        def value(ob, prev_action, prev_reward, *state):
            # Forward pass on a batch of size one; is_training=False.
            input_dict = {
                SampleBatch.CUR_OBS: torch.Tensor([ob]).to(self.device),
                SampleBatch.PREV_ACTIONS: torch.Tensor([prev_action]).to(
                    self.device),
                SampleBatch.PREV_REWARDS: torch.Tensor([prev_reward]).to(
                    self.device),
                "is_training": False,
            }
            state_in = [torch.Tensor([s]).to(self.device) for s in state]
            seq_lens = torch.Tensor([1]).to(self.device)
            model_out, _ = self.model(input_dict, state_in, seq_lens)
            return self.model.value_function()[0]

        self._value = value
def marwil_loss(policy, model, dist_class, train_batch):
    """MARWIL loss: exponentially weighted behaviour cloning plus a
    value-function regression term.

    Side effects: stores ``p_loss``/``v_loss``/``total_loss``/
    ``explained_variance`` on ``policy`` (read later by ``stats``) and
    updates the running advantage-norm estimate ``policy.ma_adv_norm``
    in place.
    """
    model_out, _ = model.from_batch(train_batch)
    action_dist = dist_class(model_out, model)
    state_values = model.value_function()
    advantages = train_batch[Postprocessing.ADVANTAGES]
    actions = train_batch[SampleBatch.ACTIONS]
    # Value loss: regress V(s) onto the advantage targets.
    policy.v_loss = 0.5 * torch.mean(torch.pow(state_values - advantages, 2.0))
    # Policy loss.
    # Advantage estimation relative to the current value predictions.
    adv = advantages - state_values
    # Update the averaged advantage norm in place (slow-moving average
    # of mean(adv^2) with rate 1e-6); excluded from gradient updates.
    policy.ma_adv_norm.add_(
        1e-6 * (torch.mean(torch.pow(adv, 2.0)) - policy.ma_adv_norm))
    # Exponentially weighted advantages; 1e-8 guards against a zero norm.
    exp_advs = torch.exp(policy.config["beta"] *
                         (adv / (1e-8 + torch.pow(policy.ma_adv_norm, 0.5))))
    # log\pi_\theta(a|s); exp_advs is detached so gradients flow only
    # through the log-probabilities.
    logprobs = action_dist.logp(actions)
    policy.p_loss = -1.0 * torch.mean(exp_advs.detach() * logprobs)
    # Combine both losses, weighting the value term by vf_coeff.
    policy.total_loss = policy.p_loss + policy.config["vf_coeff"] * \
        policy.v_loss
    explained_var = explained_variance(advantages, state_values)
    policy.explained_variance = torch.mean(explained_var)
    return policy.total_loss
def stats(policy, train_batch):
    """Report the loss terms recorded on ``policy`` by ``marwil_loss``."""
    return dict(
        policy_loss=policy.p_loss,
        vf_loss=policy.v_loss,
        total_loss=policy.total_loss,
        vf_explained_var=policy.explained_variance,
    )
def setup_mixins(policy, obs_space, action_space, config):
    """Before-loss-init hook: allocate the moving advantage-norm
    variable and attach the value-function helper to the policy."""
    # Create a var: running estimate of mean(adv^2), updated in place by
    # marwil_loss; requires_grad=False keeps it out of the optimizer.
    policy.ma_adv_norm = torch.tensor(
        [100.0], dtype=torch.float32, requires_grad=False).to(policy.device)
    # Setup Value branch of our NN (defines policy._value).
    ValueNetworkMixin.__init__(policy)
# Assemble the MARWIL Torch policy from the pieces defined above.  The
# lambda defers the DEFAULT_CONFIG attribute lookup until it is needed.
MARWILTorchPolicy = build_torch_policy(
    name="MARWILTorchPolicy",
    loss_fn=marwil_loss,
    get_default_config=lambda: ray.rllib.agents.marwil.marwil.DEFAULT_CONFIG,
    stats_fn=stats,
    postprocess_fn=postprocess_advantages,
    before_loss_init=setup_mixins,
    mixins=[ValueNetworkMixin])
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
c4b99133f7194d3e60cfcaa9f64b79ad5277495d | 175e4e031471e5cdbc9bcaee2df10f5ec44871d3 | /LESSON2b/.history/backend/app_20200531173010.py | cdaa1e321b4fafb420cc3fde75d7fe23d045eec6 | [] | no_license | hiyacins/uma_study | c329d29a9c3899ab4feca21b9c47ef546b69b0bd | 067e66f258a0c89f7670c645dd7c40feee8536fa | refs/heads/master | 2023-01-23T06:40:12.435047 | 2020-06-17T15:59:34 | 2020-06-17T15:59:34 | 239,077,726 | 0 | 0 | null | 2023-01-06T08:36:26 | 2020-02-08T05:56:52 | Python | UTF-8 | Python | false | false | 505 | py | from HiyaLib import *
import unittest
from flask import Flask, render_template
# app = FlaskBuilder(__name__)
# Serve the pre-built frontend: templates and static assets both come
# from the frontend build output directory.
app = Flask(__name__, static_folder='../frontend/dist/static',
            template_folder='../frontend/dist')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    """Catch-all route: serve the SPA entry page for any path."""
    print('umauma')  # NOTE(review): leftover debug output -- consider removing
    return render_template('index.html')
if __name__ == "__main__":
    # Development server on port 5001 with the debugger enabled; the
    # commented variants below are alternative run modes.
    app.run(port=5001, debug=True)
    # app.run(host="0.0.0.0", port=80, debug=False)
    # unittest.main()
| [
"hiyacins@gmail.com"
] | hiyacins@gmail.com |
620c67ea07fdbda3cf97f05c63848e6b05f90c78 | 8d5c9369b0fb398c5a6078f6cac43ba8d67202fa | /bscan/wordlists.py | c1c5847de914e2cebdb3e2ca36abc28464a9bc57 | [
"MIT"
] | permissive | raystyle/bscan | 45191c2c0d26fe450c5d95567b83d47dfcb4c692 | 1edf0c0e738153a294d5cdc1b69d8f167152d5a2 | refs/heads/master | 2020-04-25T03:15:37.186913 | 2019-02-09T22:23:44 | 2019-02-09T22:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """Utilities for dealing with wordlists."""
import fnmatch
import os
from typing import (
List)
def find_wordlist(wordlist_dirs: List[str], fnpattern: str) -> None:
    """Print every file under the given directories whose basename
    matches the shell-style pattern *fnpattern* (symlinks followed)."""
    for root in wordlist_dirs:
        for dirpath, _dirnames, filenames in os.walk(root, followlinks=True):
            for name in fnmatch.filter(filenames, fnpattern):
                print(os.path.join(dirpath, name))
def walk_wordlists(wordlist_dirs: List[str]) -> None:
    """Print each directory containing files, then its file names,
    then a blank separator line (symlinks followed)."""
    for root in wordlist_dirs:
        for dirpath, _dirnames, filenames in os.walk(root, followlinks=True):
            if not filenames:
                continue
            print(dirpath)
            for name in filenames:
                print(name)
            print()
| [
"welch18@vt.edu"
] | welch18@vt.edu |
dc9eda48d4dbd451dc8477cffc39ad159ec15d1c | 228412b856b7b79986fd460d62273ca8125f0f85 | /xpath_01.py | 0bb9832e5071f73bf646e4fd37a3877f197b3a2e | [] | no_license | patronsbai/scrapy_spider | d11820dd0ec77e5994e63140a8be85b3ac528d26 | 11f10a680f1efcd70f44b5cf28834af0e30aafc7 | refs/heads/master | 2021-05-03T09:20:03.247818 | 2018-03-08T15:26:16 | 2018-03-08T15:26:16 | 120,574,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # -*- coding:utf-8 -*-
import urllib
from lxml import etree
# def getHtml(url):
# page = urllib.urlopen(url)
# html = page.read()
# return html
if __name__ == '__main__':
    # NOTE: Python 2 syntax (print statement) throughout this demo.
    # Live-fetch variant (kept for reference):
    # text = getHtml("http://image.baidu.com/search/index?tn=baiduimage&ct=201326592&lm=-1&cl=2&ie=gbk&word=%CD%BC%C6%AC&fr=ala&ala=1&alatpl=others&pos=0")
    # print text
    text = '''
    <div>
        <ul>
             <li class="item-0"><a href="link1.html">first item</a></li>
             <li class="item-1"><a href="link2.html">second item</a></li>
             <li class="item-inactive"><a href="link31111.html">third item</a></li>
             <li class="item-1"><a href="link4.html">fourth item</a></li>
             <li class="item-0"><a href="link5.html">fifth item</a>
         </ul>
     </div>
    '''
    # 1. Parse the markup into an lxml document tree.
    html_data = etree.HTML(text)
    # 2. Serialisation (kept for reference):
    # html_result = etree.tostring(html_data)
    # print html_result
    # 3.4 Extract an attribute value, e.g. the href from:
    # < li class ="item-inactive" > < a href="link3.html" > third item < / a > < / li >
    result = html_data.xpath('//li[@class="item-inactive"]/a/@href')
    # result = html_data.xpath('//*[@id="imgid"]/div/ul/li[2]/div/a/img')
    print result
    # 3.5 Fuzzy matching with contains():
    # result1 = html_data.xpath('//li[contains(@class,"1")]')
    #
    # print result1
    # print html_result
    # 3. XPath basics to remember:
    # 3.1 Select all li tags
    # result = html_data.xpath('//li')
    # 3.2 Select all anchors under li
    # result = html_data.xpath('//li/a')
    #
    # 3.3 Extract the text content
    # result = html_data.xpath('//li/a/text()')
    #
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
f5537afaebec1f8d42eb3ac71b04ea9338c4537e | 321e58ab3e6b2385bb3549aaaefd56a58c2a51e7 | /python/atpic/wikinormalizer.py | e9803fe900f77d7005f49f1ab908174411478cda | [] | no_license | alexmadon/atpic_photosharing | 7829118d032344bd9a67818cd50e2c27a228d028 | 9fdddeb78548dadf946b1951aea0d0632e979156 | refs/heads/master | 2020-06-02T15:00:29.282979 | 2017-06-12T17:09:52 | 2017-06-12T17:09:52 | 94,095,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/python3
import atpic.log
xx=atpic.log.setmod("INFO","wikinormalizer")
"""
Used to normalize the wiki URLs
we lowercase,
replace white spaces with underscore _
This is important as this is the key that is used to store/retrieve wiki pages.
"""
# may need a permanent redirect to avoid robots indexing one page several times
import atpic.normal
def normalize(s):
    """Return the canonical form of a wiki title (bytes): diacritics
    stripped, lowercased, spaces replaced with underscores."""
    cleaned = atpic.normal.remove_diacritics(s)
    return cleaned.lower().replace(b' ', b'_')
if __name__ == "__main__":
    # Smoke test: print the normalised form of a few sample titles.
    print('hi')
    inputs=(
        b'FTP',
        b'File Upload',
        b'go-go',
        b'Europe/France'
    )
    for s in inputs:
        n=normalize(s)
        print(s,'->',n)
| [
"alex.madon@gmail.com"
] | alex.madon@gmail.com |
701e7a3fe9b862f9be8c29065d8556106cae9844 | c748470949427eaf78d752bbae002c5bc143184f | /hop.py | effdb488859cb1587aa7cb041e2841089c35ebfb | [] | no_license | SleepyBag/MNSC | 6ea7ec1ced5407e2309582d46113a8da9473ac0e | efc5ec9ddf2faf743585aed1b9371f1af8ee24df | refs/heads/master | 2020-04-07T16:31:57.422796 | 2019-01-09T12:40:20 | 2019-01-09T12:40:20 | 158,532,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | from collections import Iterable
import tensorflow as tf
from attention import attention
def hop(scope, last, sentence, sentence_bkg, bkg_iter, bkg_fix,
        doc_len, real_max_len, convert_flag,
        biases_initializer=tf.initializers.zeros(),
        weights_initializer=tf.contrib.layers.xavier_initializer()):
    """One memory-network "hop": attend over ``sentence_bkg`` using the
    iterated background vector plus fixed background vectors, returning
    the new background vector.

    ``bkg_fix`` may be a single tensor or an iterable; it is normalised
    to a list below.  If 'o' appears in ``convert_flag`` the attended
    vector is added residually to ``bkg_iter``.

    NOTE(review): the default initializers are created once at import
    time and shared across calls -- confirm that is intended.
    NOTE(review): hidden size is read from axis 2, which assumes
    sentence_bkg has rank 3 -- confirm with callers.
    """
    if not isinstance(bkg_fix, Iterable):
        bkg_fix = [bkg_fix]
    bkg_fix = list(bkg_fix)
    hidden_size = sentence_bkg.shape[2]
    with tf.variable_scope(scope):
        # Stop gradients through the sentence tensors on every hop
        # except the last one.
        # NOTE(review): the stop_gradient'd ``sentence`` is never used
        # after this line -- possibly dead code; confirm.
        sentence = tf.stop_gradient(sentence) \
            if not last else sentence
        sentence_bkg = tf.stop_gradient(sentence_bkg) \
            if not last else sentence_bkg
        alphas = attention(sentence_bkg, [bkg_iter] + bkg_fix, doc_len, real_max_len,
                           biases_initializer=biases_initializer,
                           weights_initializer=weights_initializer)
        # Attention-weighted sum of the background vectors, flattened so
        # the last axis is the hidden size.
        new_bkg = tf.matmul(alphas, sentence_bkg)
        new_bkg = tf.reshape(new_bkg, [-1, hidden_size], name='new_bkg')
        if 'o' in convert_flag:
            # Residual connection back to the iterated background.
            new_bkg = bkg_iter + new_bkg
        return new_bkg
| [
"xueqianming200@gmail.com"
] | xueqianming200@gmail.com |
a3882d61d3ed8504f857b3e9435321d0d53a28b0 | 549270020f6c8724e2ef1b12e38d11b025579f8d | /recipes/gcc/all/test_v1_package/conanfile.py | a9e585e6ddfd44fc0a19029ef3fea80cb4a37849 | [
"MIT"
] | permissive | conan-io/conan-center-index | 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43 | 3b17e69bb4e5601a850b6e006e44775e690bac33 | refs/heads/master | 2023-08-31T11:34:45.403978 | 2023-08-31T11:13:23 | 2023-08-31T11:13:23 | 204,671,232 | 844 | 1,820 | MIT | 2023-09-14T21:22:42 | 2019-08-27T09:43:58 | Python | UTF-8 | Python | false | false | 1,498 | py | from conans import ConanFile, tools
import os
class TestPackageConan(ConanFile):
    """Smoke-tests the gcc package: compiles a trivial C and C++
    program and, when not cross-building, runs and inspects them."""

    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def test(self):
        def make_executable(path):
            # chmod +x; no-op on non-POSIX platforms.
            if os.name == 'posix':
                os.chmod(path, os.stat(path).st_mode | 0o111)

        cc = self.deps_env_info["gcc"].CC
        cxx = self.deps_env_info["gcc"].CXX
        hello_c = os.path.join(self.source_folder, "hello.c")
        hello_cpp = os.path.join(self.source_folder, "hello.cpp")

        # The compilers must at least report their versions.
        self.run("{} --version".format(cc), run_environment=True)
        self.run("{} --version".format(cxx), run_environment=True)
        self.run("{} -dumpversion".format(cc), run_environment=True)
        self.run("{} -dumpversion".format(cxx), run_environment=True)

        # Build one C and one C++ binary.
        self.run("{} {} -o hello_c".format(cc, hello_c), run_environment=True)
        self.run("{} {} -o hello_cpp".format(cxx, hello_cpp), run_environment=True)

        if not tools.cross_building(self.settings):
            make_executable("hello_c")
            make_executable("hello_cpp")
            self.run("./hello_c", run_environment=True)
            self.run("./hello_cpp", run_environment=True)

            # Optional binary inspection when the tools are available.
            if tools.which("readelf"):
                self.run("readelf -l hello_c", run_environment=True)
                self.run("readelf -l hello_cpp", run_environment=True)
            if tools.which("otool"):
                self.run("otool -L hello_c", run_environment=True)
                self.run("otool -L hello_cpp", run_environment=True)
| [
"noreply@github.com"
] | conan-io.noreply@github.com |
9555111af343a9c31ab6d5bf143cfb3904d4ca63 | 3e36dc2c0455f0332e45b634b35af745550c0709 | /mv.py | efffb2d694411511f2c57d09aa76b1f6b2a739af | [] | no_license | houking-can/GenDataset | a42388129fb7a5b1c176e77dd953f5fa27e77e7a | fb2e9d841ba0f3288e5152c97e250f709cd4f785 | refs/heads/master | 2020-05-05T09:48:47.334267 | 2019-04-26T15:18:12 | 2019-04-26T15:18:12 | 179,918,151 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import os
import json
import shutil
def iter_files(path):
    """Yield every file path located under *path*.

    A file path yields itself; a directory is walked recursively.
    Raises RuntimeError for anything else (e.g. a missing path).
    """
    if os.path.isfile(path):
        yield path
    elif os.path.isdir(path):
        for dirpath, _, filenames in os.walk(path):
            yield from (os.path.join(dirpath, name) for name in filenames)
    else:
        raise RuntimeError('Path %s is invalid' % path)
# Scan every paper JSON under E:\ARXIV and move any file whose abstract
# is more than 50 characters longer than article + conclusion combined
# -- presumably a sign of a mis-parsed paper; confirm intent.
path = r'E:\ARXIV'
for file in iter_files(path):
    paper= json.load(open(file))
    # Character lengths of each joined section.
    a= len(' '.join(paper['abstract']))
    b= len(' '.join(paper['article']))
    c=len(' '.join(paper['conclusion']))
    if a>c+b+50:
        # NOTE(review): destination directory must already exist.
        shutil.move(file,r'E:\tmp\arxiv')
        print(file)
"1240723224@qq.com"
] | 1240723224@qq.com |
ab86dcc71428914d31a13d2dfe2c86a9348a0698 | 0b932d446d88013fadb8c4e0dd3ca3cc4a1a5de3 | /localizacion/inte_project_invoice_customer/__manifest__.py | ea12d16a6fdd1e47a96e0e519ff1a6c105fe846b | [] | no_license | grudiver/biumak | cd8e7477bba3389b2144fa6d35cd89d2eaf0210f | 65705737f16da087b6cb01f725236e7bc9c59c86 | refs/heads/master | 2022-04-11T13:17:33.347975 | 2020-03-24T17:55:24 | 2020-03-24T17:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | # coding: utf-8
##############################################################################
#
# Copyright (c) 2016 Tecnología y Servicios AMN C.A. (http://tysamnca.com/) All Rights Reserved.
# <contacto@tysamnca.com>
# <Teléfono: +58(212) 237.77.53>
# Caracas, Venezuela.
#
# Colaborador: Nathaly Partidas <npartidas@tysamnca.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
'name': 'Add project in invoice customer',
'version': '1.0',
'category': 'invoice',
'summary': 'Add project in invoice customer',
'description': """
Add project in invoice customer
=====================
Add project in invoice customer
""",
'author': 'TYSAMNCA',
'website': 'https://tysamnca.com',
'depends': ['base','account'],
'data': [
'view/invoice_view.xml'
],
#'demo': [],
#'test': [],
'installable': True,
'auto_install': False,
'application': False,
}
| [
"soporte.innova2129@gmail.com"
] | soporte.innova2129@gmail.com |
d89f82d820f1cc4b67d71786f096d13e1a94b79b | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/709325c321084d4eaaf1af19e2ad7def.py | 7b890ef29abe7f0328ad713610bbb0e2515af7be | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 430 | py | def hey(sentence):
    """Return Bob's reply: complaint for silence, agreement for a
    question, "chill" for yelling, indifference otherwise.

    A shouted question ("WHAT?") gets the yelling reply because
    is_a_question() excludes yelled sentences.
    """
    if does_not_say_anything(sentence): return 'Fine. Be that way!'
    if is_a_question(sentence): return 'Sure.'
    if is_yelling(sentence): return 'Woah, chill out!'
    return 'Whatever.'
def does_not_say_anything(sentence):
    """True when the remark is empty or whitespace only."""
    return not sentence.strip()
def is_a_question(sentence):
    """Truthy for a non-empty, non-shouted sentence ending in '?'.

    Short-circuits, so an empty string evaluates to itself (falsy).
    """
    return sentence and not is_yelling(sentence) and sentence.endswith("?")
def is_yelling(sentence):
    # str.isupper(): True only when there is at least one cased
    # character and every cased character is upper case.
    return sentence.isupper()
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
169c4f2428f3f0da8b7f25fe1ec67182b0450f3f | 2c1429a1bd2d0477fd88119d4d778fc68c82adcf | /python/DeepSeaVectorDraw/FileOrData.py | 4904e9ca72ee944f59623cc411736a2f59cb76e0 | [
"Apache-2.0"
] | permissive | akb825/DeepSea | d7ac54f6d8243d43d6ea538159f3067ab7e79880 | 5a909b4f51717bc59682e51ad6aa598a25a9b965 | refs/heads/master | 2023-08-31T23:45:19.533393 | 2023-08-29T07:30:36 | 2023-08-29T07:30:43 | 142,716,767 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaVectorDraw
class FileOrData(object):
    """FlatBuffers union discriminator values for FileOrData
    (generated code -- keep in sync with the schema; do not hand-edit)."""
    NONE = 0
    FileReference = 1
    RawData = 2
| [
"akb825@gmail.com"
] | akb825@gmail.com |
0ceba6b5029e47ade2b015ff5da2adf23055db90 | f095bf04dd1cb62319f77c096714b72efb059689 | /tests/unit/test_clang.py | e5f2780657594000c5d9c51077ad01493e67ef83 | [
"NCSA"
] | permissive | blep/Beye | b9d648039d78a9cb18b9badb717cdbc20849f0f1 | dd4cd865ed5ea51527a4d302422e4f5d68e1954a | refs/heads/master | 2021-01-18T13:18:20.381010 | 2015-06-15T13:46:14 | 2015-06-15T13:46:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.clang as sut
from . import fixtures
import os.path
class GetClangArgumentsTest(fixtures.TestCase):
    """Unit tests for ``libscanbuild.clang.get_arguments``."""

    def test_get_clang_arguments(self):
        with fixtures.TempDir() as tmpdir:
            # get_arguments needs a real (empty) translation unit on disk.
            filename = os.path.join(tmpdir, 'test.c')
            open(filename, 'w').close()

            result = sut.get_arguments(
                tmpdir,
                ['clang', '-c', filename, '-DNDEBUG', '-Dvar="this is it"'])
            self.assertIn('NDEBUG', result)
            self.assertIn('var="this is it"', result)

    def test_get_clang_arguments_fails(self):
        # A nonexistent input file must surface as an exception.
        self.assertRaises(
            Exception,
            sut.get_arguments,
            '.',
            ['clang', '-###', '-fsyntax-only', '-x', 'c', 'notexist.c'])
| [
"rizsotto@gmail.com"
] | rizsotto@gmail.com |
2f903fb6b05308a81d77262030e3009f2320f061 | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/sessionBus/timedate/007_setDate.py | b177277e1d4c004f8660ca893111b67f56b2ef9f | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | # -*- coding: utf-8 -*-
# ****************************************************
# @Test Case ID: 007_setDate
# @Test Description: 设置系统时间和日期
# @Test Condition: 关闭时间自动同步设置
# @Test Step: 1.重置系统时间和网络时间同步;
# @Test Result: 1.检查重置成功;
# @Test Remark:
# @Author: ut000511
# *****************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.sessionBus import timedate
class TestCase(OSBase):
    """Set the system date/time over D-Bus and verify it is applied.

    Precondition: NTP auto-sync must be disabled first, otherwise the
    manually set date would be overwritten.
    """

    def setUp(self):
        # Precondition 1: disable automatic time synchronisation.
        self.Step("预制条件1:关闭时间自动同步设置")
        timedate.setNTP(False)

    @pytest.mark.public
    def test_step(self):
        # Step 1: set the system date/time to a fixed value.
        self.Step("步骤1:设置系统时间和日期")
        timedate.setDate(2020, 7, 28, 10, 30, 10, 0)
        # Checkpoint 1: month/day must match what was just set.
        self.CheckPoint("检查点1: 检查时间设置成功")
        timedate.checkSetDateStatus(7, 28)

    def tearDown(self):
        # Cleanup: restore the system time settings; the short sleep
        # lets the previous D-Bus call settle before resetting.
        self.Step("收尾:还原系统时间设置")
        time.sleep(2)
        timedate.reset()
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
7703332074976cd837f06a01e576212613255699 | 7b51c2248463406783e18f6bc02e2e6ef68aecb2 | /agol_util.py | 5bb206e400524b0281b1b209c773956956682f63 | [] | no_license | fgassert/agol_util | 88712974720103826f9d21136a6612b6aa67a806 | cc8c8b9e5915958cafd08d65f9b3361e608a556d | refs/heads/master | 2016-09-06T12:51:04.888287 | 2015-08-04T20:57:43 | 2015-08-04T20:57:43 | 40,208,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,667 | py |
import urllib2
import urllib
import json
import time
class AGOL_util:
    """
    Minimal client library for the ArcGIS Online REST API.

    Parameters:
      root_url: portal root rest url e.g. http://myorg.arcgis.com/sharing/rest
      username: valid arcgis online username
      password: valid arcgis online password
    """

    def __init__(self, root_url, username, password):
        self.url = root_url
        self._check_items = []
        self._uid = username
        # BUGFIX: this was stored as ``self._pwdd`` while get_token()
        # reads ``self._pwd``, so every token refresh raised
        # AttributeError.
        self._pwd = password
        self._validate_user(username, password)

    def _validate_user(self, username, password):
        '''
        Request a token for the given credentials and cache token and
        expiry on the instance (no error catching; network errors
        propagate).  Returns the raw JSON response.
        '''
        keys = {'username': username,
                'password': password,
                'referer': self.url,
                'f': 'json'}
        data = urllib.urlencode(keys)
        req = urllib2.Request('https://www.arcgis.com/sharing/rest/generateToken', data)
        resp = json.load(urllib2.urlopen(req))
        if 'token' in resp:
            self._token = resp['token']
            # NOTE(review): get_token() compares this against
            # time.time() (seconds) -- confirm the API's 'expires'
            # field uses the same unit.
            self._expiry = resp['expires']
        else:
            self._token = ''
            self._expiry = 0
        return resp

    def get_token(self, nd=False):
        """
        Return a valid token, re-authenticating once when the cached
        token is empty or expired.  Returns False instead of retrying
        when ``nd`` is truthy (used internally to avoid recursion).
        """
        if self._token == '' or self._expiry <= time.time():
            if nd:
                return False
            self._validate_user(self._uid, self._pwd)
            return self.get_token(1)
        return self._token

    def query(self, endpoint, options):
        '''
        POST ``options`` to ``self.url + endpoint`` (assumes matching
        slashes), auto-appending the token and JSON response format.
        Returns the parsed JSON response.  Note: mutates ``options``.
        '''
        options['token'] = self.get_token()
        options['f'] = 'json'
        data = urllib.urlencode(options)
        requrl = "{}{}".format(self.url, endpoint)
        req = urllib2.Request(requrl, data)
        return json.load(urllib2.urlopen(req))

    def add_item_from_url(self, url, options=None):
        """Add an item to the user's content from a remote URL
        (asynchronous, overwriting any existing item)."""
        # BUGFIX: a mutable ``{}`` default leaked keys between calls.
        options = {} if options is None else options
        options['dataUrl'] = url
        options['async'] = 'true'
        options['overwrite'] = 'true'
        return self.query('/content/users/{}/addItem'.format(self._uid), options)

    def add_shapefile_from_url(self, url, options=None):
        """ URL should point to a zipped shapefile """
        options = {} if options is None else options
        options['type'] = 'Shapefile'
        return self.add_item_from_url(url, options)

    def get_item_status(self, itemId):
        """Return the processing status record for an item."""
        url = '/content/users/{}/items/{}/status'.format(self._uid, itemId)
        return self.query(url, {})

    def wait_for_completion(self, itemId, timeout=60):
        '''
        Poll the item status once per second until completion.
        Return:
            True on completion
            False on timeout or error
        '''
        res = self.get_item_status(itemId)
        t = 0
        while 'status' in res and t < timeout:
            if res['status'] == 'completed':
                return True
            t += 1
            time.sleep(1)
            res = self.get_item_status(itemId)
        return False

    def update_item(self, itemId, options):
        """Update an existing item with the given option fields."""
        url = '/content/users/{}/items/{}/update'.format(self._uid, itemId)
        return self.query(url, options)

    def share_items(self, items, everyone=None, org=None, groups=None):
        """ shares items defined by item ids with given groups, org, or everyone """
        options = {}
        if groups is not None:
            options['groups'] = groups
        if everyone is not None:
            options['everyone'] = everyone
        if org is not None:
            options['org'] = org
        # The endpoint expects a comma-separated string of item ids.
        if type(items) == list:
            items = ','.join(items)
        options['items'] = items
        return self.query('/content/users/{}/shareItems'.format(self._uid), options)

    def publish_item(self, itemId, options, publishParameters):
        """Publish a previously added item as a hosted service."""
        options['itemID'] = itemId
        options['publishParameters'] = json.dumps(publishParameters)
        options['overwrite'] = 'true'
        return self.query('/content/users/{}/publish'.format(self._uid), options)

    def publish_shapefile(self, itemId, options=None, publishParameters=None):
        """Publish an uploaded zipped shapefile; the service name
        defaults to the item id."""
        options = {} if options is None else options
        publishParameters = {} if publishParameters is None else publishParameters
        options['fileType'] = 'shapefile'
        if 'name' not in publishParameters:
            publishParameters['name'] = itemId
        return self.publish_item(itemId, options, publishParameters)

    def delete_item(self, itemId):
        """Delete an item owned by the authenticated user."""
        url = '/content/users/{}/items/{}/delete'.format(self._uid, itemId)
        # BUGFIX: query() requires an options dict; calling it without
        # one raised TypeError.
        return self.query(url, {})
| [
"cowbox314@gmail.com"
] | cowbox314@gmail.com |
25cfefcf5435888d72794db6ffd98d70ec97293a | 5dae158ba8adef3a30061336cf24087273d1d1be | /scripts/gpipe/analyze_predictions.py | 3744ec4746e1bba27c9c0f8cb81351f3945c9544 | [] | no_license | Q-KIM/cgat | 595cbc51d0d34f4442d4d124e2fec2be8e1a2a79 | 3bccc543d3daa1ee8830ecb0467e2b3b3b5beb9a | refs/heads/master | 2021-01-17T07:30:00.213178 | 2015-06-05T14:03:33 | 2015-06-05T14:03:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,625 | py | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/analyze_predictions.py
============================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/analyze_predictions.py --help
Type::
python gpipe/analyze_predictions.py --help
for command line help.
Documentation
-------------
Code
----
'''
import sys
import pgdb
import csv
import CGAT.Experiment as E
def main(argv=None):
    """script main.

    Reads tab-separated prediction rows on stdin, looks up extra columns for
    each prediction in a PostgreSQL database (per the requested ``--methods``)
    and writes the augmented rows to stdout.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id: gpipe/analyze_predictions.py 2781 2009-09-10 11:33:14Z andreas $")

    parser.add_option("-s", "--species-regex", dest="species_regex", type="string",
                      help="regular expression to extract species from identifier.")

    parser.add_option("-g", "--gene-regex", dest="gene_regex", type="string",
                      help="regular expression to extract gene from identifier.")

    parser.add_option("-m", "--methods", dest="methods", type="string",
                      help="methods to use [query|].")

    parser.set_defaults(
        species_regex="^([^|]+)\|",
        gene_regex="^[^|]+\|[^|]+\|([^|]+)\|",
        methods="query",
        tablename_predictions="predictions",
        separator="|")

    (options, args) = E.Start(
        parser, add_psql_options=True, add_csv_options=True)

    options.methods = options.methods.split(",")

    dbhandle = pgdb.connect(options.psql_connection)

    # Columns that will be appended to every row, one group per method.
    # Validate the method names up front so a bad option fails early.
    fields = []
    for method in options.methods:
        if method == "query":
            fields += ["query", "lquery"]
        elif method == "nexons":
            fields.append("nexons")
        else:
            # BUG FIX: the original raised a plain string, which is a
            # TypeError at runtime in modern Python and was never a proper
            # exception; raise ValueError with the same message instead.
            raise ValueError("unknown method %s" % method)

    outfile = sys.stdout

    first = True
    for line in sys.stdin:
        if line[0] == "#":
            continue

        data = line[:-1].split("\t")

        if first:
            # First non-comment line is the header: echo it with the
            # new column names appended.
            outfile.write("\t".join(data + fields) + "\n")
            first = False
            continue

        # First column is a composite id: schema|prediction_id|gene_id|quality
        schema, prediction_id, gene_id, quality = data[
            0].split(options.separator)

        outfile.write(line[:-1])

        for method in options.methods:
            # NOTE(review): statements are built by string interpolation from
            # input-derived values; fine for a trusted pipeline, but use
            # parameterized queries if the input is ever untrusted.
            if method == "query":
                statement = "SELECT query_token, query_length FROM %s.%s WHERE prediction_id = '%s'" % (schema,
                                                                                                        options.tablename_predictions,
                                                                                                        prediction_id)
            elif method == "nexons":
                statement = "SELECT nintrons+1 FROM %s.%s WHERE prediction_id = '%s'" % (schema,
                                                                                         options.tablename_predictions,
                                                                                         prediction_id)

            cc = dbhandle.cursor()
            cc.execute(statement)
            rr = cc.fetchone()
            cc.close()

            # NOTE(review): fetchone() returns None when no row matches,
            # which would raise TypeError here. Presumably every
            # prediction_id exists in the table — confirm.
            for x in rr:
                outfile.write("\t%s" % str(x))

        outfile.write("\n")

    E.Stop()
if __name__ == "__main__":
    # Standard CLI entry point: forward the process argv and propagate the exit code.
    sys.exit(main(sys.argv))
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
c48eda74cdfae649b3b3e018fce9d41f437ae699 | e0d776ec6d324e8630d463a1be81f81ccd3a51a9 | /schools/migrations/0001_initial.py | c396d66cea23dd7a30c9e856f2417162a10c14b1 | [
"MIT"
] | permissive | moshthepitt/shulezote | 9b3c8c6d5c53e2b497977fd6900e500e09315c41 | e903a208948ab5294183e2a8c2dac9360a184654 | refs/heads/master | 2021-07-03T11:17:19.274106 | 2019-08-04T09:49:59 | 2019-08-04T09:49:59 | 32,628,463 | 2 | 1 | MIT | 2021-06-10T19:47:39 | 2015-03-21T10:35:34 | Python | UTF-8 | Python | false | false | 3,775 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    # Initial schema migration: creates the School model with its
    # classification fields, geo coordinates and foreign keys into the
    # `places` administrative hierarchy (county, constituency, ...).

    dependencies = [
        ('places', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('code', models.CharField(max_length=255, verbose_name='Code', blank=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name of School')),
                ('slug', autoslug.fields.AutoSlugField(unique=True, editable=False)),
                ('address', models.CharField(max_length=255, verbose_name='Address', blank=True)),
                ('level', models.CharField(help_text='Primary or secondary school', max_length=1, verbose_name='Level', choices=[(b'1', 'Primary School'), (b'2', 'Secondary School')])),
                ('school_type', models.CharField(default=b'1', help_text='Day, Boarding or Both?', max_length=1, verbose_name='School Type', choices=[(b'1', 'Day'), (b'2', 'Boarding'), (b'3', 'Day & Boarding')])),
                ('student_gender', models.CharField(default=b'3', help_text='Boys school, Girls school, or mixed', max_length=1, verbose_name='Student Gender', choices=[(b'1', 'Boys'), (b'2', 'Girls'), (b'3', 'Mixed')])),
                ('ownership', models.CharField(default=b'1', help_text='Private or public', max_length=1, verbose_name='Ownership', choices=[(b'1', 'Public'), (b'2', 'Private')])),
                ('sponsor', models.CharField(default=b'1', max_length=1, verbose_name='School Sponsor', choices=[(b'1', 'Central Government/DEB'), (b'2', 'Religious Organisation'), (b'3', 'Community'), (b'4', 'NGO/CBO'), (b'5', 'Private Individual')])),
                # NOTE(review): 'Ordnirary' below and 'school_sone' further down look
                # like typos, but they are part of the recorded schema/choices; fixing
                # them belongs in a follow-up migration, never in this applied one.
                ('student_needs', models.CharField(default=b'1', help_text='Ordinary, Special or Integrated', max_length=1, verbose_name='Student Needs', choices=[(b'1', 'Ordnirary'), (b'2', 'Special'), (b'3', 'Integrated')])),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this school is active.', verbose_name='Active')),
                ('coordinates', django.contrib.gis.db.models.fields.PointField(help_text='Represented as (longitude, latitude)', srid=4326, verbose_name='Coordinates')),
                ('constituency', models.ForeignKey(verbose_name='Constituency', to='places.Constituency')),
                ('county', models.ForeignKey(verbose_name='County', to='places.County')),
                ('district', models.ForeignKey(default=None, blank=True, to='places.District', null=True, verbose_name='District')),
                ('division', models.ForeignKey(default=None, blank=True, to='places.Division', null=True, verbose_name='Division')),
                ('location', models.ForeignKey(default=None, blank=True, to='places.Location', null=True, verbose_name='Location')),
                ('province', models.ForeignKey(default=None, blank=True, to='places.Province', null=True, verbose_name='Province')),
                ('school_sone', models.ForeignKey(default=None, blank=True, to='places.SchoolZone', null=True, verbose_name='School Zone')),
                ('sub_location', models.ForeignKey(default=None, blank=True, to='places.SubLocation', null=True, verbose_name='Sub Location')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"kelvin@jayanoris.com"
] | kelvin@jayanoris.com |
c5ccb7cfc36c468958d0fc2b2ea1064cf718c1ff | 857a9e588a04b40a66b6ca115063cb67ef0427ea | /tests/divine/test_divine.py | 4eb4b8f22e7a7f5b61a5e6b996c19c894ba6ebaa | [
"MIT"
] | permissive | rambam613/timemachines | 81b88357498871f77efed0faf9c25b4c408d822c | cd243d4606b4ad9c1d419988fc6c04b0964af2e6 | refs/heads/main | 2023-07-03T07:06:24.421114 | 2021-08-07T17:42:40 | 2021-08-07T17:42:40 | 393,793,785 | 1 | 0 | MIT | 2021-08-07T21:13:35 | 2021-08-07T21:13:34 | null | UTF-8 | Python | false | false | 548 | py | from timemachines.skaters.divine.divineinclusion import using_divinity, dv
if using_divinity:
from timemachines.skaters.divine.divineskaters import divine_univariate
from timemachines.skatertools.evaluation.evaluators import hospital_mean_square_error_with_sporadic_fit
def dont_test_divine():  # too noisy
    """Smoke-run the divine univariate skater (deliberately not named test_* so pytest skips it)."""
    # n=105 won't get past warmup, so this only checks the call runs; the
    # returned error is meaningless here and is intentionally discarded
    # (the original bound it to an unused local).
    hospital_mean_square_error_with_sporadic_fit(f=divine_univariate, n=105)
if __name__=='__main__':
    # Direct invocation only: require the optional dependency before running.
    assert using_divinity,'pip install divinity'
dont_test_divine() | [
"peter.cotton@microprediction.com"
] | peter.cotton@microprediction.com |
802ad3fb02a6b071dcd0ec62f89c642999da7259 | 812f9822ddbfc986f4f230a9e6814f22c7c50e2f | /looping/perfect_generation.py | a86f8a3a5f67f02a0940915cb68d7f44d3a29483 | [] | no_license | devopsvj/PythonAndMe | 31b4aa9bade1431d6f13917122dc12bf6a118da6 | 0b1362023960b7c77c79856d4bdef0a58fec1446 | refs/heads/master | 2023-07-25T23:06:39.081191 | 2019-01-15T09:50:08 | 2019-01-15T09:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | print "Perfect Number Generation "
print "--------------------------"
n=input("Range to generate : ")
j=1
while j<=n:
i=1
ans=0
while i<j:
if j%i==0:
ans=ans+i
i=i+1
if ans==j:
print j
j=j+1
| [
"vani_kani@hotmail.com"
] | vani_kani@hotmail.com |
0f37657bada84d6c01e51ab37c33ece7d932747d | d4aff784639249a076fe43812e73a6018bb92470 | /backend/radio_sagun_18170/settings.py | 1e852819826f7c17120fdaeedd60bd9b52d09a5c | [] | no_license | crowdbotics-apps/radio-sagun-18170 | 9b8d5905886eee6d6639e00b01a1ee1c4bcfce51 | 2c841cf71152663db6e1cb57aa9f11eaf37fa201 | refs/heads/master | 2022-10-28T10:20:21.208094 | 2020-06-17T21:56:05 | 2020-06-17T21:56:05 | 273,085,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | """
Django settings for radio_sagun_18170 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# All deploy-specific values come from the environment via django-environ.
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the proxy's forwarded-proto header; redirect to HTTPS only when asked to.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "event",
]

LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "radio_sagun_18170.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "radio_sagun_18170.wsgi.application"

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}

# A DATABASE_URL environment variable overrides the sqlite default above.
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]

AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# Outgoing mail (SendGrid by default); credentials come from the environment.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications

if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
464c5cad5d003f680a9d218634ceb614aa5b67e1 | 375fceebffcf4b6a0d9fd8a8b44a743091ab403b | /scheduler/serializers.py | 4fe01a141dfc65b6ddcb71c5ff6aa22431535802 | [] | no_license | Nextafari/email-microapi-1 | c06e031645fdaa85eac015898db4800a0c83bcd7 | bd831853a8512f10a5d78d6f93c7f3b72f951259 | refs/heads/develop | 2022-12-20T08:44:28.905092 | 2020-07-24T10:49:13 | 2020-07-24T10:49:13 | 282,280,619 | 1 | 0 | null | 2020-07-24T17:36:12 | 2020-07-24T17:36:11 | null | UTF-8 | Python | false | false | 311 | py | from rest_framework import serializers, fields
from rest_framework.validators import UniqueValidator
class EmailSchedulingSerializer(serializers.Serializer):
    """Validates the payload for scheduling an email: two addresses plus subject/body text."""
    sender = serializers.EmailField()
    recipient = serializers.EmailField()
    subject = serializers.CharField()
    body = serializers.CharField()
| [
"phemmylintry@gmail.com"
] | phemmylintry@gmail.com |
eabb11f0ea5d6fdb3b2907edd8e5ee2c6ef9d9fe | 5a61ba76c770de8469218ff457213e122e08c7d1 | /code/at_offer/finding_sorting/coding_interview53_2.py | 55664b811beef5e8d3fc5efff8641a455f31d94b | [
"Apache-2.0"
] | permissive | zhangrong1722/interview | 6a71af26f08f036a294e36073cb9eb6ca798b993 | 187a485de0774561eb843d8ee640236adda97b90 | refs/heads/master | 2020-09-06T08:15:00.229710 | 2019-12-10T06:32:05 | 2019-12-10T06:32:05 | 220,372,777 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | """
Problem: the missing number in 0..n-1.

All numbers in a strictly increasing array of length n-1 are unique and lie
in the range 0..n-1, so exactly one number of that range is absent from the
array; find it.

Approach: a straight scan is O(n) but ignores that the array is sorted. With
a sorted array, binary search applies: before the missing position m, every
element equals its index, so the task reduces to finding the first element
whose value differs from its index.
"""
class Solution(object):
    def FindMissingNum(self, data):
        """Return the single number missing from a sorted array of unique values in 0..len(data).

        Binary-search for the first index i with data[i] != i; that index is
        the missing number.  If every element matches its index, the missing
        number is len(data).  Returns -1 for None or empty input.

        BUG FIX: the original returned -1 both when the mismatch fell on the
        last index (e.g. [0, 1, 2, 4] -> should be 3) and when the missing
        number was the largest (e.g. [0, 1, 2, 3] -> should be 4).
        """
        if data is None or len(data) == 0:
            return -1
        left, right = 0, len(data) - 1
        while left <= right:
            middle = (left + right) // 2
            if data[middle] == middle:
                # Prefix up to middle is intact; the gap is to the right.
                left = middle + 1
            else:
                # Mismatch at or before middle; the gap is to the left.
                right = middle - 1
        # `left` is the first index whose value differs from its index,
        # or len(data) when no element is out of place.
        return left
# Ad-hoc smoke check (runs on import); for [0, 1, 2, 3] the absent value in 0..4 is 4.
s = Solution()
print(s.FindMissingNum([0, 1, 2, 3])) | [
"1922525328@qq.com"
] | 1922525328@qq.com |
98b6be5a553d408eef6fc7f603d8ae31eb9c3289 | 67d76057aee86c43d32e0b74f3ac94d521ee03d8 | /tests/journal.api/warning_instance.py | de5eca5cdb7db1836b2a933265fa765c7fc5113d | [
"BSD-3-Clause"
] | permissive | jlmaurer/pyre | 0f94b1855bf029210f07c528747221751e37687f | 6af38a83621d7d6228d147b4bb94f97fbb10f6e2 | refs/heads/master | 2023-05-25T04:33:19.907452 | 2020-06-18T14:07:54 | 2020-06-18T14:07:54 | 273,362,988 | 0 | 0 | NOASSERTION | 2021-06-10T23:42:14 | 2020-06-18T23:50:28 | null | UTF-8 | Python | false | false | 986 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2020 all rights reserved
def test():
    """
    Verify the channel initial state
    """
    # pull in the framework
    import journal

    # instantiate a warning channel
    ch = journal.warning(name="tests.journal.warning")

    # the name is the one we asked for
    assert ch.name == "tests.journal.warning"
    # warnings start at the default verbosity level
    assert ch.verbosity == 1
    # they come up enabled
    assert ch.active == True
    # and non-fatal
    assert ch.fatal == False
    # with nothing accumulated yet
    assert list(ch.page) == []
    # and with the standard metadata pre-populated
    assert ch.notes["application"] == "journal"
    assert ch.notes["channel"] == ch.name
    assert ch.notes["severity"] == ch.severity

    # all done
    return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| [
"michael.aivazis@para-sim.com"
] | michael.aivazis@para-sim.com |
7b52d1d494431a4c038f1ceb34cfb7cb3839d9fa | 0b2facfa8d47bceea5bbf969bd1ca86215638cf6 | /macop/operators/crossovers/Crossover.py | 6569ca739a046e49a798d270ab98fcf1079664a6 | [
"MIT"
] | permissive | geoffreyp/macop | 37ec5c0ed7913068ee808e63c9c537babed479ca | 287df287e23c7e4f07e90dfcc0a99ef247f5c6b5 | refs/heads/master | 2022-12-29T06:40:11.774347 | 2020-10-23T10:25:15 | 2020-10-23T10:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | """Abstract Crossover class
"""
# module imports
from ..Operator import KindOperator, Operator
# main mutation class
class Crossover(Operator):
    """Abstract crossover extend from Operator

    Attributes:
        kind: {KindOperator} -- specify the kind of operator
    """

    def __init__(self):
        # Tag this operator as a crossover so algorithm code can dispatch on kind.
        self.kind = KindOperator.CROSSOVER

    def apply(self, solution):
        # Abstract hook: concrete crossovers must combine solutions into offspring.
        raise NotImplementedError
| [
"contact@jeromebuisine.fr"
] | contact@jeromebuisine.fr |
913c5bdce3154f05cbbbcfa96d0b6baa9399ff94 | 74983098c5de53007bde6052a631845c781b5ba8 | /rosenbrock/rosenbrock11/rosenbrock.py | c62ef2c302fa5f03d647d4e23c21139f769b66eb | [] | no_license | numairmansur/Experiments | 94ccdd60f4c2cf538fab41556ac72405656c9d77 | 592f39916461c7a9f7d400fa26f849043d1377ed | refs/heads/master | 2021-04-29T12:39:16.845074 | 2017-02-15T07:36:47 | 2017-02-15T07:36:47 | 78,043,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Rosenbrock
from time import gmtime, strftime
def main(job_id, params):
    # Spearmint-style entry point: evaluates the 2-D Rosenbrock benchmark for
    # one job, appends the objective value to a per-run CSV log, and returns it.
    print '!!! Entered Main !!!'
    print 'Anything printed here will end up in the output directory for job #:', str(job_id)
    print params
    f = Rosenbrock()
    # params is expected to hold the two coordinates under keys 'x' and 'y'.
    res = f.objective_function([params['x'], params['y']])
    print res
    # NOTE(review): hardcoded absolute path ties this script to one machine/run
    # ("run11"); consider deriving it from configuration.
    with open('/home/mansurm/Experiments/rosenbrock/run11.csv','a') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        # res is a dict whose 'main' entry holds the objective value(s).
        writer.writerow([res['main'][0]])
    return res['main'][0]
| [
"numair.mansur@gmail.com"
] | numair.mansur@gmail.com |
116e5b4c0274119f4c10ea71c3e8ab0d1ecd97c9 | 6f9b0f4832f218957bed679a428a2e022482e558 | /plato/examples/demo_vgg_screenshot.py | 81f11b7eff774d854fd3190b23bc0342054c01f2 | [] | no_license | petered/plato | 1ee983de87ff7271c93aa6bd1212cd4229e30fa4 | a0b5ef356e2d7e3d163b34871513d85abea7e510 | refs/heads/master | 2021-01-17T00:55:35.059826 | 2018-02-06T04:23:19 | 2018-02-06T04:23:19 | 29,347,512 | 45 | 6 | null | 2017-10-11T06:05:31 | 2015-01-16T12:27:14 | Python | UTF-8 | Python | false | false | 2,303 | py | from artemis.fileman.smart_io import smart_load
from artemis.general.should_be_builtins import bad_value
from artemis.plotting.db_plotting import dbplot
from plato.tools.pretrained_networks.vggnet import get_vgg_net, im2vgginput, get_vgg_label_at
import numpy as np
import time
__author__ = 'peter'
import os
"""
This program scans for photos from the webcam, then processes them with vggnet.
It looks for photos in the directory that the "Photo Booth" application on MacOS puts photos from the webcam.
The first time processing the image should take ~20s, each time after that ~1s.
To Use (only works on Mac)
- Open PhotoBooth.
- Take a screenshot
- A window should pop up showing the image with the label that the network decides on
- Repeat
"""
def get_photo_dir():
    """Return the folder where Photo Booth stores captured pictures."""
    home = os.path.expanduser('~')
    return os.path.join(home, 'Pictures/Photo Booth Library/Pictures')
def get_all_photos():
    """List the file names currently present in the Photo Booth folder."""
    photo_dir = get_photo_dir()
    return os.listdir(photo_dir)
def get_latest_screenshot():
    """Return the full path of the lexicographically-last file in the photo folder."""
    photodir = get_photo_dir()
    names = os.listdir(photodir)
    names.sort()
    newest = names[-1]
    return os.path.join(photodir, newest)
def classify(f, im_path):
    # Run the compiled VGG function `f` on the image at `im_path` and display
    # the image together with the arg-max label and its score.
    im = smart_load(im_path)
    print 'Processing image... "%s"' % (im_path, )
    inputs = im2vgginput(im)
    out = f(inputs)
    amax = np.argmax(out[0])
    label = get_vgg_label_at(amax)
    print 'Done.'
    # Channel axis moved last and reversed for display (presumably BGR->RGB;
    # confirm against im2vgginput); score scaled to a percentage for the title.
    dbplot(np.rollaxis(inputs[0], 0, 3)[..., ::-1], 'Photo', title="{label}: {pct}%".format(label = label, pct = out[0, amax, 0, 0]*100))
def demo_photobooth():
    # Poll the Photo Booth folder forever and classify every newly-appearing photo.
    old_photos = set(get_all_photos())
    f = get_vgg_net().compile(add_test_values = False)
    print 'Take a screenshot with PhotoBooth'
    while True:
        new_photos = set(get_all_photos()).difference(old_photos)
        if len(new_photos) != 0:
            # Classify one of the new files (arbitrary pick if several appeared at once).
            classify(f, os.path.join(get_photo_dir(), new_photos.pop()))
            old_photos = set(get_all_photos())
        time.sleep(.1)  # 100 ms poll interval
def demo_file_path():
    # Interactive loop: prompt for an image path and classify it, forever.
    f = get_vgg_net().compile(add_test_values = False)
    while True:
        im_path = raw_input("Enter Image Path: ")
        classify(f, im_path)
if __name__ == '__main__':
    # "photobooth": watch the webcam folder; "file": prompt for image paths.
    VERSION = "photobooth"
    if VERSION == 'photobooth':
        demo_photobooth()
    elif VERSION == 'file':
        demo_file_path()
    else:
        bad_value(VERSION)
| [
"peter.ed.oconnor@gmail.com"
] | peter.ed.oconnor@gmail.com |
fd420e31e11d26664112127c273760178087e758 | 29f1045821e7a1c3382e3cde5f5103ae95487bcd | /patch.py | c4f6cd962f58eab51fb6dc2d2989c95bb62eb667 | [] | no_license | fans656-deprecated/f6-python | 9b9b6f2cc0e79b9326c27237e3129f38c9ba4af3 | a66311173b5933ad3c800fa4ec95e08e10976275 | refs/heads/master | 2021-06-17T19:27:51.198664 | 2017-05-29T01:39:11 | 2017-05-29T01:39:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | from ctypes import *
from ctypes.wintypes import *
LPBYTE = POINTER(BYTE)
class STARTUPINFOW(Structure):
    # ctypes mirror of the Win32 STARTUPINFOW structure (input to CreateProcessW).
    # NOTE(review): Win32 requires the `cb` member to be set to
    # sizeof(STARTUPINFOW) before the structure is used — confirm callers do so.
    # NOTE(review): "dwFillAtrribute" is a typo of dwFillAttribute; harmless to
    # ctypes (field layout is positional), but renaming it would change the
    # Python attribute name, so it is only flagged here.
    _fields_ = [
        ('cb', DWORD),
        ('lpReserved', LPWSTR),
        ('lpDesktop', LPWSTR),
        ("dwX", DWORD),
        ("dwY", DWORD),
        ("dwXSize", DWORD),
        ("dwYSize", DWORD),
        ("dwXCountChars", DWORD),
        ("dwYCountChars", DWORD),
        ("dwFillAtrribute", DWORD),
        ("dwFlags", DWORD),
        ("wShowWindow", WORD),
        ("cbReserved2", WORD),
        ("lpReserved2", LPBYTE),
        ("hStdInput", HANDLE),
        ("hStdOutput", HANDLE),
        ("hStdError", HANDLE),
    ]
class PROCESS_INFORMATION(Structure):
    # ctypes mirror of the Win32 PROCESS_INFORMATION structure, filled in by
    # CreateProcessW with the new process/thread handles and ids.
    _fields_ = [
        ("hProcess", HANDLE),
        ("hThread", HANDLE),
        ("dwProcessId", DWORD),
        ("dwThreadId", DWORD),
    ]
# kernel32 entry point plus module-level (shared) in/out structures used by systemw().
# NOTE(review): the shared structures are never re-initialised between calls.
CreateProcessW = windll.kernel32.CreateProcessW
startupinfow = STARTUPINFOW()
process_information = PROCESS_INFORMATION()
def systemw(cmd):
    """Launch *cmd* via CreateProcessW; returns its BOOL result (nonzero on success).

    Accepts str or unicode (Python 2); a byte string is decoded with the
    ANSI codepage ('mbcs') first.
    """
    if not isinstance(cmd, unicode):
        cmd = cmd.decode('mbcs')
    # BUG FIX: use fresh per-call structures and initialise cb — the Win32 API
    # requires STARTUPINFOW.cb to hold the structure size, which the shared
    # zero-initialised module-level global never did.
    si = STARTUPINFOW()
    si.cb = sizeof(STARTUPINFOW)
    pi = PROCESS_INFORMATION()
    # NOTE(review): the returned process/thread handles in `pi` are not
    # CloseHandle'd; acceptable for this fire-and-forget helper, but a leak
    # if called in a loop.
    return CreateProcessW(
        None,         # lpApplicationName: resolved from the command line
        cmd,          # lpCommandLine
        None,         # lpProcessAttributes
        None,         # lpThreadAttributes
        0,            # bInheritHandles
        None,         # dwCreationFlags
        None,         # lpEnvironment: inherit ours
        None,         # lpCurrentDirectory: inherit ours
        byref(si),
        byref(pi),
    )
if __name__ == '__main__':
    # Smoke test: launch the Windows calculator.
    systemw('calc')
| [
"fans656@yahoo.com"
] | fans656@yahoo.com |
f5c4865eb14b513c99521b359506746ebfc8db9c | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-py/tests/testdir_algos/glrm/pyunit_PUBDEV_4246_pro_var.py | 9a3e5927e6d7cf142a72014512ad7deff187fdc9 | [
"Apache-2.0"
] | permissive | h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | Jupyter Notebook | UTF-8 | Python | false | false | 2,450 | py | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
from h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator as H2OPCA
# This unit test makes sure that GLRM will not return proportional of variance with values exceeding 1 when
# categorical columns exist. However, when there are categorical columns, if the sole purpose is to perform
# PCA, I will not recommend using GLRM. The reason is due to GLRM will optimize the categorical columns
# using a categorical loss and not the quadratic loss as in PCA algos. The eigenvalues obtained from PCA
# and GLRM differs in this case.
def glrm_iris():
    # Cross-check PCA-via-GLRM against a plain GLRM with recover_svd=True on the
    # iris data: with the same seed both must agree on eigenvalues and
    # eigenvectors, and the reported proportion of variance must never exceed 1.
    print("Importing iris.csv data...")
    irisH2O = h2o.upload_file(pyunit_utils.locate("smalldata/iris/iris.csv"))
    irisH2O.describe()

    print("@@@@@@ Building PCA with GramSVD...\n")
    glrmPCA = H2OPCA(k=5, transform="STANDARDIZE", pca_method="GLRM", use_all_factor_levels=True, seed=21)
    glrmPCA.train(x=irisH2O.names, training_frame=irisH2O)

    glrm_h2o = H2OGeneralizedLowRankEstimator(k=5, loss="Quadratic",transform="STANDARDIZE", recover_svd=True, seed=21)
    glrm_h2o.train(x=irisH2O.names, training_frame=irisH2O)

    # compare singular values and stuff with GramSVD
    print("@@@@@@ Comparing eigenvalues between GramSVD and GLRM...\n")
    pyunit_utils.assert_H2OTwoDimTable_equal(glrmPCA._model_json["output"]["importance"],
                                             glrm_h2o._model_json["output"]["importance"],
                                             ["Standard deviation", "Cumulative Proportion", "Cumulative Proportion"],
                                             tolerance=1e-6)

    print("@@@@@@ Comparing eigenvectors between GramSVD and GLRM...\n")
    # compare singular vectors
    pyunit_utils.assert_H2OTwoDimTable_equal(glrmPCA._model_json["output"]["eigenvectors"],
                                             glrm_h2o._model_json["output"]["eigenvectors"],
                                             glrm_h2o._model_json["output"]["names"], tolerance=1e-6,check_sign=True)

    # check to make sure maximum proportional variance <= 1
    assert glrmPCA._model_json["output"]["importance"].cell_values[1][1] <= 1, \
        "Expected value <= 1.0 but received {0}".format(glrmPCA._model_json["output"]["importance"].cell_values[1][1])
if __name__ == "__main__":
    # Run under the pyunit harness when invoked directly; plain call when imported.
    pyunit_utils.standalone_test(glrm_iris)
else:
    glrm_iris()
| [
"noreply@github.com"
] | h2oai.noreply@github.com |
27eba9359b832c4b1f984ea79cc1a47ca6c44f74 | 2581fbdc72887143376a8f9d8f0da0f1508b9cdf | /Flask/02-Flask-Basics/00-Hello_Puppy.py | 628111976300747c69976ccba1561f3710dc0524 | [
"Apache-2.0"
] | permissive | Sandy1811/python-for-all | 6e8a554a336b6244af127c7bcd51d36018b047d9 | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | refs/heads/master | 2022-05-16T02:36:47.676560 | 2019-08-16T08:35:42 | 2019-08-16T08:35:42 | 198,479,841 | 1 | 0 | Apache-2.0 | 2022-03-11T23:56:32 | 2019-07-23T17:39:38 | Jupyter Notebook | UTF-8 | Python | false | false | 153 | py | from flask import Flask
# WSGI application object for this single-page demo.
app = Flask(__name__)

@app.route('/')
def index():
    """Serve the landing page: a static greeting."""
    return '<h1>Hello Puppy!</h1>'

if __name__ == '__main__':
    # Development server on the Flask defaults (127.0.0.1:5000).
    app.run()
| [
"sndp1811@gmail.com"
] | sndp1811@gmail.com |
0d7026e92b2a3748388b9348f48c70b00ca007ca | e8d5471bd4a47794d66162060343f740e0febca4 | /server/src/uds/reports/stats/pools_usage_day.py | 54c748930c14cac52e48212aae25469933d0bdc0 | [] | no_license | git38438/openuds | ef939c2196d6877e00e92416609335d57dd1bd55 | 7d66d92f85f01ad1ffd549304672dd31008ecc12 | refs/heads/master | 2020-06-22T14:07:33.227703 | 2019-07-18T11:03:56 | 2019-07-18T11:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,676 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
from django.utils.translation import ugettext, ugettext_lazy as _
from uds.core.ui.UserInterface import gui
from uds.core.util.stats import counters
import csv
import io
import datetime
import logging
from .base import StatsReport
from uds.models import ServicePool
from uds.core.reports import graphs
logger = logging.getLogger(__name__)
__updated__ = '2018-04-25'
# several constants as Width height, margins, ..
WIDTH, HEIGHT, DPI = 19.2, 10.8, 100
SIZE = (WIDTH, HEIGHT, DPI)
class CountersPoolAssigned(StatsReport):
    """PDF report: per-hour maximum of assigned services, for one day and a set of pools."""
    filename = 'pools_counters.pdf'
    name = _('Pools usage on a day')  # Report name
    description = _('Pools usage counters for an specific day')  # Report description
    uuid = '0b429f70-2fc6-11e7-9a2a-8fc37101e66a'

    # Input fields shown on the report request form.
    startDate = gui.DateField(
        order=2,
        label=_('Date'),
        tooltip=_('Date for report'),
        defvalue='',
        required=True
    )

    pools = gui.MultiChoiceField(
        order=1,
        label=_('Pools'),
        tooltip=_('Pools for report'),
        required=True
    )

    def initialize(self, values):
        # Nothing to precompute; all state comes from the gui fields.
        pass

    def initGui(self):
        # Populate the pool selector with every service pool, sorted by name.
        logger.debug('Initializing gui')
        vals = [
            gui.choiceItem(v.uuid, v.name) for v in ServicePool.objects.all().order_by('name')
        ]
        self.pools.setValues(vals)

    def getData(self):
        """Return, per selected pool, a dict with the max assigned-services count for each hour.

        Result shape: [{'uuid': ..., 'name': ..., 'hours': {0: n0, ..., 23: n23}}, ...]
        """
        # Sampling interval: the selected day, [start, start + 1 day).
        start = self.startDate.date()
        end = self.startDate.date() + datetime.timedelta(days=1)

        data = []
        for poolUuid in self.pools.value:
            try:
                pool = ServicePool.objects.get(uuid=poolUuid)
            except Exception:
                # BUG FIX: skip unknown pools. The original fell through and
                # reused the previous iteration's pool (or None on the first).
                continue

            # BUG FIX: baseline every hour at 0 for the max-aggregation below.
            # The previous "i * i" seed looked like a debugging leftover and
            # inflated empty hours with a meaningless quadratic value.
            hours = {i: 0 for i in range(24)}

            for x in counters.getCounters(pool, counters.CT_ASSIGNED, since=start, to=end, limit=24, use_max=True, all=False):
                hour = x[0].hour
                val = int(x[1])
                if hours[hour] < val:
                    hours[hour] = val

            data.append({'uuid': pool.uuid, 'name': pool.name, 'hours': hours})

        logger.debug('data: {}'.format(data))

        return data

    def generate(self):
        """Render the PDF: one bar series per pool, hours 0..23 on the X axis."""
        items = self.getData()

        graph1 = io.BytesIO()

        X = list(range(24))
        d = {
            'title': _('Services by hour'),
            'x': X,
            'xtickFnc': lambda l: '{:02d}'.format(l),
            'xlabel': _('Hour'),
            'y': [
                {
                    'label': i['name'],
                    'data': [i['hours'][v] for v in X]
                } for i in items
            ],
            'ylabel': 'Services'
        }

        graphs.barChart(SIZE, d, graph1)

        return self.templateAsPDF(
            'uds/reports/stats/pools-usage-day.html',
            dct={
                'data': items,
                'pools': [v.name for v in ServicePool.objects.filter(uuid__in=self.pools.value)],
                'beginning': self.startDate.date(),
            },
            header=ugettext('Services usage report for a day'),
            water=ugettext('Service usage report'),
            images={'graph1': graph1.getvalue()},
        )
class CountersPoolAssignedCSV(CountersPoolAssigned):
    # CSV flavour of the report above: same inputs and data, plain-text output.
    filename = 'pools_counters.csv'
    mime_type = 'text/csv'  # Report returns pdfs by default, but could be anything else
    uuid = '1491148a-2fc6-11e7-a5ad-03d9a417561c'
    encoded = False

    # Input fields
    startDate = CountersPoolAssigned.startDate
    pools = CountersPoolAssigned.pools

    def generate(self):
        # One row per (pool, hour) pair with the max assigned-services value.
        output = io.StringIO()
        writer = csv.writer(output)
        writer.writerow([ugettext('Pool'), ugettext('Hour'), ugettext('Services')])
        items = self.getData()
        for i in items:
            for j in range(24):
                writer.writerow([i['name'], '{:02d}'.format(j), i['hours'][j]])
        return output.getvalue()
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
904519195206e061a44162de0d62e90299f55869 | dc3d310934705034ab2f5bc4d3a96f07dab9b48b | /bookmanager/app01/templatetags/__init__.py | 6d5c55bcb7d1ab919a016c1955ecfd772acafca2 | [] | no_license | createnewdemo/istudy_test | 82197488d9e9fa05e0c6cc91362645fc4555dc1d | 806693f2bee13e3c28571d0d75f6b6ea70acf7a0 | refs/heads/master | 2022-04-19T05:52:53.780973 | 2020-04-17T17:04:10 | 2020-04-17T17:04:10 | 256,507,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/3 18:05
# @Author : lihanhan
# @Email : demo1li@163.com
# @File : __init__.py.py
| [
"320783214@qq.com"
] | 320783214@qq.com |
7e2a8bc4499302a467ccef7a2bd9d8acfad5474d | 453ca12d912f6498720152342085636ba00c28a1 | /ik_problems/strings_arrays/group_by_commas.py | c71c6fa05e3d2931f1c7105039108a1888b4b151 | [] | no_license | yanbinkang/problem-bank | f9aa65d83a32b830754a353b6de0bb7861a37ec0 | bf9cdf9ec680c9cdca1357a978c3097d19e634ae | refs/heads/master | 2020-06-28T03:36:49.401092 | 2019-05-20T15:13:48 | 2019-05-20T15:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | """
Finish the solution so that it takes an input 'n' (integer) and returns a string that is the decimal representation of the number grouped by commas after every 3 digits.
Assume: 0 <= n < 1000000000
1 -> "1"
10 -> "10"
100 -> "100"
1000 -> "1,000"
10000 -> "10,000"
100000 -> "100,000"
1000000 -> "1,000,000"
35235235 -> "35,235,235"
"""
def group_by_commas(string):
    """Return *string* with a comma inserted every three digits from the right.

    E.g. '1000' -> '1,000', '35235235' -> '35,235,235'.
    """
    chunks = []
    remaining = string
    # Peel three-digit groups off the right end until at most three remain.
    while len(remaining) > 3:
        chunks.append(remaining[-3:])
        remaining = remaining[:-3]
    chunks.append(remaining)
    # Groups were collected right-to-left; reverse to restore reading order.
    return ",".join(reversed(chunks))
# Smoke-test examples from the module docstring. The print() call form is
# valid on Python 3 (the original `print x` statements are a SyntaxError
# there) and still works on Python 2, where the parentheses simply group
# the single argument.
print(group_by_commas("1"))
print(group_by_commas("10"))
print(group_by_commas("100"))
print(group_by_commas("1000"))
print(group_by_commas("10000"))
print(group_by_commas("100000"))
print(group_by_commas("1000000"))
print(group_by_commas("35235235"))
| [
"albert.agram@gmail.com"
] | albert.agram@gmail.com |
e2e6c3c05d8515ffdfae3b143441d7d8cff1fbf0 | 2eae961147a9627a2b9c8449fa61cb7292ad4f6a | /openapi_client/models/put_sales_quotes.py | 14c3af41578f75de659eeb869018fcc562a12ab5 | [] | no_license | kgr-eureka/SageOneSDK | 5a57cc6f62ffc571620ec67c79757dcd4e6feca7 | 798e240eb8f4a5718013ab74ec9a0f9f9054399a | refs/heads/master | 2021-02-10T04:04:19.202332 | 2020-03-02T11:11:04 | 2020-03-02T11:11:04 | 244,350,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,456 | py | # coding: utf-8
"""
Sage Business Cloud Accounting - Accounts
Documentation of the Sage Business Cloud Accounting API. # noqa: E501
The version of the OpenAPI document: 3.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class PutSalesQuotes(object):
    """OpenAPI-generated request model wrapping a single ``sales_quote``.

    Mirrors the generator's conventions: ``openapi_types`` and
    ``attribute_map`` describe the serializable attributes, and values are
    exposed through properties backed by underscore-prefixed storage.
    """

    # Attribute name -> OpenAPI type, used by to_dict() to enumerate fields.
    openapi_types = {
        'sales_quote': 'PutSalesQuotesSalesQuote'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'sales_quote': 'sales_quote'
    }

    def __init__(self, sales_quote=None, local_vars_configuration=None):  # noqa: E501
        """PutSalesQuotes - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default Configuration when none was supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._sales_quote = None
        self.discriminator = None
        if sales_quote is not None:
            self.sales_quote = sales_quote

    @property
    def sales_quote(self):
        """The sales_quote of this PutSalesQuotes.  # noqa: E501

        :rtype: PutSalesQuotesSalesQuote
        """
        return self._sales_quote

    @sales_quote.setter
    def sales_quote(self, sales_quote):
        """Set the sales_quote of this PutSalesQuotes.

        :param sales_quote: The sales_quote of this PutSalesQuotes.  # noqa: E501
        :type: PutSalesQuotesSalesQuote
        """
        self._sales_quote = sales_quote

    def to_dict(self):
        """Returns the model properties as a dict"""

        def _convert(value):
            # Recurse into nested models, lists of models, and dicts of
            # models, mirroring the generator's serialization rules.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {name: _convert(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, PutSalesQuotes)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, PutSalesQuotes)
                or self.to_dict() != other.to_dict())
| [
"kevin.gray@eurekasolutions.co.uk"
] | kevin.gray@eurekasolutions.co.uk |
a55ff18faad4fb94d38959d3536fc5dc28070d82 | 7debf3bc23bd38182b716dcf1eb4fa9f51b6d5bc | /expense_tracker/views/default.py | a4fe3759abf52803017cfbc6dfc29d4947a4c575 | [
"MIT"
] | permissive | jjskim/expense_tracker_401d7 | 7b8ae545089200aa4801c48a118aef74d5bd6490 | 284d57829117e05aed700346c75e77b76fa25480 | refs/heads/master | 2021-07-22T05:37:57.603103 | 2017-10-31T19:09:16 | 2017-10-31T19:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | from pyramid.view import view_config
from datetime import datetime
from pyramid.httpexceptions import HTTPNotFound
# strptime/strftime pattern used for every due date (US month/day/year).
FMT = '%m/%d/%Y'

# In-memory stand-in for a database: one dict per expense, keyed by 'id'.
# NOTE(review): amounts look like mixed units (Rent 50000 vs Phone 100) —
# confirm intended currency/scale.
EXPENSES = [
    {'id': 1, 'title': 'Rent', 'amount': 50000, 'due_date': datetime.strptime('11/1/2017', FMT)},
    {'id': 2, 'title': 'Phone Bill', 'amount': 100, 'due_date': datetime.strptime('11/27/2017', FMT)},
    {'id': 3, 'title': 'Food', 'amount': 600, 'due_date': datetime.strptime('11/2/2017', FMT)},
    {'id': 4, 'title': 'Car', 'amount': 270, 'due_date': datetime.strptime('11/25/2017', FMT)},
    {'id': 5, 'title': 'Internet', 'amount': 100, 'due_date': datetime.strptime('11/12/2017', FMT)},
]
@view_config(route_name='home', renderer="expense_tracker:templates/index.jinja2")
def list_expenses(request):
    """Render the home page with the full expense list."""
    context = {}
    context["title"] = "Expense List"
    context["expenses"] = EXPENSES
    return context
@view_config(route_name='detail', renderer="expense_tracker:templates/detail.jinja2")
def expense_detail(request):
    """Render a single expense looked up by its ``id``; 404 when absent.

    :raises HTTPNotFound: when no expense carries the requested id.
    """
    expense_id = int(request.matchdict['id'])
    # Look the expense up by its 'id' field. The previous index-based bounds
    # check (0..len-1) wrongly rejected the highest id in EXPENSES and let
    # unknown low ids through to an IndexError on the empty filter result.
    expense = next((e for e in EXPENSES if e['id'] == expense_id), None)
    if expense is None:
        raise HTTPNotFound
    return {
        'title': 'One Expense',
        'expense': expense
    }
@view_config(route_name="api_detail", renderer="json")
def api_detail(request):
    """Return a single expense as JSON, with due_date formatted via FMT.

    :raises HTTPNotFound: when no expense carries the requested id.
    """
    expense_id = int(request.matchdict['id'])
    # Lookup by 'id' field; the old index-based bounds check rejected the
    # highest valid id and crashed on unknown low ids.
    expense = next((e for e in EXPENSES if e['id'] == expense_id), None)
    if expense is None:
        raise HTTPNotFound
    # Serialize a copy: the old code overwrote the shared EXPENSES entry's
    # due_date with a str, so the next request for the same id crashed when
    # it called .strftime on that string.
    payload = dict(expense)
    payload['due_date'] = payload['due_date'].strftime(FMT)
    return {
        'title': 'One Expense',
        'expense': payload
    }
| [
"nhuntwalker@gmail.com"
] | nhuntwalker@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.