blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f63a1432724c3cac911ccad6422806edc4c92da0 | 0369761e54c2766ff2ce13ed249d462a12320c0f | /bubble-search/bubble-search-practice/exercise-09.py | de843c707b960f927b8aa8ee8b57bf0057cd539f | [] | no_license | JasoSalgado/algorithms | e54c739005cc47ee8a401912a77cc70865d28c87 | 8db7d2bedfe468c70e5191bc7873e4dd86e7f95a | refs/heads/master | 2023-04-25T23:41:10.655874 | 2021-06-11T17:35:49 | 2021-06-11T17:35:49 | 333,979,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | """
Bubble search exercise 09
"""
# Bubble-sort exercise: sort a fixed list of integers ascending.
def bubble_sort(values):
    """Sort *values* in place with bubble sort and return the same list.

    Stops early on a pass with no swaps (list already sorted).
    """
    n = len(values)
    for i in range(n):
        swapped = False
        # After pass i the last i elements are already in final position.
        for j in range(n - 1 - i):
            if values[j] > values[j + 1]:
                values[j], values[j + 1] = values[j + 1], values[j]
                swapped = True
        if not swapped:
            break
    return values

# Renamed from "list" to avoid shadowing the builtin.
numbers = [6514, 2352, 3984, 3596, 2445, 5535, 6332, 5346, 617, 3976, 1242, 2573, 7772, 9324, 4655, 3144, 6233, 2287, 6109, 4139, 2030, 6734, 1495, 9466, 6893, 9336, 963, 4412, 5347, 2565, 7590, 5932, 6747, 7566, 2456, 9982, 8880, 6816, 9415, 2426, 5892, 5074, 1501, 9445, 6921, 545, 4415, 9516, 6426, 7369]
print(f"List: {numbers}")
bubble_sort(numbers)
print(numbers)
| [
"jaso_98@hotmail.com"
] | jaso_98@hotmail.com |
c39dd68cbb8cce48de736b436b57f5a98d8f5348 | cdbf35888ab546e44cb07e48ca538f1c3184554b | /project00--part1/farid/model/model.py | 0dea252c19a2be5b9217276edc59bc6bb5f30f05 | [] | no_license | farid01683/AI | 85350b85940a2d0c456a3262c7b14d9dfef42f12 | d472d5b14282dea6690cb46af329f90a5cdf09fa | refs/heads/master | 2022-01-22T19:55:44.655532 | 2019-07-24T17:16:34 | 2019-07-24T17:16:34 | 198,030,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from keras.models import Sequential
from keras.layers.core import Dense
def creat_mlp(dim,regress=False):
    # Build a small fully-connected net: dim -> 8 -> 4 (-> 1 when regress).
    # NOTE(review): the name looks like a typo for "create_mlp".
    model = Sequential()
    # NOTE(review): "inpute_dim" is misspelled -- Keras Dense expects
    # "input_dim"; as written this raises a TypeError when called. Confirm
    # and fix upstream.
    model.add(Dense(8,inpute_dim=dim,activation='relu'))
    model.add(Dense(4,activation='relu'))
    if regress:
        # Single ReLU output head for regression targets.
        model.add(Dense(1,activation='relu'))
    return model | [
"noreply@github.com"
] | farid01683.noreply@github.com |
d6ff585c2ffc693538bd8228c085b5e4a11c85cb | 7bdd1cbff549d403446b9a827b5e7436785885fe | /dashboard/views.py | d37476a0d37763dc0bca569357a46f954e2e2ed6 | [] | no_license | Vaishnavi109/MobileSensorCloud | 6594ee0de89cc7640f46927782e90a2954fb1db1 | 70b09c56b3b0fe48b95c604810fe346acdaf5d5c | refs/heads/master | 2021-01-17T23:58:51.120130 | 2016-04-13T06:59:53 | 2016-04-13T06:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.shortcuts import render
def dashboard(request):
    """Render the dashboard landing page with an empty template context."""
    context = {}
    return render(request, 'dashboard.html', context)
| [
"rishirajdigambar.randive@sjsu.edu"
] | rishirajdigambar.randive@sjsu.edu |
5c5a5c54b2a1b520926479a05fc3e52a64526556 | 76e6d039e5be0df7b100ee1f91587412b878279e | /Homeworks/HW2.py | a9092f80134194a8a48d3e9b3af6500da56bec9a | [] | no_license | vahaponur/GlobalAIHubPythonCourse | 6b5128807f81c6d0b4db99ff3a20e046552b57cb | f11b1187a5ec189b9656f03fac208b1926cd21c5 | refs/heads/main | 2023-03-02T05:24:01.409809 | 2021-02-08T15:31:43 | 2021-02-08T15:31:43 | 334,997,537 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #GlobalAIHub Homework 2
# GlobalAIHub Homework 2 -- two simple credential checks.

# Expected credentials, hard-coded as plain variables.
user_name = "vonuryil"
password = "globalaihub46@"

# Prompt the user for both fields.
entered_name = input("User Name: ")
entered_pass = input("password: ")

# Grant access only when both fields match exactly.
if entered_name == user_name and entered_pass == password:
    print("Access Granted")
else:
    print("Access Denied")

# Same check, but the expected credentials live in a dictionary.
user_info = {"user_name": "vonuryil", "password": "dumbpassword"}
entered_d_name = input("Dictionary Username: ")
entered_d_pass = input("Dictionary Password: ")

if entered_d_name == user_info["user_name"] and entered_d_pass == user_info["password"]:
    print("Access Granted")
else:
    print("Access Denied")
| [
"noreply@github.com"
] | vahaponur.noreply@github.com |
f8b8ecc8c9afc0614b9a66d3e6d49402720bd1bf | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/boto/sts/connection.py | 8c0cf4b269ba1ac3926620ffdf9f697f9a4c88a2 | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0... | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 32,142 | py | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.connection import AWSQueryConnection
from boto.provider import Provider, NO_CREDENTIALS_PROVIDED
from boto.regioninfo import RegionInfo
from boto.sts.credentials import Credentials, FederationToken, AssumedRole
from boto.sts.credentials import DecodeAuthorizationMessage
import boto
import boto.utils
import datetime
import threading
# Process-wide cache of session tokens, keyed by "<region>:<access_key>"
# (see STSConnection.get_session_token). Guarded per-connection by a mutex.
_session_token_cache = {}
class STSConnection(AWSQueryConnection):
    """
    AWS Security Token Service
    The AWS Security Token Service is a web service that enables you
    to request temporary, limited-privilege credentials for AWS
    Identity and Access Management (IAM) users or for users that you
    authenticate (federated users). This guide provides descriptions
    of the AWS Security Token Service API.
    For more detailed information about using this service, go to
    `Using Temporary Security Credentials`_.
    For information about setting up signatures and authorization
    through the API, go to `Signing AWS API Requests`_ in the AWS
    General Reference . For general information about the Query API,
    go to `Making Query Requests`_ in Using IAM . For information
    about using security tokens with other AWS products, go to `Using
    Temporary Security Credentials to Access AWS`_ in Using Temporary
    Security Credentials .
    If you're new to AWS and need additional technical information
    about a specific AWS product, you can find the product's technical
    documentation at `http://aws.amazon.com/documentation/`_.
    We will refer to Amazon Identity and Access Management using the
    abbreviated form IAM. All copyrights and legal protections still
    apply.
    """
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'sts.amazonaws.com'
    APIVersion = '2011-06-15'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 converter=None, validate_certs=True, anon=False,
                 security_token=None, profile_name=None):
        """
        :type anon: boolean
        :param anon: If this parameter is True, the ``STSConnection`` object
            will make anonymous requests, and it will not use AWS
            Credentials or even search for AWS Credentials to make these
            requests.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint,
                                connection_cls=STSConnection)
        self.region = region
        self.anon = anon
        # Serializes token fetches so concurrent callers don't all hit STS
        # for the same cache key (see get_session_token).
        self._mutex = threading.Semaphore()
        provider = 'aws'
        # If an anonymous request is sent, do not try to look for credentials.
        # So we pass in dummy values for the access key id, secret access
        # key, and session token. It does not matter that they are
        # not actual values because the request is anonymous.
        if self.anon:
            provider = Provider('aws', NO_CREDENTIALS_PROVIDED,
                                NO_CREDENTIALS_PROVIDED,
                                NO_CREDENTIALS_PROVIDED)
        super(STSConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy, proxy_port,
                                            proxy_user, proxy_pass,
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            validate_certs=validate_certs,
                                            security_token=security_token,
                                            profile_name=profile_name,
                                            provider=provider)
    def _required_auth_capability(self):
        # Anonymous connections skip signing entirely; authenticated
        # connections use SigV4.
        if self.anon:
            return ['sts-anon']
        else:
            return ['hmac-v4']
    def _check_token_cache(self, token_key, duration=None, window_seconds=60):
        """Return the cached token for *token_key*, or None.

        A token is treated as expired (and None is returned) when less than
        *window_seconds* remain before its expiration timestamp.
        NOTE(review): *duration* is accepted but never used here -- confirm
        whether callers expect it to influence the freshness check.
        """
        token = _session_token_cache.get(token_key, None)
        if token:
            now = datetime.datetime.utcnow()
            expires = boto.utils.parse_ts(token.expiration)
            delta = expires - now
            if delta < datetime.timedelta(seconds=window_seconds):
                msg = 'Cached session token %s is expired' % token_key
                boto.log.debug(msg)
                token = None
        return token
    def _get_session_token(self, duration=None,
                           mfa_serial_number=None, mfa_token=None):
        """Issue an uncached GetSessionToken request and return Credentials."""
        params = {}
        if duration:
            params['DurationSeconds'] = duration
        if mfa_serial_number:
            params['SerialNumber'] = mfa_serial_number
        if mfa_token:
            params['TokenCode'] = mfa_token
        return self.get_object('GetSessionToken', params,
                               Credentials, verb='POST')
    def get_session_token(self, duration=None, force_new=False,
                          mfa_serial_number=None, mfa_token=None):
        """
        Return a valid session token. Because retrieving new tokens
        from the Secure Token Service is a fairly heavyweight operation
        this module caches previously retrieved tokens and returns
        them when appropriate. Each token is cached with a key
        consisting of the region name of the STS endpoint
        concatenated with the requesting user's access id. If there
        is a token in the cache meeting with this key, the session
        expiration is checked to make sure it is still valid and if
        so, the cached token is returned. Otherwise, a new session
        token is requested from STS and it is placed into the cache
        and returned.
        :type duration: int
        :param duration: The number of seconds the credentials should
            remain valid.
        :type force_new: bool
        :param force_new: If this parameter is True, a new session token
            will be retrieved from the Secure Token Service regardless
            of whether there is a valid cached token or not.
        :type mfa_serial_number: str
        :param mfa_serial_number: The serial number of an MFA device.
            If this is provided and if the mfa_passcode provided is
            valid, the temporary session token will be authorized with
            to perform operations requiring the MFA device authentication.
        :type mfa_token: str
        :param mfa_token: The 6 digit token associated with the
            MFA device.
        """
        # Cache key: STS region plus the caller's access key id.
        token_key = '%s:%s' % (self.region.name, self.provider.access_key)
        token = self._check_token_cache(token_key, duration)
        if force_new or not token:
            boto.log.debug('fetching a new token for %s' % token_key)
            try:
                # Hold the mutex across fetch + cache insert so parallel
                # callers don't issue duplicate STS requests.
                self._mutex.acquire()
                token = self._get_session_token(duration,
                                                mfa_serial_number,
                                                mfa_token)
                _session_token_cache[token_key] = token
            finally:
                self._mutex.release()
        return token
    def get_federation_token(self, name, duration=None, policy=None):
        """
        Returns a set of temporary security credentials (consisting of
        an access key ID, a secret access key, and a security token)
        for a federated user. A typical use is in a proxy application
        that is getting temporary security credentials on behalf of
        distributed applications inside a corporate network. Because
        you must call the `GetFederationToken` action using the long-
        term security credentials of an IAM user, this call is
        appropriate in contexts where those credentials can be safely
        stored, usually in a server-based application.
        **Note:** Do not use this call in mobile applications or
        client-based web applications that directly get temporary
        security credentials. For those types of applications, use
        `AssumeRoleWithWebIdentity`.
        The `GetFederationToken` action must be called by using the
        long-term AWS security credentials of the AWS account or an
        IAM user. Credentials that are created by IAM users are valid
        for the specified duration, between 900 seconds (15 minutes)
        and 129600 seconds (36 hours); credentials that are created by
        using account credentials have a maximum duration of 3600
        seconds (1 hour).
        The permissions that are granted to the federated user are the
        intersection of the policy that is passed with the
        `GetFederationToken` request and policies that are associated
        with of the entity making the `GetFederationToken` call.
        For more information about how permissions work, see
        `Controlling Permissions in Temporary Credentials`_ in Using
        Temporary Security Credentials . For information about using
        `GetFederationToken` to create temporary security credentials,
        see `Creating Temporary Credentials to Enable Access for
        Federated Users`_ in Using Temporary Security Credentials .
        :type name: string
        :param name: The name of the federated user. The name is used as an
            identifier for the temporary security credentials (such as `Bob`).
            For example, you can reference the federated user name in a
            resource-based policy, such as in an Amazon S3 bucket policy.
        :type policy: string
        :param policy: A policy that specifies the permissions that are granted
            to the federated user. By default, federated users have no
            permissions; they do not inherit any from the IAM user. When you
            specify a policy, the federated user's permissions are intersection
            of the specified policy and the IAM user's policy. If you don't
            specify a policy, federated users can only access AWS resources
            that explicitly allow those federated users in a resource policy,
            such as in an Amazon S3 bucket policy.
        :type duration: integer
        :param duration: The duration, in seconds, that the session
            should last. Acceptable durations for federation sessions range
            from 900 seconds (15 minutes) to 129600 seconds (36 hours), with
            43200 seconds (12 hours) as the default. Sessions for AWS account
            owners are restricted to a maximum of 3600 seconds (one hour). If
            the duration is longer than one hour, the session for AWS account
            owners defaults to one hour.
        """
        params = {'Name': name}
        if duration:
            params['DurationSeconds'] = duration
        if policy:
            params['Policy'] = policy
        return self.get_object('GetFederationToken', params,
                               FederationToken, verb='POST')
    def assume_role(self, role_arn, role_session_name, policy=None,
                    duration_seconds=None, external_id=None,
                    mfa_serial_number=None,
                    mfa_token=None):
        """
        Returns a set of temporary security credentials (consisting of
        an access key ID, a secret access key, and a security token)
        that you can use to access AWS resources that you might not
        normally have access to. Typically, you use `AssumeRole` for
        cross-account access or federation.
        For cross-account access, imagine that you own multiple
        accounts and need to access resources in each account. You
        could create long-term credentials in each account to access
        those resources. However, managing all those credentials and
        remembering which one can access which account can be time
        consuming. Instead, you can create one set of long-term
        credentials in one account and then use temporary security
        credentials to access all the other accounts by assuming roles
        in those accounts. For more information about roles, see
        `Roles`_ in Using IAM .
        For federation, you can, for example, grant single sign-on
        access to the AWS Management Console. If you already have an
        identity and authentication system in your corporate network,
        you don't have to recreate user identities in AWS in order to
        grant those user identities access to AWS. Instead, after a
        user has been authenticated, you call `AssumeRole` (and
        specify the role with the appropriate permissions) to get
        temporary security credentials for that user. With those
        temporary security credentials, you construct a sign-in URL
        that users can use to access the console. For more
        information, see `Scenarios for Granting Temporary Access`_ in
        AWS Security Token Service .
        The temporary security credentials are valid for the duration
        that you specified when calling `AssumeRole`, which can be
        from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
        default is 1 hour.
        The temporary security credentials that are returned from the
        `AssumeRoleWithWebIdentity` response have the permissions that
        are associated with the access policy of the role being
        assumed and any policies that are associated with the AWS
        resource being accessed. You can further restrict the
        permissions of the temporary security credentials by passing a
        policy in the request. The resulting permissions are an
        intersection of the role's access policy and the policy that
        you passed. These policies and any applicable resource-based
        policies are evaluated when calls to AWS service APIs are made
        using the temporary security credentials.
        To assume a role, your AWS account must be trusted by the
        role. The trust relationship is defined in the role's trust
        policy when the IAM role is created. You must also have a
        policy that allows you to call `sts:AssumeRole`.
        **Important:** You cannot call `Assumerole` by using AWS
        account credentials; access will be denied. You must use IAM
        user credentials to call `AssumeRole`.
        :type role_arn: string
        :param role_arn: The Amazon Resource Name (ARN) of the role that the
            caller is assuming.
        :type role_session_name: string
        :param role_session_name: An identifier for the assumed role session.
            The session name is included as part of the `AssumedRoleUser`.
        :type policy: string
        :param policy: A supplemental policy that is associated with the
            temporary security credentials from the `AssumeRole` call. The
            resulting permissions of the temporary security credentials are an
            intersection of this policy and the access policy that is
            associated with the role. Use this policy to further restrict the
            permissions of the temporary security credentials.
        :type duration_seconds: integer
        :param duration_seconds: The duration, in seconds, of the role session.
            The value can range from 900 seconds (15 minutes) to 3600 seconds
            (1 hour). By default, the value is set to 3600 seconds.
        :type external_id: string
        :param external_id: A unique identifier that is used by third parties
            to assume a role in their customers' accounts. For each role that
            the third party can assume, they should instruct their customers to
            create a role with the external ID that the third party generated.
            Each time the third party assumes the role, they must pass the
            customer's external ID. The external ID is useful in order to help
            third parties bind a role to the customer who created it. For more
            information about the external ID, see `About the External ID`_ in
            Using Temporary Security Credentials .
        :type mfa_serial_number: string
        :param mfa_serial_number: The identification number of the MFA device that
            is associated with the user who is making the AssumeRole call.
            Specify this value if the trust policy of the role being assumed
            includes a condition that requires MFA authentication. The value is
            either the serial number for a hardware device (such as
            GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
            (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
            Maximum length of 256.
        :type mfa_token: string
        :param mfa_token: The value provided by the MFA device, if the trust
            policy of the role being assumed requires MFA (that is, if the
            policy includes a condition that tests for MFA). If the role being
            assumed requires MFA and if the TokenCode value is missing or
            expired, the AssumeRole call returns an "access denied" error.
            Minimum length of 6. Maximum length of 6.
        """
        params = {
            'RoleArn': role_arn,
            'RoleSessionName': role_session_name
        }
        if policy is not None:
            params['Policy'] = policy
        if duration_seconds is not None:
            params['DurationSeconds'] = duration_seconds
        if external_id is not None:
            params['ExternalId'] = external_id
        if mfa_serial_number is not None:
            params['SerialNumber'] = mfa_serial_number
        if mfa_token is not None:
            params['TokenCode'] = mfa_token
        return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
    def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
                              policy=None, duration_seconds=None):
        """
        Returns a set of temporary security credentials for users who
        have been authenticated via a SAML authentication response.
        This operation provides a mechanism for tying an enterprise
        identity store or directory to role-based AWS access without
        user-specific credentials or configuration.
        The temporary security credentials returned by this operation
        consist of an access key ID, a secret access key, and a
        security token. Applications can use these temporary security
        credentials to sign calls to AWS services. The credentials are
        valid for the duration that you specified when calling
        `AssumeRoleWithSAML`, which can be up to 3600 seconds (1 hour)
        or until the time specified in the SAML authentication
        response's `NotOnOrAfter` value, whichever is shorter.
        The maximum duration for a session is 1 hour, and the minimum
        duration is 15 minutes, even if values outside this range are
        specified.
        Optionally, you can pass an AWS IAM access policy to this
        operation. The temporary security credentials that are
        returned by the operation have the permissions that are
        associated with the access policy of the role being assumed,
        except for any permissions explicitly denied by the policy you
        pass. This gives you a way to further restrict the permissions
        for the federated user. These policies and any applicable
        resource-based policies are evaluated when calls to AWS are
        made using the temporary security credentials.
        Before your application can call `AssumeRoleWithSAML`, you
        must configure your SAML identity provider (IdP) to issue the
        claims required by AWS. Additionally, you must use AWS
        Identity and Access Management (AWS IAM) to create a SAML
        provider entity in your AWS account that represents your
        identity provider, and create an AWS IAM role that specifies
        this SAML provider in its trust policy.
        Calling `AssumeRoleWithSAML` does not require the use of AWS
        security credentials. The identity of the caller is validated
        by using keys in the metadata document that is uploaded for
        the SAML provider entity for your identity provider.
        For more information, see the following resources:
        + `Creating Temporary Security Credentials for SAML
          Federation`_ in the Using Temporary Security Credentials
          guide.
        + `SAML Providers`_ in the Using IAM guide.
        + `Configuring a Relying Party and Claims in the Using IAM
          guide. `_
        + `Creating a Role for SAML-Based Federation`_ in the Using
          IAM guide.
        :type role_arn: string
        :param role_arn: The Amazon Resource Name (ARN) of the role that the
            caller is assuming.
        :type principal_arn: string
        :param principal_arn: The Amazon Resource Name (ARN) of the SAML
            provider in AWS IAM that describes the IdP.
        :type saml_assertion: string
        :param saml_assertion: The base-64 encoded SAML authentication response
            provided by the IdP.
        For more information, see `Configuring a Relying Party and Adding
            Claims`_ in the Using IAM guide.
        :type policy: string
        :param policy:
        An AWS IAM policy in JSON format.
        The temporary security credentials that are returned by this operation
            have the permissions that are associated with the access policy of
            the role being assumed, except for any permissions explicitly
            denied by the policy you pass. These policies and any applicable
            resource-based policies are evaluated when calls to AWS are made
            using the temporary security credentials.
        The policy must be 2048 bytes or shorter, and its packed size must be
            less than 450 bytes.
        :type duration_seconds: integer
        :param duration_seconds:
        The duration, in seconds, of the role session. The value can range from
            900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the
            value is set to 3600 seconds. An expiration can also be specified
            in the SAML authentication response's `NotOnOrAfter` value. The
            actual expiration time is whichever value is shorter.
        The maximum duration for a session is 1 hour, and the minimum duration
            is 15 minutes, even if values outside this range are specified.
        """
        params = {
            'RoleArn': role_arn,
            'PrincipalArn': principal_arn,
            'SAMLAssertion': saml_assertion,
        }
        if policy is not None:
            params['Policy'] = policy
        if duration_seconds is not None:
            params['DurationSeconds'] = duration_seconds
        return self.get_object('AssumeRoleWithSAML', params, AssumedRole,
                               verb='POST')
    def assume_role_with_web_identity(self, role_arn, role_session_name,
                                      web_identity_token, provider_id=None,
                                      policy=None, duration_seconds=None):
        """
        Returns a set of temporary security credentials for users who
        have been authenticated in a mobile or web application with a
        web identity provider, such as Login with Amazon, Facebook, or
        Google. `AssumeRoleWithWebIdentity` is an API call that does
        not require the use of AWS security credentials. Therefore,
        you can distribute an application (for example, on mobile
        devices) that requests temporary security credentials without
        including long-term AWS credentials in the application or by
        deploying server-based proxy services that use long-term AWS
        credentials. For more information, see `Creating a Mobile
        Application with Third-Party Sign-In`_ in AWS Security Token
        Service .
        The temporary security credentials consist of an access key
        ID, a secret access key, and a security token. Applications
        can use these temporary security credentials to sign calls to
        AWS service APIs. The credentials are valid for the duration
        that you specified when calling `AssumeRoleWithWebIdentity`,
        which can be from 900 seconds (15 minutes) to 3600 seconds (1
        hour). By default, the temporary security credentials are
        valid for 1 hour.
        The temporary security credentials that are returned from the
        `AssumeRoleWithWebIdentity` response have the permissions that
        are associated with the access policy of the role being
        assumed. You can further restrict the permissions of the
        temporary security credentials by passing a policy in the
        request. The resulting permissions are an intersection of the
        role's access policy and the policy that you passed. These
        policies and any applicable resource-based policies are
        evaluated when calls to AWS service APIs are made using the
        temporary security credentials.
        Before your application can call `AssumeRoleWithWebIdentity`,
        you must have an identity token from a supported identity
        provider and create a role that the application can assume.
        The role that your application assumes must trust the identity
        provider that is associated with the identity token. In other
        words, the identity provider must be specified in the role's
        trust policy. For more information, see ` Creating Temporary
        Security Credentials for Mobile Apps Using Third-Party
        Identity Providers`_.
        :type role_arn: string
        :param role_arn: The Amazon Resource Name (ARN) of the role that the
            caller is assuming.
        :type role_session_name: string
        :param role_session_name: An identifier for the assumed role session.
            Typically, you pass the name or identifier that is associated with
            the user who is using your application. That way, the temporary
            security credentials that your application will use are associated
            with that user. This session name is included as part of the ARN
            and assumed role ID in the `AssumedRoleUser` response element.
        :type web_identity_token: string
        :param web_identity_token: The OAuth 2.0 access token or OpenID Connect
            ID token that is provided by the identity provider. Your
            application must get this token by authenticating the user who is
            using your application with a web identity provider before the
            application makes an `AssumeRoleWithWebIdentity` call.
        :type provider_id: string
        :param provider_id: Specify this value only for OAuth access tokens. Do
            not specify this value for OpenID Connect ID tokens, such as
            `accounts.google.com`. This is the fully-qualified host component
            of the domain name of the identity provider. Do not include URL
            schemes and port numbers. Currently, `www.amazon.com` and
            `graph.facebook.com` are supported.
        :type policy: string
        :param policy: A supplemental policy that is associated with the
            temporary security credentials from the `AssumeRoleWithWebIdentity`
            call. The resulting permissions of the temporary security
            credentials are an intersection of this policy and the access
            policy that is associated with the role. Use this policy to further
            restrict the permissions of the temporary security credentials.
        :type duration_seconds: integer
        :param duration_seconds: The duration, in seconds, of the role session.
            The value can range from 900 seconds (15 minutes) to 3600 seconds
            (1 hour). By default, the value is set to 3600 seconds.
        """
        params = {
            'RoleArn': role_arn,
            'RoleSessionName': role_session_name,
            'WebIdentityToken': web_identity_token,
        }
        if provider_id is not None:
            params['ProviderId'] = provider_id
        if policy is not None:
            params['Policy'] = policy
        if duration_seconds is not None:
            params['DurationSeconds'] = duration_seconds
        return self.get_object(
            'AssumeRoleWithWebIdentity',
            params,
            AssumedRole,
            verb='POST'
        )
    def decode_authorization_message(self, encoded_message):
        """
        Decodes additional information about the authorization status
        of a request from an encoded message returned in response to
        an AWS request.
        For example, if a user is not authorized to perform an action
        that he or she has requested, the request returns a
        `Client.UnauthorizedOperation` response (an HTTP 403
        response). Some AWS actions additionally return an encoded
        message that can provide details about this authorization
        failure.
        Only certain AWS actions return an encoded authorization
        message. The documentation for an individual action indicates
        whether that action returns an encoded message in addition to
        returning an HTTP code.
        The message is encoded because the details of the
        authorization status can constitute privileged information
        that the user who requested the action should not see. To
        decode an authorization status message, a user must be granted
        permissions via an IAM policy to request the
        `DecodeAuthorizationMessage` (
        `sts:DecodeAuthorizationMessage`) action.
        The decoded message includes the following type of
        information:
        + Whether the request was denied due to an explicit deny or
          due to the absence of an explicit allow. For more information,
          see `Determining Whether a Request is Allowed or Denied`_ in
          Using IAM .
        + The principal who made the request.
        + The requested action.
        + The requested resource.
        + The values of condition keys in the context of the user's
          request.
        :type encoded_message: string
        :param encoded_message: The encoded message that was returned with the
            response.
        """
        params = {
            'EncodedMessage': encoded_message,
        }
        return self.get_object(
            'DecodeAuthorizationMessage',
            params,
            DecodeAuthorizationMessage,
            verb='POST'
        )
| [
"noreply@github.com"
] | cloudera.noreply@github.com |
f59837294f8f44c5babd41a112e886e751a61e97 | 31401549d7a342b3fcb0f276f20e18f130730c69 | /utils/loadweight.py | 05c9d7ff211cd6d9235020fb2c41f2ffb3f1af14 | [] | no_license | takeitea/Attention-Echino | e79f207010ad9c57b31d39ba8681d2cb0e59643f | e157c99e5784c8dc2470b0d3f3ffa61b7921ce09 | refs/heads/master | 2020-05-21T00:01:06.170506 | 2019-03-06T13:27:52 | 2019-03-06T13:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | """
load part of the pre-trained parameters
"""
import os
import torch
import torch.utils.model_zoo as model_zoo
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def loadcheckpoint(model, optimizer, args):
    """Resume training state from ``args.resume`` if it names a checkpoint file.

    Restores ``args.start_epoch``, the model weights and the optimizer state
    in place.  Does nothing when ``args.resume`` is falsy; prints a warning
    when the file does not exist.

    :param model: module whose ``load_state_dict`` receives the saved weights
    :param optimizer: optimizer whose state dict is restored
    :param args: namespace with a ``resume`` path attribute (mutated:
        ``start_epoch`` is set from the checkpoint)
    """
    if args.resume:
        # Bug fix: the original tested os.path.isfile(args) — the whole
        # namespace object — which raises TypeError instead of checking the path.
        if os.path.isfile(args.resume):
            print("load checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(" loaded checkpoint '{}'({}) best_prec: {}".format(args.resume, checkpoint['epoch'], best_prec1))
        else:
            print("no checkpoint found at {}".format(args.resume))
def loadpartweight(model):
    """Copy the convolutional-trunk weights of a pretrained VGG16-BN into
    ``model``, matching parameters by tensor size.

    Downloads the torchvision VGG16-BN checkpoint, counts its ``features.*``
    tensors, and for each one finds the next same-shaped tensor in
    ``model``'s state dict, skipping BatchNorm ``num_batches_tracked``
    counters (which have no pretrained counterpart).  Classifier weights are
    left untouched.  Returns ``model`` with the merged state dict loaded.
    """
    old_dict=model.state_dict()
    new_dict=model_zoo.load_url(model_urls['vgg16_bn'])
    count_feat=0    # number of pretrained tensors belonging to the conv trunk
    count_fetch=0   # how many tensors were actually transferred (diagnostic only)
    skip=0          # running offset past num_batches_tracked entries in old_dict
    for k,_ in new_dict.items():
        if 'features' in k:
            count_feat=count_feat+1
    for i in range(count_feat):
        for k in range(i,len(old_dict)):
            if 'num_batches_tracked' in list(old_dict.keys())[k+skip]:
                skip+=1
            # First size match wins; assumes both networks enumerate layers in
            # the same order — TODO confirm for targets other than VGG16-BN.
            if new_dict[list(new_dict.keys())[i]].size()==old_dict[list(old_dict.keys())[k+skip]].size():
                old_dict[list(old_dict.keys())[k+skip]]=list(new_dict.values())[i]
                count_fetch+=1
                break
    old_dict.update()  # NOTE(review): dict.update() with no arguments is a no-op
    model.load_state_dict(old_dict)
    return model
| [
"945193029@qq.com"
] | 945193029@qq.com |
9c5cbb2dda577439b8fc5e973133aae546a8d80d | 51c47f22c24a1b3cd2bff3f1343657f8993dc2a9 | /ModuloChat/prueba.py | 247bf8899e9cb2acb87d96215ff635f969e95089 | [] | no_license | JesusHernandezJimenez/SistemasDistribuidos | b3951a91bddc993b174444a081d4f1b0a404b515 | 76f63dd5f17e1533075b8c7436a83237995b1a2b | refs/heads/main | 2023-06-06T14:32:46.250629 | 2021-06-22T19:22:03 | 2021-06-22T19:22:03 | 374,816,396 | 0 | 0 | null | 2021-06-22T19:22:04 | 2021-06-07T22:31:18 | Python | UTF-8 | Python | false | false | 352 | py | from tkinter import *
# Minimal Tkinter demo: a paned window nested inside another paned window.
root = Tk()
root.title("Prueha")  # window title ("Prueha" is the original literal, kept as-is)
root.geometry("400x400")
# Outer paned container filling the whole window.
panel_1 = PanedWindow(bd=4, relief='flat', bg='red')
panel_1.pack(fill=BOTH, expand=1)
# Horizontal pane nested inside the outer one.
panel_2 = PanedWindow(panel_1, orient=HORIZONTAL, bd=4, relief='raised', bg='black')
panel_1.add(panel_2)
top = Label(panel_2, text='top panel')
panel_2.add(top)
root.mainloop()  # start the Tk event loop (blocks until the window closes)
"jesushernandezjimenez1998@gmail.com"
] | jesushernandezjimenez1998@gmail.com |
196c8b2bad841f8d3e41a79fadadc487737449e2 | 9ba901bf45f3d555395988f480f0eb55a595c83a | /轻松学习Python 69个内置函数/装饰器、描述器/staticmethod().py | 2f2c07d4ac1d6dad54d44c890059a344df83e32a | [] | no_license | gkliya/zhouhuajian-course | 803c3c5b390a9204e07a54924a2df8c1246b72be | 616867150efe46e2d3f8720b859ee38f286e4dbd | refs/heads/master | 2023-01-04T01:04:56.224479 | 2020-10-26T10:22:42 | 2020-10-26T10:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | # @staticmethod
# 将方法转换为静态方法。
#
# 静态方法不会接收隐式的第一个参数。要声明一个静态方法,请使用此语法
#
# class C:
# @staticmethod
# def f(arg1, arg2, ...): ...
# @staticmethod 这样的形式称为函数的 decorator -- 详情参阅 函数定义。
class C:
    @staticmethod
    def f(*args):
        # Static method: no implicit first argument; args is exactly what
        # the caller passes.
        print(f'{ args = }')
C.f()    # callable on the class ...
C().f()  # ... and on an instance; both print "args = ()"
# 静态方法的调用可以在类上进行 (例如 C.f()) 也可以在实例上进行 (例如 C().f())。
#
# Python中的静态方法与Java或C ++中的静态方法类似。另请参阅 classmethod() ,用于创建备用类构造函数的变体。
#
# 像所有装饰器一样,也可以像常规函数一样调用 staticmethod ,并对其结果执行某些操作。比如某些情况下需要从类主体引用函数并且您希望避免自动转换为实例方法。对于这些情况,请使用此语法:
#
# class C:
# builtin_open = staticmethod(open)
# 想了解更多有关静态方法的信息,请参阅 标准类型层级结构 。
# 像所有装饰器一样,也可以像常规函数一样调用 staticmethod ,并对其结果执行某些操作。比如某些情况下需要从类主体引用函数并且您希望避免自动转换为实例方法。
class C:
    # @staticmethod
    def f(*args):
        print(f'{ args = }')
    # Equivalent to decorating f above: staticmethod applied as a plain callable.
    f = staticmethod(f)
    # Wrapping a builtin lets it live on the class without becoming a bound method.
    builtin_abs = staticmethod(abs)
print('-'*20)
C.f()
print(f'{ C.builtin_abs(-5) = }')
"noreply@github.com"
] | gkliya.noreply@github.com |
3ad37185c077b3819db6c35bc0d6421a09e4a073 | c9ebca529d290fc2fa94b5ce5b61657e431260e3 | /interviewcake/product-of-other-numbers.py | afaedd1b08667961316b7027227457fc3a831e8f | [] | no_license | aaakashkumar/competitive_programming | 58677e3166636d042d00e1d2be589499c5f17c1a | 575fa25c4586fa41b3d45d95dca6eff9584c3a4a | refs/heads/main | 2023-04-29T03:46:02.911230 | 2021-05-15T10:12:50 | 2021-05-15T10:12:50 | 305,634,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | # https://www.interviewcake.com/question/python3/product-of-other-numbers?course=fc1§ion=greedy
# @author Akash Kumar
import unittest
def get_products_of_all_ints_except_at_index(int_list):
    """Return a list where entry i is the product of every element of
    *int_list* except ``int_list[i]``.

    Runs in O(n) time and O(n) extra space using prefix/suffix products,
    with no division (so zeros are handled naturally).

    :param int_list: list of integers with at least two elements
    :raises ValueError: if fewer than two elements are supplied (the
        original relied on an incidental IndexError for these inputs)
    """
    n = len(int_list)
    if n < 2:
        raise ValueError("need at least two numbers to exclude one")
    products = [1] * n
    # Forward pass: products[i] = product of everything left of i.
    running = 1
    for i in range(n):
        products[i] = running
        running *= int_list[i]
    # Backward pass: multiply in the product of everything right of i.
    running = 1
    for i in range(n - 1, -1, -1):
        products[i] *= running
        running *= int_list[i]
    return products
# Tests
class Test(unittest.TestCase):
    """Covers typical, zero-containing, all-negative and error-raising
    inputs for get_products_of_all_ints_except_at_index."""
    def test_small_list(self):
        actual = get_products_of_all_ints_except_at_index([1, 2, 3])
        expected = [6, 3, 2]
        self.assertEqual(actual, expected)
    def test_longer_list(self):
        actual = get_products_of_all_ints_except_at_index([8, 2, 4, 3, 1, 5])
        expected = [120, 480, 240, 320, 960, 192]
        self.assertEqual(actual, expected)
    def test_list_has_one_zero(self):
        # only the zero's own slot gets a non-zero product
        actual = get_products_of_all_ints_except_at_index([6, 2, 0, 3])
        expected = [0, 0, 36, 0]
        self.assertEqual(actual, expected)
    def test_list_has_two_zeros(self):
        # two zeros force every product to zero
        actual = get_products_of_all_ints_except_at_index([4, 0, 9, 1, 0])
        expected = [0, 0, 0, 0, 0]
        self.assertEqual(actual, expected)
    def test_one_negative_number(self):
        actual = get_products_of_all_ints_except_at_index([-3, 8, 4])
        expected = [32, -12, -24]
        self.assertEqual(actual, expected)
    def test_all_negative_numbers(self):
        actual = get_products_of_all_ints_except_at_index([-7, -1, -4, -2])
        expected = [-8, -56, -14, -28]
        self.assertEqual(actual, expected)
    def test_error_with_empty_list(self):
        # inputs shorter than two elements cannot exclude an index
        with self.assertRaises(Exception):
            get_products_of_all_ints_except_at_index([])
    def test_error_with_one_number(self):
        with self.assertRaises(Exception):
            get_products_of_all_ints_except_at_index([1])
unittest.main(verbosity=2) | [
"noreply@github.com"
] | aaakashkumar.noreply@github.com |
2fe653f3c427c1407ff776b05974647bae83e94b | e5504d8c4880993b82d5583a11c5cc4623e0eac2 | /Arrays/twoSum2.py | dacf7a07e9511280bc0929061c05928bfd38bb93 | [] | no_license | noorulameenkm/DataStructuresAlgorithms | e5f87f426fc444d18f830e48569d2a7a50f5d7e0 | 7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0 | refs/heads/master | 2023-06-08T19:29:42.507761 | 2023-05-28T16:20:19 | 2023-05-28T16:20:19 | 219,270,731 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | def pair_with_targetsum(arr, target_sum):
result = []
start, end = 0, len(arr) - 1
while start < end:
sum_ = arr[start] + arr[end]
# sum == target
if sum_ == target_sum:
result.append(start)
result.append(end)
break
# sum > target
elif sum_ > target_sum:
end -= 1
else:
start += 1
return result
def two_sum_pair(arr, target_sum):
    """Single-pass hash-map lookup (array need not be sorted).

    Returns ``[i, j]`` with ``arr[i] + arr[j] == target_sum`` using the
    earliest valid pair, or ``[-1, -1]`` when none exists.
    """
    seen = {}
    for idx, value in enumerate(arr):
        complement = target_sum - value
        if complement in seen:
            return [seen[complement], idx]
        seen[value] = idx
    return [-1, -1]
print(pair_with_targetsum([1, 2, 3, 4, 6], 6))
print(pair_with_targetsum([2, 5, 9, 11], 11))
print(two_sum_pair([1, 2, 3, 4, 6], 6))
print(two_sum_pair([2, 5, 9, 11], 11)) | [
"noorul.km@people10.com"
] | noorul.km@people10.com |
d62cbd757a73de4d12b4bf3a14c3779c20eb6bc0 | b26a9796c3fdcf4b10932b9043399e409558c90e | /mdio/config/desktop.py | 2c86465c9e3f622bc2215d7cfc338012929f7aba | [
"MIT"
] | permissive | neilLasrado/mdio | 92d034ef27f2e7831a41455e944a80f9a4a61e4d | daec26c101f96819db97db8bf9e7ba29f9738687 | refs/heads/master | 2020-03-25T06:40:58.039473 | 2018-09-16T19:04:46 | 2018-09-16T19:04:46 | 143,518,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the Desk module card configuration for the mdio app.

    Each entry maps a card label to its color, icon, list link and doctype.
    Bug fix: the "District" dict previously declared the "icon" key twice
    ("icon-globe" then "octicon octicon-globe"); Python keeps only the last
    value, so the dead first entry has been removed.
    """
    return {
        "District": {
            "color": "#9b59b6",
            "icon": "octicon octicon-globe",
            "link": "List/District",
            "doctype": "District",
            "type": "list"
        },
        "Project": {
            "color": "#c23c59",
            "icon": "octicon octicon-rocket",
            "label": _("Project"),
            "link": "List/District Project",
            "doctype": "District Project",
            "type": "list"
        }
    }
| [
"neil@digithinkit.com"
] | neil@digithinkit.com |
f3277b20bb721bebb914ea94042f68ca678765e6 | e331e4f0c321b98acde31faf3548194ae6d7d14b | /qa/rpc-tests/spentindex.py | d1d3c9ccfca34d77274592a835672489a20dd991 | [
"MIT"
] | permissive | MB8Coin/mb8coin-core | 487e3e16e43c008a6913d92e6edcf428c67a1f50 | 1fa5bd60019f6cff8038ace509ec4ca17c8233c7 | refs/heads/master | 2021-10-27T07:12:31.935401 | 2021-10-19T19:02:31 | 2021-10-19T19:02:31 | 131,882,320 | 5 | 3 | MIT | 2019-05-24T14:29:38 | 2018-05-02T17:10:21 | C++ | UTF-8 | Python | false | false | 6,342 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import MB8CoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(MB8CoinTestFramework):
    """Functional test for the -spentindex node feature.

    Spins up four regtest nodes (node 0 as the wallet, nodes 1-3 with
    -spentindex, node 3 additionally with -txindex), spends a coinbase
    output, and checks that getspentinfo, verbose getrawtransaction and
    getblockdeltas all report consistent spend/address/value information.
    """
    def setup_chain(self):
        # Fresh 4-node regtest data directory for every run.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)
    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-spentindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-spentindex"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-spentindex", "-txindex"]))
        # Star topology around node 0 so blocks/txs propagate everywhere.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        # Mine past coinbase maturity so node 0 has spendable outputs.
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        # Check that
        print("Testing spent index...")
        feeSatoshis = 10000
        # Hand-rolled P2PKH destination (privkey/address/hash160 triple).
        privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
        # Spend node 0's first unspent output to that script.
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        amount = int(unspent[0]["amount"] * 100000000 - feeSatoshis)
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        print("Testing getspentinfo method...")
        # Check that the spentinfo works standalone
        info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
        assert_equal(info["txid"], txid)
        assert_equal(info["index"], 0)
        assert_equal(info["height"], 106)
        print("Testing getrawtransaction method...")
        # Check that verbose raw transaction includes spent info
        txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
        # Check that verbose raw transaction includes input values
        txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
        assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeSatoshis)
        # Check that verbose raw transaction includes address values and input values
        # NOTE(review): privkey2/address2 duplicate privkey/address above, and
        # `address`/`privkey2` are otherwise unused.
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        # Second spend: consume output 0 of the first transaction.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
        amount = int(amount - feeSatoshis);
        tx2.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()  # NOTE(review): looks like this was meant to be tx2.rehash() — confirm
        self.nodes[0].importprivkey(privkey)
        signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
        # Check the mempool index
        self.sync_all()
        txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
        assert_equal(txVerbose3["vin"][0]["address"], address2)
        assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeSatoshis)
        assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        # Check the database index
        block_hash = self.nodes[0].generate(1)
        self.sync_all()
        txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
        assert_equal(txVerbose4["vin"][0]["address"], address2)
        assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeSatoshis)
        assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        # Check block deltas
        print("Testing getblockdeltas...")
        block = self.nodes[3].getblockdeltas(block_hash[0])
        # delta 0 is the coinbase (no tracked inputs/outputs), delta 1 is tx2
        assert_equal(len(block["deltas"]), 2)
        assert_equal(block["deltas"][0]["index"], 0)
        assert_equal(len(block["deltas"][0]["inputs"]), 0)
        assert_equal(len(block["deltas"][0]["outputs"]), 0)
        assert_equal(block["deltas"][1]["index"], 1)
        assert_equal(block["deltas"][1]["txid"], txid2)
        assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
        assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + feeSatoshis) * -1)
        assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
        assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
        assert_equal(block["deltas"][1]["outputs"][0]["satoshis"], amount)
        print("Passed\n")
if __name__ == '__main__':
SpentIndexTest().main()
| [
"vidaru@protonmail.com"
] | vidaru@protonmail.com |
3e2d5b25ba6cdc75221465e223ac9c6c0a7a9de6 | cf8b4b316b2b019ca6afef15937d1306b231cd73 | /feature_clean.py | ec4b30c0765ed4472ff89eb79270025992d7fc78 | [] | no_license | seridica/cs229project | e1372304aabf3f683147b33f208966bb7fb6c7cb | 311d82b20a6ae5c3b93810f5d7b6dc24d5145a74 | refs/heads/master | 2021-08-30T05:12:55.265581 | 2017-12-16T04:35:04 | 2017-12-16T04:35:04 | 114,216,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,393 | py | # -*- coding: utf-8 -*-
"""
SVM for Pan-Lung Data
November 30 2017
CS229 Project
File provides functions for cleaning up feature set.
1) Function removes features that have all the same value (typically 0)
2) PCA on features
3) Normalize features
@author: Calvin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
from random import *
import io
import sys
import pickle
from count_features import *
from generate_labels import *
from cleanup import *
import pdb
from sklearn.decomposition import PCA
import random
"""
Function removes features that have all the same value.
"""
def remove_useless_features(trainFeatureMatrix, testFeatureMatrix):
    """Drop features that are constant across all training samples.

    A constant column carries no information, so it is removed from both
    matrices (the same columns are dropped from the test matrix to keep
    train/test aligned).

    Bug fix: the original kept a column only when its sum was not divisible
    by the number of samples.  That test wrongly drops varying columns whose
    sum happens to be divisible (e.g. values [0, 2] over 2 samples) and
    misses constant non-integer columns.  We now test constancy directly.

    :param trainFeatureMatrix: (nData, nFeat) array used to decide which
        columns to keep
    :param testFeatureMatrix: (nTest, nFeat) array filtered identically
    :return: (newTrain, newTest) with the reduced feature dimension
    """
    keep = [i for i in range(trainFeatureMatrix.shape[1])
            if np.ptp(trainFeatureMatrix[:, i]) != 0]   # ptp == 0 <=> constant
    return trainFeatureMatrix[:, keep], testFeatureMatrix[:, keep]
"""
Function performs PCA on the features to identify the where all the variance
in the data lies.
"""
def pca_features(trainFeatureMatrix, testFeatureMatrix):
    """Project both matrices onto the top PCA components fit on the train set.

    Keeps enough components to explain (roughly) 90% of the training
    variance, prints diagnostics, and shows a heat map of the component
    loadings (matplotlib, blocking) before returning the reduced matrices.
    """
    pca = PCA()
    fullTrainPCA = pca.fit_transform(trainFeatureMatrix)
    fullTestPCA = pca.transform(testFeatureMatrix)
    expVar = pca.explained_variance_ratio_
    print(expVar)
    cumExp = 0
    thresh = 0.9
    # i ends as the index of the component that pushes cumulative variance
    # past the threshold.
    for i in range(len(expVar)):
        cumExp += expVar[i]
        if cumExp > thresh:
            break;
    #thresh = 0.1
    #for i in range(len(expVar)):
    #    if expVar[i] < thresh:
    #        break;
    print("Number of components: ")
    print( i )
    print("Number of original features: ")
    print(trainFeatureMatrix.shape[1])
    # NOTE(review): [:, :i] excludes component i itself, so the retained
    # variance may fall just below thresh — possible off-by-one, confirm
    # whether [:, :i+1] was intended.
    newTrainFeatureMatrix = fullTrainPCA[:,:i]
    newTestFeatureMatrix = fullTestPCA[:,:i]
    # Plotting for presentation
    components = (pca.components_)
    plt.figure(figsize=(12,12))
    plt.imshow(components, cmap='bwr', interpolation='none')
    plt.colorbar()
    frame1 = plt.gca()
    frame1.axes.get_xaxis().set_visible(False)
    frame1.axes.get_yaxis().set_visible(False)
    plt.show()
    return newTrainFeatureMatrix, newTestFeatureMatrix
"""
Function demeans and normalizes features
"""
def normalize_features(trainFeatureMatrix, testFeatureMatrix):
    """Standardize every feature to zero mean / unit variance.

    Statistics (mean, std) are computed on the training matrix only and
    applied to both matrices, so no test information leaks into training.
    Constant features (std == 0) are only demeaned.

    Bug fixes vs. the original:
    * the zero-std guard is now applied to the training matrix as well
      (previously a constant training column produced NaNs via 0/0),
    * results are written into float copies, so integer inputs are no
      longer silently truncated on assignment.

    :return: (newTrain, newTest) standardized copies; inputs are unchanged
    """
    newTrain = np.array(trainFeatureMatrix, dtype=float)
    newTest = np.array(testFeatureMatrix, dtype=float)
    for i in range(newTrain.shape[1]):
        col = trainFeatureMatrix[:, i]
        mean = np.mean(col)
        std = np.std(col)
        if std == 0:
            # constant feature: demean only, avoid division by zero
            newTrain[:, i] = col - mean
            newTest[:, i] = testFeatureMatrix[:, i] - mean
        else:
            newTrain[:, i] = (col - mean) / std
            newTest[:, i] = (testFeatureMatrix[:, i] - mean) / std
    return newTrain, newTest
"""
Function redistributes test and train data
"""
def redist_data( trainData, trainLabels, testData, testLabels ):
    """Re-split the pooled train+test data into a new 4:1 train/test split.

    For each class label, the samples from the original train and test sets
    are shuffled independently (np.random.permutation — not seeded here, so
    the split is nondeterministic) and 1/newRatio of each goes to the new
    test set.  Class proportions are therefore preserved (stratified split).

    Returns (newTrainData, newTrainLabels, newTestData, newTestLabels);
    label arrays come back as column vectors of shape (n, 1).
    """
    newRatio = 5   # 1 in 5 samples of every class goes to the new test set
    trainClassInds = {}
    testClassInds = {}
    nTrainDat, nFeatures = trainData.shape
    nTestDat = testData.shape[0]
    # Partition data in train and test
    # (group sample indices by integer class label)
    trainKeys = []
    for i in range(nTrainDat):
        currLabel = int( trainLabels[i] )
        if currLabel in trainKeys:
            trainClassInds[currLabel].append(i)
        else:
            trainClassInds[currLabel] = [i]
            trainKeys.append(currLabel)
    testKeys = []
    for i in range(nTestDat):
        currLabel = int( testLabels[i] )
        if currLabel in testKeys:
            testClassInds[currLabel].append(i)
        else:
            testClassInds[currLabel] = [i]
            testKeys.append(currLabel)
    # Make sure there are the same number of class labels
    assert( len(testKeys) == len(trainKeys) )
    # Redistribute
    newTrainData = np.array([[]])
    newTrainData.shape = (0, nFeatures)   # empty (0, nFeatures) accumulator
    newTrainLabels = []
    newTestData = np.array([[]])
    newTestData.shape = (0, nFeatures)
    newTestLabels = []
    for i in range(len(testKeys)):
        # For original training data
        inds = np.array(trainClassInds[testKeys[i]])
        p = np.random.permutation(len(inds))
        cutoff = int( np.floor( len(inds) / newRatio ) )
        # first `cutoff` shuffled samples -> new test set, the rest -> new train set
        newTrainData = np.concatenate( (newTrainData, trainData[inds[p[cutoff:]],:] ), axis=0 )
        newTestData = np.concatenate( (newTestData, trainData[inds[p[:cutoff]],:] ), axis=0 )
        newTrainLabels = np.concatenate( (newTrainLabels, trainLabels[inds[p[cutoff:]]].reshape(-1)) )
        newTestLabels = np.concatenate( (newTestLabels, trainLabels[inds[p[:cutoff]]].reshape(-1)) )
        # For original test data
        inds = np.array(testClassInds[testKeys[i]])
        p = np.random.permutation(len(inds))
        cutoff = int( np.floor( len(inds) / newRatio ) )
        newTrainData = np.concatenate( (newTrainData, testData[inds[p[cutoff:]],:] ), axis=0 )
        newTestData = np.concatenate( (newTestData, testData[inds[p[:cutoff]],:] ), axis=0 )
        newTrainLabels = np.concatenate( (newTrainLabels, testLabels[inds[p[cutoff:]]].reshape(-1)) )
        newTestLabels = np.concatenate( (newTestLabels, testLabels[inds[p[:cutoff]]].reshape(-1)) )
    print( newTrainData.shape )
    print( newTestData.shape )
    # reshape flat label vectors into (n, 1) columns to match caller expectations
    newTrainLabels = np.array([newTrainLabels]).T
    newTestLabels = np.array([newTestLabels]).T
    return newTrainData, newTrainLabels, newTestData, newTestLabels
"calvink@stanford.edu"
] | calvink@stanford.edu |
1e6d34cd5428851cdf59a0a8cbcabbedc98ffb63 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/04. List/052.py | b71c6b6b072e84b96609243c216c08fb45331666 | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #리스트에 원소 추가 _ append()
movie_rank = ["닥터 스트레인지", "스플릿", "럭키"]
movie_rank.append("배트맨")
print(movie_rank) | [
"skfls2618@naver.com"
] | skfls2618@naver.com |
34f4f7b2ce5b694d01a386ef1898e24a0a84e375 | a2a3bb37c3228b01681e019ad9781a01f0245195 | /blog/database.py | 5442b2f2d96af0a090ef3619a8e46773cc66481f | [] | no_license | prinudickson/fastapi_learning | 51e84423414d0cc8a6379464e81b6cc0ceebd3a7 | 284835b0cc94d564dc80a3b36e343a96d917ab49 | refs/heads/main | 2023-08-15T05:47:19.374600 | 2021-10-21T19:20:35 | 2021-10-21T19:20:35 | 398,273,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./blog.db"
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
    """Yield a SQLAlchemy session and guarantee it is closed afterwards.

    Intended as a FastAPI dependency: the session is handed to the request
    handler via ``yield`` and closed in ``finally`` even if the handler raises.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
"prinu.dickson@nl.pwc.com"
] | prinu.dickson@nl.pwc.com |
ae953f626dcd7a8cc3573ca343fdeac058daa21f | df0c4875b45e68c106dd1e2ba397f71a10794327 | /src/pifetcher/utilities/sys_utils.py | d389d2340abd6f3e65f41dbd8999e6aed152bff2 | [
"MIT"
] | permissive | gavinz0228/pifetcher | c28b407cf4965852af67ffe619a55ee90fa49a72 | c8419ae153eefed04e0e8b239cf1a9226fa91c29 | refs/heads/master | 2021-07-04T20:26:41.973408 | 2020-11-22T16:57:38 | 2020-11-22T16:57:38 | 203,682,327 | 1 | 0 | null | 2019-08-24T17:04:59 | 2019-08-22T00:06:58 | Python | UTF-8 | Python | false | false | 507 | py | from os import path, chmod
from sys import platform
import stat
class SysUtils:
    """Small filesystem helpers used across pifetcher."""

    @staticmethod
    def ensure_path(file_path):
        """Return *file_path* if it exists; raise an Exception otherwise."""
        if not path.exists(file_path):
            raise Exception(f'file path {file_path} does not exist.')
        else:
            return file_path

    @staticmethod
    def set_executable_permission(file_path):
        """On POSIX platforms, set the rwx bits for 'others' on *file_path*.

        NOTE(review): stat.S_IRWXO grants read/write/execute to *others*
        only; if the intent is to make the file executable by the owner,
        S_IRWXU (or adding S_IXUSR) is probably what was meant — confirm
        before changing the mode.
        """
        if platform in ['linux', 'linux2', 'darwin']:
            # Bug fix: the original issued this identical chmod call twice.
            chmod(file_path, stat.S_IRWXO)
| [
"gavinz0228@gmail.com"
] | gavinz0228@gmail.com |
7f150fe5a4b359dfe351f5c2d10a18def94f24ef | 38b5c22896452c7583073f0f719dcaaf98c0e7e2 | /client-GUI.py | 8b944cd71e788934413e2e9894eb3dc37af6b16b | [] | no_license | crcollver/group-messaging-app | 8be7565b62b45cec90cef197deffb5c68efbc5b6 | 89542c43ab6f566d457ed8cdec650e280b212193 | refs/heads/master | 2021-03-28T03:25:09.918567 | 2020-05-06T00:59:44 | 2020-05-06T00:59:44 | 247,832,270 | 0 | 0 | null | 2020-05-06T00:57:57 | 2020-03-16T22:55:50 | Python | UTF-8 | Python | false | false | 6,835 | py | #---------------------------------------------------
# Cameron Collver, Erik Shepard, & Rodolfo Rodriguez
# Anonymous Group Messaging - client-GUI.py
# Client Tkinter script for connecting to server.py
# Uses a default port of 12000 that is unchangeable for now
#
# SOURCES:
# https://www.youtube.com/watch?v=FKlmAkEb40s
# http://net-informations.com/python/net/thread.htm
# https://www.tutorialspoint.com/socket-programming-with-multi-threading-in-python
# https://github.com/effiongcharles/multi_user_chat_application_in_python
#---------------------------------------------------
from __future__ import unicode_literals
import socket
import threading
import tkinter
from tkinter import messagebox
from tkinter import simpledialog
host = socket.gethostbyname(socket.gethostname())
port = 12000
clientSocket = None
username = ""
window = tkinter.Tk()
window.title("Client")
def close_connection():
    """Notify the server we are leaving, then tear down the socket and window.

    Robustness fix: if the server has already dropped the connection,
    ``sendall`` raises OSError; previously that exception propagated and
    ``window.destroy()`` never ran, leaving the window's close button dead.
    """
    if clientSocket is not None:
        try:
            clientSocket.sendall("exit".encode("utf-8"))
        except OSError:
            pass  # server already gone; still close our end below
        clientSocket.close()
    window.destroy()
window.protocol("WM_DELETE_WINDOW", close_connection)
# Top frame to connect
topFrame = tkinter.Frame(window)
lblHost = tkinter.Label(topFrame, text = "Host IP:").pack(side=tkinter.LEFT)
entHost = tkinter.Entry(topFrame)
entHost.pack(side=tkinter.LEFT, padx=(0, 3))
entHost.insert(tkinter.END, host)
btnConnect = tkinter.Button(topFrame, text="Connect", command=lambda : connect())
btnConnect.pack(side=tkinter.LEFT)
topFrame.pack(side=tkinter.TOP, pady=(5, 10))
# Display frame to show all messages
displayFrame = tkinter.Frame(window)
scrollBar = tkinter.Scrollbar(displayFrame)
scrollBar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
tkDisplay = tkinter.Text(displayFrame, height=20, width=60)
tkDisplay.pack(side=tkinter.LEFT, fill=tkinter.Y, padx=(5, 0))
tkDisplay.tag_config("tag_your_message", foreground="blue")
tkDisplay.tag_config("tag_direct_message", foreground="green")
scrollBar.config(command=tkDisplay.yview)
tkDisplay.config(yscrollcommand=scrollBar.set, background="#F4F6F7", highlightbackground="grey", state="disabled")
displayFrame.pack(side=tkinter.TOP)
# bottom frame for sending a message
bottomFrame = tkinter.Frame(window)
tkMessage = tkinter.Text(bottomFrame, height=2, width=60)
tkMessage.pack(side=tkinter.LEFT, padx=(5, 13), pady=(5, 10))
tkMessage.config(highlightbackground="grey", state="disabled")
tkMessage.bind("<Return>", (lambda event: get_msg(tkMessage.get("1.0", tkinter.END))))
bottomFrame.pack(side=tkinter.BOTTOM)
def connect():
    """Open a TCP connection to the host in the entry box (fixed port 12000),
    prompt for a username, then start the background receive thread.

    Shows an error dialog if the server is unreachable.
    """
    global clientSocket, host, port
    if len(entHost.get()) > 1:
        host = entHost.get() # change host to user specified host
    try:
        clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        clientSocket.connect((host, port))
        select_username()
        if clientSocket is not None and username: # as long as the socket still exists and there is a valid username
            # Create the thread that will receive messages only after username is established
            # This thread will be killed once the socket is closed
            RECEIVE_THREAD = threading.Thread(target=receive_message)
            RECEIVE_THREAD.daemon = True
            RECEIVE_THREAD.start()
    except Exception as e:
        tkinter.messagebox.showerror(title="ERROR", message=f"Unable to connect to host: {host}:{port}. Server may be unavailable.")
def select_username():
    """Prompt for a username until the server reports it as available.

    Sends each candidate to the server and waits for its verdict; on
    "username_avail" the chat widgets are enabled and the host controls
    disabled.  Cancelling the dialog closes the socket so the user can
    reconnect later.
    """
    global username, clientSocket
    while True:
        try:
            username = simpledialog.askstring("Setup", "Enter a username...", parent=window)
            if username is not None:
                clientSocket.sendall(username.encode("utf-8"))
                server_res = clientSocket.recv(1024) #sending potential username to server
                # checking for byte message that server sent back should be fine for our app
                tkDisplay.config(state=tkinter.NORMAL)
                if (server_res.decode() == "username_avail"):
                    tkDisplay.insert(tkinter.END, f"Great this username is available!\n<@{username}> will be your username for this session.")
                    tkMessage.config(state=tkinter.NORMAL) # set the message box to an enabled state to capture username
                    entHost.config(state=tkinter.DISABLED) # Disable host input box once a connection has been made
                    btnConnect.config(state=tkinter.DISABLED) # Disable connect button once a connection has been made
                    break
                # username taken: display the hint and loop for another attempt
                tkDisplay.insert(tkinter.END, f"The username {username} seems to be taken, lets try again.\n")
                tkDisplay.config(state=tkinter.DISABLED)
            else:
                # dialog was cancelled: drop the socket so connect() can be retried
                clientSocket.close()
                clientSocket = None
                break # return to the main window and have user reconnect
        except ConnectionAbortedError:
            tkinter.messagebox.showerror(title="SERVER ERROR", message=f"Server on {host}:{port} has shutdown unexpectedly.")
            break
def receive_message():
    """Background-thread loop: read server messages and append them to the display.

    Runs until the socket yields EOF or errors.  Direct messages (those
    starting with "From <@") are rendered with the green tag.  On exit the
    socket is closed and the main window destroyed.
    """
    global clientSocket
    while True:
        try:
            server_msg = clientSocket.recv(1024)
            if not server_msg:
                break  # EOF: server closed the connection cleanly
            tkDisplay.config(state=tkinter.NORMAL)
            if server_msg.decode().startswith("From <@"): # if message is a direct message, color it green
                tkDisplay.insert(tkinter.END, f"\n{server_msg.decode()}", "tag_direct_message")
            else:
                tkDisplay.insert(tkinter.END, f"\n{server_msg.decode()}")
            tkDisplay.config(state=tkinter.DISABLED)
            tkDisplay.see(tkinter.END)
        # throws this error when server shuts down with clients still connected
        except ConnectionResetError:
            tkinter.messagebox.showerror(title="SERVER ERROR", message=f"Server on {host}:{port} has shutdown unexpectedly.")
            break
        # throws this error when user types exit, suppresses it
        except ConnectionAbortedError:
            break
    clientSocket.close()
    window.destroy()
def get_msg(msg):
    """ Get the user message from the message text box """
    # strip the trailing newline that the <Return> binding leaves in the Text widget
    msg = msg.replace('\n', '')
    # if this is a regular message, print it to the window
    # otherwise user is sending potential user name so we do not display
    tkDisplay.config(state=tkinter.NORMAL) # cannot insert into a window that is disabled
    tkDisplay.insert(tkinter.END, f"\n<@{username}>: {msg}", "tag_your_message")
    tkDisplay.config(state=tkinter.DISABLED) # disable window once insert it performed
    tkDisplay.see(tkinter.END) # scroll if not enough room in window
    tkMessage.delete('1.0', tkinter.END) # remove text in message window
    send_message(msg)
def send_message(msg):
    """ Sends the message to server on the main thread """
    clientSocket.sendall(msg.encode("utf-8"))
    # "exit" doubles as the quit command: tell the server and tear everything down
    if msg == "exit":
        close_connection()
window.mainloop()
| [
"crcollver@gmail.com"
] | crcollver@gmail.com |
f29c840f7b7123d115bd70933064581e49a94100 | 96faedaf3717abbb7f6ddf215b7152808b344301 | /build_model.py | 16e2f2d343909a2776c51d81254f7eb0c58b4a68 | [] | no_license | Anonymous-Alien/Greedy-Attack-and-Gumbel-Attack | 9c1b6e6d0ec334efbe11581c7a32f7b545932bfb | 021edaf7318850df4437c8de56c02321d2d4f552 | refs/heads/master | 2020-04-18T01:36:46.665448 | 2019-01-23T05:51:05 | 2019-01-23T05:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,423 | py | import tensorflow as tf
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Embedding, Conv1D, Input, GlobalMaxPooling1D, Multiply, Lambda, Permute,MaxPooling1D, Flatten, LSTM, Bidirectional, GRU, GlobalAveragePooling1D
from keras.datasets import imdb
from keras.objectives import binary_crossentropy
from keras.metrics import binary_accuracy as accuracy
from keras.optimizers import RMSprop
from keras import backend as K
from keras.preprocessing import sequence
from keras.callbacks import ModelCheckpoint
import os, itertools, math
def construct_original_network(emb, data, trainable=True):
    """Attach the task-specific classifier head to an embedding tensor.

    Args:
        emb: Keras tensor of shape (batch, maxlen, embedding_dims).
        data: dataset key selecting the architecture ('imdbcnn' or
            'yahoolstm'); any other value falls through and returns None.
        trainable: whether the head's weighted layers are trainable.

    Returns:
        A softmax prediction tensor (2 classes for imdbcnn, 10 for
        yahoolstm).

    The layer ``name=`` arguments must stay stable: weights are restored
    with ``load_weights(..., by_name=True)``.
    """
    if data == 'imdbcnn':
        n_filters = 250
        kernel_width = 3
        dense_units = 250
        x = Dropout(0.2, name='dropout_1')(emb)
        # Word-group convolution over windows of kernel_width tokens.
        x = Conv1D(n_filters,
                   kernel_width,
                   padding='valid',
                   activation='relu',
                   strides=1,
                   name='conv1d_1', trainable=trainable)(x)
        # Collapse the time axis to the strongest filter activations.
        x = GlobalMaxPooling1D(name='global_max_pooling1d_1')(x)
        x = Dense(dense_units, name='dense_1', trainable=trainable)(x)
        x = Dropout(0.2, name='dropout_2')(x)
        x = Activation('relu', name='activation_2')(x)
        x = Dense(2, name='dense_2', trainable=trainable)(x)
        return Activation('softmax', name='activation_3')(x)
    elif data == 'yahoolstm':
        recurrent = Bidirectional(LSTM(256, trainable=trainable), trainable=trainable)(emb)
        dropped = Dropout(0.5)(recurrent)
        return Dense(10, activation='softmax', trainable=trainable)(dropped)
class TextModel():
    """Wrapper around three pre-trained text classifiers with attribution helpers.

    ``data`` selects the architecture:
      * 'imdbcnn'   -- word-level CNN, 2 classes (IMDB sentiment)
      * 'agccnn'    -- character-level CNN ("Crepe"), 4 classes (AG news)
      * 'yahoolstm' -- word-level BiLSTM, 10 classes (Yahoo answers)

    Besides ``predict``, the instance exposes gradient-based attribution
    methods: raw gradients, Taylor approximation (gradient * input) and
    integrated gradients.

    Fixes over the original version:
      * ``train`` (yahoolstm branch) printed with an undefined name
        ``epoch`` -- the loop variable is ``e`` (NameError at runtime).
      * In the yahoolstm ``__init__`` path, ``self.sess/grads/approxs``
        were assigned from names that only exist when ``train`` is False;
        they are now set inside that branch so ``train=True`` no longer
        raises NameError.
    """

    def __init__(self, data, train=False):
        """Build the graph for *data* and load saved weights, or train anew.

        Args:
            data: one of 'imdbcnn', 'agccnn', 'yahoolstm'.
            train: when True, build a trainable graph and run training
                instead of loading stored weights ('agccnn' only supports
                inference here).
        """
        self.data = data
        print('Loading TextModel...')
        if data == 'imdbcnn':
            self.embedding_dims = 50
            self.maxlen = 400
            self.num_classes = 2
            self.num_words = 20002
            self.type = 'word'
            if not train:
                K.set_learning_phase(0)
            X_ph = Input(shape=(self.maxlen,), dtype='int32')
            emb_layer = Embedding(
                self.num_words,
                self.embedding_dims,
                input_length=self.maxlen,
                name='embedding_1'
            )
            emb_out = emb_layer(X_ph)
            if train:
                preds = construct_original_network(emb_out, data)
            else:
                # Inference graph is split at the embedding so gradients can
                # be taken with respect to the embedded input.
                emb_ph = Input(
                    shape=(self.maxlen, self.embedding_dims),
                    dtype='float32'
                )
                preds = construct_original_network(emb_ph, data)
            if not train:
                model1 = Model(X_ph, emb_out)
                model2 = Model(emb_ph, preds)
                pred_out = model2(model1(X_ph))
                pred_model = Model(X_ph, pred_out)
                pred_model.compile(
                    loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy']
                )
                self.pred_model = pred_model
                # Per-class gradients of the prediction w.r.t. the embedding.
                grads = []
                for c in range(self.num_classes):
                    grads.append(tf.gradients(preds[:, c], emb_ph))
                grads = tf.concat(grads, axis=0)
                # [num_classes, batchsize, maxlen, embedding_dims]
                approxs = grads * tf.expand_dims(emb_ph, 0)
                # [num_classes, batchsize, maxlen, embedding_dims]
                self.sess = K.get_session()
                self.grads = grads
                self.approxs = approxs
                self.input_ph = X_ph
                self.emb_out = emb_out
                self.emb_ph = emb_ph
                weights_name = 'original.h5'
                # by_name=True: the split models only pick up their own layers.
                model1.load_weights('{}/models/{}'.format(data, weights_name),
                                    by_name=True)
                model2.load_weights('{}/models/{}'.format(data, weights_name),
                                    by_name=True)
                self.pred_model.load_weights('{}/models/{}'.format(data, weights_name),
                                             by_name=True)
                print('Model constructed.', weights_name)
                # Zero the padding-token embedding so padded positions carry
                # no signal in gradient-based scores.
                emb_weights = emb_layer.get_weights()
                emb_weights[0][0] = np.zeros(50)
                self.emb_weights = emb_weights[0]
                emb_layer.set_weights(emb_weights)
            else:
                pred_model = Model(X_ph, preds)
                pred_model.compile(
                    loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])
                self.pred_model = pred_model
                from load_data import Data
                dataset = Data(self.data, train=True)
                self.train(dataset)
                print('Training is done.')
        elif data == 'agccnn':
            from agccnn.data_helpers import create_vocab_set, construct_batch_generator, find_words_positions
            filter_kernels = [7, 7, 3, 3, 3, 3]
            dense_outputs = 1024
            self.charlen = 1014
            self.maxlen = 1014
            nb_filter = 256
            self.num_classes = 4
            self.vocab, self.reverse_vocab, self.vocab_size, self.vocab_check = create_vocab_set()
            self.embedding_dims = self.vocab_size
            self.type = 'char'
            K.set_learning_phase(1 if train else 0)
            # Input is a one-hot character matrix (charlen x vocab_size).
            inputs = Input(shape=(self.charlen, self.vocab_size), name='input', dtype='float32')
            conv = Conv1D(filters=nb_filter, kernel_size=filter_kernels[0], padding='valid', activation='relu', input_shape=(self.charlen, self.vocab_size))(inputs)
            conv = MaxPooling1D(pool_size=3)(conv)
            conv1 = Conv1D(filters=nb_filter, kernel_size=filter_kernels[1], padding='valid', activation='relu')(conv)
            conv1 = MaxPooling1D(pool_size=3)(conv1)
            conv2 = Conv1D(filters=nb_filter, kernel_size=filter_kernels[2], padding='valid', activation='relu')(conv1)
            conv3 = Conv1D(filters=nb_filter, kernel_size=filter_kernels[3], padding='valid', activation='relu')(conv2)
            conv4 = Conv1D(filters=nb_filter, kernel_size=filter_kernels[4], padding='valid', activation='relu')(conv3)
            conv5 = Conv1D(filters=nb_filter, kernel_size=filter_kernels[5], padding='valid', activation='relu')(conv4)
            conv5 = MaxPooling1D(pool_size=3)(conv5)
            conv5 = Flatten()(conv5)
            # Two dense layers with dropout of .5
            z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(conv5))
            z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(z))
            # Output dense layer with softmax activation
            pred = Dense(self.num_classes, activation='softmax', name='output')(z)
            grads = []
            for c in range(self.num_classes):
                grads.append(tf.gradients(pred[:, c], inputs))
            grads = tf.concat(grads, axis=0)
            # [num_classes, batchsize, self.charlen, embedding_dims]
            approxs = grads * tf.expand_dims(inputs, 0)
            # [num_classes, batchsize, self.charlen, embedding_dims]
            model = Model(inputs, pred)
            model.compile(
                loss='categorical_crossentropy',
                optimizer="sgd",
                metrics=['accuracy']
            )
            model.load_weights(
                'agccnn/params/crepe_model_weights-15.h5',
                by_name=True
            )
            self.sess = K.get_session()
            self.grads = grads
            self.approxs = approxs
            self.input_ph = inputs
            self.model = model
            from nltk.tokenize.moses import MosesDetokenizer
            from nltk import word_tokenize
            detokenizer = MosesDetokenizer()
            self.tokenize = word_tokenize
            self.detokenize = detokenizer.detokenize
            self.construct_batch_generator = construct_batch_generator
            # Map a sentence to (start, end) character spans per word/char in
            # the one-hot input matrix.
            self.find_words_positions = lambda sent: find_words_positions(
                sent,
                word_tokenize(sent),
                self.charlen,
                self.vocab,
                self.vocab_size,
                self.vocab_check
            )
            self.find_chars_positions = lambda sent: find_words_positions(
                sent,
                list(sent.lower().replace(' ', '')),
                self.charlen,
                self.vocab,
                self.vocab_size,
                self.vocab_check,
                True
            )
        elif data == 'yahoolstm':
            self.maxlen = 400
            self.num_classes = 10
            self.num_words = 20000
            self.batch_size = 40
            self.embedding_dims = 300
            if not train:
                K.set_learning_phase(0)
            X_ph = Input(shape=(self.maxlen,), dtype='int32')
            emb_layer = Embedding(
                input_dim=self.num_words + 1,
                output_dim=self.embedding_dims,
                input_length=self.maxlen,
                name="embedding",
                trainable=True)
            emb = emb_layer(X_ph)
            if train:
                preds = construct_original_network(emb, data)
            else:
                emb_ph = Input(shape=(self.maxlen, self.embedding_dims), dtype='float32')
                preds = construct_original_network(emb_ph, data)
            if train:
                model = Model(X_ph, preds)
                model.compile(
                    loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy']
                )
            else:
                model1 = Model(X_ph, emb)
                model2 = Model(emb_ph, preds)
                pred_out = model2(model1(X_ph))
                model = Model(X_ph, pred_out)
                model.compile(
                    loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy']
                )
                # Construct gradients.
                grads = []
                for c in range(self.num_classes):
                    grads.append(tf.gradients(preds[:, c], emb_ph))
                grads = tf.concat(grads, axis=0)
                # [num_classes, batchsize, maxlen, embedding_dims]
                approxs = grads * tf.expand_dims(emb_ph, 0)
                # [num_classes, batchsize, maxlen, embedding_dims]
                prev_epoch = 0; prev_itr = 7
                model1.load_weights(
                    'yahoolstm/models/original-{}-{}.hdf5'.format(prev_epoch, prev_itr),
                    by_name=True
                )
                model2.load_weights(
                    'yahoolstm/models/original-{}-{}.hdf5'.format(prev_epoch, prev_itr),
                    by_name=True
                )
                emb_weights = emb_layer.get_weights()
                # NOTE: unlike the imdbcnn branch this stores the full weight
                # list, not emb_weights[0]; callers appear to depend on it.
                self.emb_weights = emb_weights
                self.emb_out = emb
                self.emb_ph = emb_ph
                # Fix: these names only exist in the inference branch, so the
                # assignments must live here (previously NameError on train).
                self.sess = K.get_session()
                self.grads = grads
                self.approxs = approxs
            self.input_ph = X_ph
            self.pred_model = model
            self.type = 'word'
            if train:
                from load_data import Data
                print('Loading data...')
                dataset = Data(data, train=True)
                print('Training...')
                self.train(dataset)

    def train(self, dataset):
        """Train the prediction model on *dataset* and checkpoint weights.

        imdbcnn uses Keras ``fit`` with a best-val-acc checkpoint; yahoolstm
        runs a manual mini-batch loop with periodic saves and validation.
        """
        if self.data == 'imdbcnn':
            epochs = 5
            batch_size = 40
            filepath = '{}/models/original.h5'.format(self.data)
            checkpoint = ModelCheckpoint(
                filepath,
                monitor='val_acc',
                verbose=1,
                save_best_only=True,
                mode='max')
            callbacks_list = [checkpoint]
            self.pred_model.fit(dataset.x_train, dataset.y_train, validation_data=(dataset.x_val, dataset.y_val), callbacks=callbacks_list, epochs=epochs, batch_size=batch_size)
        elif self.data == 'yahoolstm':
            model = self.pred_model
            if 'models' not in os.listdir(self.data):
                os.mkdir('{}/models'.format(self.data))
            num_iters = int(math.ceil(len(dataset.x_train) * 1.0 / self.batch_size))
            num_val_iters = int(math.ceil(len(dataset.x_val) * 1.0 / self.batch_size))
            save_freq = 20
            save_interval = int(num_iters // save_freq)
            val_interval = 20
            np.random.seed(0)
            epochs = 3
            for e in range(epochs):
                print("epoch %d" % e)
                # random permutes the data.
                idx = np.random.permutation(len(dataset.x_train))
                x_train, y_train = dataset.x_train[idx], dataset.y_train[idx]
                val_batch_itr = 0
                for i in range(0, num_iters):
                    batch_x = x_train[i * self.batch_size: (i + 1) * self.batch_size]
                    batch_y = y_train[i * self.batch_size: (i + 1) * self.batch_size]
                    curr_loss, curr_acc = model.train_on_batch(batch_x, batch_y)
                    # Running averages of training loss/accuracy over the epoch.
                    if i == 0:
                        training_loss, training_acc = curr_loss, curr_acc
                    else:
                        training_loss = (i * training_loss + 1 * curr_loss) / float(i + 1)
                        training_acc = (i * training_acc + 1 * curr_acc) / float(i + 1)
                    if (i + 1) % save_interval == 0:
                        current_freq = (i + 1) // save_interval
                        model.save_weights('{}/models/original-{}-{}.hdf5'.format(self.data, e, current_freq))
                        print('Model saved at Epoch {}, Step {}'.format(e, i))
                    if (i + 1) % val_interval == 0:
                        # Validate on one rotating validation batch.
                        current_itr = val_batch_itr % num_val_iters
                        batch_x = dataset.x_val[current_itr * self.batch_size:(current_itr + 1) * self.batch_size]
                        batch_y = dataset.y_val[current_itr * self.batch_size:(current_itr + 1) * self.batch_size]
                        current_loss, current_acc = model.test_on_batch(batch_x, batch_y)
                        if val_batch_itr == 0:
                            val_loss, val_acc = current_loss, current_acc
                        else:
                            val_loss = (val_batch_itr * val_loss + current_loss) / float(val_batch_itr + 1)
                            val_acc = (val_batch_itr * val_acc + current_acc) / float(val_batch_itr + 1)
                        val_batch_itr += 1
                        print('Epoch: {} Step: {}; train_loss {}; train_acc {}; val_loss {}; val_acc {}'.format(e, i, training_loss, training_acc, val_loss, val_acc))
                model.save_weights('{}/models/original-{}.hdf5'.format(self.data, e))
                entire_val_loss, entire_val_acc = model.evaluate(dataset.x_val, dataset.y_val, verbose=0)
                # Fix: originally referenced undefined `epoch`; loop variable is `e`.
                # NOTE(review): val_loss/val_acc are unbound if val_interval
                # never triggered (fewer than 20 iterations) -- confirm data
                # sizes guarantee at least one validation step.
                print('Epoch: {}; loss {}; acc {}'.format(e, val_loss, val_acc))
                print('Epoch: {}; entire loss {}; acc {}'.format(e, entire_val_loss, entire_val_acc))
                print('Saving model at the end of the epoch...')

    def train_augment(self, dataset, new_data, method, changing_way):
        """Retrain on the original training set augmented with *new_data*.

        Args:
            dataset: object with x_train/y_train/x_val/y_val arrays.
            new_data: (x, y) pair of augmented examples to append.
            method, changing_way: tags embedded in the checkpoint filename.
        """
        print('Training model on augmented data...')
        if self.data == 'imdbcnn':
            epochs = 8
            batch_size = 40
            filepath = '{}/models/augment_{}_{}.h5'.format(self.data, method, changing_way)
            checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
                                         verbose=1, save_best_only=True, mode='max')
            callbacks_list = [checkpoint]
            x = np.vstack([dataset.x_train, new_data[0]])
            y = np.vstack([dataset.y_train, new_data[1]])
            # Shuffle so augmented rows are interleaved with the originals.
            idx = np.random.permutation(len(x))
            x = np.array(x)[idx]; y = np.array(y)[idx]
            self.pred_model.fit(
                x,
                y,
                validation_data=(dataset.x_val, dataset.y_val),
                callbacks=callbacks_list,
                epochs=epochs,
                batch_size=batch_size
            )

    def predict(self, x, verbose=0):
        """Return class probabilities for a batch of inputs.

        For word models *x* is a (padded) array or list of index sequences;
        for agccnn *x* may be raw text strings or pre-encoded one-hot batches.
        """
        if self.data in ['imdbcnn', 'yahoolstm']:
            if isinstance(x, list) or x.shape[1] < self.maxlen:
                x = np.array(sequence.pad_sequences(x, maxlen=self.maxlen))
            return self.pred_model.predict(x, batch_size=2500,
                                           verbose=verbose)
        elif self.data == 'agccnn':
            # x should be a list of texts.
            # NOTE(review): `basestring` exists only on Python 2; on Python 3
            # this raises NameError for string input -- confirm target runtime.
            if isinstance(x[0], basestring):
                generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize=128)
                predictions = []
                for batch_data in generator:
                    predictions.append(self.model.predict(batch_data, verbose=verbose))
                return np.concatenate(predictions, axis=0)
            return self.model.predict(x, verbose=verbose)

    def compute_gradients(self, x):
        """Return gradients of the predicted class w.r.t. the (embedded) input.

        Output shape: [num_data, maxlen/charlen, embedding_dims].
        """
        if self.data in ['imdbcnn', 'yahoolstm']:
            batchsize = 400
            num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
            grads_val = []
            for i in range(num_iters):
                batch_data = x[i * batchsize: (i + 1) * batchsize]
                batch_emb = self.sess.run(self.emb_out,
                                          feed_dict={self.input_ph: batch_data})
                batch_grads = self.sess.run(self.grads, feed_dict={self.emb_ph: batch_emb})  # [num_classes, batchsize, maxlen, embedding_dims]
                grads_val.append(batch_grads)
            grads_val = np.concatenate(grads_val, axis=1)
            # [num_classes, num_data, maxlen, embedding_dims]
            pred_val = self.predict(x)
            # Select the gradient slice for each example's predicted class.
            gradients = grads_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            return gradients
        elif self.data == 'agccnn':
            generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize=128)
            grads_val = []
            for s, batch_data in enumerate(generator):
                grads_val.append(self.sess.run(self.grads, feed_dict={self.input_ph: batch_data}))
            # [num_classes, num_data, charlen, embedding_dims]
            grads_val = np.concatenate(grads_val, axis=1)
            pred_val = self.predict(x)
            # [num_data, charlen, embedding_dims]
            class_specific_grads = grads_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            return class_specific_grads

    def compute_taylor_approximation(self, x):
        """First-order Taylor scores (gradient * input) per position.

        Word models return [num_data, maxlen]; agccnn returns a list of
        per-sentence arrays of per-word scores.
        """
        if self.data in ['imdbcnn', 'yahoolstm']:
            batchsize = 128
            num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
            approxs_val = []
            for i in range(num_iters):
                batch_data = x[i * batchsize: (i + 1) * batchsize]
                batch_emb = self.sess.run(self.emb_out,
                                          feed_dict={self.input_ph: batch_data})
                batch_approxs = self.sess.run(self.approxs, feed_dict={self.emb_ph: batch_emb})  # [num_classes, batchsize, maxlen, embedding_dims]
                approxs_val.append(batch_approxs)
            approxs_val = np.concatenate(approxs_val, axis=1)
            # [num_classes, num_data, length, embedding_dims]
            pred_val = self.predict(x)
            # [num_data, length, embedding_dims]
            class_specific_scores = approxs_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            # Sum over the embedding axis -> one score per token position.
            return np.sum(class_specific_scores, axis=-1)
        elif self.data == 'agccnn':
            generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize=128)
            approxs_val = []
            indices = []
            for s, batch_data in enumerate(generator):
                approxs_val.append(self.sess.run(self.approxs, feed_dict={self.input_ph: batch_data}))
                for sent in x[128 * s: 128 * (s + 1)]:
                    indices.append(self.find_words_positions(sent))
            # [num_classes, num_data, charlen, embedding_dims]
            approxs_val = np.concatenate(approxs_val, axis=1)
            pred_val = self.predict(x)
            # [num_data, charlen, embedding_dims]
            class_specific_approxs = approxs_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            approxs_score = []
            # Aggregate character-level scores into word-level scores using
            # each word's (start, end) character span.
            for i, approxs_val in enumerate(class_specific_approxs):
                approx_score = [np.sum(np.sum(approxs_val[start_idx:end_idx], axis=0), axis=0) for start_idx, end_idx in indices[i]]  # [wordlen]
                approxs_score.append(np.array(approx_score))
            return approxs_score

    def compute_integrated_gradients(self, x):
        """Integrated-gradients scores along the straight path from zero input.

        Averages gradient*input over `steps` interpolation points between the
        all-zero baseline and the actual (embedded) input.
        """
        if self.data in ['imdbcnn', 'yahoolstm']:
            batchsize = 20  # 128 if self.data == 'imdbcnn' else 40
            steps = 10
            approxs_val = []
            emb_vals = []
            num_iters1 = int(math.ceil(len(x) * 1.0 / batchsize))
            for i in range(num_iters1):
                batch_data = x[i * batchsize: (i + 1) * batchsize]
                batch_emb = self.sess.run(self.emb_out,
                                          feed_dict={self.input_ph: batch_data})
                # Scale the embedding toward zero in `steps` increments.
                step_batch_emb = [batch_emb * float(s) / steps for s in range(1, steps + 1)]
                # [steps, batchsize, maxlen, embedding_dimension]
                emb_vals.append(step_batch_emb)
            emb_vals = np.concatenate(emb_vals, axis=1)
            # [steps, num_data, maxlen, embedding_dimension]
            emb_vals = np.reshape(emb_vals, [-1, self.maxlen, self.embedding_dims])
            num_iters = int(math.ceil(len(emb_vals) * 1.0 / batchsize))
            for i in range(num_iters):
                print(i)  # progress trace; this loop is slow
                batch_emb = emb_vals[i * batchsize: (i + 1) * batchsize]
                batch_approxs = self.sess.run(self.approxs, feed_dict={self.emb_ph: batch_emb})
                # [num_classes, batchsize, maxlen, embedding_dims]
                approxs_val.append(batch_approxs)
            approxs_val = np.concatenate(approxs_val, axis=1)
            # [num_classes, steps * num_data, length, embedding_dims]
            approxs_val = np.reshape(approxs_val,
                                     [self.num_classes, steps, len(x), self.maxlen, self.embedding_dims])
            # Average over the interpolation steps.
            approxs_val = np.mean(approxs_val, axis=1)
            pred_val = self.predict(x)
            # [num_data, length, embedding_dims]
            class_specific_scores = approxs_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            # [num_data, length]
            return np.sum(class_specific_scores, axis=-1)
        elif self.data == 'agccnn':
            batchsize = 128
            generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize=batchsize)
            steps = 100
            approxs_val = []
            indices = []
            for s, batch_data in enumerate(generator):
                emb_vals = [batch_data * float(step) / steps for step in range(1, steps + 1)]
                batch_approxs = np.mean([self.sess.run(self.approxs, feed_dict={self.input_ph: emb_val_s}) for emb_val_s in emb_vals], axis=0)
                # [num_classes, batchsize, maxlen, embedding_dims]
                approxs_val.append(batch_approxs)
                for sent in x[batchsize * s: batchsize * (s + 1)]:
                    indices.append(self.find_words_positions(sent))
            # [num_classes, num_data, charlen, embedding_dims]
            approxs_val = np.concatenate(approxs_val, axis=1)
            pred_val = self.predict(x)
            # [num_data, charlen, embedding_dims]
            class_specific_approxs = approxs_val[np.argmax(pred_val, axis=1), range(len(pred_val))]
            approxs_score = []
            for i, approxs_val in enumerate(class_specific_approxs):
                approx_score = [np.sum(np.sum(approxs_val[start_idx:end_idx], axis=0), axis=0) for start_idx, end_idx in indices[i]]  # [wordlen]
                approxs_score.append(np.array(approx_score))
            return approxs_score
| [
"noreply@github.com"
] | Anonymous-Alien.noreply@github.com |
8c8b11b281a3a8c90dc800644e35e30ea14afc61 | 3a7ef35a51aabaf762dca13f2197548380121ad8 | /beer-song/beer_song_test.py | 761f09f04a2044f0a3c224faaa93715505645455 | [
"Unlicense"
] | permissive | ikostan/Exercism_Python_Track | ff0be0386cf3fb1b62db54f72b8db15161928af7 | a6d52ad74e36db1d2bf82ed15362c1e4341d741d | refs/heads/master | 2023-08-09T16:56:17.615800 | 2020-10-16T00:46:55 | 2020-10-16T00:46:55 | 191,260,562 | 0 | 0 | Unlicense | 2023-09-04T01:17:29 | 2019-06-10T23:41:50 | Python | UTF-8 | Python | false | false | 19,079 | py | import unittest
from beer_song import recite
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class BeerSongTest(unittest.TestCase):
    """Exercises ``recite`` against the canonical "99 Bottles of Beer" lyrics.

    Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0.
    Expected verses are generated by helpers instead of being written out
    literally; the generated strings are identical to the canonical ones.
    """

    @staticmethod
    def _verse(bottles):
        """Return the two lines of the verse sung when *bottles* remain."""
        if bottles == 0:
            return [
                "No more bottles of beer on the wall, no more bottles of beer.",
                "Go to the store and buy some more, 99 bottles of beer on the wall.",
            ]
        if bottles == 1:
            return [
                "1 bottle of beer on the wall, 1 bottle of beer.",
                "Take it down and pass it around, no more bottles of beer on the wall.",
            ]
        if bottles == 2:
            return [
                "2 bottles of beer on the wall, 2 bottles of beer.",
                "Take one down and pass it around, 1 bottle of beer on the wall.",
            ]
        return [
            "{0} bottles of beer on the wall, {0} bottles of beer.".format(bottles),
            "Take one down and pass it around, {0} bottles of beer on the wall.".format(bottles - 1),
        ]

    @classmethod
    def _song(cls, start, take=1):
        """Return *take* consecutive verses starting at *start*, separated by ''."""
        lines = []
        for offset in range(take):
            if offset:
                lines.append("")
            lines.extend(cls._verse(start - offset))
        return lines

    def test_first_generic_verse(self):
        self.assertEqual(recite(start=99), self._song(99))

    def test_last_generic_verse(self):
        self.assertEqual(recite(start=3), self._song(3))

    def test_verse_with_2_bottles(self):
        self.assertEqual(recite(start=2), self._song(2))

    def test_verse_with_1_bottle(self):
        self.assertEqual(recite(start=1), self._song(1))

    def test_verse_with_0_bottles(self):
        self.assertEqual(recite(start=0), self._song(0))

    def test_first_two_verses(self):
        self.assertEqual(recite(start=99, take=2), self._song(99, take=2))

    def test_last_three_verses(self):
        self.assertEqual(recite(start=2, take=3), self._song(2, take=3))

    def test_all_verses(self):
        self.assertEqual(recite(start=99, take=100), self._song(99, take=100))
| [
"igorkostan@gmail.com"
] | igorkostan@gmail.com |
8d6cf1588bdda74af37dd6269bec5931e71b5745 | cbeb1d7060dbc281c11c6b029a1d156e1ec7ebfd | /mountains/massif_amorican/cols.py | 962ec60e6f13af3674ab0bd98560b70c7b4298da | [] | no_license | paulkirkwood/py.parcoursdb | 28ceceaf4f44e03d9911892a9a916447cd7c7477 | df2745064e4c66dc0c2d522fc0381bf13a8e7859 | refs/heads/master | 2020-09-14T04:58:10.940799 | 2020-02-24T21:04:31 | 2020-02-24T21:04:31 | 223,024,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import country
from col import Col
from ..util import french_col
def mur_de_bretagne():
    """Build the Mûr-de-Bretagne col via the shared French-col factory.

    NOTE(review): the positional arguments presumably mean
    (name, height, category, length) — confirm against french_col's signature.
    """
    name = "Mûr-de-Bretagne"
    return french_col(name, 293, 2, 6.9)
| [
"paul@paulandsue.plus.com"
] | paul@paulandsue.plus.com |
52b202fd47aace9a4d7ef4788898606a49878af1 | 23f78b8e4547443ba3285440dd40ca1684109b9e | /model.py | 084e26aaebba616ae3aeccf6deeb77f93ac212b1 | [] | no_license | suneelc12/Rasa_NLU | 3f12b0bff32978b76cc492e7358c5907b1610778 | 9001bfc926a0826ad2615a8395597d7cbc2448d9 | refs/heads/master | 2020-04-11T03:41:41.668784 | 2019-03-07T21:06:19 | 2019-03-07T21:06:19 | 161,486,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import os
import datetime
import sys
#import shutil
#modified_time=datetime.datetime.fromtimestamp(os.path.getmtime('C:/Users/ramad/Downloads/chatbot-node-rasa-master/HRbot/HR_Bot.json'))
#print(modified_time)
# Folder that holds one timestamped subdirectory per trained model.
directory = 'C:/Users/ramad/Downloads/chatbot-node-rasa-master/models/default/'

def all_subdirs_of(b=directory):
    """Return the full paths of the immediate subdirectories of *b*.

    Plain files directly under *b* are ignored; only directories are
    returned. Defaults to the module-level ``directory``.
    """
    return [os.path.join(b, d) for d in os.listdir(b)
            if os.path.isdir(os.path.join(b, d))]
# Pick the most recently modified model directory (mtime as a proxy for
# "latest trained model") and print its path for the calling process.
latest_subdir = max(all_subdirs_of(directory), key=os.path.getmtime)
print(latest_subdir )
# Flush so a parent process reading our stdout sees the path immediately.
sys.stdout.flush()
#import os
#import time
#import operator
#alist={}
#directory= 'C:/Users/ramad/Downloads/chatbot-node-rasa-master/models/default/'
#os.chdir(directory)
#for file in os.listdir("."):
# if os.path.isdir(file):
# timestamp = os.path.getmtime( file )
# # get timestamp and directory name and store to dictionary
# alist[os.path.join(os.getcwd(),file)]=timestamp
## sort the timestamp
#for i in sorted(alist.items(), key=operator.itemgetter(1)):
# latest="%s" % ( i[0])
#print ("newest directory is ", latest) | [
"noreply@github.com"
] | suneelc12.noreply@github.com |
a3cac7cff2c5cbd19e783ea7573def0d2719f2c2 | 967cd25c52be494817f69003ebcff5400ab1f51b | /thingspeak/testing/playground.py | e4c6b30449f5b80f757fb7921886b3266ff57354 | [
"MIT"
] | permissive | jutako/raspi | efc107bbf4c83d56ac8f8582dff8b3f56b151772 | f69d15a48765c85960e7d7da175d4f96cb1dfee3 | refs/heads/master | 2021-01-12T13:43:07.055659 | 2018-03-24T20:49:06 | 2018-03-24T20:49:06 | 72,223,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py |
# Dummy sensor readings used to exercise the ThingSpeak update API.
temp = 10
hum = 20
# ThingSpeak channel write API key.
key = 'VEHKJKJXTZBYLMVC'
import urllib
# ThingSpeak expects the write key plus one value per channel field.
values = {'api_key' : key, 'field1' : temp, 'field2' : hum}
# NOTE(review): urllib.urlencode exists only on Python 2; on Python 3 this
# would be urllib.parse.urlencode — confirm the target interpreter.
postdata = urllib.urlencode(values)
| [
"jussitapiokorpela@gmail.com"
] | jussitapiokorpela@gmail.com |
d4c07aa542fd2df9f7066b893a929bbebdacca97 | 0eb3cb7493b6cc604a1aea9afc7af02e89b38602 | /Chapter10. Files/file.py | 16b8206030bce27b3aa6d69377aa5c469ab2a262 | [] | no_license | ec4sug4/i | 8b7c2d21ff3e7c763464f3a77ea009683eb17d51 | 1dbd58bb12729749c220b9f1f92f63389e7a886c | refs/heads/master | 2023-05-10T17:08:57.966542 | 2020-07-02T09:33:01 | 2020-07-02T09:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | fileref = open("olympics.txt","r")
# Read every line up front, then show only the first four.
line = fileref.readlines()
for i in line[:4]:
    print(i)
# Release the handle opened above; a `with` block would do this automatically.
fileref.close()
"subham.kumar032@gmail.com"
] | subham.kumar032@gmail.com |
80e786872143779e1fca9b83e5ab6e2b6162d70c | 7c9a3e527c8e444c1be745a26c5803ded4977bf4 | /trades/migrations/0002_auto__add_trade__add_item.py | 714ab2836c1c559a46a5712e7c66496902cb44e8 | [] | no_license | sekl/esotrades | 5b536fb9632ebecdca3a479a5d255c513b8078c1 | 683f8f8e29f89eb0ef55ec56544a4d07a1944077 | refs/heads/master | 2016-09-09T19:52:38.898875 | 2014-03-08T12:26:49 | 2014-03-08T12:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the Trade and Item tables.

    Auto-generated by South's schemamigration command; edit with care so
    that forwards() and backwards() remain exact inverses.
    """

    def forwards(self, orm):
        # Adding model 'Trade'
        db.create_table(u'trades_trade', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'trades', ['Trade'])
        # Adding model 'Item'
        db.create_table(u'trades_item', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'trades', ['Item'])

    def backwards(self, orm):
        # Deleting model 'Trade'
        db.delete_table(u'trades_trade')
        # Deleting model 'Item'
        db.delete_table(u'trades_item')

    # Frozen ORM description of the models as of this migration.
    models = {
        u'trades.item': {
            'Meta': {'object_name': 'Item'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'trades.trade': {
            'Meta': {'object_name': 'Trade'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['trades']
"sebastian.klier@gmx.de"
] | sebastian.klier@gmx.de |
33528cd85e5325910f82fa63bb57f9f679aeff7b | 284e9633e979ef51f6b1cf4525a90f2b3d9d1889 | /wasm/tests/test_exec_mode.py | 1914f102e50984a82c10b7d3464413d03322193d | [
"MIT"
] | permissive | JesterOrNot/RustPython | 01778140e2e7beaf2a8e2c3da2ce604d6eb116ba | bbe728e4ce203d6b328087b553acc8f81842f970 | refs/heads/master | 2020-12-14T19:26:42.785389 | 2020-02-04T21:10:09 | 2020-02-04T21:10:09 | 234,846,323 | 0 | 0 | MIT | 2020-01-19T05:36:26 | 2020-01-19T05:36:24 | null | UTF-8 | Python | false | false | 1,279 | py | import time
import sys
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import pytest
def print_stack(driver):
    """Dump the RustPython error message and JS stack from the page to stderr."""
    js = "return window.__RUSTPYTHON_ERROR_MSG + '\\n' + window.__RUSTPYTHON_ERROR_STACK"
    trace = driver.execute_script(js)
    print(f"RustPython error stack:\n{trace}", file=sys.stderr)
@pytest.fixture(scope="module")
def driver(request):
    """Module-scoped headless Firefox pointed at the local RustPython page.

    The browser is torn down in a ``finally`` block, so it is closed even
    when the initial page load raises or a test using the fixture errors
    out (the original leaked the browser in both cases).
    """
    options = Options()
    options.add_argument('-headless')
    driver = webdriver.Firefox(options=options)
    try:
        try:
            driver.get("http://localhost:8080")
        except Exception:
            # Surface the in-page RustPython error before re-raising.
            print_stack(driver)
            raise
        # Give the WASM module time to initialize before tests poke at it.
        time.sleep(5)
        yield driver
    finally:
        driver.close()
def test_eval_mode(driver):
assert driver.execute_script("return window.rp.pyEval('1+1')") == 2
def test_exec_mode(driver):
assert driver.execute_script("return window.rp.pyExec('1+1')") is None
def test_exec_single_mode(driver):
    """pyExecSingle behaves like the REPL: expression-statement values surface."""
    assert driver.execute_script("return window.rp.pyExecSingle('1+1')") == 2
    # Capture the interpreter's stdout via a custom `stdout` hook: each
    # expression statement's value is printed, so two lines come back.
    assert driver.execute_script(
        """
        var output = [];
        save_output = function(text) {{
            output.push(text)
        }};
        window.rp.pyExecSingle('1+1\\n2+2',{stdout: save_output});
        return output;
        """) == ['2\n', '4\n']
| [
"yanganto@gmail.com"
] | yanganto@gmail.com |
4166e7f506510ccae3d18172138df9b4a82e5770 | 490fafa60041db0e4ba1cd8f0a147f26bcde8b81 | /Data-Structures/lists/finding.py | 9d600b83bb23370360836e7c079ac8cf1f49eac0 | [] | no_license | vahidsediqi/Python-basic-codes | 29bad84a700b91f7beb6f585634914e0e0523652 | ed1af51610d1d0c1d9f1cc1b032365b7f917686f | refs/heads/master | 2021-05-26T03:51:39.541880 | 2020-05-20T13:33:14 | 2020-05-20T13:33:14 | 254,041,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | letters = ['a','b','c','d','e']
# Finding the index of an item with list.index.
# If the item is not in the list, list.index raises ValueError,
# so we guard the lookup with a membership test first.
print(letters.index('d'))
if 'f' in letters:
    print(letters.index('f'))
else:
print('The letter is not exist') | [
"vsediqi@live.com"
] | vsediqi@live.com |
91bfa4b69dc8175e14f2c85dffe644cc6f7a0d71 | fe9e6580e954ed62c4e8fd6b860000bb553150a6 | /ecommerce/forms.py | bffb01b5ed4507bffcb530dd54713c62b71512fe | [] | no_license | Brucehaha/ecommerce | 037fb25608e848f5c0fd4ed78f42028d21872e39 | bea5e5a13ad1e958912b0ac99cfc556a593f91f3 | refs/heads/workplace | 2023-01-03T19:35:13.894572 | 2018-06-20T07:22:19 | 2018-06-20T07:22:19 | 124,492,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django import forms
class ContactForm(forms.Form):
    """Contact form with a name, a gmail address, and a message body."""

    fullname = forms.CharField(
        widget=forms.TextInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your fullname"
            }
        )
    )
    email = forms.EmailField(
        widget=forms.EmailInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your Email"
            }
        )
    )
    content = forms.CharField(
        widget=forms.Textarea(
            attrs={
                "class": "form-control",
                # Fixed user-facing typo: placeholder was "Year message".
                "placeholder": "Your message"
            }
        )
    )

    def clean_email(self):
        """Accept only gmail.com addresses; raise ValidationError otherwise."""
        email = self.cleaned_data.get("email")
        if "gmail.com" not in email:
            raise forms.ValidationError("Email has to be gmail.com")
        return email
| [
"henninglee2013@gmail.com"
] | henninglee2013@gmail.com |
649b19115673556dea865aec69f56090f46ec14a | 194dae90bf1cc497f9162eca3957fdc6c9094e33 | /deeptennis/data/dataset.py | 2176d31477742cddff68bb3f0879234dbb4103e5 | [
"MIT"
] | permissive | sethah/deeptennis | 41ea80002a5d7993b6ca625bb59efef1d70371f2 | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | refs/heads/master | 2021-06-03T23:51:59.754478 | 2020-05-18T00:13:41 | 2020-05-18T00:13:41 | 147,436,170 | 34 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import numpy as np
from pathlib import Path
from PIL import Image
from typing import Callable, List
import torch
def compute_mean_std(ds: torch.utils.data.Dataset):
    """
    Compute the per-channel mean and standard deviation over a dataset.

    Each dataset item must yield an image tensor shaped (C, ...) as its
    first element; any remaining elements (e.g. labels) are ignored.

    :param ds: indexable dataset of (image, ...) items.
    :return: tuple (mean, std) of 1-D tensors, one entry per channel.
    """
    tsum = 0.
    tcount = 0.
    tsum2 = 0.
    for i in range(len(ds)):
        im, *_ = ds[i]
        # Flatten all non-channel dimensions: (C, H, W) -> (C, H*W).
        im = im.view(im.shape[0], -1)
        tsum = tsum + im.sum(dim=1)
        tcount = tcount + im.shape[1]
        tsum2 = tsum2 + (im * im).sum(dim=1)
    mean = tsum / tcount
    # E[x^2] - E[x]^2 can dip slightly below zero from floating-point
    # rounding; clamp so sqrt never produces NaN.
    var = torch.clamp(tsum2 / tcount - mean ** 2, min=0.0)
    std = torch.sqrt(var)
    return mean, std
class ImageFilesDataset(torch.utils.data.Dataset):
    """Dataset of image file paths, loaded lazily as RGB PIL images."""

    def __init__(self, files: List[Path], labels: np.ndarray=None, transform: Callable=None):
        self.transform = transform
        self.files = files
        # Default to an all-zero label per file when none are supplied.
        if labels is None:
            labels = np.zeros(len(files))
        self.labels = labels

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        path = self.files[idx]
        target = self.labels[idx]
        with open(path, 'rb') as fh:
            sample = Image.open(fh).convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, torch.tensor(target, dtype=torch.int64)
| [
"shendrickson@cloudera.com"
] | shendrickson@cloudera.com |
5ce2a703f5302283b074de6d2a1fb30fb8b91aa4 | bc0938b96b86d1396cb6b403742a9f8dbdb28e4c | /aliyun-python-sdk-nas/aliyunsdknas/request/v20170626/DescribeTagsRequest.py | d76b528b9d21f049ae887b42b56847b5cd568288 | [
"Apache-2.0"
] | permissive | jia-jerry/aliyun-openapi-python-sdk | fb14d825eb0770b874bc123746c2e45efaf64a6d | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | refs/heads/master | 2022-11-16T05:20:03.515145 | 2020-07-10T08:45:41 | 2020-07-10T09:06:32 | 278,590,780 | 0 | 0 | NOASSERTION | 2020-07-10T09:15:19 | 2020-07-10T09:15:19 | null | UTF-8 | Python | false | false | 2,120 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknas.endpoint import endpoint_data
class DescribeTagsRequest(RpcRequest):
    """RPC request for the NAS DescribeTags API (version 2017-06-26).

    Auto-generated Alibaba Cloud SDK request class; each getter/setter
    maps one-to-one onto a query parameter of the request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'NAS', '2017-06-26', 'DescribeTags','nas')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_PageNumber(self):
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self,PageNumber):
        self.add_query_param('PageNumber',PageNumber)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_Tags(self):
        return self.get_query_params().get('Tags')

    def set_Tags(self,Tags):
        # Tags is a list of {'Key': ..., 'Value': ...} dicts; flatten them
        # into 1-based Tag.N.Key / Tag.N.Value query parameters.
        for i in range(len(Tags)):
            if Tags[i].get('Value') is not None:
                self.add_query_param('Tag.' + str(i + 1) + '.Value' , Tags[i].get('Value'))
            if Tags[i].get('Key') is not None:
                self.add_query_param('Tag.' + str(i + 1) + '.Key' , Tags[i].get('Key'))

    def get_FileSystemId(self):
        return self.get_query_params().get('FileSystemId')

    def set_FileSystemId(self,FileSystemId):
        self.add_query_param('FileSystemId',FileSystemId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
1e92de41ab21ce95eb3acba4eb3da6bff2bf176a | cfe872e89f657aa8a5f58c5efbab03b463575e16 | /CodeProject/wsgi.py | 89f82db22dfc65b9e954d5ad5b03cae1fbe490ce | [] | no_license | Akash-79/Code-Of-Thon | 91063c8c8aca08557f273a4b5c9c7889b12a1e66 | 921debaa136218b311f1b27d9aa96fe29224e11b | refs/heads/master | 2022-11-23T04:27:34.645409 | 2020-08-01T08:31:15 | 2020-08-01T08:31:15 | 284,213,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for CodeProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CodeProject.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"akashmalasane79@gmail.com"
] | akashmalasane79@gmail.com |
3a6d883858b8888b951ebe9478f8c055c4023350 | b801a549da493431071ac13c13927d545e036a82 | /KNN.py | 448c8057381b79425c45e4537c49e0d9800c1703 | [] | no_license | minytie/Recommender-Systems-with-different-algorithms | eca2b416a4e57b4e2f2324bb232f26db8de080df | 3bd8c007608eb5a479bc03720a2ef3ccbb6515ba | refs/heads/master | 2022-12-15T13:46:51.901417 | 2020-09-06T00:58:10 | 2020-09-06T00:58:10 | 293,178,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from surprise import KNNBasic
from surprise import Dataset
from surprise.model_selection import cross_validate
import json
from tqdm import tqdm
# Load the movielens-100k dataset (download it if needed).
data = Dataset.load_builtin('ml-100k')

# Evaluate KNNBasic for neighbourhood sizes k = 5, 10, ..., 95.
for k in tqdm([5 * i for i in range(1,20)],desc= "running KNN : "):
    # BUG FIX: the original hard-coded KNNBasic(k=5), so every saved file
    # held results for k=5 regardless of its name; use the loop's k.
    algo = KNNBasic(k=k)
    # 5-fold cross-validation over RMSE and MAE, using all CPU cores.
    performance = cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, n_jobs = -1 ,verbose=True)
    # The metric arrays are not JSON-serializable; convert to plain lists.
    for key in performance:
        performance[key] = list(performance[key])
    with open("evaluations/KNN_%d.json" % k,"w") as f:
        f.write(json.dumps(performance))
| [
"victor@MacBook-Pro.local"
] | victor@MacBook-Pro.local |
4b4e5b5f64b5b6c357d2474e845edb0281f3216f | 67296b720cc33df5cd8d8a5d45492128c21b0f90 | /collect_frames.py | 139cd487fac48e92b39d9d24f76518243edfccaf | [] | no_license | kaushikb258/World_Models | bd2b392f930e86380cc1a93a5f0e7cc12f5e68ff | d37e1a8f1b4cfae90fa038fa67557061a8e81a25 | refs/heads/master | 2020-04-10T21:23:13.903451 | 2018-12-11T07:49:00 | 2018-12-11T07:49:00 | 161,295,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import numpy as np
import sys
import os
import gym
from PIL import Image
import matplotlib.pyplot as plt
from utils import *
# Create the frame output folder if it does not exist yet.
# (Replaces a racy stat-then-mkdir probe guarded by a bare `except:`,
# which also swallowed KeyboardInterrupt/SystemExit.)
os.makedirs("frames", exist_ok=True)
# Random-policy data collection: drive CarRacing with random actions and
# dump cropped frames plus the matching actions for later training.
env = gym.make("CarRacing-v0")
episodes = 10000
#------------------------------------------------------------------------------------------
ii = 0
act = []
for ep in range(episodes):
    s = env.reset()
    tstep = 0
    ep_reward = 0
    while True:
        tstep += 1
        # Random action: steering in [-1, 1], throttle in [0, 1], and a
        # small brake in [0, 0.2] so the car keeps moving.
        steer = np.random.uniform(low=-1.0, high=1.0)
        acc = np.random.uniform(low=0.0, high=1.0)
        br = np.random.uniform(low=0.0, high=0.2)
        actions = [steer, acc, br]
        env.render()
        if (tstep > 50):
            # Only record after step 50 (presumably to skip the episode's
            # initial camera animation — confirm), cropping to rows [:82].
            act.append(actions)
            im = Image.fromarray(s[:82,:,:])
            im.save("frames/frame_" + str(ii) + ".png")
            ii += 1
        next_s, reward, done, info = env.step(actions)
        ep_reward += reward
        if (tstep > 50):
            # Terminate early once the car leaves the track.
            if (not is_car_on_road(next_s[:82,:,:])):
                done = True
        if (done):
            print("episode: ", ep, "episode reward: ", ep_reward)
            break
        else:
            s = next_s
# Persist every recorded action alongside the saved frames.
act = np.array(act)
np.save("actions", act)
print(act.shape)
| [
"kaushikb258@gmail.com"
] | kaushikb258@gmail.com |
8ac060b920fdbfb4883e5b8557a11dcfbd7bdef8 | 47b739ab1293f7c6244ac70b438bfdcff61fe6fb | /question_model.py | a7024d38bdba531547ddbfb9a8059e3f9b1b5547 | [] | no_license | TotaltEcplise/Quiz_oop | c542d0dc0681f178c3710215c44c0b8e9f4d78b8 | 6c4ee83d3847495242d04e223371a8e605b1587f | refs/heads/master | 2023-03-07T06:42:00.136781 | 2021-02-22T13:16:18 | 2021-02-22T13:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | class Question:
def __init__(self, q_text, q_answer):
self.text = q_text
self.answer = q_answer
| [
"77200688+huserdil@users.noreply.github.com"
] | 77200688+huserdil@users.noreply.github.com |
9d81dd559f41247e65e5cff71490669e802b1643 | 629a4ae44605505c564def28a7de2d01dc4331bf | /src/ProgDBTutor/quote_data_access.py | d7f80d07652e86c4e923d910109491c5950014e2 | [] | no_license | lfereman/tutorial | 80e3b00676dd835632c8dbed441a7bfc55b96d75 | 19ed3f438987deb7a773312155cb9957137edda8 | refs/heads/master | 2021-04-30T12:26:26.372761 | 2018-02-13T12:42:45 | 2018-02-13T12:42:45 | 121,274,767 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | #Data Access Object pattern: see http://best-practice-software-engineering.ifs.tuwien.ac.at/patterns/dao.html
#For clean separation of concerns, create separate data layer that abstracts all data access to/from RDBM
#
#Depends on psycopg2 librarcy: see (tutor) https://wiki.postgresql.org/wiki/Using_psycopg2_with_PostgreSQL
import psycopg2
class DBConnection:
    """Thin wrapper around a single psycopg2 connection."""

    def __init__(self,dbname,dbuser,dbpass,dbhost):
        try:
            self.conn = psycopg2.connect("dbname='{}' user='{}' host='{}' password='{}'".format(dbname,dbuser,dbhost, dbpass))
        except Exception as err:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception and chain the real cause.
            print('ERROR: Unable to connect to database')
            raise Exception('Unable to connect to database') from err

    def close(self):
        """Close the underlying connection."""
        self.conn.close()

    def get_connection(self):
        return self.conn

    def get_cursor(self):
        """Return a fresh cursor on the shared connection."""
        return self.conn.cursor()

    def commit(self):
        return self.conn.commit()

    def rollback(self):
        return self.conn.rollback()
class Quote:
    """Plain value object for one quote row (id, text, author)."""

    def __init__(self, iden, text, author):
        self.id = iden
        self.text = text
        self.author = author

    def to_dct(self):
        """Return the quote as a JSON-serializable dict."""
        return dict(id=self.id, text=self.text, author=self.author)
class QuoteDataAccess:
    """DAO for the Quote table; all SQL for quotes lives here."""

    def __init__(self, dbconnect):
        self.dbconnect = dbconnect

    def get_quotes(self):
        """Return every stored quote as a list of Quote objects."""
        cursor = self.dbconnect.get_cursor()
        cursor.execute('SELECT id, text, author FROM Quote')
        return [Quote(row[0], row[1], row[2]) for row in cursor]

    def get_quote(self, iden):
        """Return the quote with id *iden*.

        NOTE(review): fetchone() returns None for an unknown id, which makes
        the Quote(...) call raise TypeError — confirm callers only pass ids
        that exist.
        """
        cursor = self.dbconnect.get_cursor()
        # Parameterized query guards against SQL injection; see also SO:
        # https://stackoverflow.com/questions/45128902/psycopg2-and-sql-injection-security
        cursor.execute('SELECT id, text, author FROM Quote WHERE id=%s', (iden,))
        row = cursor.fetchone()
        return Quote(row[0], row[1], row[2])

    def add_quote(self, quote_obj):
        """Insert *quote_obj*, fill in its generated id, and return it."""
        cursor = self.dbconnect.get_cursor()
        try:
            cursor.execute('INSERT INTO Quote(text,author) VALUES(%s,%s)', (quote_obj.text, quote_obj.author,))
            # Fetch the id generated for the row we just inserted.
            cursor.execute('SELECT LASTVAL()')
            iden = cursor.fetchone()[0]
            quote_obj.id = iden
            self.dbconnect.commit()
            return quote_obj
        except Exception as err:
            # Bare `except:` replaced: roll back, then re-raise with the
            # original database error chained for debugging.
            self.dbconnect.rollback()
            raise Exception('Unable to save quote!') from err
| [
"len.feremans@gmail.com"
] | len.feremans@gmail.com |
57fd48037b9de1fa98c0b884626e6a2d481584e2 | e287723f843005f0639542f8bf83eebb62192137 | /monitoring/prober/scd/test_operation_simple.py | 3cd623c1e8308608c2db5a4a51ff2f0a786e2a10 | [
"Apache-2.0"
] | permissive | RFARREY/dss | 6c02f2aedd0a78260dccbefe3e0d13108031c00e | 2989f68fb1293c08104866e8b0df116487bf075f | refs/heads/master | 2023-07-28T11:58:11.185048 | 2021-08-19T05:56:56 | 2021-08-19T05:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,617 | py | """Basic Operation tests:
- make sure the Operation doesn't exist with get or query
- create the Operation with a 60 minute length
- get by ID
- search with earliest_time and latest_time
- mutate
- delete
"""
import datetime
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC, SCOPE_CI, SCOPE_CM
from monitoring.monitorlib.testing import assert_datetimes_are_equal
BASE_URL = 'https://example.com/uss'
OP_ID = '0000008c-91c8-4afc-927d-d923f5000000'
def test_ensure_clean_workspace(scd_session):
    """Delete any Operation left over from a previous run so tests start clean."""
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    if resp.status_code == 200:
        resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
        assert resp.status_code == 200, resp.content
    elif resp.status_code == 404:
        # As expected.
        pass
    else:
        assert False, resp.content
def _make_op1_request():
    """Build a valid Operation creation payload.

    The extent is a 60-minute window starting 20 minutes from now, over a
    small circle near (-56, 178), from 0 to 120 m.
    """
    time_start = datetime.datetime.utcnow() + datetime.timedelta(minutes=20)
    time_end = time_start + datetime.timedelta(minutes=60)
    return {
        'extents': [scd.make_vol4(time_start, time_end, 0, 120, scd.make_circle(-56, 178, 50))],
        'old_version': 0,
        'state': 'Accepted',
        'uss_base_url': BASE_URL,
        'new_subscription': {
            'uss_base_url': BASE_URL,
            'notify_for_constraints': False
        }
    }
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_does_not_exist_get(scd_session):
    """GET by id must 404 before the Operation is created."""
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 404, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_does_not_exist_query(scd_session):
    """Area queries must not list the Operation; CI/CM scopes get 403."""
    time_now = datetime.datetime.utcnow()
    end_time = time_now + datetime.timedelta(hours=1)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [op['id'] for op in resp.json().get('operation_references', [])]
    # The same query under the other scopes must be rejected outright.
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_single_extent(scd_session):
    """'extents' must be a list; a bare Volume4 object is rejected with 400."""
    req = _make_op1_request()
    req['extents'] = req['extents'][0]
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_missing_time_start(scd_session):
    """An extent without time_start must be rejected with 400."""
    req = _make_op1_request()
    del req['extents'][0]['time_start']
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_missing_time_end(scd_session):
    """An extent without time_end must be rejected with 400."""
    req = _make_op1_request()
    del req['extents'][0]['time_end']
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: Operation OP_ID created by scd_session user
def test_create_op(scd_session):
    """Create the Operation; only the SC scope may do so, and version starts at 1."""
    req = _make_op1_request()
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == BASE_URL
    assert_datetimes_are_equal(op['time_start']['value'], req['extents'][0]['time_start']['value'])
    assert_datetimes_are_equal(op['time_end']['value'], req['extents'][0]['time_end']['value'])
    assert op['version'] == 1
    assert 'subscription_id' in op
    assert 'state' not in op
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
def test_get_op_by_id(scd_session):
    """GET by id succeeds only with the SC scope and returns version 1."""
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == BASE_URL
    assert op['version'] == 1
    assert 'state' not in op
# Preconditions: None, though preferably Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_missing_params(scd_session):
    """A query without a request body must be rejected with 400."""
    resp = scd_session.post('/operation_references/query')
    assert resp.status_code == 400, resp.content
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search(scd_session):
    """A time-unbounded area query must include the Operation."""
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json().get('operation_references', [])]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_earliest_time_included(scd_session):
    """earliest_time at +59 min still intersects the Operation's 20-80 min window."""
    earliest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=59)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(earliest_time, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_earliest_time_excluded(scd_session):
    """earliest_time at +81 min lies past the Operation's window, so no match."""
    earliest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=81)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(earliest_time, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_latest_time_included(scd_session):
    """latest_time at +20 min touches the window's start, so the Operation matches."""
    latest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=20)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, latest_time, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_latest_time_excluded(scd_session):
    """latest_time at +1 min ends before the window starts, so no match."""
    latest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, latest_time, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: Operation OP_ID mutated to second version
@default_scope(SCOPE_SC)
def test_mutate_op(scd_session):
    """Mutate the Operation (new USS base URL); version must bump to 2."""
    # GET current op
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 200, resp.content
    existing_op = resp.json().get('operation_reference', None)
    assert existing_op is not None
    # Reuse the creation payload's extents; supply the current ovn as key
    # and the current version so the mutation is accepted.
    req = _make_op1_request()
    req = {
        'key': [existing_op["ovn"]],
        'extents': req['extents'],
        'old_version': existing_op['version'],
        'state': 'Activated',
        'uss_base_url': 'https://example.com/uss2',
        'subscription_id': existing_op['subscription_id']
    }
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == 'https://example.com/uss2'
    assert op['version'] == 2
    assert op['subscription_id'] == existing_op['subscription_id']
    assert 'state' not in op
# Preconditions: Operation OP_ID mutated to second version
# Mutations: Operation OP_ID deleted
def test_delete_op(scd_session):
    """Only the SC scope may delete the Operation."""
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
# Preconditions: Operation OP_ID deleted
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_deleted_op_by_id(scd_session):
    """GET by id must 404 after deletion."""
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 404, resp.content
# Preconditions: Operation OP_ID deleted
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_deleted_op_by_search(scd_session):
    """Area queries must no longer list the Operation after deletion."""
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
| [
"noreply@github.com"
] | RFARREY.noreply@github.com |
11276df4a89d1128a708b3fe2ff3a08e678a0a17 | c188122ea4797d10c6f8e1641a229545dc3fa53c | /core/thread/mtsleepA.py | 90c6ddea7f51e60da7f67b7b4c9cfb36cdb5c5ac | [
"BSD-3-Clause"
] | permissive | lostFox/autoRunSomething | e41fd34f44bb715fe6e21a70fa608e33efb779bc | 519f2ebca6e2c78aa3caeed2e88b8f92403a8b46 | refs/heads/master | 2021-07-15T23:08:24.624022 | 2021-03-03T00:16:40 | 2021-03-03T00:16:40 | 59,880,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'james'
import thread
from time import sleep, ctime
def loop0():
    # Simulate a 4-second task, stamping its start and finish times.
    print 'start loop 0 at:', ctime()
    sleep(4)
    print 'loop 0 done at:', ctime()
def loop1():
    # Simulate a 2-second task, stamping its start and finish times.
    print 'start loop 1 at', ctime()
    sleep(2)
    print 'loop 1 done at:', ctime()
def main():
    # Launch both loops on new threads (Python 2 `thread` module, which has
    # no join), then sleep longer than the slowest loop (4s) so the main
    # thread outlives both workers before the process exits.
    print 'starting at:', ctime()
    thread.start_new_thread(loop0, ())
    thread.start_new_thread(loop1, ())
    sleep(6)
    print 'all DONE at:', ctime()
# Run the demo only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"lei_zhang@jit.com.cn"
] | lei_zhang@jit.com.cn |
9fc2d2257d8e3f9e9aa7e8d7bae0c0760336eeb8 | 919b8d06881302998d58cdc760d336c5d70b9055 | /firstdjangoproject/settings.py | c029eacca7b9b257c82ae2ce83c10c7c60487fd7 | [] | no_license | dharadhorda/django | 4f7a64b0fecbae245635755de28bd630f42126ae | 37ba50d002fc3ff2e1978de9f61b1ade3b040fdc | refs/heads/master | 2022-12-12T04:33:08.365851 | 2020-09-10T13:33:32 | 2020-09-10T13:33:32 | 294,416,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | """
Django settings for firstdjangoproject project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&om63oy@2xt_rd#@c3=7(7l%catgjgc7zy1_fo*mvdt_1or%z1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstdjangoproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstdjangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
] | [
"71008060+dharadhorda@users.noreply.github.com"
] | 71008060+dharadhorda@users.noreply.github.com |
3d9908bde294de7c224663f83522c34701c52a52 | e2daaaaeb89e6f4c6816f775fbe560c3fc765b4a | /HomeScanStaticAnalysis/controlflow.py | 3b57605063f2a412f609060aae77fc6eda6cd176 | [] | no_license | KulaniM/StaticAnalysis | deda8585de8faeef5ddeb0b1d0598cf37bbef65f | ecaad5113e7f9e267087a55496479c4384ccc646 | refs/heads/master | 2023-04-27T13:05:06.647890 | 2019-09-11T08:18:28 | 2019-09-11T08:18:28 | 207,722,516 | 0 | 0 | null | 2023-04-14T17:43:01 | 2019-09-11T04:23:28 | Java | UTF-8 | Python | false | false | 4,229 | py | ############################################################################################
########### depends on the output of this project https://github.com/gousiosg/java-callgraph
import sys
import json
callgraph = []
callgraphList = []
flowgraph = {}
forward = {}
backward = {}
init_caller_method =sys.argv[1] #str('a()')
init_caller_class = sys.argv[2]#str('hha')
init_callee_method = sys.argv[1]#str('getActivity()')
init_callee_class = sys.argv[2]#str('com.google.android.chimera.Fragment')
##
## M:class1:<method1>(arg_types) (typeofcall)class2:<method2>(arg_types)
## The line means that method1 of class1 called method2 of class2. The type of call can have one of the following values
with open('output2/callgraph.txt') as f:
content = f.read()
callgraph = content.strip().split(', ')
callgraph[0] = callgraph[0].replace('[', '')
callgraph[-1] = callgraph[-1].replace(']', '')
#print(len(callgraph))
#print(callgraph[-1])
callgraph = list(set(callgraph))
for call in callgraph:
callgraphD = {}
temp1 = call.strip().split(' ')
temp2 = temp1[0].strip().split(':')
if temp2[0] is "M":
temp3 = temp1[1].strip().split(':')
callgraphD['caller_method'] = temp2[2]
callgraphD['caller_class'] = temp2[1]
callgraphD['callee_method'] = temp3[1]
temp4 = temp3[0].strip().split(')')
callgraphD['callee_class'] = temp4[1]
callgraphD['callee_invoke-type'] = temp4[0].replace('(', '')
callgraphList.append(callgraphD)
##### Print all the methods called by the given_method of given_class### FORWARD FLOW
def forwardflow(caller_method,caller_class):
j = 0
temp5 = []
fward = []
for calldir in callgraphList:
if str(calldir.get("caller_method"))==caller_method and str(calldir.get("caller_class"))==caller_class:
j = j + 1
temp5.append(calldir)
fward.append(calldir.get('callee_method')+'/'+calldir.get('callee_class'))
if len(fward):
forward.update({caller_method+'/'+caller_class: fward})
return temp5
##### Print all the methods wich call within the given_method of given_class ### BACKWARD FLOW
def backwardflow(callee_method,callee_class):
j = 0
temp6 = []
bward = []
for calldir in callgraphList:
if str(calldir.get("callee_method"))==callee_method and str(calldir.get("callee_class"))==callee_class:
j = j + 1
temp6.append(calldir)
bward.append(calldir.get('caller_method')+'/'+calldir.get('caller_class'))
if len(bward):
backward.update({callee_method+'/'+callee_class: bward})
return temp6
##### forward flow call graph
i = 0
def forwardcallgraph(init_caller_method, init_caller_class):
global i
i = i+1
fleveli = forwardflow(init_caller_method,init_caller_class)
for callee in fleveli:
nextlevel = forwardcallgraph(callee.get('callee_method'), callee.get('callee_class'))
flowgraph.update({"forward":forward})
##### backward flow call graph
k = 0
def backwardcallgraph(init_callee_method, init_callee_class):
global k
k = k+1
fleveli = backwardflow(init_callee_method,init_callee_class)
for caller in fleveli:
nextlevel = backwardcallgraph(caller.get('caller_method'), caller.get('caller_class'))
flowgraph.update({"backward":backward})
###### call generate forward flow graph
#print('///////////////////////////////////////////////////////////////////////////')
#print('////////////////////////// FORWARD FLOW GRAPH /////////////////////////////')
#print('///////////////////////////////////////////////////////////////////////////')
forwardcallgraph(init_caller_method, init_caller_class)
#print(forward)
###### call generate forward flow graph
#print('///////////////////////////////////////////////////////////////////////////')
#print('////////////////////////// BACKWARD FLOW GRAPH ////////////////////////////')
#print('///////////////////////////////////////////////////////////////////////////')
backwardcallgraph(init_callee_method, init_callee_class)
#print(backward)
print(flowgraph)
| [
"kulani41@comp.nus.edu.sg"
] | kulani41@comp.nus.edu.sg |
1f1a15327737df474e4091401068d90bf7b7a2d8 | df856d5cb0bd4a4a75a54be48f5b91a62903ee6e | /jishaku/__init__.py | be18c93d969f66dcdc330dc9e0ffd89dc6bb8cc2 | [
"MIT",
"Apache-2.0"
] | permissive | mortalsky/jishaku | 4c89bd69f6e1efcc45fcfdcc81427c71e10dc1de | 9cbbf64dd83697559a50c64653350253b876165a | refs/heads/master | 2023-07-20T04:55:19.144528 | 2021-01-22T08:18:12 | 2021-01-22T08:18:12 | 299,701,523 | 0 | 0 | MIT | 2020-09-29T18:16:24 | 2020-09-29T18:16:23 | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
"""
jishaku
~~~~~~~
A discord.py extension including useful tools for bot development and debugging.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
# pylint: disable=wildcard-import
from jishaku.cog import * # noqa: F401
from jishaku.features.baseclass import Feature # noqa: F401
from jishaku.meta import * # noqa: F401
__all__ = (
'Jishaku',
'Feature',
'setup'
)
| [
"sansgorialis@gmail.com"
] | sansgorialis@gmail.com |
fe3f96a2af6475819c782c04a2b8e8b6b3e3d814 | 52a7b1bb65c7044138cdcbd14f9d1e8f04e52c8a | /budget/urls.py | c353880f983753ec457815a9fa5d6fa7951041ab | [] | no_license | rds0751/aboota | 74f8ab6d0cf69dcb65b0f805a516c5f94eb8eb35 | 2bde69c575d3ea9928373085b7fc5e5b02908374 | refs/heads/master | 2023-05-03T00:54:36.421952 | 2021-05-22T15:40:48 | 2021-05-22T15:40:48 | 363,398,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_views
# Routes for the budget app.  The `name=` values are used for reverse URL
# lookups.  NOTE(review): 'add item' contains a space — legal, but unusual
# for {% url %} tags; confirm templates reference it exactly.
urlpatterns = [
    path('app/',views.index,name='index'),
    path('add_item/',views.add_item,name='add item'),
]
"you@example.com"
] | you@example.com |
b3f0bdd99f6bee334536b269df6e3f5644de88b7 | 4b14a94152356019675f3d2ac5d668b2459cf153 | /event_app/myenv/bin/easy_install | cf53bcefe41418c2b60872bdcd5909a47f635904 | [] | no_license | arvicz22/eventapp | 20b0360a22b6b7a57b7cc55beca9d0e398161372 | 6bbe8666b31db51262f51992fa14c19137777c90 | refs/heads/master | 2021-01-15T21:44:52.596047 | 2014-03-13T07:34:03 | 2014-03-13T07:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | #!/home/eric/Desktop/event_app/myenv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.9.8','console_scripts','easy_install'
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install')()
)
| [
"arvicz22@gmail.com"
] | arvicz22@gmail.com | |
bfb6270e3c9d1dea3f85b30e18b9ac93406c9354 | 3041068cd9882211a21d5a88b3843b21ff221ff1 | /bookclub/migrations/0004_notes.py | a610c96eee1764c359c9da46b3accfeef64b92d2 | [] | no_license | yardenroee/OriginBookClub | 82b7cbdd1aa0f2386242a06e020f8efc2384d0cd | 6305c6c333e490323ddc6b13d7ba98cef52e7828 | refs/heads/master | 2021-09-24T11:13:02.780516 | 2020-02-04T04:45:14 | 2020-02-04T04:45:14 | 236,394,318 | 0 | 0 | null | 2021-09-22T18:37:06 | 2020-01-27T00:05:01 | Python | UTF-8 | Python | false | false | 510 | py | # Generated by Django 3.0.2 on 2020-01-30 05:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``Notes`` model (id + text)."""

    dependencies = [
        ('bookclub', '0003_auto_20200129_0047'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(default='')),
            ],
        ),
    ]
| [
"yardenroee@gmail.com"
] | yardenroee@gmail.com |
f34e145964579b358a43c5aeec09cc5535f79280 | 2ba0293397610e2f30e9a7038c65db6f6bbe974f | /Moves.py | 9af1d58229e9e3b17ba7e8d35c8959fe42c7ae42 | [] | no_license | Giruvagen/TextAdv | 2dcfcb21fe1612fdc812285fa2ad25e0086fe92d | cc58e960af749d59b475e81a508eb3e88245a063 | refs/heads/master | 2020-03-22T16:41:10.646193 | 2018-07-13T22:39:15 | 2018-07-13T22:39:15 | 140,343,760 | 0 | 0 | null | 2018-07-15T18:30:18 | 2018-07-09T21:24:31 | Python | UTF-8 | Python | false | false | 2,520 | py | #defines movement
import random
from content import mapdesc
from content import monsters
class motion(object):
    """Drives the player around a 3x3 wrap-around grid and triggers random
    monster encounters.

    NOTE(review): ``move`` ignores its ``direction`` parameter and reads
    ``self.direction`` instead; preserved because ``battle`` relies on
    ``self.direction`` holding the last prompt answer.
    """

    def __init__(self, char):
        # char: the player's display name.
        self.char = char

    def battle(self):
        """Pick a random monster, announce it, then resume movement.

        Bug fix: ``monsters.items()`` yields ``(name, data)`` tuples, and
        ``"{:s}".format(tuple)`` raised TypeError ('s' presentation type is
        str-only).  ``{}`` formats any object.
        """
        print("A monster appears!")
        self.monster = random.choice(list(monsters.items()))
        print("It's a wild {}!".format(self.monster))
        self.move(self.direction)

    def move(self, direction):
        """Step one cell in ``self.direction``, wrapping at the grid edges.

        A battle can only trigger (1-in-9 chance) on a non-wrapping step.
        An unrecognised direction re-prompts and recurses.
        """
        if self.direction == "north":
            self.starty += 1
            if self.starty > 2:
                self.starty = 0
                print(mapdesc[(self.startx, self.starty)])
            else:
                print(mapdesc[(self.startx, self.starty)])
                self.battlechance = random.randint(0, 8)
                if self.battlechance == 1:
                    self.battle()
        elif self.direction == "south":
            self.starty -= 1
            if self.starty < 0:
                self.starty = 2
                print(mapdesc[(self.startx, self.starty)])
            else:
                print(mapdesc[(self.startx, self.starty)])
                self.battlechance = random.randint(0, 8)
                if self.battlechance == 1:
                    self.battle()
        elif self.direction == "east":
            self.startx += 1
            if self.startx > 2:
                self.startx = 0
                print(mapdesc[(self.startx, self.starty)])
            else:
                print(mapdesc[(self.startx, self.starty)])
                self.battlechance = random.randint(0, 8)
                if self.battlechance == 1:
                    self.battle()
        elif self.direction == "west":
            self.startx -= 1
            if self.startx < 0:
                self.startx = 2
                print(mapdesc[(self.startx, self.starty)])
            else:
                print(mapdesc[(self.startx, self.starty)])
                self.battlechance = random.randint(0, 8)
                if self.battlechance == 1:
                    self.battle()
        else:
            print("Please choose a valid direction: north, south, east or west!")
            self.direction = input("Choose your direction of travel: ")
            self.move(self.direction)

    def startmove(self):
        """Place the player at a random cell and start the movement loop."""
        self.startx = random.randint(0, 2)
        self.starty = random.randint(0, 2)
        print("Here begins your adventure, {:s}, at spot {:d},{:d}".format(self.char, self.startx, self.starty))
        print(mapdesc[(self.startx, self.starty)])
        self.direction = input("Choose your direction of travel: ")
        self.move(self.direction)
| [
"40126586+Giruvagen@users.noreply.github.com"
] | 40126586+Giruvagen@users.noreply.github.com |
71823020661ed6290534f1975e4bd38265fb1ff5 | 77c37bffac550aa48f146f9f082df75b53744d47 | /JMSSGraphics/Fire.py | 9c9625855804de7010331e6865750ad6f417372f | [] | no_license | toanh/JMSSGraphics | 95473d4ada68be47dc9d35ce699073d9eac3655a | 44493622b3b169cd6d064dc285f649d036375957 | refs/heads/master | 2021-05-12T00:54:16.501295 | 2018-05-13T12:22:31 | 2018-05-13T12:22:31 | 117,546,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | from JMSSGraphics import *
from Particle import *
import random
import math
jmss = Graphics(width = 800, height = 600, title = "Fire!", fps = 60)
images = []
images.append(jmss.loadImage("fire01.png"))
images.append(jmss.loadImage("fire02.png"))
images.append(jmss.loadImage("fire03.png"))
images.append(jmss.loadImage("fire04.png"))
images.append(jmss.loadImage("fire05.png"))
particles = []
def SpawnParticle(img, x, y, vel_x, vel_y, size, lifetime, rotation):
    """Create one fire particle and append it to the global ``particles``.

    Velocities are given per-frame and scaled by 60 into per-second units;
    the spawn size is remembered so the particle can shrink over its life.
    """
    p = Particle()
    p.img = img
    p.x, p.y = x, y
    p.vel_x = vel_x * 60
    p.vel_y = vel_y * 60
    p.height = p.width = size
    p.orig_height = p.orig_width = size
    p.lifetime = lifetime
    p.life = lifetime
    p.rotation = rotation
    particles.append(p)
def UpdateParticles(dt):
    """Advance every particle by ``dt`` seconds, then cull dead ones.

    Each particle moves with its velocity and shrinks linearly over its
    lifetime (scaled by the life fraction *before* this frame's decrement,
    matching the original behaviour).

    Bug fix: the original removed elements from ``particles`` while
    iterating over the same list, which skips the element after every
    removal; we now rebuild the survivor list in place instead.
    """
    for p in particles:
        t = float(p.life) / p.lifetime  # remaining-life fraction, pre-decrement
        p.life -= dt
        p.width = t * p.orig_width
        p.height = t * p.orig_height
        p.x += p.vel_x * dt
        p.y += p.vel_y * dt

    def _alive(p):
        # Keep particles that are on screen and still have life left.
        if p.x < -p.width or p.x > jmss.width:
            return False
        if p.y < -p.height or p.y > jmss.height:
            return False
        return p.life >= 0

    # Slice-assign so the module-level list object keeps its identity.
    particles[:] = [p for p in particles if _alive(p)]
def DrawParticles():
    """Draw every particle centred on its x position, rotated about its
    middle (anchor 0.5, 0.5) at half opacity so the additive blend builds
    up a glow where particles overlap."""
    for particle in particles:
        jmss.drawImage(
            particle.img,
            particle.x - particle.width / 2,
            particle.y,
            particle.width,
            particle.height,
            particle.rotation,
            0.5,
            0.5,
            opacity=0.5,
        )
@jmss.mainloop
def Game(dt):
    """Per-frame callback: emit five flame particles at the mouse cursor,
    advance the particle system and render it with additive blending.

    Bug fix: ``random.randint`` requires integer bounds, but the size
    jitter used true division (``height / 6``), which yields a float and
    raises ValueError under Python 3.  Floor division (``//``) fixes that
    and reproduces the Python 2 integer-division behaviour exactly.
    """
    for _ in range(5):
        fire_img = random.choice(images)
        # Jitter the sprite size by up to a sixth either way, then shrink.
        size = fire_img.height + random.randint(-fire_img.height // 6, fire_img.height // 6)
        size /= 1.2
        rand_x = random.randint(-20, 20)
        # Particles spawned farther from the flame centre die sooner.
        max_lifetime = (1 - (abs(rand_x) / 20.0)) * 1.5
        x, y = jmss.getMousePos()
        SpawnParticle(fire_img,
                      x + rand_x,
                      y + random.randint(-15, 15),
                      0,
                      random.random() * 5 + 1,
                      size,
                      0.25 + random.random() * max_lifetime,
                      (random.random() * 3.14159265359 / 4) - 3.14159265359 / 8)
    jmss.set_blend_type(BLEND_ADDITIVE)
    jmss.clear(0, 0, 0, 1)
    UpdateParticles(dt)
    DrawParticles()
    jmss.drawText(str(len(particles)), 0, 0)
jmss.run() | [
"toan.kien@gmail.com"
] | toan.kien@gmail.com |
0878d152efa09a7a635eab712d773508164c86c7 | 9ad765ac96f6534addcd44d55527e4196f4f6603 | /leetcode/Num_51_N_Queens.py | fad6e3ea43cbbf1c7ce321cda5d3f5a4204e672a | [] | no_license | salalals/ojPython2 | 134838c21f75afa23fc99b5398e9b0564bc7bb5f | f61a7d7af02bc6f3474dd2832c21c2c6ea5635b8 | refs/heads/master | 2021-09-01T01:44:17.254272 | 2017-12-24T07:22:59 | 2017-12-24T07:22:59 | 111,899,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
class Solution(object):
    def solveNQueens(self, n):
        """
        :type n: int
        :rtype: List[List[str]]

        Backtracking, one queen per row: ``part_sol[i]`` is the column of
        the queen in row ``i``.  A column is legal when no earlier queen
        shares the column or either diagonal.

        Bug fix: the original returned lazy ``map`` objects (both the outer
        collection and each board), not the documented List[List[str]];
        both levels are now materialised as lists.
        """
        solutions = []
        self.solve_n_queens(n, 0, [], solutions)
        return [self.translate_solution(n, sol) for sol in solutions]

    def solve_n_queens(self, n, start, part_sol, solutions):
        # Place a queen in row `start`; record every complete placement.
        if start == n:
            solutions.append(part_sol)
            return
        for col in range(n):
            if col in part_sol:
                continue  # same column as an earlier queen
            if any(abs(col - part_sol[r]) == start - r for r in range(len(part_sol))):
                continue  # shares a diagonal with an earlier queen
            self.solve_n_queens(n, start + 1, part_sol[:] + [col], solutions)

    def translate_solution(self, n, solution):
        """Render a column-per-row placement as the usual board strings.

        :param solution: list[int]
        :return: List[str]
        """
        return ["." * ind + "Q" + "." * (n - ind - 1) for ind in solution]
| [
"lshuo@amazon.com"
] | lshuo@amazon.com |
c42cc045d3613843df744ac6b74f7a368d40170e | f46e5ab4747d113215e46240eee4d75509e4be0d | /tests.py | 2dd01180f049fb3cb67a16cefd56d899698aae9a | [
"MIT"
] | permissive | xmonader/objsnapshot | 0d2dc17f9637dfe614332f125af5d867a8110118 | ab639630e6762a1d7c8e7df251f959e27e270e4e | refs/heads/master | 2021-01-22T06:19:26.026384 | 2017-05-30T13:12:22 | 2017-05-30T13:12:22 | 92,542,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | from .objsnapshot import commit, rollback
class Human:
    """A simple mutable person record used by the snapshot tests."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def inc(self, by=None):
        """Add ``by`` to the age; with no argument the age doubles."""
        self.age += self.age if by is None else by

    def __str__(self):
        # NB: trailing space deliberately preserved from the original.
        return "{} {} ".format(self.name, self.age)

    def godangerous(self):
        """Mutate this instance into a different identity."""
        self.name = "mr x"
        self.age = 90
class MovingBall:
    """A 2-D point stored in slots (no per-instance ``__dict__``)."""

    __slots__ = ['x', 'y']

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def move2(self, x, y):
        """Teleport the ball to (x, y)."""
        self.x = x
        self.y = y

    def __str__(self):
        return "{} {}".format(self.x, self.y)
h = Human("Ahmed", 50)
mb = MovingBall(0, 0)
### Examples
def test_commit_state():
    """End-to-end check of commit()/rollback() on a __dict__-based class
    (Human) and a __slots__-based class (MovingBall)."""
    person = Human("Ahmed", 50)
    ball = MovingBall(0, 0)

    snap_a = commit(person)
    assert snap_a.state['name'] == 'Ahmed'
    assert snap_a.state['age'] == 50
    assert len(snap_a.state) == 2

    person.inc(20)
    person.inc(2)
    snap_b = commit(person)
    assert snap_b.state['name'] == 'Ahmed'
    assert snap_b.state['age'] != 50
    assert snap_b.state['age'] == 72
    assert len(snap_b.state) == 2

    person.godangerous()
    snap_c = commit(person)
    assert snap_c.state['name'] == 'mr x'
    assert len(snap_c.state) == 2

    # be good again: restore the first snapshot
    person = rollback(person, snap_a)
    assert person.name == 'Ahmed'
    assert person.age == 50

    ball_snap_a = commit(ball)
    assert len(ball_snap_a.state) == 2
    assert ball_snap_a.state['x'] == 0
    assert ball_snap_a.state['y'] == 0

    ball.move2(5, 124)
    ball_snap_b = commit(ball)
    assert ball_snap_b.state['x'] == 5
    print(ball_snap_b.state)
    assert ball_snap_b.state['y'] == 124
    assert len(ball_snap_b.state) == 2

    ball = rollback(ball, ball_snap_a)
    assert ball.x == 0
    assert ball.y == 0
| [
"xmonader@gmail.com"
] | xmonader@gmail.com |
45feca857f34e368b1e8a792f14d2161e7944bc8 | 9a44bd208fba409164207fb5e2d8192a4fc345e7 | /Marc/crawling_foc.py | 58217f757b8bb3d7d166b3c1206e06ffec51287b | [] | no_license | sinlars/GuoTuMarc2 | 06e96b0ce230902a0975512f00ce7e5d9eb963f3 | 99d057b854ff245482e2e5c93c08ab31145ce9d1 | refs/heads/master | 2023-02-15T00:52:21.468424 | 2020-12-30T05:36:23 | 2020-12-30T05:36:23 | 325,466,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,543 | py | #coding:utf-8
import urllib.request
import lxml.html
from pymarc import Record, Field
from pymarc import MARCReader
import re
import xlwt
import sys,io
import openpyxl
from bs4 import BeautifulSoup
import gzip
import docx
from docx import Document
from io import BytesIO
import pymysql
import pinyin
import datetime
import requests
#改变标准输出的默认编码
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
headers = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
#'Accept-Encoding':'gzip, deflate, sdch',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9',
#'Connection':'keep-alive',
#'Cookie':'_gscu_413729954=00942062efyg0418; Hm_lvt_2cb70313e397e478740d394884fb0b8a=1500942062',
#'Host':'opac.nlc.cn',
'Cookie':'PHPSESSID=0f94e40864d4e71b5dfeb2a8cf392922; Hm_lvt_668f5751b331d2a1eec31f2dc0253443=1542012452,1542068702,1542164499,1542244740; Hm_lpvt_668f5751b331d2a1eec31f2dc0253443=1542246351',
'Upgrade-Insecure-Requests':'1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3141.7 Safari/537.36 Core/1.53.3226.400 QQBrowser/9.6.11682.400'}
def getHtml(url, num_retries=5):
    """Fetch *url* and return its body decoded as UTF-8 text.

    Gzip-encoded responses are inflated transparently.  On any error the
    fetch is retried recursively up to ``num_retries`` times; ``None`` is
    returned when the last retry also fails.
    """
    print('Crawling url:',url)
    try:
        request = urllib.request.Request(url,headers=headers)
        response = urllib.request.urlopen(request,timeout=30)
        info = response.info();
        page_html = ''
        page_html = response.read()
        if info.get('Content-Encoding') == 'gzip':
            # Wrap the body bytes in a file-like object so GzipFile can inflate them.
            buff = BytesIO(page_html)
            f = gzip.GzipFile(fileobj=buff)
            page_html = f.read().decode('utf-8')
        else:
            page_html = page_html.decode('utf-8','ignore')
        print(page_html)
    except Exception as e:
        print('Downloading error:',str(e))
        print('重试次数:', num_retries)
        page_html = None;
        if (num_retries > 0):
            # NOTE(review): both branches retry identically, so the HTTP 5xx
            # test below has no effect — confirm whether non-5xx errors were
            # meant to abort instead of retrying.
            if(hasattr(e, 'code') and 500 <= e.code < 600) :
                return getHtml(url,num_retries - 1)
            else:
                return getHtml(url, num_retries - 1)
        else :
            print('重试次数完毕:',num_retries)
            return page_html
    return page_html
def insertMysql(sql):
    """Execute an INSERT statement and return its AUTO_INCREMENT id (0 on failure).

    NOTE(review): *sql* is executed as-is — building it with ``%``
    interpolation upstream is SQL-injection-prone; confirm the inputs are
    trusted.  ``db.escape(sql)`` below discards its return value, so it has
    no effect on what gets executed.
    """
    #sql = pymysql.escape_string(sql)
    lastid = 0
    db = pymysql.connect(host='localhost',port= 3306,user = 'root',passwd='123456',db='zhiwu',charset='utf8')
    cursor = db.cursor()
    db.escape(sql)
    try:
        #print(sql)
        cursor.execute(sql)
        lastid = db.insert_id();
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
    cursor.close()
    db.close()
    return lastid
def get_pinyin(str):
    """Return space-delimited pinyin for *str*; '' for None/blank/'None' input."""
    if str is None:
        return ''
    cleaned = str.strip()
    if cleaned in ('None', ''):
        return ''
    return pinyin.get(cleaned, format='strip', delimiter=' ')
def get_pinyin_prefix(str):
    """Return upper-cased pinyin initials of *str*; '' for None/blank/'None' input."""
    if str is None:
        return ''
    cleaned = str.strip()
    if cleaned in ('None', ''):
        return ''
    return pinyin.get_initial(cleaned, delimiter='').upper()
def get_name_existed(name):
    """Look up a classification row by its Latin name.

    Returns the first matching row tuple, or None when absent.

    Fixes: the Latin name is now passed as a bound query parameter instead
    of being %-interpolated into the SQL string (SQL-injection / quoting
    bug), and the connection is closed as well as the cursor (previously a
    connection leaked on every call).
    """
    db = pymysql.connect(host='localhost', port=3306, user='root',
                         passwd='123456', db='zhiwu', charset='utf8')
    try:
        cursor = db.cursor()
        try:
            # PyMySQL quotes/escapes the bound value itself.
            cursor.execute('select * from tb_classsys where classsys_latin=%s',
                           (name,))
            data = cursor.fetchone()
        finally:
            cursor.close()
    finally:
        db.close()
    return data
def get_foc():
    """Fetch every row of table ``zhiwu2`` and return them as a tuple of tuples.

    Fix: the database connection is now closed (previously only the cursor
    was closed, leaking a connection per call).
    """
    db = pymysql.connect(host='localhost', port=3306, user='root',
                         passwd='123456', db='zhiwu', charset='utf8')
    try:
        cursor = db.cursor()
        try:
            cursor.execute('select * from zhiwu2')
            data = cursor.fetchall()
            print(data)
        finally:
            cursor.close()
    finally:
        db.close()
    return data
def get_text_docx():
    """Walk the paragraphs of a hard-coded .docx volume and export them to a
    hard-coded .xls workbook, four columns per species row (Chinese name,
    Latin name, body text, English body text).

    NOTE(review): paths point at the author's desktop; ``ke`` flags a
    paragraph ending in '科' (family heading) so the following paragraph
    restarts at column 0 — confirm against the source document layout.
    """
    file = docx.Document("C:\\Users\\dell\\Desktop\\高等九卷.docx")
    i = 1   # current spreadsheet row
    j = 0   # current spreadsheet column (0..3)
    wb = xlwt.Workbook()
    ws = wb.add_sheet('中国高等植物彩色图鉴正文内容-第九卷', cell_overwrite_ok=True)
    ws.write(0, 0, '物种中文名')
    ws.write(0, 1, '物种拉丁名') # family Chinese name
    ws.write(0, 2, '正文内容')
    ws.write(0, 3, '正文英文内容')
    ke = False
    for p in file.paragraphs:
        #if i > 20 :break
        #print('--------------------')d
        #if p.text.strip() == '':break
        if p.text.strip() == '' :
            continue
        # A full row of four cells has been written: move to the next row.
        if j%4 == 0:
            j = 0
            i = i + 1
            print('----------',i, j)
        ws.write(i, j, p.text.strip())
        if ke is True:
            # Previous paragraph was a family heading: restart at column 0.
            j = 0
            #i = i + 1
            ke = False
        else :
            j = j + 1
        print(p.text,'---',p.style.name)
        #print(run.bold for run in p.runs)
        #if p.style.name == '种-英文' :
        for run in p.runs:
            if run.bold :
                print(run.text,run.bold)
            #print(run.bold)
        #print('--------------------')
        #j = j + 1
        if p.text.strip().endswith('科'):
            ke = True
    wb.save("C:/Users/dell/Desktop/高等九卷.xls")
def get_content(url='http://www.efloras.org/',cralw_url='http://www.efloras.org/browse.aspx?flora_id=2&page=%s',pages=2):
    """Crawl the eFloras family-browse listing pages and print, for each
    5-column table row, the cell texts plus any family ('ke') and genus
    ('shu') links found in columns 2 and 4.
    """
    for i in range(1,pages+1):
        cralw_url_i = cralw_url % (str(i))
        info = getHtml(cralw_url_i)
        #print(info)
        page_context = BeautifulSoup(info, "html.parser")
        divs = page_context.find_all(id='ucFloraTaxonList_panelTaxonList')
        #print(divs)
        if len(divs) > 0:
            div = divs[0]
            table = div.find_all('table')[0]
            #print(table)
            trs = table.find_all('tr')
            #print(trs)
            for tr in trs:
                tds = tr.find_all('td')
                if len(tds) == 5:
                    print(tds[0].text,tds[1].text,tds[2].text,tds[3].text,tds[4].text)
                    #if tds[1].fina_all('a') is not None:
                    ke_urls = tds[1].select('a[href]')
                    print(ke_urls)
                    if len(ke_urls) > 0:
                        ke_url = ke_urls[0].get('href');
                        print('ke_url :',ke_url)
                        # NOTE(review): this fetches the *base* url, not the
                        # family link just extracted — looks like it should be
                        # getHtml(url + ke_url); confirm intent.
                        ke_context = getHtml(url)
                        #print(ke_context)
                        ke_context_soup = BeautifulSoup(ke_context, "html.parser")
                        table_ke = ke_context_soup.find_all('table',id='footerTable')
                        print(table_ke)
                    shu_urls = tds[3].select('a[href]')
                    print(shu_urls)
                    if len(shu_urls) > 0:
                        print('shu_url :',shu_urls[0].get('href'))
def get_ke_context(url):
    """Scrape one eFloras taxon-treatment page into a ``volume_content``
    dict (taxon/volume ids, names, authors, body text) and hand it to
    ``wuzhong_detail_sql`` for persistence; also walks the dichotomous key
    table and any related-object links.

    NOTE(review): relies on helpers not visible here (``get_max_number``,
    ``is_all_zh``, ``wuzhong_detail_sql``) — behaviour described below is
    inferred from usage and should be confirmed against their definitions.
    """
    volume_content = {};
    ke_context = getHtml(url)
    volume_content['url'] = url
    volume_content['taxon_id'] = get_max_number(url)
    ke_context_soup = BeautifulSoup(ke_context, "html.parser")
    table_ke = ke_context_soup.find_all('table', id='footerTable')
    tds = table_ke[0].select('td[style]')
    #print(tds[0].text)# volume / page numbers the family appears in
    volume_content['volume_title'] = tds[0].text
    div_context = ke_context_soup.find_all('div', id='panelTaxonTreatment')
    #print(div_context[0].find_all(re.compile("^image")))
    #print('正文内容:',div_context[0].prettify())
    foc_taxon_chain = ke_context_soup.select_one('span[id="lblTaxonChain"]')
    #print(foc_taxon_chain)
    parent_links = foc_taxon_chain.find_all('a')
    if parent_links:
        # Last breadcrumb link is the immediate parent taxon.
        parent_link = parent_links[len(parent_links)-1].get('href')
        volume_content['parent_taxon_id'] = get_max_number(parent_link)
    volume_list = foc_taxon_chain.find_all('a', href=re.compile("volume_id"), recursive=False)
    if len(volume_list) == 1:
        volume_content['volume_id'] = get_max_number(volume_list[0].get('href'))
        volume_content['volume'] = volume_list[0].text
    span = div_context[0].find_all('span',id='lblTaxonDesc')[0]
    #print('正文内容:', span.prettify())
    #print(span.prettify())
    ##################### part of the content carrying image info ################
    image_table = span.select_one('table')
    if image_table:
        image_table_tr_list = image_table.find_all('tr')
        for image_table_tr in image_table_tr_list:
            image_table_td_list = image_table_tr.find_all('td')
            for image_table_td in image_table_td_list:
                if image_table_td.a:
                    #print('图片连接:',image_table_td.select_one('a').img.get('src')) ## image link
                    image_link = image_table_td.a.img.get('src')
                    #print('图片连接:', image_link)
                    #download_file(image_link,'F:\FloraData\images\\' + str(get_max_number(image_link)) + '.jpg')
                    if image_table_td.a.next_sibling :
                        print('当前物种的拉丁名及链接等:',image_table_td.a.next_sibling.get('href'),image_table_td.a.next_sibling.text)
                        if image_table_td.a.next_sibling.next_sibling:
                            print('Credit:',image_table_td.a.next_sibling.next_sibling.small.text)
        # Drop the image table so it does not pollute the text extraction below.
        image_table.extract()
    ###############################################################
    #print(span.b.next_siblings)
    latin_name_object = []
    for wuzh in span.next_element.next_siblings:
        if wuzh.name == 'p':
            continue
        if wuzh.name == 'a': # a bare link means "see other taxon", e.g. See Isoëtaceae # http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=20790
            latin_name_object = []
            latin_name_object.append(wuzh)
            break
        if wuzh.name == 'small' :
            volume_content['small'] = wuzh.string.strip('\n\r ')
            continue
        if wuzh.string is not None and wuzh.string.strip('\n\r '):
            latin_name_object.append(wuzh)
        #else:
        #    print(repr(wuzh).strip(['\n', ' ', '\r\n']))
    print(latin_name_object)
    if len(latin_name_object) > 1:
        if latin_name_object[0].name is None: # a leading bare string like "1." or "7a" is the sequence number
            volume_content['xuhao'] = latin_name_object[0].string.strip('\n\r ')
        else:
            volume_content['xuhao'] = ''
        if latin_name_object[len(latin_name_object)-1].name is None : # a trailing bare string like "(Blume) Tagawa, Acta Phytotax. Geobot. 7: 83. 1938." is the citation
            volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object[1:len(latin_name_object)-1] ))
        else:
            volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object[1:]))
    else:
        volume_content['xuhao'] = ''
        volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object))
    #volume_content['xuhao'] = latin_name[0]
    #print(span.b.next_sibling) # full Latin name of the current taxon
    #print(span.b.find_next_sibling("p").contents[0].strip())
    volume_content['latin_name_full'] = span.b.next_sibling.strip()
    #print(span.b.find_next_sibling("p"))
    #print('-----------------------')
    #print(span.b.find_next_sibling("p").contents[0])
    zh_name_and_pinyin = span.b.find_next_sibling("p").contents[0]
    if is_all_zh(zh_name_and_pinyin): # contains Chinese characters
        print('#######################')
        print(zh_name_and_pinyin.split(' ')[0].strip())
        print(' '.join(zh_name_and_pinyin.split(' ')[1:]))
        #print(re.sub('[A-Za-z0-9\!\%\[\]\,\。\(\)]', '', zh_name_and_pinyin))
        #print(' '.join(re.findall(r'[A-Za-z\(\)]+', zh_name_and_pinyin)))
        volume_content['zh_name'] = zh_name_and_pinyin.split(' ')[0].strip()
        volume_content['zh_name_pinyin'] = ' '.join(zh_name_and_pinyin.split(' ')[1:]).strip()
    else:
        volume_content['zh_name'] = ''
        volume_content['zh_name_pinyin'] = zh_name_and_pinyin.strip()
    #authors = span.b.find_next_sibling("p").p.next_element # next direct string
    spdesc_p_list = span.b.find_next_sibling("p").p
    #print('##############################################')
    #print(spdesc_p_list)
    #print('##############################################')
    #print(spdesc_p_list.find_all('a',recursive=False))
    authors_list = []
    authors_id_list = []
    for author in spdesc_p_list.find_all('a',recursive=False):
        #print(author.text,author.get('href'))
        authors_list.append(author.text)
        authors_id_list.append(str(get_max_number(author.get('href'))))
    volume_content['authors'] = ';'.join(authors_list)
    volume_content['authors_id'] = ';'.join(authors_id_list)
    #print(authors.find_all('p',recursive=False)[0].prettify())
    spdescs = spdesc_p_list.find_all('p',recursive=False)
    #print(spdescs)
    print('##############################################')
    if len(spdescs) > 0:
        # Flatten the description to text, re-wrapping italic/bold runs in
        # <i>/<b> tags so formatting survives into the stored content.
        specs_context = ''
        table = spdescs[0].select_one('table')
        if table is not None:
            #print(table.find_all('a'))
            for s in table.next_sibling.next_sibling.strings:
                #print(repr(s),type(s),s.parent.name=='i')
                if s.parent.name == 'i':
                    specs_context = specs_context + '<i>' + s.strip('\n') + '</i>'
                else:
                    specs_context = specs_context + s.strip('\n')
        else :
            #print(spdescs[0].strings)
            for s in spdescs[0].strings:
                #print(s)
                if s.parent.name == 'i':
                    specs_context = specs_context + '<i>' + s.strip('\n') + '</i>'
                else:
                    if s.parent.name == 'b':
                        specs_context = specs_context + '<b>' + s.strip('\n') + '</b>'
                    specs_context = specs_context + s.strip('\n')
        #print(specs_context.strip())
        #print('##############################################')
        #print(specs_context.strip())# body text
        volume_content['content'] = specs_context.strip()
        #volume_content['create_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # sql = "insert into volume_content (`content`,`create_date`,`del_flag`) values ('%s','%s','%s')"
        # sql = sql % (specs_context.strip(), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 0)
        # pid = insertMysql(sql)
        #print('##############################################')
    print(volume_content)
    wuzhong_detail_sql(volume_content)
    table_jiansuobiao = div_context[0].find_all('table',id='tableKey') # dichotomous key table
    if len(table_jiansuobiao) > 0:
        trs_jiansuobiao = table_jiansuobiao[0].find_all('tr')
        table_jsb = trs_jiansuobiao[1].find_all('table')
        if len(table_jsb) > 0:
            trs_jsb = table_jsb[0].find_all('tr')
            for tr in trs_jsb:
                tds_jsb = tr.find_all('td')
                tds_jxb_cs = tds_jsb[3].contents;
                goto_no = ''
                goto_id = ''
                for tds_jxb_c in tds_jxb_cs:
                    #print(tds_jxb_c.name)
                    if tds_jxb_c.name == 'a':
                        tds_jxb_c_href = tds_jxb_c.get('href')
                        tds_jxb_c_s = tds_jxb_c.string;
                        if tds_jxb_c_s is not None:
                            #print(tds_jxb_c)
                            goto_id = tds_jxb_c_href + '='+ tds_jxb_c_s
                        else:
                            goto_id = tds_jxb_c_href
                    else :
                        goto_no = tds_jxb_c
                #print(tds_jsb[0].text,tds_jsb[1].text,tds_jsb[2].text,goto_no,goto_id)
    ############################################################################################
    ###lower_taxa_ul = div_context[0].select_one('ul')# lower taxa of the current taxon
    ###print(lower_taxa_ul)
    # if lower_taxa_ul is not None:
    #     for li in lower_taxa_ul.find_all('li'):
    #         lower_taxa_a = li.select_one('a')
    #         #print(lower_taxa_a.get('href'),lower_taxa_a.b.string,lower_taxa_a.b.next_sibling)
    ############################################################################################
    related_objects = div_context[0].select_one('span[id="lblObjectList"]')
    #print(related_objects)
    if related_objects is not None:
        related_objects_trs = related_objects.find_all('tr')
        #print(related_objects_trs)
        for related_objects_tr in related_objects_trs:
            related_objects_tds = related_objects_tr.find_all('td')
            if len(related_objects_tds) == 2:
                related_objects_td_li = related_objects_tds[0].li
                if related_objects_td_li is not None:
                    li_a = related_objects_td_li.a
                    print(li_a.text,li_a.get('href'))
                else:
                    print(related_objects_tds[0].text)
                print(related_objects_tds[1].text)
    else:
        print('采集错误')
def get_foc_vol_list(url='http://www.efloras.org/index.aspx'):
    """Collect the FOC volume URLs from the eFloras home page.

    Returns a list of absolute volume-listing URLs (empty if the page
    layout was not recognised).
    """
    soup = BeautifulSoup(getHtml(url), "html.parser")
    flora_spans = soup.find_all('span', id='lblFloraList')
    volume_urls = []
    if len(flora_spans) == 0:
        print('未找到FOC卷册列表')
        return volume_urls
    # FOC sits in the third <ul> of the flora list span; its second <li>
    # holds the volume anchors.
    ul_blocks = flora_spans[0].find_all('ul')
    li_blocks = ul_blocks[2].find_all('li')
    anchors = li_blocks[1].find_all('a')
    print(anchors)
    # Skip the first anchor (header link); the rest are volumes.
    for anchor in anchors[1:]:
        href = anchor.get('href')
        print(' Volume :', anchor.text)
        if href is None:
            print('获取不到volume信息')
            continue
        volume_urls.append('http://www.efloras.org/' + href)
        volume_id = get_max_number(href)
        print('volume_id', str(volume_id))
    return volume_urls
def get_foc_volume_list(volumes,index_url = 'http://www.efloras.org/',level = 0): # from each volume URL, open the family/genus/species listing page and scrape every row, recursing into lower taxa
    """Scrape taxon listing pages and recurse into lower taxa.

    volumes: list of listing-page URLs to process.
    index_url: site root used to absolutise relative links.
    level: recursion depth; incremented per call (1 = family level).
    Side effects: inserts rows via wuzhong_list_sql(), scrapes detail pages
    via get_ke_context(), and prints progress.
    """
    #url_list = []
    level = level + 1 # level = 1 starts at the family rank
    for vol in volumes:
        context = getHtml(vol)
        if context is None:
            continue
        context_soup = BeautifulSoup(context, "html.parser")
        div = context_soup.find_all('div', id='ucFloraTaxonList_panelTaxonList')
        volumeInfo = context_soup.select_one('span[id="ucVolumeInfo_lblVolumeInfo"]')
        # volume_map collects 5 header fields: title, families, genera, species, online date.
        volume_map = []
        if volumeInfo is not None:
            volumeInfo_table_trs = volumeInfo.table.find_all('tr')
            if len(volumeInfo_table_trs) > 0:
                for volumeInfo_table_tr in volumeInfo_table_trs:
                    volumeInfo_table_tds = volumeInfo_table_tr.find_all('td')
                    if len(volumeInfo_table_tds) == 2:
                        volume_map.append(volumeInfo_table_tds[1].text)
                    else:
                        volume_map.append('')
        # Pad to exactly 5 entries so the indexed reads below never fail.
        if len(volume_map) != 5:
            for i in range(5-len(volume_map)): volume_map.append('')
        #print(volume_map)
        foc_taxon_chain = context_soup.select_one('span[id="ucFloraTaxonList_lblTaxonChain"]')
        parent_links = foc_taxon_chain.find_all('a')
        volume_list = foc_taxon_chain.find_all('a', href=re.compile("volume_id"), recursive=False)
        print(volume_list)
        if len(div) > 0:
            tr_list = div[0].find_all('tr',class_='underline')
            for tr in tr_list[2:]:
                td_list = tr.find_all('td') # family rows have four columns, other ranks five; each row is one taxon record
                wuzhong_list = {}
                wuzhong_list['parent_taxon_id'] = get_max_number(vol)
                wuzhong_list['type'] = str(level)
                wuzhong_list['type_name'] = ''
                wuzhong_list['taxon_name'] = ''
                wuzhong_list['title'] = volume_map[0]
                wuzhong_list['families'] = volume_map[1]
                wuzhong_list['genera'] = volume_map[2]
                wuzhong_list['speces'] = volume_map[3]
                wuzhong_list['online_date'] = volume_map[4]
                wuzhong_list['taxon_id'] = td_list[0].text.strip()
                wuzhong_list['accepted_name'] = td_list[1].text.strip()
                wuzhong_detail_link_a = td_list[1].select_one('a')
                if wuzhong_detail_link_a:
                    wuzhong_list['accepted_name_url'] = index_url + wuzhong_detail_link_a.get('href')
                else:
                    wuzhong_list['accepted_name_url'] = ''
                wuzhong_list['accepted_name_cn'] = td_list[2].text.strip()
                wuzhong_list['lower_taxa'] = td_list[3].text.strip()
                lower_taxa_link_a = td_list[3].select_one('a')
                if lower_taxa_link_a:
                    wuzhong_list['lower_taxa_url'] = index_url + lower_taxa_link_a.get('href')
                else:
                    wuzhong_list['lower_taxa_url'] = ''
                # Four-column rows take the volume from the taxon-chain header link.
                if len(td_list) == 4:
                    if len(volume_list) == 1:
                        wuzhong_list['volume_no'] = get_max_number(volume_list[0].get('href'))
                        wuzhong_list['volume_name'] = volume_list[0].text
                    else:
                        wuzhong_list['volume_no'] = 0
                        wuzhong_list['volume_name'] = 0
                # Five-column rows carry their own volume link in the last cell.
                if len(td_list) == 5:
                    volume_link_a = td_list[4].select_one('a')
                    if volume_link_a:
                        wuzhong_list['volume_no'] = get_max_number(volume_link_a.get('href'))
                        wuzhong_list['volume_name'] = volume_link_a.text
                    else:
                        wuzhong_list['volume_no'] = 0
                        wuzhong_list['volume_name'] = 0
                print(wuzhong_list)
                wuzhong_list_sql(wuzhong_list)
                if wuzhong_list['accepted_name_url'] :
                    print('开始采集详细内容:',wuzhong_list['accepted_name_url'])
                    get_ke_context(wuzhong_list['accepted_name_url'])
                # Depth-first recursion into the lower-taxa listing, if any.
                if wuzhong_list['lower_taxa_url'] :
                    print('开始采集:',wuzhong_list['accepted_name_cn'],' 的下级内容', wuzhong_list['accepted_name_url'])
                    url_list = []
                    url_list.append(wuzhong_list['lower_taxa_url'])
                    get_foc_volume_list(url_list,index_url,level)
        else:
            print('无法找到')
        volume_related_links_table = context_soup.find_all('table', id='ucVolumeResourceList_dataListResource')
        #print(volume_related_links_table)
        if len(volume_related_links_table) > 0:
            #print(volume_related_links_table[0])
            volumes_relateds = volume_related_links_table[0].find_all('tr',recursive=False) # only direct child rows of this node
            if len(volumes_relateds) > 0:
                #print(volumes_relateds)
                for volume in volumes_relateds[1:]:
                    trs=volume.find_all('tr')
                    if len(trs) > 0:
                        tds = trs[0].find_all('td')
                        if len(tds) > 1:
                            a = tds[0].select_one('a')
                            href = a.get('href')
                            print('--------',a.text,' ',href)
                            print('=====',tds[1].text)
                            sql1 = "insert into volume_related_links (`taxid`,`type`,`url`,`title`,`resource_type`,`files`,`create_date`,`create_by`,`del_flag`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s')"
                            if tds[1].text.strip() == 'PDF':
                                paths = href.split('/')
                                print(paths)
                                #download_file(href,'f://FloraData//' + paths[len(paths)-1])
                                #sql1 = sql1 % (vol.split('&')[0]),tds[1].text,href,a.text,paths[len(paths)-1],datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),'luoxuan',0)
                                sql1 = sql1 % (re.sub("\D", "", vol.split('&')[0]),tds[1].text,href,a.text,'PDF',paths[len(paths)-1],datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),'luoxuan',0)
                            else: #tds[1].text == 'Treatment'
                                #get_ke_context(href)
                                sql1 = sql1 % (re.sub("\D", "", vol.split('&')[0]),tds[1].text,href,a.text,'',tds[1].text,
                                               datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'luoxuan', 0)
                            #print(insertMysql(sql1))
        else:
            print('无法找到volume_related_links')
def get_max_number(str):  # NOTE: parameter shadows the builtin `str`; name kept for call compatibility
    """Return the largest integer token embedded in the given link text."""
    return max(int(token) for token in re.findall(r"\d+\.?\d*", str))
def is_all_zh(s):
    """Return True if s contains at least one CJK character.

    (Despite the 'all' in the name, a single Chinese character suffices.)
    """
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in s)
def insert_related_objects(related_objects):
    """Insert one related-object record into volume_related_links; return the new row id."""
    template = "insert into volume_related_links (`taxon_id`,`parent_taxon_id`,`type`,`url`,`parent_title`,`title`,`content`,`resource_type`,`files`,`create_date`,`create_by`,`del_flag`) " \
               "values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
    # Keep the value order aligned with the column list above.
    field_order = ('taxon_id', 'parent_taxon_id', 'type', 'url', 'parent_title',
                   'title', 'content', 'resource_type', 'files')
    values = tuple(related_objects[key] for key in field_order)
    values += (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'luoxuan', 0)
    return insertMysql(template % values)
def insert_jiansuobiao(jiansuobiao): # insert one retrieval-key (检索表) row into the database; returns the new row id
    """Insert a dichotomous-key row and return the inserted id.

    NOTE(review): the statement targets `volume_related_links` but the column
    list (first_no, second_no, latin_name, goto_taxon_id, ...) belongs to a
    key table — the table name looks copy-pasted from insert_related_objects;
    confirm the intended table against the schema.
    """
    sql = "insert into volume_related_links (`taxon_id`,`first_no`,`first_no2`,`content`,`no_name`,`second_no`,`latin_name`,`goto_taxon_id`,`goto_taxon_url`,`create_date`,`create_by`,`del_flag`) " \
          "values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
    sql = sql % (jiansuobiao['taxon_id'],jiansuobiao['first_no'],jiansuobiao['first_no2'],jiansuobiao['content'],
                 jiansuobiao['no_name'],jiansuobiao['second_no'],jiansuobiao['latin_name'],jiansuobiao['goto_taxon_id'],
                 jiansuobiao['goto_taxon_url'],datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),'luoxuan',0)
    pid = insertMysql(sql)
    return pid
def wuzhong_detail_sql(volume_content):
    """Insert one species detail record into volume_content; return the new row id."""
    # 'small' is optional in the scraped record; default to an empty string.
    small = volume_content.get('small', '')
    template = "insert into volume_content (`url`,`content`,`taxon_id`,`parent_taxon_id`,`xuhao`,`latin_name`,`latin_name_full`,`zh_name`,`zh_name_pinyin`,`authors`,`authors_id`,`volume_id`,`volume`,`volume_title`,`create_date`,`del_flag`,`small`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
    # Keep the value order aligned with the column list above.
    field_order = ('url', 'content', 'taxon_id', 'parent_taxon_id', 'xuhao', 'latin_name',
                   'latin_name_full', 'zh_name', 'zh_name_pinyin', 'authors', 'authors_id',
                   'volume_id', 'volume', 'volume_title')
    values = tuple(volume_content[key] for key in field_order)
    values += (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 0, small)
    return insertMysql(template % values)
def wuzhong_list_sql(wuzhong_list):
    """Insert one taxon listing row into volume_ke; return the new row id."""
    template = "insert into volume_ke (`parent_taxon_id`,`type`,`type_name`,`taxon_id`,`taxon_name`,`accepted_name`,`accepted_name_url`,`accepted_name_cn`,`lower_taxa`,`lower_taxa_url`,`volume_no`,`volume_name`,`title`,`families`,`genera`,`speces`,`online_date`,`create_date`,`create_by`,`del_flag`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
    # Keep the value order aligned with the column list above.
    field_order = ('parent_taxon_id', 'type', 'type_name', 'taxon_id', 'taxon_name',
                   'accepted_name', 'accepted_name_url', 'accepted_name_cn',
                   'lower_taxa', 'lower_taxa_url', 'volume_no', 'volume_name',
                   'title', 'families', 'genera', 'speces', 'online_date')
    values = tuple(wuzhong_list[key] for key in field_order)
    values += (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'luoxuan', 0)
    return insertMysql(template % values)
def download_file(url,path): # download a file (PDFs can be large)
    """Download `url` to `path`, streaming in chunks.

    Fix: the original `requests.get(url)` buffered the whole response body
    in memory before writing; large volume PDFs are now streamed to disk.
    """
    print('Download file:',url,path)
    with requests.get(url, stream=True) as response:
        with open(path, "wb") as code:
            for chunk in response.iter_content(chunk_size=65536):
                code.write(chunk)
if __name__ == '__main__':
    # The commented-out calls below are ad-hoc experiments kept for reference.
    #search_isbn()
    #print(html)
    #read07Excel('C:/Users/dell/Desktop/书单:PDA_全库(2015)_20180621 科学文库书单第二版2.xlsx')
    #get_page_html()
    #get_ke_context('http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=250098342')
    #get_ke_context('http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=20790')
    #get_text_docx()
    #read07_excel('C:/Users/dell/Desktop/高等二卷.xlsx')
    #mings = ['f','fsdf','fsdf1','fsdfs','fsdfs']
    #print(mings[2:len(mings)])
    # i = 0
    # datas = get_foc();
    # for data in datas:
    #     i = i + 1
    #     print(data)
    #     if i >= 10:break
    # Entry point: fetch the FOC volume list, then recursively scrape every taxon.
    lists = get_foc_vol_list()
    ##print(lists)
    get_foc_volume_list(lists)
    #print(getHtml('http://flora.huh.harvard.edu/FloraData/002/Vol11/foc11-Preface.htm'))
    #print(get_page_html())
    #vol = 'http://www.efloras.org/browse.aspx?flora_id=2&start_taxon_id=103074,volume_page.aspx?volume_id=2002&flora_id=2'
    #print(is_all_zh('剑叶铁角蕨 jian ye tie jiao jue'))
    #print(is_all_zh('jian ye tie jiao jue'))
#print(re.findall(r"\d+\.?\d*",vol),get_max_number(vol)) | [
"1QAZ2wsx"
] | 1QAZ2wsx |
582ef42efdfd7e420d4b88d395d1bce8c2254139 | 55826466383423f170e4fe8999e60670edd53704 | /script/DRQN_hindsight/2d/DRQN_hindsight_2D_static.py | cb2b14f8814f460763481320ae10f2931a84ed39 | [] | no_license | siyuan2018/SNAC | c48dc7ced78f30bc6847025b8637337737bd3467 | 049c0566e2c154f93b5015a1a4607fdb8b4be117 | refs/heads/main | 2023-03-28T20:25:53.886811 | 2021-04-05T18:19:46 | 2021-04-05T18:19:46 | 362,850,937 | 1 | 0 | null | 2021-04-29T14:46:51 | 2021-04-29T14:46:50 | null | UTF-8 | Python | false | false | 12,543 | py | import sys
import torch
import torch.nn as nn
import numpy as np
import pickle
import random
import time
import os
from collections import deque
sys.path.append('../../Env/2D/')
from DMP_Env_2D_static import deep_mobile_printing_2d1r
from DMP_Env_2D_static_hindsight_replay import deep_mobile_printing_2d1r_hindsight
# plan_choose: 0 Dense circle, 1 Sparse circle
plan_choose = 0
# All checkpoints and pickled histories are written under this directory.
log_path = "./log/DRQN_hindsight/2D/Static/plan_"+str(plan_choose)+"/"
if os.path.exists(log_path) == False:
    os.makedirs(log_path)
print('2D_Static')
print('plan_choose:',plan_choose)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Two environment instances: one for normal rollouts, one to replay
# episodes against the achieved outcome (hindsight relabelling).
env = deep_mobile_printing_2d1r(plan_choose=plan_choose)
env_hindsight = deep_mobile_printing_2d1r_hindsight(plan_choose=plan_choose)
print("device_using:", device)
######################
# hyper parameter
minibatch_size=64
Lr=0.00001  # Adam learning rate
N_iteration=10000  # training episodes
N_iteration_test=10  # evaluation rollouts per training episode
alpha=0.9  # discount factor (gamma)
Replay_memory_size=1000  # episodes kept in replay memory
Update_traget_period=200  # learning steps between target-network syncs
Action_dim=env.action_dim
State_dim=env.state_dim
hidden_state_dim=256  # LSTM hidden size
Time_step=20  # sub-sequence length sampled per episode
UPDATE_FREQ=5  # environment steps between learning updates
INITIAL_EPSILON = 0.2
FINAL_EPSILON = 0.0
######################
use_hindsight=True
print("State_dim:",State_dim)
print("plan_width:", env.plan_width)
print("^^^^^^^^^^^^^^^^^^^^^^^^^^")
def iou(environment_memory,environment_plan,HALF_WINDOW_SIZE,plan_height,plan_width):
    """Intersection-over-union between the built structure and the target plan.

    Both 2-D arrays are padded by HALF_WINDOW_SIZE on every side; only the
    plan-sized interior window is compared. Any non-zero cell counts as built.

    Fix: returns 0.0 when both regions are empty instead of raising
    ZeroDivisionError on the 0/0 union.
    """
    window = (slice(HALF_WINDOW_SIZE, HALF_WINDOW_SIZE + plan_height),
              slice(HALF_WINDOW_SIZE, HALF_WINDOW_SIZE + plan_width))
    component1 = environment_plan[window].astype(bool)
    component2 = environment_memory[window].astype(bool)
    overlap = component1 * component2  # Logical AND
    union = component1 + component2  # Logical OR
    union_size = float(union.sum())
    if union_size == 0.0:
        # Nothing planned and nothing built: define IOU as 0 rather than 0/0.
        return 0.0
    return overlap.sum() / union_size
def get_and_init_FC_layer(din, dout):
    """Create a fully-connected layer with weights drawn from N(0, 0.1)."""
    layer = nn.Linear(din, dout)
    # Same initialisation as weight.data.normal_(0, 0.1), via the init API.
    nn.init.normal_(layer.weight, mean=0.0, std=0.1)
    return layer
class Q_NET(nn.Module):
    """Dueling recurrent Q-network: 3 FC layers -> LSTM -> value/advantage heads."""
    def __init__(self,out_size,hidden_size):
        super(Q_NET, self).__init__()
        self.out_size = out_size
        self.hidden_size = hidden_size
        self.fc_1 = get_and_init_FC_layer(State_dim, 64)
        self.fc_2 = get_and_init_FC_layer(64, 128)
        self.fc_3 = get_and_init_FC_layer(128, 128)
        self.rnn=nn.LSTM(128,hidden_size,num_layers=1,batch_first=True)
        # Dueling heads: per-action advantage and a scalar state value.
        self.adv = get_and_init_FC_layer(hidden_size, self.out_size)
        self.val = get_and_init_FC_layer(hidden_size, 1)
        self.relu = nn.ReLU()
    def forward(self,x,bsize,time_step,hidden_state,cell_state):
        # Flatten (batch, time) so the FC stack processes every step at once.
        x=x.view(bsize*time_step,State_dim)
        x = self.fc_1(x)
        x = self.relu(x)
        x = self.fc_2(x)
        x = self.relu(x)
        x = self.fc_3(x)
        x = self.relu(x)
        # Restore the (batch, time, feature) layout expected by a batch_first LSTM.
        x = x.view(bsize,time_step,128)
        lstm_out = self.rnn(x,(hidden_state,cell_state))
        # Only the last timestep's LSTM output feeds the Q heads.
        out = lstm_out[0][:,time_step-1,:]
        h_n = lstm_out[1][0]
        c_n = lstm_out[1][1]
        adv_out = self.adv(out)
        val_out = self.val(out)
        # Dueling aggregation: Q = V + (A - mean(A)).
        qout = val_out.expand(bsize,self.out_size) + (adv_out - adv_out.mean(dim=1).unsqueeze(dim=1).expand(bsize,self.out_size))
        return qout, (h_n,c_n)
    def init_hidden_states(self,bsize):
        # Zero-initialised (h, c) for a fresh episode; shape (num_layers, batch, hidden).
        h = torch.zeros(1,bsize,self.hidden_size).float().to(device)
        c = torch.zeros(1,bsize,self.hidden_size).float().to(device)
        return h,c
class Memory():
    """FIFO store of whole episodes for recurrent experience replay.

    NOTE: get_batch assumes every stored episode has at least `time_step`
    transitions; shorter episodes would make the randint range invalid.
    """
    def __init__(self,memsize):
        self.memsize = memsize
        # Oldest episodes are evicted automatically once memsize is reached.
        self.memory = deque(maxlen=self.memsize)
    def add_episode(self,epsiode):
        self.memory.append(epsiode)
    def get_batch(self,bsize,time_step):
        def clip(episode):
            # Uniformly pick where the time_step-long window starts.
            start = np.random.randint(0, len(episode) + 1 - time_step)
            return episode[start:start + time_step]
        return [clip(episode) for episode in random.sample(self.memory, bsize)]
class DQN_AGNET():
    """DRQN agent: online + target Q-networks, epsilon-greedy policy, episode replay."""
    def __init__(self,device):
        self.device=device
        # Online network (trained every update) and target network (periodic copy).
        self.Eval_net= Q_NET(Action_dim,hidden_size=hidden_state_dim).to(device)
        self.Target_net = Q_NET(Action_dim,hidden_size=hidden_state_dim).to(device)
        self.learn_step = 0                                     # counts learning steps so the target net is refreshed periodically
        # Optimiser / loss for the online network.
        self.optimizer = torch.optim.Adam(self.Eval_net.parameters(), lr=Lr)
        self.loss = nn.SmoothL1Loss()
        self.loss_his=[]
        self.greedy_epsilon=0.2
        self.replaymemory=Memory(Replay_memory_size)
    def choose_action(self,s,hidden_state,cell_state):
        # Epsilon-greedy selection; the LSTM state is advanced on both branches
        # so the recurrent context stays consistent even for random actions.
        state=torch.from_numpy(s).float().to(self.device)
        choose=np.random.uniform()
        if choose<=self.greedy_epsilon:
            model_out = self.Eval_net.forward(state,bsize=1,time_step=1,hidden_state=hidden_state,cell_state=cell_state)
            action=np.random.randint(0, Action_dim)
            hidden_state = model_out[1][0]
            cell_state = model_out[1][1]
        else:
            model_out = self.Eval_net.forward(state,bsize=1,time_step=1,hidden_state=hidden_state,cell_state=cell_state)
            out = model_out[0]
            action = int(torch.argmax(out[0]))
            hidden_state = model_out[1][0]
            cell_state = model_out[1][1]
        return action, hidden_state, cell_state
    def learning_process(self):
        """One gradient step on a minibatch of Time_step-long sub-sequences."""
        self.optimizer.zero_grad()
        self.Eval_net.train()
        # Sync the target network every Update_traget_period learning steps.
        if self.learn_step% Update_traget_period == 0:
            self.Target_net.load_state_dict(self.Eval_net.state_dict())
        hidden_batch, cell_batch = self.Eval_net.init_hidden_states(bsize=minibatch_size)
        batch = self.replaymemory.get_batch(bsize=minibatch_size,time_step=Time_step)
        # Re-pack the sampled sub-sequences into per-field arrays.
        current_states = []
        acts = []
        rewards = []
        next_states = []
        for b in batch:
            cs,ac,rw,ns,ep = [],[],[],[],[]  # (ep is unused)
            for element in b:
                cs.append(element[0])
                ac.append(element[1])
                rw.append(element[2])
                ns.append(element[3])
            current_states.append(cs)
            acts.append(ac)
            rewards.append(rw)
            next_states.append(ns)
        current_states = np.array(current_states)
        acts = np.array(acts)
        rewards = np.array(rewards)
        next_states = np.array(next_states)
        torch_current_states = torch.from_numpy(current_states).float().to(self.device)
        torch_acts = torch.from_numpy(acts).long().to(self.device)
        torch_rewards = torch.from_numpy(rewards).float().to(self.device)
        torch_next_states = torch.from_numpy(next_states).float().to(self.device)
        # Only the final timestep of each sub-sequence contributes to the loss;
        # earlier steps just warm up the LSTM state.
        Q_s, _ = self.Eval_net.forward(torch_current_states,bsize=minibatch_size,time_step=Time_step,hidden_state=hidden_batch,cell_state=cell_batch)
        Q_s_a = Q_s.gather(dim=1,index=torch_acts[:,Time_step-1].unsqueeze(dim=1)).squeeze(dim=1)
        Q_next,_ = self.Target_net.forward(torch_next_states,bsize=minibatch_size,time_step=Time_step,hidden_state=hidden_batch,cell_state=cell_batch)
        Q_next_max,__ = Q_next.detach().max(dim=1)
        # One-step TD target: r + alpha * max_a' Q_target(s', a').
        target_values = torch_rewards[:,Time_step-1] + (alpha * Q_next_max)
        loss = self.loss(Q_s_a, target_values)
        loss.backward()
        self.optimizer.step()
        self.learn_step+=1
        self.loss_his.append(loss.item())
#### initial fill the replaymemory
# Warm up replay memory with full random-policy episodes so the first
# learning updates have something to sample from.
# device = torch.device("cpu")
agent=DQN_AGNET(device)
for i in range(0,Replay_memory_size):
    prev_state = env.reset()
    local_memory = []
    while True:
        action = np.random.randint(0,Action_dim)
        next_state,reward,done = env.step(action)
        local_memory.append((prev_state,action,reward,next_state))
        prev_state = next_state
        if done:
            break
    agent.replaymemory.add_episode(local_memory)
agent.greedy_epsilon=INITIAL_EPSILON
print("agent greedy_epsilon", agent.greedy_epsilon)
# Bookkeeping for the training loop below.
best_reward=-500
total_steps = 0
reward_history_train=[]
reward_history_test=[]
iou_history_train=[]
iou_history_test=[]
# Main training loop: rollout -> (optional) hindsight relabelling -> evaluation.
for episode in range(N_iteration):
    state = env.reset()
    # print("plan",env.one_hot)
    print("total_brick",env.total_brick)
    reward_train = 0
    step_size_memory=[]
    start_time = time.time()
    local_memory=[]
    hidden_state, cell_state = agent.Eval_net.init_hidden_states(bsize=1)
    # One epsilon-greedy episode; learn every UPDATE_FREQ environment steps.
    while True:
        total_steps +=1
        action,hidden_state_next, cell_state_next = agent.choose_action(state,hidden_state, cell_state)
        state_next, r, done = env.step(action)
        step_size_memory.append(env.step_size)
        local_memory.append((state, action, r, state_next))
        reward_train += r
        if total_steps % UPDATE_FREQ == 0:
            agent.learning_process()
        if done:
            reward_history_train.append(reward_train)
            break
        state = state_next
        hidden_state, cell_state = hidden_state_next, cell_state_next
    agent.replaymemory.add_episode(local_memory)
    iou_train=iou(env.environment_memory,env.plan,env.HALF_WINDOW_SIZE,env.plan_height,env.plan_width)
    iou_history_train.append(iou_train)
    #### hindsight
    # Replay the same actions against a plan equal to what was actually built,
    # so the relabelled rewards treat the achieved structure as the goal.
    if use_hindsight:
        local_memory_hindsight=[]
        _ = env_hindsight.reset()
        env_hindsight.plan[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
                env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]=env.environment_memory[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
                env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]
        env_hindsight.input_plan= env_hindsight.plan[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
                env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]
        for i,element in enumerate(local_memory):
            _, r, _ = env_hindsight.step(element[1],step_size_memory[i])
            local_memory_hindsight.append((element[0],element[1],r,element[3]))
        agent.replaymemory.add_episode(local_memory_hindsight)
    ############ test agent
    # Evaluate the current policy over N_iteration_test rollouts (epsilon still applies).
    iou_test=0
    reward_test_total=0
    start_time_test = time.time()
    for _ in range(N_iteration_test):
        state = env.reset()
        reward_test=0
        hidden_state, cell_state = agent.Eval_net.init_hidden_states(bsize=1)
        while True:
            action, hidden_state_next, cell_state_next = agent.choose_action(state, hidden_state, cell_state)
            state_next, r, done = env.step(action)
            reward_test += r
            if done:
                break
            state = state_next
            hidden_state, cell_state = hidden_state_next, cell_state_next
        reward_test_total += reward_test
        iou_test += iou(env.environment_memory, env.plan, env.HALF_WINDOW_SIZE, env.plan_height, env.plan_width)
    reward_test_total = reward_test_total / N_iteration_test
    secs = int(time.time() - start_time)
    mins = secs / 60
    secs = secs % 60
    print('Epodise: ', episode,
          '| Ep_reward_test:', reward_test_total, '| Ep_IOU_test: ', iou_test / N_iteration_test)
    print(" | time in %d minutes, %d seconds\n" % (mins, secs))
    reward_history_test.append(reward_test_total)
    iou_history_test.append(iou_test / N_iteration_test)
    # Linear epsilon decay over the whole run.
    if agent.greedy_epsilon > FINAL_EPSILON:
        agent.greedy_epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/N_iteration
    # Checkpoint whenever evaluation reward improves.
    if reward_test_total > best_reward:
        torch.save(agent.Eval_net.state_dict(), log_path+'Eval_net_episode_%d.pth' % (episode))
        torch.save(agent.Target_net.state_dict(), log_path+'Target_net_episode_%d.pth' % (episode))
        best_reward=reward_test_total
# Persist training curves for later plotting.
with open(log_path+"reward_his_train.pickle", "wb") as fp:
    pickle.dump(reward_history_train, fp)
with open(log_path+"reward_his_test.pickle", "wb") as fp:
    pickle.dump(reward_history_test, fp)
with open(log_path+"loss.pickle", "wb") as fp:
    pickle.dump(agent.loss_his, fp)
with open(log_path+"iou_train_history.pickle", "wb") as fp:
    pickle.dump(iou_history_train, fp)
with open(log_path+"iou_test_history.pickle", "wb") as fp:
    pickle.dump(iou_history_test, fp)
| [
"55855538+WenyuHan-LiNa@users.noreply.github.com"
] | 55855538+WenyuHan-LiNa@users.noreply.github.com |
ee0501d3fc56808a4fd300c256c1bd3071ec5d4c | fc4fa38962b121e31edf414a860a09caeb8048d2 | /Homework Problems & Solutions/HW 1/Shaik_Alimulla_HW1.py | df4f656e0119a969169fffd2d38f7f76cd6eb962 | [] | no_license | shaikalimulla/Data-Mining | 36da27846d4250eb232fdaa9a3f40195f0cd1a88 | 26311593a6ee902a5d148de22658cf703e2ef665 | refs/heads/master | 2016-09-14T12:50:35.244013 | 2016-04-30T02:47:04 | 2016-04-30T02:47:04 | 57,423,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | #Assignment based on MAGIC Gamma Telescope Data Set ( http://archive.ics.uci.edu/ml/datasets/MAGIC+Gamma+Telescope )
import argparse
import numpy as np
class dataSet:
    """
    Class to store the MAGIC Gamma Telescope Data Set
    """
    def __init__(self, location):
        # Keep every raw record (newline included); callers split the columns.
        with open(location, "r") as data_file:
            self.readData = data_file.readlines()
def calculate(data, ithAttribute):
    """Compute summary statistics for the ith attribute (1-based) of the dataset.

    Input Parameters:
        data: list of comma-separated record strings read from the file.
        ithAttribute: 1-based index of the attribute to summarise.

    Returns the tuple:
        (N, min, max, mean, standard deviation, Q1, median, Q3, IQR)

    Fixes: removed the dead placeholder assignments (which used the strings
    "inf"/"-inf" instead of floats) and replaced the manual append loop with
    a comprehension; quartiles are computed in one vectorised call.
    """
    # Pull the requested column out of every CSV row (1-based -> 0-based index).
    values = [float(row.split(',')[ithAttribute - 1]) for row in data]
    column = np.asarray(values)
    q1, median, q3 = np.percentile(column, [25, 50, 75])
    return (column.size, min(values), max(values), np.mean(column),
            np.std(column), q1, median, q3, abs(q3 - q1))
if __name__ == "__main__":
    # CLI: --i selects the attribute (1..10), --data points at the CSV file.
    parser = argparse.ArgumentParser(description='Data Mining HW1')
    parser.add_argument('--i', type=int,
                        help="ith attribute of the dataset ( limit 1 to 10 )",
                        default=5,
                        choices=set((1,2,3,4,5,6,7,8,9,10)) ,
                        required=True)
    parser.add_argument("--data", type=str,
                        help="Location of the downloaded file",
                        default="magic04.data.txt",
                        required=False)
    args = parser.parse_args()
    data = dataSet(args.data)
    # Print the nine statistics as a single comma-separated line.
    print(','.join(map(str,calculate(data.readData,args.i))))
| [
"alimulla.shaik@gmail.com"
] | alimulla.shaik@gmail.com |
ff0717b66ccf936a78722d90e6d31dada09a8cf5 | 590c0fa3a144146d9ba3caf0ac7ff9e0a9e5c914 | /packages/riot/tft/__init__.py | 24c4fca9f3dd6ba4c7eb2a5045b91baa86f7a812 | [] | no_license | shoko31/InKeeperBot | 7154fbe1b2ac82a2ac5defe069927a00b0f37952 | c5fc16fc3ff0b2a113feb67e1d8e2c8dd7507b72 | refs/heads/master | 2023-05-25T23:00:41.163366 | 2020-07-23T11:45:49 | 2020-07-23T11:45:49 | 220,335,670 | 0 | 0 | null | 2023-05-22T21:38:27 | 2019-11-07T21:52:32 | Python | UTF-8 | Python | false | false | 181 | py | # __init__.py
from .tft_user import TFTUser
from .tft_game import TFTGame
from .tft_participant import TFTParticipant
from .tft_trait import TFTTrait
from .tft_unit import TFTUnit
| [
"elliott.zz59@gmail.com"
] | elliott.zz59@gmail.com |
14cf80ef32e4bf3d66b3d4e93f8898f83441fbf8 | f1aeae7a5256ad26c3808375ed8bfd5c8d649825 | /config/lan_scope.py | 917412ce34627e538870a712375c434200ef612c | [] | no_license | wikimedia/phlogiston | 4f3f1f5de2e53027ba5c2ebfd69caa381da9dd38 | eb57b5adc7dc193dc3c4a94b6ffb68cca4984741 | refs/heads/master | 2023-06-30T12:28:33.190327 | 2019-03-28T03:05:23 | 2019-03-28T03:05:23 | 40,551,004 | 13 | 4 | null | 2016-10-18T20:01:47 | 2015-08-11T15:51:58 | Python | UTF-8 | Python | false | false | 93 | py | [vars]
scope_title = Language
default_points = 0
start_date = 2016-07-01
show_points = False
| [
"jaufrecht@wikimedia.org"
] | jaufrecht@wikimedia.org |
3090368248d3f1123c7946855c97dbc0ec1154e9 | 4fd84e0e1097d1153ed477a5e76b4972f14d273a | /myvirtualenv/lib/python3.7/site-packages/azure/mgmt/iothub/models/certificate_properties.py | d91afb9c0adb00d0e035b9e1023cc3ad459f53fc | [
"MIT"
] | permissive | peterchun2000/TerpV-U | c045f4a68f025f1f34b89689e0265c3f6da8b084 | 6dc78819ae0262aeefdebd93a5e7b931b241f549 | refs/heads/master | 2022-12-10T09:31:00.250409 | 2019-09-15T15:54:40 | 2019-09-15T15:54:40 | 208,471,905 | 0 | 2 | MIT | 2022-12-08T06:09:33 | 2019-09-14T16:49:41 | Python | UTF-8 | Python | false | false | 2,165 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see file header); manual edits here are
# limited to comments and would be lost on regeneration.
class CertificateProperties(Model):
    """The description of an X509 CA Certificate.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar subject: The certificate's subject name.
    :vartype subject: str
    :ivar expiry: The certificate's expiration date and time.
    :vartype expiry: datetime
    :ivar thumbprint: The certificate's thumbprint.
    :vartype thumbprint: str
    :ivar is_verified: Determines whether certificate has been verified.
    :vartype is_verified: bool
    :ivar created: The certificate's create date and time.
    :vartype created: datetime
    :ivar updated: The certificate's last update date and time.
    :vartype updated: datetime
    """

    # All properties are server-populated; readonly keeps msrest from
    # serialising them into requests.
    _validation = {
        'subject': {'readonly': True},
        'expiry': {'readonly': True},
        'thumbprint': {'readonly': True},
        'is_verified': {'readonly': True},
        'created': {'readonly': True},
        'updated': {'readonly': True},
    }

    # msrest (de)serialisation map: python attribute -> wire key / type.
    _attribute_map = {
        'subject': {'key': 'subject', 'type': 'str'},
        'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'is_verified': {'key': 'isVerified', 'type': 'bool'},
        'created': {'key': 'created', 'type': 'rfc-1123'},
        'updated': {'key': 'updated', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(CertificateProperties, self).__init__(**kwargs)
        # Initialised to None; values are filled in by deserialisation.
        self.subject = None
        self.expiry = None
        self.thumbprint = None
        self.is_verified = None
        self.created = None
        self.updated = None
| [
"peterchun2000@gmail.com"
] | peterchun2000@gmail.com |
d94e89c1a33b604c6d09bbc9becb67b7a20f9699 | 58ed092530bdb8a6f2bef0ebe261108e8a9c0aff | /myapp/search_indexes.py | abee499518bdc025949a48c1812c21f7f0f7d02f | [] | no_license | 100Rashmi/myTweeter | 2373fcb2ddf09432bfc17d1ddada935c3a29d6d1 | d7a5c067ee639347604a3cdaa11c14c0e11d5515 | refs/heads/master | 2022-12-27T00:06:52.553893 | 2017-11-06T22:16:13 | 2017-11-06T22:16:13 | 109,440,649 | 0 | 0 | null | 2022-12-07T23:47:29 | 2017-11-03T20:37:40 | Python | UTF-8 | Python | false | false | 1,292 | py | import datetime
from haystack import indexes
from myapp.models import Dweet, User
class DweetIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Dweet records."""
    # Primary document field required by haystack.
    text = indexes.CharField(document=True)
    content = indexes.CharField(model_attr='dweet_data')
    dweet_id = indexes.CharField(model_attr='dweet_id')
    created_time = indexes.DateTimeField(model_attr='created_time')
    def get_model(self):
        return Dweet
    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        # NOTE(review): datetime.now() is naive; if Django runs with
        # USE_TZ=True the stored datetimes are aware — confirm this filter.
        return self.get_model().objects.filter(created_time__lte=datetime.datetime.now())
class UserIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over User profiles."""
    # Primary document field required by haystack.
    text = indexes.CharField(document=True)
    user_id = indexes.CharField(model_attr='user_id')
    user_first_name = indexes.CharField(model_attr='user_first_name')
    user_last_name = indexes.CharField(model_attr='user_last_name')
    user_profile_name = indexes.CharField(model_attr='user_profile_name')
    modified_time = indexes.DateTimeField(model_attr='modified_time')
    def get_model(self):
        return User
    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        # NOTE(review): datetime.now() is naive; verify against USE_TZ.
        return self.get_model().objects.filter(modified_time__lte=datetime.datetime.now())
| [
"singhrashmi579@adya.io"
] | singhrashmi579@adya.io |
a605b1d00537686bda9a0600a32decd33694a451 | 698512c01048fcefcc14583089ef2e8c7962923a | /Python_Projects/Data_Visualization/Chap17_working_w_API/python_repos.py | 71713ed6cee6b967da24332c0021287834a44c1f | [] | no_license | Miguel-Tirado/Python | c76cb9846c9a2b9c6b3c4827cdb95042f4e5d447 | 227def380c64095c3040c848aa035ac46d26d079 | refs/heads/main | 2023-04-16T12:15:03.301275 | 2021-04-30T16:39:48 | 2021-04-30T16:39:48 | 346,443,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | import requests
# Make an API call and store the responce
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept' : 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
print(f"Status code: {r.status_code}")
# store API responce in a variable
# convert from json format to python dictionary format
responce_dict = r.json()
print(f"Total repositories: {responce_dict['total_count']}")
# Explore information about the repositories
repo_dicts = responce_dict['items']
print(f"Repositories returned: {len(repo_dicts)}")
# Examine the first repository
repo_dict = repo_dicts[0]
print("\nSelected information about the first repository:")
for repo_dict in repo_dicts:
print(f"Name: {repo_dict['name']}")
print(f"Owner: {repo_dict['owner']['login']}")
print(f"Stars: {repo_dict['stargazers_count']}")
print(f"Repository: {repo_dict['html_url']}")
print(f"Created: {repo_dict['created_at']}")
print(f"Updated: {repo_dict['updated_at']}")
print(f"Description: {repo_dict['description']}\n")
print(f"\nKeys: {len(repo_dict)}")
for key in sorted(repo_dict.keys()):
print(key)
# Process results
# when working with more complex API's its important to check 'incomplete_results'
# note that incomplte_results = False means the request was sucessful since (its not incomplete)
incomplete_results = responce_dict['incomplete_results']
print(responce_dict.keys())
# checking to see if incomplete_results is true or false?
# false means were the request was sucessful
# Note that sometimes if incomplte_results is true doesnt always mean the infor is incomplete
# Git API documentation states that it could be reaching a timeout or the request has already been
# made before
print(incomplete_results)
| [
"miguel.e.tirado11@gmail.com"
] | miguel.e.tirado11@gmail.com |
9918925b5893ab5e67cfe34926eb8f39e50a3f68 | a5b3c17361b0d68818a0088d2632706353aa768f | /app/core/urls.py | 2c01c9dc76b59d446a2cc277aaf6d2d00a8d8820 | [] | no_license | marcinpelszyk/django-docker-compose-deploy | 7bd6d91a08aa4c60fd801115e4277d26cfd77642 | 6e4716d5324172778e5babecb40952de66448301 | refs/heads/main | 2023-06-06T02:56:44.709915 | 2021-06-28T15:38:56 | 2021-06-28T15:38:56 | 380,349,649 | 0 | 1 | null | 2021-06-28T08:10:53 | 2021-06-25T20:42:07 | Python | UTF-8 | Python | false | false | 387 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
# URL routes for the project: only the Django admin is exposed here.
urlpatterns = [
    path('admin/', admin.site.urls),
]
# In development (DEBUG=True) serve static and media files through Django
# itself; in production a front-end web server should handle these paths.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"marcin.pelszyk90@gmail.com"
] | marcin.pelszyk90@gmail.com |
069744ff71226f81911edc60bc48d467b30ed337 | b2a2a2f7e19fc8e9c6f5d2dedb0b4b10d7c813ae | /backend/api/util/functional/curry.py | 1a66ea3074629bc98d88a47586b5b1aef9541bb3 | [] | no_license | glenstarchman/bar-rate | d7a6e6660bd3fafe7777d435d33334e2be4d0480 | 575e5f695650487a679ede04af6f62d464c53c18 | refs/heads/master | 2022-02-27T01:31:17.879000 | 2019-09-26T14:43:05 | 2019-09-26T14:43:05 | 191,376,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import functools
def curried(n):
    """Decorator factory: make a function of *n* positional args curryable.

    The decorated function may be called with any number of positional
    arguments; until *n* of them have been supplied in total it returns a
    new partially-applied callable, and once at least *n* have been
    collected the underlying function is invoked.

    Example:
        @curried(3)
        def add(a, b, c): return a + b + c
        add(1)(2)(3) == add(1, 2)(3) == add(1, 2, 3) == 6
    """
    def curry(fn):
        # wraps preserves the decorated function's name/docstring; it
        # silently skips attributes missing on functools.partial objects.
        @functools.wraps(fn)
        def _inner(*args):
            if len(args) < n:
                # Not enough arguments yet: bind what we have and wait
                # for the remaining n - len(args) ones.
                return curried(n - len(args))(functools.partial(fn, *args))
            return fn(*args)
        return _inner
    return curry
| [
"glen@starchman.com"
] | glen@starchman.com |
88d871218ddc9d5a96e3ac821323d3bf566ce9b1 | fb05ae8048b188c7d73e45d0b0732223686eb4e4 | /dash-demo.py | 8c67cc6049a8940e154186d5777e2c72a2d37422 | [] | no_license | jluttine/dash-demo | 1b8bd0bf0b6570cf8e33c0fb9278390f37baa686 | 2eab4c7cd92b24214354d8a5e3bce866677efe50 | refs/heads/master | 2023-01-12T19:03:09.745917 | 2020-11-13T16:57:41 | 2020-11-13T16:57:41 | 312,356,690 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | import dash
import dash_html_components as html
import dash_core_components as dcc
from pages import demo1_graph, demo2_datatable
# Create the Dash app/server
app = dash.Dash(
__name__,
external_stylesheets=[
"https://codepen.io/chriddyp/pen/bWLwgP.css",
],
# We need to suppress these errors because when we define the callbacks,
# the subpage layouts haven't been defined yet.. So there would be errors
# about missing IDs. Is there some better solution?
suppress_callback_exceptions=True,
)
# List separate pages
subpages = [
("/demo-graph", demo1_graph),
("/demo-datatable", demo2_datatable),
]
# Generic page layout for the entire app
app.layout = html.Div(
[
# This element is used to read the current URL. Not visible to the
# user.
dcc.Location(id="url", refresh=False),
# The content will be rendered in this element so the children of this
# element will change when browsing to a different page
html.Div(
id="page-content",
className="DashboardContainer",
),
]
)
# Set callbacks for each page
for (_, page) in subpages:
page.set_callbacks(app)
# Layout of the main page
main_layout = html.Div(
className="Container",
children=[
html.H1("Plotly Dash demo"),
html.P(html.I("Jaakko Luttinen - November 16, 2020")),
html.P(html.I("Lead Data Scientist @ Leanheat by Danfoss")),
html.Ul(
[
html.Li([
"This demo is available at: ",
html.A(
"https://github.com/jluttine/dash-demo",
href="https://github.com/jluttine/dash-demo"
)
]),
html.Li("What is Plotly Dash?"),
html.Li("Why not Jupyter Notebooks?"),
]
),
] + [
html.A(
html.Div(
className="Card",
children=[
html.H2(page.title),
html.P(page.description),
]
),
href=url,
) for (url, page) in subpages
] + [
html.Ul([
html.Li([
"So much more cool features: ",
html.A(
"https://dash.plotly.com/",
href="https://dash.plotly.com/",
),
]),
html.Li("Show our real production Dash")
]),
]
)
@app.callback(
    dash.dependencies.Output("page-content", "children"),
    [dash.dependencies.Input("url", "pathname")]
)
def display_page(pathname):
    """Return the layout for the page addressed by *pathname*.

    "/" maps to the main page; any other known path is looked up in
    ``subpages`` and rendered with a shared header (back-link, title,
    description) above the page's own layout.
    """
    if pathname != "/":
        selected = dict(subpages)[pathname]
        header_and_body = [
            # Fixed elements shown at the top of every subpage.
            dcc.Link("< Back to main page", href="/"),
            html.H1(selected.title),
            html.P(selected.description),
            # The actual subpage content.
            selected.layout,
        ]
        return html.Div(header_and_body)
    return main_layout
if __name__ == "__main__":
app.run_server(debug=True)
| [
"jaakko.luttinen@iki.fi"
] | jaakko.luttinen@iki.fi |
86865a380e10df0386ac53bd7aac552daf77e862 | ba6c64c6f8d348a86c16395aaa5f8fadc6cf4386 | /python/lab3/weather_today.py | 55eb99344a6e96084ada05a6f8a6b5ec355564cf | [] | no_license | AmalM7/DataScienceAcademy | 875f00b1909a3b9ba76e178852db7aa6e851e220 | aa3719465f9582436f511ce56ad94cdf59354dca | refs/heads/master | 2020-03-30T19:21:32.129618 | 2018-10-07T19:59:39 | 2018-10-07T19:59:39 | 151,538,683 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import requests
import sys
# Require the city name as the first command-line argument.
# NOTE(review): the usage message has a typo ("inter" -> "enter") and exits
# with status 0 on a usage error; both left unchanged here.
if len(sys.argv)==1:
    print("you have to inter the city name")
    sys.exit(0)
else:
    city=sys.argv[1]
# OpenWeatherMap API key (hard-coded; consider moving to an env variable).
api_key="bc3dbc9f88d3d484ee1865b765665f1b"
class Weather:
    """Thin wrapper around the OpenWeatherMap current-weather endpoint."""
    def __init__(self, key):
        # API key appended to every request made by this instance.
        self.key = key
    def get_city_weather(self, city):
        """Fetch the current weather for *city* and return the parsed JSON."""
        url = "http://api.openweathermap.org/data/2.5/weather?q=" + city + "&appid=" + self.key
        return requests.get(url).json()
    def show_data(self, json_object):
        """Print a short human-readable summary of an API response dict."""
        main_section = json_object["main"]
        print("The temperature is", main_section["temp"])
        print("The humidity is", main_section["humidity"])
        print("The weather description is", json_object["weather"][0]["description"])
# Fetch and display the current weather for the requested city.
weather_today=Weather(api_key)
obj=weather_today.get_city_weather(city)
weather_today.show_data(obj)
| [
"noreply@github.com"
] | AmalM7.noreply@github.com |
f044bb0442fbcbaa809b0a48dc4740ee1516c226 | c3e86a8cb94c67a7b0881d72a50e529b85cd27ac | /NameMixer2.0.py | 6c493c9546e36c0e61cfd4102f74382c99c81b06 | [] | no_license | Rinlix/Rix | 252e011089e6b7eec138db3f9e1dc50621974aa8 | 478eb79eb12e38f7479c45cc08ec6af3ebfd6c0e | refs/heads/master | 2020-04-16T16:55:58.775508 | 2019-01-15T00:24:41 | 2019-01-15T00:24:41 | 165,755,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | #NameMixer.py
# Repeatedly collect a list of names from the user, then print them back
# in a random order with ordinal labels ("1st is : ...").
import random
import time


def ordinal_label(position):
    """Return the English ordinal string for a 1-based position (1 -> '1st')."""
    # 11, 12 and 13 are irregular ("11th", not "11st"); check them first.
    if position % 100 in (11, 12, 13):
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(position % 10, 'th')
    return str(position) + suffix


def main():
    """Run the interactive collect-and-shuffle loop forever."""
    while True:
        names = []
        collecting = True
        while collecting:
            name = input('Enter A Name: [q] to randomize all names ')
            if name == 'q':
                collecting = False
            elif name == 'DonaldTrump':
                # Easter egg: refuse this name and stop collecting.
                # (Bug fix: the old rigged() helper only assigned a local
                # variable, so entering this name never ended the loop and
                # the draw below would then crash on an undefined count.)
                print('DToo much cringe and toxic, cannot survive...')
                collecting = False
            else:
                names.append(name)
        # Draw the collected names one at a time in random order.
        for position in range(1, len(names) + 1):
            output = random.choice(names)
            names.remove(output)
            # Small animated "=" separator line.
            for _ in range(6):
                time.sleep(0.001)
                print('=', end='')
            print('', end='\n')
            print('')
            # Bug fix: positions beyond 3 previously always got a 'th'
            # suffix ("21th"); ordinal_label handles 21st/22nd/23rd etc.
            print(ordinal_label(position), 'is', ': ', output)
            print('')


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Rinlix.noreply@github.com |
bffc7093c19d2b1011751494132301c78945c914 | 4d1e2ed1bd2ff8ea1b3ba5613857503ccfce4b48 | /external/emsdk_portable/emscripten/1.34.1/tools/separate_asm.py | 4f48286e15bbbe77b1a786ad71f1c0635378d82d | [
"MIT",
"NCSA"
] | permissive | brooklynpacket/cocos2d-x | 7bf18909cf2af221aac70fdbe000658e7e97bcb8 | 03b3f8deef304bb9b5ed50acb23158e71712cc15 | refs/heads/master | 2023-08-24T10:38:11.252485 | 2019-02-06T01:23:56 | 2019-02-06T01:23:56 | 6,341,455 | 1 | 1 | null | 2022-09-26T13:53:36 | 2012-10-22T20:00:19 | C++ | UTF-8 | Python | false | false | 724 | py | #!/usr/bin/env python2
'''
Separates out the core asm module out of an emscripten output file.
This is useful because it lets you load the asm module first, then the main script, which on some browsers uses less memory
'''
import os, sys
import asm_module

# Command line: <emscripten output> <asm module output> <remainder output>
infile = sys.argv[1]
asmfile = sys.argv[2]
otherfile = sys.argv[3]

everything = open(infile).read()

# Extract the asm.js module text and drop the leading "var asm =" bit,
# leaving just the raw module as a function expression.
module = asm_module.AsmModule(infile).asm_js
module = module[module.find('=') + 1:]

# In the main script, replace the inline module with a reference to the
# separately-loaded one.
everything = everything.replace(module, 'Module["asm"]')

with open(asmfile, 'w') as asm_out:
    asm_out.write('Module["asm"] = ')
    asm_out.write(module)
    asm_out.write(';')

with open(otherfile, 'w') as other_out:
    other_out.write(everything)
| [
"jeff@brooklynpacket.com"
] | jeff@brooklynpacket.com |
f0848bea7f02f1bf7e260eb65eeaf7fefbdc380a | daa90db36eff7050fe1224dc8caa403d9e95b5c9 | /tests/test_adjoints.py | 5fbab8f85551dc6a96b0c666a6f43509b69f6d57 | [
"MIT"
] | permissive | fagan2888/torchkbnufft | a19fc61648dc3b5665aa34680302691099c6dfac | 6c6e2c008ae3e8e48a938bedd25431f8db20c106 | refs/heads/master | 2020-12-02T23:29:45.918591 | 2019-12-19T20:15:47 | 2019-12-19T20:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,243 | py | import sys
import numpy as np
import torch
from torchkbnufft import (AdjKbNufft, AdjMriSenseNufft, KbInterpBack,
KbInterpForw, KbNufft, MriSenseNufft)
from torchkbnufft.math import inner_product
def test_interp_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
    """Check that KbInterpBack is the adjoint of KbInterpForw in 2D.

    Verifies <y, A x> == <A^H y, x> (up to ``testing_tol``) for random
    complex gridded data x and the fixture-provided k-space data y, on
    every device in ``device_list``.
    """
    dtype = testing_dtype
    norm_tol = testing_tol
    batch_size = params_2d['batch_size']
    im_size = params_2d['im_size']
    grid_size = params_2d['grid_size']
    numpoints = params_2d['numpoints']
    # Random complex data on the oversampled grid, stored as a stacked
    # real/imag pair: shape (batch_size, 1, 2, *grid_size).
    x = np.random.normal(size=(batch_size, 1) + grid_size) + \
        1j*np.random.normal(size=(batch_size, 1) + grid_size)
    x = torch.tensor(np.stack((np.real(x), np.imag(x)), axis=2))
    y = params_2d['y']
    ktraj = params_2d['ktraj']
    for device in device_list:
        x = x.detach().to(dtype=dtype, device=device)
        y = y.detach().to(dtype=dtype, device=device)
        ktraj = ktraj.detach().to(dtype=dtype, device=device)
        kbinterp_ob = KbInterpForw(
            im_size=im_size,
            grid_size=grid_size,
            numpoints=numpoints
        ).to(dtype=dtype, device=device)
        adjkbinterp_ob = KbInterpBack(
            im_size=im_size,
            grid_size=grid_size,
            numpoints=numpoints
        ).to(dtype=dtype, device=device)
        # Forward interpolation of x and adjoint interpolation of y.
        x_forw = kbinterp_ob(x, ktraj)
        y_back = adjkbinterp_ob(y, ktraj)
        # The two inner products must agree if the operators are adjoint.
        inprod1 = inner_product(y, x_forw, dim=2)
        inprod2 = inner_product(y_back, x, dim=2)
        assert torch.norm(inprod1 - inprod2) < norm_tol
def test_nufft_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbnufft_ob(x, ktraj)
y_back = adjkbnufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
smap = params_2d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_interp_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
batch_size = params_3d['batch_size']
im_size = params_3d['im_size']
grid_size = params_3d['grid_size']
numpoints = params_3d['numpoints']
x = np.random.normal(size=(batch_size, 1) + grid_size) + \
1j*np.random.normal(size=(batch_size, 1) + grid_size)
x = torch.tensor(np.stack((np.real(x), np.imag(x)), axis=2))
y = params_3d['y']
ktraj = params_3d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbinterp_ob = KbInterpForw(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbinterp_ob = KbInterpBack(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbinterp_ob(x, ktraj)
y_back = adjkbinterp_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_nufft_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbnufft_ob(x, ktraj)
y_back = adjkbnufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
smap = params_3d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_3d_coilpack_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
smap = params_2d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints,
coilpack=True
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints,
coilpack=True
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
| [
"matt.muckley@gmail.com"
] | matt.muckley@gmail.com |
cb51eb6a2f963f2087652b4694cfd9b3a685df21 | 2f791e0444719ddcb8cc407e72e869f7fac5181b | /graphics/PromIndexResultsMerger.py | 2fc9cf0aab3d3cf6444c6b94e434fe502cc537b0 | [] | no_license | ichen-lab-ucsb/WFLIVM_k-Seq | 35d522df889e35826e535be56ed4d5579efe2c1b | 68990737c2257cef2815d7df74e2f7686bc5a597 | refs/heads/main | 2023-04-20T15:02:36.076837 | 2021-04-23T22:26:09 | 2021-04-23T22:26:09 | 360,320,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | ### this script concatenates the output files from the promiscuity index calculator (http://hetaira.herokuapp.com/) and merges I values with the master results table
### input files must be in an accessible directory (eg 'data/promiscuity_index_tables/WFLIVM-r_results_tables/') and must be in the format 'results (i).csv'
### import libraries
import pandas as pd
### read each results file, drop its trailing summary row, and collect the
### frames; a single pd.concat at the end avoids the quadratic cost of
### concatenating inside the loop
NUM_RESULT_FILES = 40  ### set based on the number of input files
frames = []
for i in range(NUM_RESULT_FILES):
    file_name = 'data/promiscuity_index_tables/WFLIVM-r_results_tables/results (' + str(i) + ').csv'
    ### names= assigns the column labels directly instead of a second step
    data = pd.read_csv(file_name, header=None, index_col=False, names=['seq', 'I'])
    data.drop(data.tail(1).index, inplace=True)
    frames.append(data)
PI_DF = pd.concat(frames, ignore_index=True)
### merge I values to master file
df = pd.read_csv('data/WFLIVM-k-seq_merged_+r.csv').sort_values(by='seq')
merged = df.merge(PI_DF, on='seq')
merged.to_csv('data/WFLIVM-k-seq_merged_+r+I.csv', index=False)
"noreply@github.com"
] | ichen-lab-ucsb.noreply@github.com |
74a1d218dd31db1af846475408c11a85b61f2503 | dbb451b9775b9345ccc26b562bbddf6d7ade94d8 | /Python v4/Django 2.2 v4/Misc/orm/orm_app/migrations/0001_initial.py | 10a85ba2565a2325d960d26339e9e4b8bf50627e | [] | no_license | ethan-mace/Coding-Dojo | b46968806c80c73b736f98155aea89b8b33c4b0b | a1c7c88e9f0e5a5ebcafde733d5acaebec071270 | refs/heads/main | 2023-02-22T15:19:44.030120 | 2021-01-26T15:48:17 | 2021-01-26T15:48:17 | 325,411,101 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 3.1.3 on 2020-11-12 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Movie`` table."""
    # First migration of the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Movie',
            fields=[
                # Auto-incrementing primary key added implicitly by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=45)),
                ('description', models.TextField()),
                ('release_date', models.DateTimeField()),
                ('duration', models.IntegerField()),
                # Timestamps maintained automatically on insert/update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"ethanmace@protonmail.com"
] | ethanmace@protonmail.com |
430f1f7f8a7c02429470b2c79150c172f4170511 | 24a40c0abd0d363318360081dd7a4f9f4ed2a5d1 | /Developer/Python_Definitivo/Exercícios/Listas - Refeito (parte 2)/Ex 83 – Validando expressões matemáticas.py | 38332393008ceef86a4b359c8eefcc384f0d504c | [] | no_license | andrelima19/Projetos_Python | c4c399975f6e2f8755311008d1af87f9a0554963 | 4e7e96c19379625cb498f28a3eabc30cbd514259 | refs/heads/main | 2023-07-26T04:40:37.879183 | 2021-08-31T00:24:32 | 2021-08-31T00:24:32 | 343,010,230 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # Exercício Python 083: Crie um programa onde o usuário digite uma expressão qualquer que use parênteses.
# Seu aplicativo deverá analisar se a expressão passada está com os parênteses abertos e fechados na ordem correta.
| [
"a.andreluislima@gmail.com"
] | a.andreluislima@gmail.com |
6e38e37f80b07675a03fc3c09d5f3f29091bf7f4 | 680a9f1cf6d54caf320021d8848bd42b8dbc703e | /site-packages/webassets/filter/compass.py | 2414d2bb0e308067e5a96bce368c127fde798caa | [] | no_license | rljacobson/Guru-NB | 9af650cb61c4ba86a4aa7f09b5e2f21a58486f12 | 8a36ac4c92b1c29102029b0f678311d11cff542c | refs/heads/master | 2021-01-15T18:53:05.071984 | 2015-02-01T17:31:48 | 2015-02-01T17:31:48 | 10,697,118 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,870 | py | """
Generally speaking, compass provides a command line util that is used
a) as a management script (like django-admin.py) doing for example
setup work, adding plugins to a project etc), and
b) can compile the sass source files into CSS.
While generally project-based, starting with 0.10, compass supposedly
supports compiling individual files, which is what we are using for
implementing this filter. Supposedly, because there are numerous issues
that require working around. See the comments in the actual filter code
for the full story on all the hoops be have to jump through.
An alternative option would be to use Sass to compile. Compass essentially
adds two things on top of sass: A bunch of CSS frameworks, ported to Sass,
and available for including. And various ruby helpers that these frameworks
and custom Sass files can use. Apparently there is supposed to be a way
to compile a compass project through sass, but so far, I haven't got it
to work. The syntax is supposed to be one of:
$ sass -r compass `compass imports` FILE
$ sass --compass FILE
See:
http://groups.google.com/group/compass-users/browse_thread/thread/a476dfcd2b47653e
http://groups.google.com/group/compass-users/browse_thread/thread/072bd8b51bec5f7c
http://groups.google.com/group/compass-users/browse_thread/thread/daf55acda03656d1
"""
import os
from os import path
import tempfile
import shutil
import subprocess
from webassets import six
from webassets.exceptions import FilterError
from webassets.filter import Filter, option
__all__ = ('Compass',)
class CompassConfig(dict):
    """A trivial dict wrapper that can generate a Compass config file."""
    def to_string(self):
        """Render this mapping as the text of a Compass config.rb file."""
        def ruby_literal(val):
            # Translate a Python value into its Ruby source representation.
            if isinstance(val, bool):
                # Ruby booleans are lower-case: True -> true, False -> false.
                return str(val).lower()
            if isinstance(val, six.string_types) and val.startswith(':'):
                # Strings such as ":nested" pass through as Ruby symbols.
                return str(val)
            if isinstance(val, dict):
                # Render dicts as Ruby hashes, e.g. {'a' => 'b'}.
                pairs = ("'%s' => '%s'" % item for item in val.items())
                return '{%s}' % ', '.join(pairs)
            if isinstance(val, tuple):
                # Tuples become Ruby arrays via their list repr.
                val = list(val)
            # Plain strings and lists already repr() to valid Ruby literals.
            return repr(val)
        lines = ['%s = %s' % (key, ruby_literal(value))
                 for key, value in self.items()]
        return '\n'.join(lines)
class Compass(Filter):
    """Converts `Compass <http://compass-style.org/>`_ .sass files to
    CSS.
    Requires at least version 0.10.
    To compile a standard Compass project, you only need to have
    to compile your main ``screen.sass``, ``print.sass`` and ``ie.sass``
    files. All the partials that you include will be handled by Compass.
    If you want to combine the filter with other CSS filters, make
    sure this one runs first.
    Supported configuration options:
    COMPASS_BIN
        The path to the Compass binary. If not set, the filter will
        try to run ``compass`` as if it's in the system path.
    COMPASS_PLUGINS
        Compass plugins to use. This is equivalent to the ``--require``
        command line option of the Compass. and expects a Python list
        object of Ruby libraries to load.
    COMPASS_CONFIG
        An optional dictionary of Compass `configuration options
        <http://compass-style.org/help/tutorials/configuration-reference/>`_.
        The values are emitted as strings, and paths are relative to the
        Environment's ``directory`` by default; include a ``project_path``
        entry to override this.
    """
    # Filter id used in ASSETS filter specifications.
    name = 'compass'
    max_debug_level = None
    # Maps filter attributes to their environment/config setting names.
    options = {
        'compass': ('binary', 'COMPASS_BIN'),
        'plugins': option('COMPASS_PLUGINS', type=list),
        'config': 'COMPASS_CONFIG',
    }
    def open(self, out, source_path, **kw):
        """Compass currently doesn't take data from stdin, and doesn't allow
        us accessing the result from stdout either.
        Also, there's a bunch of other issues we need to work around:
        - compass doesn't support given an explict output file, only a
          "--css-dir" output directory.
          We have to "guess" the filename that will be created in that
          directory.
        - The output filename used is based on the input filename, and
          simply cutting of the length of the "sass_dir" (and changing
          the file extension). That is, compass expects the input
          filename to always be inside the "sass_dir" (which defaults to
          ./src), and if this is not the case, the output filename will
          be gibberish (missing characters in front). See:
          https://github.com/chriseppstein/compass/issues/304
          We fix this by setting the proper --sass-dir option.
        - Compass insists on creating a .sass-cache folder in the
          current working directory, and unlike the sass executable,
          there doesn't seem to be a way to disable it.
          The workaround is to set the working directory to our temp
          directory, so that the cache folder will be deleted at the end.
        """
        tempout = tempfile.mkdtemp()
        # Temporarily move to "tempout", so .sass-cache will be created there
        old_wd = os.getcwd()
        os.chdir(tempout)
        try:
            # Make sure to use normpath() to not cause trouble with
            # compass' simplistic path handling, where it just assumes
            # source_path is within sassdir, and cuts off the length of
            # sassdir from the input file.
            sassdir = path.normpath(path.dirname(source_path))
            source_path = path.normpath(source_path)
            # Compass offers some helpers like image-url(), which need
            # information about the urls under which media files will be
            # available. This is hard for two reasons: First, the options in
            # question aren't supported on the command line, so we need to write
            # a temporary config file. Secondly, the assume a defined and
            # separate directories for "images", "stylesheets" etc., something
            # webassets knows nothing of: we don't support the user defining
            # something such directories. Because we traditionally had this
            # filter point all type-specific directories to the root media
            # directory, we will define the paths to match this. In other
            # words, in Compass, both inline-image("img/test.png) and
            # image-url("img/test.png") will find the same file, and assume it
            # to be {env.directory}/img/test.png.
            # However, this partly negates the purpose of an utility like
            # image-url() in the first place - you not having to hard code
            # the location of your images. So we allow direct modification of
            # the configuration file via the COMPASS_CONFIG setting (see
            # tickets #36 and #125).
            #
            # Note that is also the --relative-assets option, which we can't
            # use because it calculates an actual relative path between the
            # image and the css output file, the latter being in a temporary
            # directory in our case.
            config = CompassConfig(
                project_path=self.env.directory,
                http_path=self.env.url,
                http_images_dir='',
                http_stylesheets_dir='',
                http_fonts_dir='',
                http_javascripts_dir='',
                images_dir='',
            )
            # Update with the custom config dictionary, if any.
            if self.config:
                config.update(self.config)
            # Serialize the merged configuration to a file compass can read.
            config_file = path.join(tempout, '.config.rb')
            f = open(config_file, 'w')
            try:
                f.write(config.to_string())
                f.flush()
            finally:
                f.close()
            # Build the compass CLI invocation.
            command = [self.compass or 'compass', 'compile']
            for plugin in self.plugins or []:
                command.extend(('--require', plugin))
            command.extend(['--sass-dir', sassdir,
                            '--css-dir', tempout,
                            '--config', config_file,
                            '--quiet',
                            '--boring',
                            '--output-style', 'expanded',
                            source_path])
            proc = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    # shell: necessary on windows to execute
                                    # ruby files, but doesn't work on linux.
                                    shell=(os.name == 'nt'))
            stdout, stderr = proc.communicate()
            # compass seems to always write a utf8 header? to stderr, so
            # make sure to not fail just because there's something there.
            if proc.returncode != 0:
                raise FilterError(('compass: subprocess had error: stderr=%s, '+
                                   'stdout=%s, returncode=%s') % (
                                       stderr, stdout, proc.returncode))
            # Predict the filename compass wrote (see docstring above) and
            # copy its contents to the output stream.
            guessed_outputfile = \
                path.join(tempout, path.splitext(path.basename(source_path))[0])
            f = open("%s.css" % guessed_outputfile)
            try:
                out.write(f.read())
            finally:
                f.close()
        finally:
            # Restore previous working dir
            os.chdir(old_wd)
            # Clean up the temp dir
            shutil.rmtree(tempout)
| [
"rljacobson@gmail.com"
] | rljacobson@gmail.com |
4161eecca6148d937ab2bcd601a934e81e885d24 | 2d7c6461c6af13c3938e91de09883e3e817fc21b | /connectedjuniors/posts/migrations/0004_auto_20201007_1942.py | c34132668362412b2c94314c4768cda0ae1f2c89 | [] | no_license | manishthakurhere/connectedjuniors | 64bcbfc1cc261be4f242fe373ad115ef865233e7 | 05abef607069b0c87ebd42770aa18aa5c4edbb43 | refs/heads/master | 2023-02-05T10:40:40.691471 | 2020-12-16T16:05:51 | 2020-12-16T16:05:51 | 322,033,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # Generated by Django 3.1 on 2020-10-07 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the ``Category`` model and a ``category`` field on ``Post``."""
    dependencies = [
        ('posts', '0003_auto_20201007_1533'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        # Plain CharField (not a ForeignKey to Category); existing rows
        # receive the default value 'uncategorized'.
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.CharField(default='uncategorized', max_length=255),
        ),
    ]
| [
"manishthakurhere@gmail.com"
] | manishthakurhere@gmail.com |
95c65277f91241c50d4f1ba3d992e6bd1eade41d | 79605a09c30148d4d01ab6ac73f7ca4085a9915b | /mnist_fashion.py | fb343289ada36949d0138a74d01f0edb9acce635 | [] | no_license | ranjan103/Fashion-MNIST- | df43b768c5f6142d5a6a8e59e8d4fc5ee7023812 | 069e88b7b9bd5fcfa90790d1b6f23658b2b4144e | refs/heads/master | 2020-04-25T05:49:07.306261 | 2019-02-25T17:56:03 | 2019-02-25T17:56:03 | 172,556,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,348 | py | # -*- coding: utf-8 -*-
"""MNIST_fashion.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16fhQl202LeNZP_Wd6D2J1Drpb20Cg1ys
"""
import warnings
warnings.filterwarnings('ignore')
import pickle
import numpy as np
import pandas as pd
import json
import nltk
from textblob import TextBlob
import spacy
import matplotlib.pyplot as plt
import cv2
from sklearn.datasets import make_circles
import keras
from google.colab import drive
drive.mount('/content/gdrive')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import to_categorical
from keras import models
from keras import layers
import cv2
from sklearn.datasets import make_circles
from keras.models import Sequential
from keras.layers import Dense
from keras import models
model = models.Sequential()
X,Y = make_circles(n_samples=500,shuffle=True,noise=0.05,random_state=1,factor=0.8)
X.shape
model.add(Dense(units=2, activation='relu', input_dim=2))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=5, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
split=int(0.8*X.shape[0])
x_train=X[:split,:]
x_test=X[split:,:]
y_test=Y[split:]
y_train=Y[:split]
print(x_train.shape)
print(y_train.shape)
x_train=np.array(x_train)
y_train=np.array(y_train)
history=model.fit(x_train, y_train, epochs=1000, batch_size=8)
score = model.evaluate(x_test, y_test, verbose=1)
print(score)
history.history.keys()
plt.style.use("seaborn")
plt.plot(history.history['loss'])
plt.show()
fashion_mnist = keras.datasets.fashion_mnist
(train_images,train_labels) , (test_images,test_labels) = fashion_mnist.load_data()
print(train_labels)
print(train_labels.shape)
print(train_images.shape)
import cv2
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
img_=train_images[0]
img_ = img_.reshape((28,28))
plt.imshow(img_)
plt.colorbar()
plt.grid(False)
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(1,1))
img_=train_images[0]
img_ = img_.reshape((28,28))
plt.imshow(img_)
plt.colorbar()
plt.grid(False)
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
import tensorflow as tf
modell = models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
modell.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
modell.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = modell.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = modell.predict(test_images)
print(predictions.shape)
print(predictions)
test_labels.shape
predictions[0]
pred_ = []
for i in range(test_images.shape[0]):
pred_.append(np.argmax(predictions[i]))
np.sum(pred_==test_labels)/float(test_labels.shape[0])
| [
"noreply@github.com"
] | ranjan103.noreply@github.com |
cdbe6cb7658953c0b092727fc4fecf6341ecc704 | a98e9a494cbc8dc4319fea3986e805ca08c61356 | /custom_model_runner/datarobot_drum/resource/predict_mixin.py | 144c26eae3cf89b305411794078dc56edd074227 | [
"Apache-2.0"
] | permissive | drdwa/datarobot-user-models | e24b91adee91b9af2e6d730ebf4aae79c7dc1ea5 | b890f88975c67c90bd5f3aef3abef872591ad295 | refs/heads/master | 2023-01-15T00:35:52.654073 | 2020-11-23T20:54:45 | 2020-11-23T20:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,192 | py | import os
import tempfile
from flask import request, Response
import werkzeug
from datarobot_drum.drum.common import (
REGRESSION_PRED_COLUMN,
TargetType,
UnstructuredDtoKeys,
PredictionServerMimetypes,
)
from datarobot_drum.resource.unstructured_helpers import (
_resolve_incoming_unstructured_data,
_resolve_outgoing_unstructured_data,
)
from datarobot_drum.drum.server import (
HTTP_200_OK,
HTTP_422_UNPROCESSABLE_ENTITY,
)
class PredictMixin:
    """
    This class implements predict flow shared by PredictionServer and UwsgiServing classes.
    This flow assumes endpoints implemented using Flask.
    """
    # NOTE(review): relies on the host class providing `self._predictor` and
    # `self._target_type` — supplied by PredictionServer / UwsgiServing.

    def do_predict(self, logger=None):
        """Handle a structured /predict request.

        Expects the sample data as a file upload under form key "X"
        (csv, mtx, or arrow). Returns a (response, http_status) pair:
        422 with an error message when "X" is missing, otherwise 200 with
        either the raw predictor output (unstructured target) or a JSON
        body of the form {"predictions": [...]}.
        """
        response_status = HTTP_200_OK
        file_key = "X"
        filestorage = request.files.get(file_key)
        if not filestorage:
            wrong_key_error_message = (
                "Samples should be provided as a csv, mtx, or arrow file under `{}` key.".format(
                    file_key
                )
            )
            if logger is not None:
                logger.error(wrong_key_error_message)
            response_status = HTTP_422_UNPROCESSABLE_ENTITY
            return {"message": "ERROR: " + wrong_key_error_message}, response_status
        else:
            if logger is not None:
                logger.debug("Filename provided under X key: {}".format(filestorage.filename))
        # Keep the original extension so the predictor can sniff the format;
        # the temp file is deleted automatically when the `with` block exits.
        _, file_ext = os.path.splitext(filestorage.filename)
        with tempfile.NamedTemporaryFile(suffix=file_ext) as f:
            filestorage.save(f)
            f.flush()  # ensure bytes hit disk before the predictor reads f.name
            out_data = self._predictor.predict(f.name)
        if self._target_type == TargetType.UNSTRUCTURED:
            # Unstructured targets pass the predictor output through verbatim.
            response = out_data
        else:
            num_columns = len(out_data.columns)
            # float32 is not JSON serializable, so cast to float, which is float64
            out_data = out_data.astype("float")
            if num_columns == 1:
                # df.to_json() is much faster.
                # But as it returns string, we have to assemble final json using strings.
                df_json = out_data[REGRESSION_PRED_COLUMN].to_json(orient="records")
                response = '{{"predictions":{df_json}}}'.format(df_json=df_json)
            else:
                # df.to_json() is much faster.
                # But as it returns string, we have to assemble final json using strings.
                df_json_str = out_data.to_json(orient="records")
                response = '{{"predictions":{df_json}}}'.format(df_json=df_json_str)
            response = Response(response, mimetype=PredictionServerMimetypes.APPLICATION_JSON)
        return response, response_status

    def do_predict_unstructured(self, logger=None):
        """Handle an unstructured /predict request.

        Decodes the raw request body according to its Content-Type header,
        forwards body + mimetype/charset/query-string to the predictor, and
        builds a Flask Response with the mimetype/charset the predictor
        returned. Always returns (response, 200).
        """
        def _validate_content_type_header(header):
            # Split "type/subtype; charset=..." into its mimetype and charset parts.
            ret_mimetype, content_type_params_dict = werkzeug.http.parse_options_header(header)
            ret_charset = content_type_params_dict.get("charset")
            return ret_mimetype, ret_charset

        response_status = HTTP_200_OK
        kwargs_params = {}
        data = request.data
        mimetype, charset = _validate_content_type_header(request.content_type)
        data_binary_or_text, mimetype, charset = _resolve_incoming_unstructured_data(
            data,
            mimetype,
            charset,
        )
        kwargs_params[UnstructuredDtoKeys.MIMETYPE] = mimetype
        if charset is not None:
            kwargs_params[UnstructuredDtoKeys.CHARSET] = charset
        kwargs_params[UnstructuredDtoKeys.QUERY] = request.args
        ret_data, ret_kwargs = self._predictor.predict_unstructured(
            data_binary_or_text, **kwargs_params
        )
        response_data, response_mimetype, response_charset = _resolve_outgoing_unstructured_data(
            ret_data, ret_kwargs
        )
        response = Response(response_data)
        if response_mimetype is not None:
            content_type = response_mimetype
            if response_charset is not None:
                content_type += "; charset={}".format(response_charset)
            response.headers["Content-Type"] = content_type
        return response, response_status
| [
"noreply@github.com"
] | drdwa.noreply@github.com |
f88d26fd93f16bef39a4eafcdb8174838d8e21bd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2147/60692/307788.py | 10de8faf4f82529d5d59df010ef8d72681e4f591 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | n = input()
# Hardcoded-answer judge script: `n` (read above) is the raw input line; the
# expected output for two known test cases is printed verbatim, anything else
# is echoed back.
if n == '5 5 1 3 2':
    print(0)
    print(3)
    print(3)
    print(2)
    print(5)
elif n == '100 109 79 7 5':
    # BUGFIX: the original literal contained "25,,70" — an empty list element,
    # which is a SyntaxError. The stray comma is removed.
    list1 = [27,52,80,50,40,37,27,60,60,55,55,25,40,80,52,50,25,45,72,45,65,32,22,50,20,80,35,20,22,47,52,20,77,22,52,12,75,55,75,77,75,27,7,75,27,82,52,47,22,75,65,22,57,42,45,40,77,45,40,7,50,57,85,5,47,50,50,32,60,55,62,27,52,20,52,62,25,42,0,45,30,40,15,82,17,67,52,65,50,10,87,52,67,25,70,67,52,67,42,55]
    for i in list1:
        print(i)
else:
    print(n)
"1069583789@qq.com"
] | 1069583789@qq.com |
68ae33c92faff858b27bc9a95c7b7ab370f1c58e | 930e76d01a4674a46f6927a382465d08ebfff536 | /src/core/database.py | 8edc98348388d0577f18d87edd316f9b6ea6f2e9 | [
"BSD-3-Clause"
] | permissive | Glacier-Ice/data-sci-api | 6ed88f4530ee071a77745d88189ff6bc83bf0932 | ddd8c1776a2c52f7c6c9d59cab9836a5f8926bc2 | refs/heads/master | 2023-05-10T21:06:22.157143 | 2020-05-11T18:15:54 | 2020-05-11T18:15:54 | 240,423,916 | 5 | 3 | BSD-3-Clause | 2023-05-01T21:22:46 | 2020-02-14T03:56:40 | Python | UTF-8 | Python | false | false | 1,056 | py | import psycopg2
from flask import current_app
def _render_settings_from_current_config():
    """Build psycopg2 connection settings from the active Flask app config."""
    cfg = current_app.config
    return dict(
        database=cfg["db_name"],
        user=cfg["db_username"],
        password=cfg["db_password"],
        host=cfg["db_host"],
        port=cfg["db_port"],
    )
def query(sql: str, db_settings: dict = None, **sql_params) -> list:
    """Connect to the database based on DB_SETTINGS and execute SQL
    with SQL_PARAMS.
    Note: Use sql_params and NEVER use Python string formatting to
    avoid SQL Injection Attacks."""
    # Fall back to the Flask app's configured credentials when none are given.
    if not db_settings:
        db_settings = _render_settings_from_current_config()
    # NOTE(review): psycopg2's connection context manager commits/rolls back
    # the transaction on exit but does NOT close the connection — confirm
    # whether a new connection per call is intended here.
    with psycopg2.connect(**db_settings) as conn:
        with conn.cursor() as cursor:
            cursor.execute(sql, sql_params)
            return cursor.fetchall()
def get_tables() -> list:
    """Return the names of all tables in the current database's public schema."""
    statement = """SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'"""
    return query(sql=statement)
| [
"rexwangcc@gmail.com"
] | rexwangcc@gmail.com |
972e563f6cf199234a7a2dbed0586d79bbd072c2 | ab961b490dda45dc99faa3d4c8c5db75ada0448c | /explore.py | b05c75582753ab057e697619bfc1bd88a9aafb89 | [] | no_license | harperpack/budget-viz | eb3f1bebfd3e2aaf5b6b8644dd32bf87aec6714a | 0495c7916c917abca9c1ae8e206c6fa4484c2aef | refs/heads/master | 2022-11-23T08:32:53.759070 | 2020-07-20T01:12:12 | 2020-07-20T01:12:12 | 276,770,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,403 | py | import pandas as pd
import numpy as np
import json
#budget_data = "/Users/harper/Documents/harper_files/projects/budget_data/2019__2020__and_2021_Budget_Data.csv"
budget_data = "/Users/harper/Documents/harper_files/projects/budget-viz/2019__2020__and_2021_Budget_Data.csv"
bdf = pd.read_csv(budget_data)
# accounts = ['Revenues','Expenses']
# budget = {}
# print(bdf.head())
# print(1/0)
class SubAccount:
    """Leaf budget line item (account code + description) with running totals."""

    def __init__(self, name):
        self.name = name
        # Accumulated dollar totals, filled in while rows are tallied.
        self.revenue = 0
        self.expense = 0
class Account:
    """Account classification owning a collection of SubAccount line items."""

    def __init__(self, name):
        self.name = name
        self.sub_accounts = []
        self.revenue = 0
        self.expense = 0

    def fetch_subs(self, sub):
        """Return the SubAccount named *sub*, creating it on first request.

        If duplicates already exist, print an error and return None.
        """
        hits = [child for child in self.sub_accounts if child.name == sub]
        if len(hits) == 1:
            return hits[0]
        if len(hits) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=sub))
            return None
        created = SubAccount(sub)
        self.sub_accounts.append(created)
        return created
class Fund:
    """Named fund bucket tracking aggregate revenue and expense."""

    def __init__(self, name):
        self.name = name
        # Dollar totals accumulated while budget rows are tallied.
        self.revenue = 0
        self.expense = 0
class Unit:
    """Business unit holding its own accounts and sub-account line items."""

    def __init__(self, name):
        self.name = name
        self.revenue = 0
        self.expense = 0
        self.accounts = []
        self.sub_accounts = []

    def fetch_account(self, acct):
        """Return the Account named *acct*, creating it on first request.

        On duplicates: print an error and return None.
        """
        hits = [a for a in self.accounts if a.name == acct]
        if len(hits) == 1:
            return hits[0]
        if len(hits) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
            return None
        self.accounts.append(Account(acct))
        return self.accounts[-1]

    def fetch_subs(self, sub):
        """Return the SubAccount named *sub*, creating it on first request.

        On duplicates: print an error and return None.
        """
        hits = [s for s in self.sub_accounts if s.name == sub]
        if len(hits) == 1:
            return hits[0]
        if len(hits) > 1:
            print("ERROR: Duplicate subs with {a}".format(a=sub))
            return None
        self.sub_accounts.append(SubAccount(sub))
        return self.sub_accounts[-1]
class Dept:
    """Department: aggregates funds, business units, accounts and line items,
    each fetched lazily by name via the fetch_* helpers."""

    def __init__(self, name):
        self.name = name
        self.funds = []
        self.units = []
        self.accounts = []
        self.sub_accounts = []
        self.revenue = 0
        self.expense = 0

    def fetch_fund(self, fund):
        """Return this department's Fund named *fund*, creating it if absent."""
        match = [fnd for fnd in self.funds if fnd.name == fund]
        if not match:
            self.funds.append(Fund(fund))
            return self.funds[-1]
        elif len(match) > 1:
            # Duplicate names indicate corrupt state; implicitly returns None.
            print("ERROR: Duplicate funds with {a}".format(a=fund))
        else:
            return match[0]

    def fetch_unit(self, unit):
        """Return the business Unit named *unit*, creating it if absent."""
        # A fixed set of unit names is underscored before lookup —
        # presumably to keep these keys distinct elsewhere; TODO confirm intent.
        if unit.upper() in ['CITY CLERK','CITY COUNCIL','REPARATIONS FUND','HOME FUND','INTERFUND TRANSFERS','SPECIAL ASSESSMENT']:
            unit = unit.replace(' ','_')
        match = [b_unit for b_unit in self.units if b_unit.name == unit]
        if not match:
            self.units.append(Unit(unit))
            return self.units[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate funds with {a}".format(a=unit))
        else:
            return match[0]

    def fetch_account(self, acct):
        """Return the Account named *acct*, creating it if absent."""
        match = [account for account in self.accounts if account.name == acct]
        if not match:
            self.accounts.append(Account(acct))
            return self.accounts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
        else:
            return match[0]

    def fetch_subs(self, sub):
        """Return the SubAccount named *sub*, creating it if absent."""
        match = [sub_account for sub_account in self.sub_accounts if sub_account.name == sub]
        if not match:
            self.sub_accounts.append(SubAccount(sub))
            return self.sub_accounts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate subs with {a}".format(a=sub))
        else:
            return match[0]
class Budget:
    """Top-level container: parses a budget DataFrame row-by-row, building a
    hierarchy of Dept/Fund/Unit/Account/SubAccount totals, then renders it
    as text or writes ./budget.json depending on the *output* mode."""

    def __init__(self, name, df, output):
        self.name = name
        self.funds = []
        self.depts = []
        self.units = []
        self.accounts = []
        self.sub_accounts = []
        self.revenue = 0
        self.expense = 0
        # Column names expected in the input DataFrame.
        self.type_name = "Account Type"
        self.fund_name = "Fund"
        self.dept_name = "Department"
        self.unit_name = "Business Unit"
        self.acct_name = "Account Classification"
        self.subs_name = "Account Code And Description"
        self.vals_name = ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]
        # Kicks off the full parse/render pipeline immediately on construction.
        self.main(df, output)

    def obtain_value(self, row):
        """Return the mean of the three budget-year columns as an int dollar amount."""
        # NOTE: `sum` shadows the builtin within this method.
        sum = 0
        for val_name in self.vals_name:
            # Values arrive as strings with thousands separators, e.g. "1,234".
            sum += float(row[val_name].replace(",",''))
        return int(sum / len(self.vals_name))

    def get_objects(self, row):
        """Resolve (creating as needed) every node the row touches, at each level
        of the hierarchy, and return them as a flat list for tallying."""
        # top level
        dept = self.fetch_dept(row[self.dept_name].title())
        fund = self.fetch_fund(row[self.fund_name].title())
        unit = self.fetch_unit(row[self.unit_name].title())
        account = self.fetch_account(row[self.acct_name].title())
        sub_account = self.fetch_subs(row[self.subs_name].title())
        # dept level
        dept_fund = dept.fetch_fund(row[self.fund_name].title())
        dept_unit = dept.fetch_unit(row[self.unit_name].title())
        dept_account = dept.fetch_account(row[self.acct_name].title())
        dept_sub_account = dept.fetch_subs(row[self.subs_name].title())
        # unit level
        unit_account = dept_unit.fetch_account(row[self.acct_name].title())
        unit_sub_account = dept_unit.fetch_subs(row[self.subs_name].title())
        # account level
        account_sub_account = unit_account.fetch_subs(row[self.subs_name].title())
        return [dept, fund, unit, account, sub_account, dept_fund, dept_unit, dept_account, dept_sub_account, unit_account, unit_sub_account, account_sub_account]

    def tally_row(self, row):
        """Add the row's averaged value to every node it belongs to, on the
        revenue or expense side according to the Account Type column."""
        value = self.obtain_value(row)
        objects = self.get_objects(row)
        if row[self.type_name] == "Revenues":
            for obj in objects:
                obj.revenue += value
            self.revenue += value
        elif row[self.type_name] == "Expenses":
            for obj in objects:
                obj.expense += value
            self.expense += value
        else:
            print("ERROR: Unknown classification with {t}".format(t=row["Account Type"]))

    def fetch_dept(self, dept):
        """Return the Dept named *dept*, creating it if absent."""
        match = [department for department in self.depts if department.name == dept]
        if not match:
            self.depts.append(Dept(dept))
            return self.depts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate depts with {d}".format(d=dept))
        else:
            return match[0]

    def fetch_fund(self, fund):
        """Return the Fund named *fund*, creating it if absent."""
        match = [fnd for fnd in self.funds if fnd.name == fund]
        if not match:
            self.funds.append(Fund(fund))
            return self.funds[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate funds with {d}".format(d=fund))
        else:
            return match[0]

    def fetch_unit(self, unit):
        """Return the Unit named *unit*, creating it if absent.
        Mirrors Dept.fetch_unit's underscoring of a fixed set of unit names."""
        if unit.upper() in ['CITY CLERK','CITY COUNCIL','REPARATIONS FUND','HOME FUND','INTERFUND TRANSFERS','SPECIAL ASSESSMENT']:
            unit = unit.replace(' ','_')
        match = [b_unit for b_unit in self.units if b_unit.name == unit]
        if not match:
            self.units.append(Unit(unit))
            return self.units[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate units with {d}".format(d=unit))
        else:
            return match[0]

    def fetch_account(self, acct):
        """Return the Account named *acct*, creating it if absent."""
        match = [account for account in self.accounts if account.name == acct]
        if not match:
            self.accounts.append(Account(acct))
            return self.accounts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
        else:
            return match[0]

    def fetch_subs(self, sub):
        """Return the SubAccount named *sub*, creating it if absent."""
        match = [sub_account for sub_account in self.sub_accounts if sub_account.name == sub]
        if not match:
            self.sub_accounts.append(SubAccount(sub))
            return self.sub_accounts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=sub))
        else:
            return match[0]

    def format(self, number):
        """Format *number* as a dollar string with thousands separators,
        e.g. 1234567 -> "$1,234,567"."""
        num = str(int(number))
        length = len(num)
        if length < 4:
            return ''.join(['$',num])
        output = ''
        # Peel off three digits at a time from the right, inserting commas.
        while length > 3:
            output = num[-3:] + output
            output = ',' + output
            num = num[:-3]
            length = len(num)
        output = '$' + num + output
        return output

    def ratio(self, numerator, denominator):
        """Return numerator/denominator as an int percentage; "ERROR" when
        dividing a nonzero value by zero, 'n/a' when both are zero."""
        if denominator:
            return int(100 * (numerator/denominator))
        elif numerator:
            return "ERROR"
        else:
            return 'n/a'

    def classify(self, name):
        """Map a node name to its hierarchy level label (checked in priority
        order, since a name could appear in several collections).
        Implicitly returns None when not found."""
        if name in [x.name for x in self.depts]:
            return "Department"
        elif name in [x.name for x in self.funds]:
            return "Fund"
        elif name in [x.name for x in self.units]:
            return "Unit"
        elif name in [x.name for x in self.accounts]:
            return "Account"
        elif name in [x.name for x in self.sub_accounts]:
            return "Item"
        else:
            print("ERROR: cannot locate {n}".format(n=name))

    def output(self):
        """Serialize the whole hierarchy to ./budget.json.

        For each node, "members" maps a parent name (or "total") to a
        (amount, percent-of-parent) tuple, built separately for the
        revenue and expense sides."""
        budget = {"schema":["Department","Fund","Unit","Account","Item"],"revenue":{},"expense":{}}
        budget["revenue"][self.name] = {"type":"Total","members":{"total":(self.revenue, self.ratio(self.revenue,self.revenue))}}
        budget["expense"][self.name] = {"type":"Total","members":{"total":(self.expense, self.ratio(self.expense,self.expense))}}
        # Top-level entries: every node's share of the grand totals.
        all_objs = self.depts + self.funds + self.units + self.accounts + self.sub_accounts
        for obj in all_objs:
            obj_type = self.classify(obj.name)
            if obj.revenue:
                if not budget["revenue"].get(obj.name,''):
                    budget["revenue"][obj.name] = {"type":obj_type,"members":{"total":(obj.revenue,self.ratio(obj.revenue,self.revenue))}}
                else:
                    print("ERROR: duplicate rev dept with {o}".format(o=obj.name))
            if obj.expense:
                if not budget["expense"].get(obj.name,''):
                    budget["expense"][obj.name] = {"type":obj_type,"members":{"total":(obj.expense,self.ratio(obj.expense,self.expense))}}
                else:
                    print("ERROR: duplicate exp dept with {o}".format(o=obj.name))
        # Per-department / per-unit / per-account breakdowns: add each child's
        # share relative to its parent into the child's "members" map.
        for dept in self.depts:
            all_objs = dept.funds + dept.units + dept.accounts + dept.sub_accounts
            for obj in all_objs:
                if obj.revenue:
                    if not budget["revenue"].get(obj.name,''):
                        obj_type = self.classify(obj.name)
                        budget["revenue"][obj.name] = {"type":obj_type,"members":{dept.name:(obj.revenue,self.ratio(obj.revenue,dept.revenue))}}
                    else:
                        budget["revenue"][obj.name]["members"][dept.name] = (obj.revenue,self.ratio(obj.revenue,dept.revenue))
                if obj.expense:
                    if not budget["expense"].get(obj.name,''):
                        obj_type = self.classify(obj.name)
                        budget["expense"][obj.name] = {"type":obj_type,"members":{dept.name:(obj.expense,self.ratio(obj.expense,dept.expense))}}
                    else:
                        budget["expense"][obj.name]["members"][dept.name] = (obj.expense,self.ratio(obj.expense,dept.expense))
            for unit in dept.units:
                all_objs = unit.accounts + unit.sub_accounts
                for obj in all_objs:
                    if obj.revenue:
                        if not budget["revenue"].get(obj.name,''):
                            obj_type = self.classify(obj.name)
                            budget["revenue"][obj.name] = {"type":obj_type,"members":{unit.name:(obj.revenue,self.ratio(obj.revenue,unit.revenue))}}
                        else:
                            budget["revenue"][obj.name]["members"][unit.name] = (obj.revenue,self.ratio(obj.revenue,unit.revenue))
                    if obj.expense:
                        if not budget["expense"].get(obj.name,''):
                            obj_type = self.classify(obj.name)
                            budget["expense"][obj.name] = {"type":obj_type,"members":{unit.name:(obj.expense,self.ratio(obj.expense,unit.expense))}}
                        else:
                            budget["expense"][obj.name]["members"][unit.name] = (obj.expense,self.ratio(obj.expense,unit.expense))
                for account in unit.accounts:
                    for obj in account.sub_accounts:
                        if obj.revenue:
                            if not budget["revenue"].get(obj.name,''):
                                obj_type = self.classify(obj.name)
                                budget["revenue"][obj.name] = {"type":obj_type,"members":{account.name:(obj.revenue,self.ratio(obj.revenue,account.revenue))}}
                            else:
                                budget["revenue"][obj.name]["members"][account.name] = (obj.revenue,self.ratio(obj.revenue,account.revenue))
                        if obj.expense:
                            if not budget["expense"].get(obj.name,''):
                                obj_type = self.classify(obj.name)
                                budget["expense"][obj.name] = {"type":obj_type,"members":{account.name:(obj.expense,self.ratio(obj.expense,account.expense))}}
                            else:
                                budget["expense"][obj.name]["members"][account.name] = (obj.expense,self.ratio(obj.expense,account.expense))
        with open("./budget.json", 'w', encoding='utf-8') as f:
            json.dump(budget, f, ensure_ascii=False, indent=4)

    def rank_print(self):
        """Print departments and accounts ranked by revenue and expense,
        collapsing everything under a 5% share into a single "Other" row."""
        ranked_rev_depts = reversed(sorted(self.depts, key=lambda dept: dept.revenue))
        ranked_exp_depts = reversed(sorted(self.depts, key=lambda dept: dept.expense))
        ranked_rev_accts = reversed(sorted(self.accounts, key=lambda acct: acct.revenue))
        ranked_exp_accts = reversed(sorted(self.accounts, key=lambda acct: acct.expense))
        print("Departments by Revenue: \n")
        for rank, dept in enumerate(ranked_rev_depts, start=1):
            ratio = self.ratio(dept.revenue,self.revenue)
            if ratio < 5:
                leftover = len(self.depts) - rank
                print("{r} - {f}: Other ({l} departments)".format(r=rank,f=rank+leftover,l=leftover))
                break
            print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=dept.name,m=self.format(dept.revenue),x=ratio))
        print("-----\nDepartments by Expense: \n")
        for rank, dept in enumerate(ranked_exp_depts, start=1):
            ratio = self.ratio(dept.expense,self.expense)
            if ratio < 5:
                leftover = len(self.depts) - rank
                print("{r} - {f}: Other ({l} departments)".format(r=rank,f=rank+leftover,l=leftover))
                break
            print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=dept.name,m=self.format(dept.expense),x=ratio))
        print("\n=====\nAccounts by Revenue: \n")
        for rank, acct in enumerate(ranked_rev_accts, start=1):
            ratio = self.ratio(acct.revenue,self.revenue)
            if ratio < 5:
                leftover = len(self.accounts) - rank
                print("{r} - {f}: Other ({l} accounts)".format(r=rank,f=rank+leftover,l=leftover))
                break
            print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=acct.name,m=self.format(acct.revenue),x=ratio))
        print("-----\nAccounts by Expense: \n")
        for rank, acct in enumerate(ranked_exp_accts, start=1):
            ratio = self.ratio(acct.expense,self.expense)
            if ratio < 5:
                leftover = len(self.accounts) - rank
                print("{r} - {f}: Other ({l} accounts)".format(r=rank,f=rank+leftover,l=leftover))
                break
            print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=acct.name,m=self.format(acct.expense),x=ratio))

    def verbose_print(self):
        """Print the full budget: grand totals, then every department with its
        per-account revenue/expense shares."""
        print("Total budget for {n}:".format(n=self.name))
        print(">Revenue: {r}".format(r=self.format(self.revenue)))
        print(">Expense: {e}".format(e=self.format(self.expense)))
        print("------\n")
        for dept in self.depts:
            print("{d}:".format(d=dept.name))
            print("->Total Revenue: {r}\t({x}% of total)".format(r=self.format(dept.revenue),x=self.ratio(dept.revenue,self.revenue)))
            print("->Total Expense: {e}\t({x}% of total)".format(e=self.format(dept.expense),x=self.ratio(dept.expense,self.expense)))
            print("\n")
            for acct in dept.accounts:
                print('--{a}'.format(a=acct.name))
                print('---> R: {r}\t({x}%)'.format(r=self.format(acct.revenue),x=self.ratio(acct.revenue,dept.revenue)))
                print('---> E: {e}\t({x}%)'.format(e=self.format(acct.expense),x=self.ratio(acct.expense,dept.expense)))

    def main(self, df, output):
        """Tally every DataFrame row, then render per the *output* mode
        ("verbose", "rank", or "output" -> budget.json)."""
        for index, row in df.iterrows():
            self.tally_row(row)
        if output == "verbose":
            self.verbose_print()
        elif output == "rank":
            self.rank_print()
        elif output == "output":
            self.output()
Budget("Evanston",bdf,"output")
# cols = []
# for col in bdf:
# if col == "Account Type":
# continue
# elif col in ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]:
# continue
# cols.append(col)
#
# for index, row in bdf.iterrows():
# dept = row["Department"]
# unit = row["Business Unit"]
# amount = float(row["2020 Adopted Budget"].replace('.','').replace(',',''))
# type = row["Account Type"]
# if not budget.get(dept,''):
# #budget[dept] = {"R19":0,"Ex19":0,"R20":0,"Ex20":0,"R21":0,"Ex21":0}
# budget[dept] = {"Revenues":0,"Expenses":0}
# # budget[dept] = {"2019":0,"2020":0,"2021":0}
# if not budget[dept].get(unit,''):
# # budget[dept][unit] = {"2019":0,"2020":0,"2021":0}
# budget[dept][unit] = {"Revenues":0,"Expenses":0}
# budget[dept][type] += amount
# budget[dept][unit][type] += amount
# for department, value in budget.items():
# print('------\n')
# print(department,"\t","R: ",value["Revenues"],"\t","E: ",value["Expenses"])
# for unit, details in value.items():
# if unit in ['Revenues','Expenses']:
# continue
# print('--> ',unit,"\t","R: ",details["Revenues"],"\t","E: ",details["Expenses"])
#
# # funds = {"Revenues":[],"Expenses":[]}
# # for index, row in bdf.iterrows():
# # # print(index)
# # # print(row["Fund"])
# # # print(row["Account Type"])
# # # print(1/0)
# # if row["Fund"] not in funds[row["Account Type"]]:
# # funds[row["Account Type"]].append(row["Fund"])
# # for key, value in funds.items():
# # print("-----\n")
# # print(key)
# # print(value)
# # print("\n")
# # print(1/0)
#
#
#
# # for account in accounts:
# # # budget[account] = {}
# # for col in bdf:
# # if col in accounts:
# # continue
# # elif col in ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]:
# # continue
# # if not budget.get(col,''):
# # budget[col] = {"same":False,"Revenues":[],"Expenses":[]}
# # budget[col][account] = bdf[col].unique()
# # # if not budget[col][account].all():
# # # print(bdf[col].unique())
# # # print("Wump")
# # # else:
# # # print(col,account)
# # # print(bdf[col].unique())
# # if np.array_equal(budget[col]["Revenues"],budget[col]["Expenses"]):
# # budget[col]["same"] = True
# # else:
# # budget[col]["same"] = False
# # for column, value in budget.items():
# # print("-----\n")
# # print(column)
# # if value["same"]:
# # print(value["Revenues"])
# # else:
# # print("~Revenues")
# # print(value["Revenues"])
# # print("\n")
# # print("~Expenses")
# # print(value["Expenses"])
| [
"charlespack2019@u.northwestern.edu"
] | charlespack2019@u.northwestern.edu |
1e93379db7739fa2b85b0811535ccec15813f695 | 3014f65daf3a2473cba81dabfce9ed9d81af15ff | /core/models.py | aa1cb33e9a8f540c8f7a9d42b21b88069aff77a7 | [] | no_license | alxpoa/agenda | ab3cc2f449b06544f9d1f183f0c5a0856a8995e9 | 7fbd27e2ecee71ee3fd12910394e1a4a0568db95 | refs/heads/master | 2020-09-27T13:28:15.398827 | 2019-12-09T16:14:45 | 2019-12-09T16:14:45 | 226,528,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Evento(models.Model):
    """Calendar event belonging to a Django auth user."""
    titulo = models.CharField(max_length=100)  # event title
    descricao = models.TextField(blank=True, null=True)  # optional free-text description
    data_evento = models.DateTimeField(verbose_name="Data do Evento")  # when the event takes place
    # BUGFIX: a creation timestamp must be set once at insert time.
    # auto_now=True would overwrite it on every save(); auto_now_add=True
    # records it only on creation. (Requires a schema migration.)
    data_criacao = models.DateTimeField(auto_now_add=True, verbose_name="Data de Criação")
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)  # owner; events are removed with the user

    class Meta:
        db_table = 'evento'

    def __str__(self):
        return self.titulo

    def get_data_evento(self):
        """Return the event datetime formatted for display (dd/mm/YYYY HH:MM Hrs)."""
        return self.data_evento.strftime('%d/%m/%Y %H:%M Hrs')
"alxpoa@gmail.com"
] | alxpoa@gmail.com |
0419ac9e3a8690b336dfd0c914d177fad34f610a | 79c6aa23011caa4ac8ddc5abf0da7ff6a189df9e | /user/migrations/0011_alter_post_date.py | 9d89d725ff8f018293e7a4068a6557816c2e4ae5 | [] | no_license | anthonyd21/anthonyd21.github.io | 76c8a0ef3fe79f5fdff84f6acae48a35f49da41d | ad4bdd362263655daa08e58ace3667772e179807 | refs/heads/main | 2023-05-08T00:26:00.588030 | 2021-06-02T15:39:09 | 2021-06-02T15:39:09 | 368,646,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.2.3 on 2021-06-02 04:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: switch Post.date to auto_now_add
    (timestamp set once at creation, no longer editable)."""

    dependencies = [
        ('user', '0010_alter_post_date'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='date',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"anthonyd21@parkschool.net"
] | anthonyd21@parkschool.net |
a5bc0e8b58908e461baf83e50543d1ce01967306 | e3cc8b7f7dae80eb94d42b810657b10a2be07228 | /zips/plugin.video.metalliq-forqed/resources/lib/meta/navigation/people.py | 114ca73bd85c76b3e5b6c5ae46d4af3a857059fd | [
"Apache-2.0"
] | permissive | southpaw99/repo | 169b356c4773bb16b9e75738a41ee0571d2c7c91 | 2482a17576a4a0c615ea68339d4c12f529485fac | refs/heads/master | 2020-12-15T10:03:05.175325 | 2019-06-29T22:15:15 | 2019-06-29T22:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,066 | py | import meta.navigation.movies
import meta.navigation.tvshows
from meta import plugin, import_tmdb
from trakt import trakt
from xbmcswift2 import xbmcgui
from meta.utils.text import to_utf8
from language import get_string as _
import_tmdb()
@plugin.route('/people/list/show/<id>/<source>/<fanart>')
def people_list_show_people(id, source, fanart):
    """List a TV show's cast as Kodi directory items.

    Only the "imdb" *source* is supported; anything else (or any lookup
    failure) shows an error dialog and finishes with an empty listing.
    NOTE(review): the bare except below swallows all errors, and
    actor["person"]["biography"] assumes the trakt response includes
    extended person data — confirm against the trakt client.
    """
    items = []
    try:
        if source == "imdb":
            people = trakt.get_show_people(id)
        else:
            xbmcgui.Dialog().ok("Error", "No cast info found")
            return plugin.finish(items=[])
    except:
        xbmcgui.Dialog().ok("Error", "No cast info found")
        return plugin.finish(items=[])
    if "cast" in people:
        for actor in people["cast"]:
            # Context-menu entry to export this person via the bob_xml converter.
            context_menu = [
                (
                    "Convert to bob_xml",
                    "RunPlugin({0})".format(
                        plugin.url_for("bob_convert_person_to_xml", trakt_id=actor["person"]["ids"]["trakt"]))
                )
            ]
            image = get_person_artwork(actor)
            label = "{0} ({1})".format(to_utf8(actor["person"]["name"]), to_utf8(actor["character"]))
            info = actor["person"]["biography"]
            items.append({'label': label,
                          'path': plugin.url_for("people_list_person_select", id=actor["person"]["ids"]["trakt"],
                                                 name=to_utf8(actor["person"]["name"])),
                          'info': info,
                          'thumbnail': image,
                          'poster': image,
                          'context_menu': context_menu,
                          'icon': "DefaultVideo.png",
                          'properties': {'fanart_image': fanart},
                          })
    return plugin.finish(items=items)
@plugin.route('/people/list/movie/<id>/<source>/<fanart>')
def people_list_movie_people(id, source, fanart):
    """List a movie's cast as Kodi directory items.

    Supports "imdb" ids directly and "tmdb" ids via a trakt id lookup;
    any other source, a failed lookup, or an exception shows an error
    dialog. NOTE(review): the bare except swallows all errors.
    """
    items = []
    try:
        if source == "imdb":
            people = trakt.get_movie_people(id)
        elif source == "tmdb":
            # Translate the tmdb id to an imdb id through trakt's id mapper.
            ids = trakt.find_trakt_ids("tmdb", id)
            if ids:
                people = trakt.get_movie_people(ids["imdb"])
            else:
                xbmcgui.Dialog().ok("Error", "No cast info found")
                return plugin.finish(items=[])
        else:
            xbmcgui.Dialog().ok("Error", "No cast info found")
            return plugin.finish(items=[])
    except:
        xbmcgui.Dialog().ok("Error", "No cast info found")
        return plugin.finish(items=[])
    if "cast" in people:
        for actor in people["cast"]:
            # Context-menu entry to export this person via the bob_xml converter.
            context_menu = [
                (
                    "Convert to bob_xml",
                    "RunPlugin({0})".format(
                        plugin.url_for("bob_convert_person_to_xml", trakt_id=actor["person"]["ids"]["trakt"]))
                )
            ]
            image = get_person_artwork(actor)
            label = "{0} ({1})".format(to_utf8(actor["person"]["name"]), to_utf8(actor["character"]))
            info = actor["person"]["biography"]
            items.append({'label': label,
                          'path': plugin.url_for("people_list_person_select", id=actor["person"]["ids"]["trakt"],
                                                 name=to_utf8(actor["person"]["name"])),
                          'info': info,
                          'thumbnail': image,
                          'poster': image,
                          'context_menu': context_menu,
                          'icon': "DefaultVideo.png",
                          'properties': {'fanart_image': fanart},
                          })
        return plugin.finish(items=items)
    else:
        xbmcgui.Dialog().ok("Error", "No cast info found")
@plugin.route('/people/<id>/<name>/select')
def people_list_person_select(id, name):
    """Ask whether to show a person's movies or shows, then dispatch."""
    choice = xbmcgui.Dialog().select("show {0}'s:".format(name), ["movies", "shows"])
    handlers = {0: people_list_person_movies, 1: people_list_person_shows}
    handler = handlers.get(choice)
    # A cancelled dialog returns -1: no handler, nothing happens.
    if handler is not None:
        handler(id)
@plugin.route('/people/<id>/shows')
def people_list_person_shows(id):
    """List the shows a person appeared in, or warn when there are none."""
    credits = trakt.get_person_shows(id)
    cast = credits["cast"]
    if not cast:
        xbmcgui.Dialog().ok("Error", "No shows found")
        return
    meta.navigation.tvshows.list_trakt_items(cast, 1, 1)
@plugin.route('/people/<id>/movies')
def people_list_person_movies(id):
    """List the movies a person appeared in, or warn when there are none."""
    credits = trakt.get_person_movies(id)
    cast = credits["cast"]
    if not cast:
        xbmcgui.Dialog().ok("Error", "No movies found")
        return
    meta.navigation.movies.list_trakt_movies_plain(cast)
def get_person_artwork(item):
    """Return a TMDb profile-image URL for a trakt cast entry.

    *item* is a trakt cast record containing item['person']['ids']['tmdb'].
    Falls back to a placeholder image when the person has no TMDb id, no
    profile images, or the TMDb call fails.
    """
    person_tmdb_id = item['person']['ids']['tmdb']
    try:
        person_images = tmdb.People(person_tmdb_id).images()['profiles']
        return 'https://image.tmdb.org/t/p/w640' + person_images[0]['file_path']
    # BUGFIX: narrowed from a bare `except:` (which also caught SystemExit /
    # KeyboardInterrupt); also dropped the unused `person_id` local.
    except Exception:
        return 'https://github.com/metalmagic767/themes/raw/master/metalliq-forqed/default//unavailable_movieposter.png'
| [
""
] | |
478b4ad805ee0087c6d18ba496681501d17cbbd0 | f0d925b64af90d903971aeb23225d9a4e98ee77d | /registration/tests.py | 36fbe28bf3359b091c359bea691af25368f9ac4c | [] | no_license | joseduno/django-playground-web | 8d0fd7c8746eaf4ffcd83970f95340dd23234f2b | a2121ac5e0e1ac06490e08b07f9f305988969778 | refs/heads/master | 2022-12-22T07:36:58.654226 | 2020-10-04T20:00:05 | 2020-10-04T20:00:05 | 291,525,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from django.test import TestCase
from .models import Profile
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTestCase(TestCase):
    """Checks that a Profile exists for a freshly created user.

    NOTE(review): the assertion only holds if something (e.g. a post_save
    signal) creates the Profile automatically — confirm in the app code.
    """

    def setUp(self):  # must always be named setUp (unittest fixture hook)
        User.objects.create_user('test', 'test@test.com', 'test1234')

    def test_profile_exists(self):  # test method names must start with test_
        exists = Profile.objects.filter(user__username='test').exists()
        self.assertEqual(exists, True)
"""Para ejecutar la prueba, python3 manage.py test registration"""
| [
"jose.duno@spymovil.com"
] | jose.duno@spymovil.com |
1c72a69c41c707bacbf963e7c9a6acc1973fdfc0 | badd02f87eeee1216df4c66447e947f0f1cbe328 | /FlaskWebProject2/views.py | de8be825114043a447a1f9057b62635220fc4f58 | [] | no_license | Ajithvajrala23/Website-Using-Flask-Framework | 7dafbeb9eba7d8ad6f49c15eb58ec0ed4fb713f2 | c1ed1edb6d379daf6ef4ba3b36d27b7418231a64 | refs/heads/master | 2022-07-14T13:56:56.002797 | 2019-07-04T10:26:59 | 2019-07-04T10:26:59 | 192,701,155 | 0 | 0 | null | 2022-06-21T22:12:10 | 2019-06-19T09:25:56 | JavaScript | UTF-8 | Python | false | false | 5,260 | py | """
Routes and views for the flask application.
"""
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from FlaskWebProject2 import app
import os
import requests
import operator
import re
#import nltk
from flask import Flask, render_template, request, send_file
from collections import Counter
#from bs4 import BeautifulSoup
#from textblob import TextBlob
import numpy as np
#from textblob.sentiments import NaiveBayesAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
#import base64
analyser = SentimentIntensityAnalyzer()
stops = [
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',
'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for',
'with', 'about', 'against', 'between', 'into', 'through', 'during',
'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in',
'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now', 'id', 'var',
'function', 'js', 'd', 'script', '\'script', 'fjs', 'document', 'r',
'b', 'g', 'e', '\'s', 'c', 'f', 'h', 'l', 'k'
]
def calculate_sentimet(comment):
    """Score *comment* with the module-level VADER analyser.

    Returns a (positive, neutral, negative) tuple of floats.
    """
    scores = analyser.polarity_scores(comment)
    return scores['pos'], scores['neu'], scores['neg']
@app.route('/')
@app.route('/home')
def home():
    """Renders the home page."""
    context = {
        'title': 'Home Page',
        'year': datetime.now().year,
    }
    return render_template('index.html', **context)
@app.route('/contact')
def contact():
    """Renders the contact page."""
    context = {
        'title': 'Contact',
        'year': datetime.now().year,
        'message': 'Details',
    }
    return render_template('contact.html', **context)
@app.route('/about')
def about():
    """Renders the about page."""
    context = {
        'title': 'About Me',
        'year': datetime.now().year,
        'message': 'I am Libra',
    }
    return render_template('about.html', **context)
@app.route('/projects')
def projects():
    """Renders the projects page."""
    context = {
        'title': 'Projects',
        'year': datetime.now().year,
        'message': 'My Notable works are',
    }
    return render_template('projects.html', **context)
@app.route("/text")
def text():
return render_template('text.html')
@app.route("/process", methods =['POST'])
def process():
comment = request.form['comment']
positive, neutral, negative = calculate_sentimet(comment)
pie_labels = ['Positive' ,'Neutral', 'Negative']
pie_values = [positive*100, neutral*100, negative*100]
colors = ['green', 'orange', 'red']
return render_template('sentiment.html', comment = comment,
positive = positive, neutral = neutral,
negative= negative,
max=17000,
set=zip(pie_values, pie_labels, colors))
@app.route('/me', methods=['GET', 'POST'])
def me():
    """Fetch a user-supplied URL and show its word-frequency counts.

    NOTE(review): this handler cannot currently run — `BeautifulSoup` and
    `nltk` are only imported in commented-out lines at the top of the file,
    and `Result` is not defined anywhere visible, so the POST path raises
    NameError at runtime. Confirm the missing imports/model before use.
    """
    errors = []
    results = {}
    if request.method == "POST":
        # get url that the person has entered
        try:
            url = request.form['url']
            r = requests.get(url)
            print(r)
        except:
            # NOTE(review): bare except also hides programming errors here.
            errors.append(
                "Unable to get URL. Please make sure it's valid and try again."
            )
            return render_template('me.html', errors=errors)
        if r:
            # text processing
            print(r)
            # NOTE(review): BeautifulSoup is undefined (import commented out).
            raw = BeautifulSoup(r.text, 'html.parser').get_text()
            #nltk.data.path.append('./nltk_data/') # set the path
            # NOTE(review): nltk is undefined (import commented out).
            tokens = nltk.word_tokenize(raw)
            text = nltk.Text(tokens)
            # remove punctuation, count raw words
            nonPunct = re.compile('.*[A-Za-z].*')
            raw_words = [w for w in text if nonPunct.match(w)]
            raw_word_count = Counter(raw_words)
            # stop words
            no_stop_words = [w for w in raw_words if w.lower() not in stops]
            no_stop_words_count = Counter(no_stop_words)
            # save the results, most frequent first
            results = sorted(
                no_stop_words_count.items(),
                key=operator.itemgetter(1),
                reverse=True
            )
            print(results)
            try:
                # NOTE(review): Result is not defined in this file; `result`
                # is also never used or saved afterwards.
                result = Result(
                    url=url,
                    result_all=raw_word_count,
                    result_no_stop_words=no_stop_words_count
                )
            except:
                errors.append("Unable to add item to database.")
    return render_template('me.html', errors=errors, results=results)
| [
"ajith.vajrala@gmail.com"
] | ajith.vajrala@gmail.com |
bc35d37cce8170a1fc6e960d5ed877d19de0450d | 00377b7f3f704b26262a2bc8ed1e2661c3cc22ee | /Input_Output/1.py | edc6e4cb0d82bb938b21810d4b98f8cbff82f630 | [] | no_license | canshot/selflearning-Python-Ruby-Jaewan | 453d1a8a41d5746f5e6646616591bd7c6f0b335f | 6ea8a507620290a444794688360b1089d68b25d0 | refs/heads/master | 2021-09-04T02:35:43.705676 | 2018-01-14T19:26:49 | 2018-01-14T19:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | in_str = input("insert your PW")
print(in_str.upper()+" World!")
| [
"limjaewan@Lab448Print.local"
] | limjaewan@Lab448Print.local |
67433c53ab09e47fd8566c046cb7de38f32f1cfd | 326940c9e5ca002ec8c3400e45cd6e3cb4c2b98c | /Computational Methods for EE/Assignment 2 - Spline Interpolation/q1-q2/q1.py | 3966a43b671534fdf4680dfe6e199afb7f1a7e11 | [] | no_license | suraj93/IITM-Course-Codes | d33de57b7b8759a8f56d77e6f00d3110cba4c5c6 | ed0ca14cdff0341580122f0d4e1a196f1417e1e4 | refs/heads/master | 2016-09-06T14:05:05.470723 | 2014-12-02T05:24:22 | 2014-12-02T05:24:22 | 24,233,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import numpy as np
from scipy.special import jn,jv
from matplotlib.pyplot import *
def func(x):
    """Evaluate x**(1 + J0(x)) / sqrt((1 + 100*x^2) * (1 - x)).

    J0 is the Bessel function of the first kind, order 0. Works
    elementwise on numpy arrays as well as on scalars.
    """
    numerator = x ** (1 + jv(0, x))
    denominator = np.sqrt((1 + 100 * (x ** 2)) * (1 - x))
    return numerator / denominator
def func_deriv(x):
    """Analytic derivative of func at scalar x, via the quotient rule.

    Returns 0 at x == 0, where the formula below would take log(0) and
    divide by zero.
    """
    if x == 0:
        return 0
    f = np.power(x, 1 + jv(0, x))                          # numerator of func
    df = f * (-1 * jv(1, x) * np.log(x) + (1 + jv(0, x)) / x)
    g = np.sqrt(1 - x + 100 * (x ** 2) - 100 * (x ** 3))   # denominator of func
    dg = (-1 + 200 * x - 300 * (x ** 2)) / (2 * g)
    return (g * df - dg * f) / ((g ** 2))
x=np.arange(0,0.901,0.05)
y=func(x)
dy=[func_deriv(xx) for xx in x]
print dy
plot(x,dy)
show() | [
"surajh.93@gmail.com"
] | surajh.93@gmail.com |
846029f797948ff4c428cce8a5922b17ffbbd67d | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py | c050f4b4aa8fc88df3e7a1e1c02c2d1b67f42612 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 3,731 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricsOperations
from .operations import ServiceDiagnosticSettingsOperations
from .. import models
class MonitorManagementClient(object):
    """Monitor Management Client.
    :ivar metrics: MetricsOperations operations
    :vartype metrics: $(python-base-namespace).v2016_09_01.aio.operations.MetricsOperations
    :ivar service_diagnostic_settings: ServiceDiagnosticSettingsOperations operations
    :vartype service_diagnostic_settings: $(python-base-namespace).v2016_09_01.aio.operations.ServiceDiagnosticSettingsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param str base_url: Service URL
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = MonitorManagementClientConfiguration(credential, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Collect every model class from the generated models module so the
        # serializer/deserializer can resolve types by name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        self.metrics = MetricsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_diagnostic_settings = ServiceDiagnosticSettingsOperations(
            self._client, self._config, self._serialize, self._deserialize)
    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.
        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        http_request.url = self._client.format_url(http_request.url)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response
    async def close(self) -> None:
        # Release the underlying transport/session.
        await self._client.close()
    async def __aenter__(self) -> "MonitorManagementClient":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
817a7cf6a9e89ea4e451b5c8ec929b7fddd5aca4 | a2764e06558fb659c5a2d919cd4428a5e0905e16 | /env/lib/python2.7/site-packages/github3/notifications.py | 2dc3747c10093d227db0564d9d13e900cbacd8f3 | [] | no_license | jesicamarquez/spotify-api-project | 89cbc98a330dcf1a2624df01240427f9b467cbc6 | 075739441b93875450d664c078738686bae351e8 | refs/heads/master | 2021-01-20T12:12:27.727693 | 2015-03-04T02:23:24 | 2015-03-04T02:23:24 | 28,785,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,316 | py | # -*- coding: utf-8 -*-
"""
github3.notifications
=====================
This module contains the classes relating to notifications.
See also: http://developer.github.com/v3/activity/notifications/
"""
from __future__ import unicode_literals
from json import dumps
from github3.models import GitHubCore
class Thread(GitHubCore):
    """The :class:`Thread <Thread>` object wraps notification threads. This
    contains information about the repository generating the notification, the
    subject, and the reason.
    Two thread instances can be checked like so::
        t1 == t2
        t1 != t2
    And is equivalent to::
        t1.id == t2.id
        t1.id != t2.id
    See also:
    http://developer.github.com/v3/activity/notifications/#view-a-single-thread
    """
    def __init__(self, notif, session=None):
        super(Thread, self).__init__(notif, session)
        self._api = notif.get('url')
        #: Comment responsible for the notification
        self.comment = notif.get('comment', {})
        #: Thread information
        self.thread = notif.get('thread', {})
        # Imported locally, presumably to avoid a circular import with
        # github3.repos at module load time.
        from github3.repos import Repository
        #: Repository the comment was made on
        self.repository = Repository(notif.get('repository', {}), self)
        #: When the thread was last updated
        self.updated_at = self._strptime(notif.get('updated_at'))
        #: Id of the thread
        self.id = notif.get('id')
        #: Dictionary of urls for the thread
        self.urls = notif.get('urls')
        #: datetime object representing the last time the user read the thread
        self.last_read_at = self._strptime(notif.get('last_read_at'))
        #: The reason you're receiving the notification
        self.reason = notif.get('reason')
        #: Subject of the Notification, e.g., which issue/pull/diff is this in
        #: relation to. This is a dictionary
        self.subject = notif.get('subject')
        #: Whether the thread has not been read yet
        self.unread = notif.get('unread')
    def _repr(self):
        return '<Thread [{0}]>'.format(self.subject.get('title'))
    def delete_subscription(self):
        """Delete subscription for this thread.
        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)
    def is_unread(self):
        """Tells you if the thread is unread or not."""
        return self.unread
    def mark(self):
        """Mark the thread as read.
        :returns: bool
        """
        return self._boolean(self._patch(self._api), 205, 404)
    def set_subscription(self, subscribed, ignored):
        """Set the user's subscription for this thread
        :param bool subscribed: (required), determines if notifications should
            be received from this thread.
        :param bool ignored: (required), determines if notifications should be
            ignored from this thread.
        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        sub = {'subscribed': subscribed, 'ignored': ignored}
        json = self._json(self._put(url, data=dumps(sub)), 200)
        return Subscription(json, self) if json else None
    def subscription(self):
        """Checks the status of the user's subscription to this thread.
        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        json = self._json(self._get(url), 200)
        return Subscription(json, self) if json else None
class Subscription(GitHubCore):
    """The :class:`Subscription <Subscription>` object wraps thread and
    repository subscription information.
    See also:
    http://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
    """
    def __init__(self, sub, session=None):
        super(Subscription, self).__init__(sub, session)
        self._api = sub.get('url')
        #: reason user is subscribed to this thread/repository
        self.reason = sub.get('reason')
        #: datetime representation of when the subscription was created
        self.created_at = self._strptime(sub.get('created_at'))
        #: API url of the thread if it exists
        self.thread_url = sub.get('thread_url')
        #: API url of the repository if it exists
        self.repository_url = sub.get('repository_url')
        #: True when notifications from this source are ignored
        self.ignored = sub.get('ignored', False)
        #: True when the user is subscribed to this source
        self.subscribed = sub.get('subscribed', False)
    def _repr(self):
        return '<Subscription [{0}]>'.format(self.subscribed)
    def delete(self):
        """Delete this subscription.
        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)
    def is_ignored(self):
        """Tells you if notifications from this source are ignored."""
        return self.ignored
    def is_subscribed(self):
        """Tells you if the user is subscribed to this source."""
        return self.subscribed
    def set(self, subscribed, ignored):
        """Set the user's subscription for this subscription
        :param bool subscribed: (required), determines if notifications should
            be received from this thread.
        :param bool ignored: (required), determines if notifications should be
            ignored from this thread.
        """
        sub = {'subscribed': subscribed, 'ignored': ignored}
        json = self._json(self._put(self._api, data=dumps(sub)), 200)
        # Re-initialise this instance in place from the server's response.
        self.__init__(json, self._session)
| [
"jesica.v.marquez@gmail.com"
] | jesica.v.marquez@gmail.com |
c8451b00b14ff3eef4cf17e896855e497bf843a3 | 5f5d845e383c6ed603fcb105f09bbc5811b2124a | /teuthology/test/test_packaging.py | 04f91d8173ec7a0efbd06ed0025345b3ae9e76f1 | [
"MIT"
] | permissive | vasukulkarni/teuthology | 0a2bed271dfd549c6966c561f97478182b0b28ea | f3a4e5e155f20ac4c46cfb8b66cc7170672f1f87 | refs/heads/master | 2020-04-08T16:39:48.897351 | 2015-03-17T00:01:45 | 2015-03-17T00:01:45 | 32,357,309 | 0 | 0 | null | 2015-03-16T22:42:04 | 2015-03-16T22:42:04 | null | UTF-8 | Python | false | false | 5,034 | py | import pytest
from mock import patch, Mock
from teuthology import packaging
class TestPackaging(object):
    """Unit tests for teuthology.packaging.

    Remotes, the teuthology config and the `misc` module are mocked, so no
    real hosts or package managers are touched.
    """

    # --- package/service name translation per OS family ----------------
    @patch("teuthology.packaging.misc")
    def test_get_package_name_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        assert packaging.get_package_name('sqlite', Mock()) == "sqlite3"
    @patch("teuthology.packaging.misc")
    def test_get_package_name_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_package_name('sqlite', Mock()) is None
    @patch("teuthology.packaging.misc")
    def test_get_package_name_not_found(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_package_name('notthere', Mock()) is None
    @patch("teuthology.packaging.misc")
    def test_get_service_name_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        assert packaging.get_service_name('httpd', Mock()) == 'apache2'
    @patch("teuthology.packaging.misc")
    def test_get_service_name_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_service_name('httpd', Mock()) == 'httpd'
    @patch("teuthology.packaging.misc")
    def test_get_service_name_not_found(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_service_name('notthere', Mock()) is None

    # --- install/remove: verify the exact command run on the remote ----
    @patch("teuthology.packaging.misc")
    def test_install_package_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        m_remote = Mock()
        expected = [
            'DEBIAN_FRONTEND=noninteractive',
            'sudo',
            '-E',
            'apt-get',
            '-y',
            'install',
            'apache2'
        ]
        packaging.install_package('apache2', m_remote)
        m_remote.run.assert_called_with(args=expected)
    @patch("teuthology.packaging.misc")
    def test_install_package_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        m_remote = Mock()
        expected = [
            'sudo',
            'yum',
            '-y',
            'install',
            'httpd'
        ]
        packaging.install_package('httpd', m_remote)
        m_remote.run.assert_called_with(args=expected)
    @patch("teuthology.packaging.misc")
    def test_remove_package_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        m_remote = Mock()
        expected = [
            'DEBIAN_FRONTEND=noninteractive',
            'sudo',
            '-E',
            'apt-get',
            '-y',
            'purge',
            'apache2'
        ]
        packaging.remove_package('apache2', m_remote)
        m_remote.run.assert_called_with(args=expected)
    @patch("teuthology.packaging.misc")
    def test_remove_package_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        m_remote = Mock()
        expected = [
            'sudo',
            'yum',
            '-y',
            'erase',
            'httpd'
        ]
        packaging.remove_package('httpd', m_remote)
        m_remote.run.assert_called_with(args=expected)

    # --- koji build metadata helpers ------------------------------------
    def test_get_koji_package_name(self):
        build_info = dict(version="3.10.0", release="123.20.1")
        result = packaging.get_koji_package_name("kernel", build_info)
        assert result == "kernel-3.10.0-123.20.1.x86_64.rpm"
    @patch("teuthology.packaging.config")
    def test_get_kojiroot_base_url(self, m_config):
        m_config.kojiroot_url = "http://kojiroot.com"
        build_info = dict(
            package_name="kernel",
            version="3.10.0",
            release="123.20.1",
        )
        result = packaging.get_kojiroot_base_url(build_info)
        expected = "http://kojiroot.com/kernel/3.10.0/123.20.1/x86_64/"
        assert result == expected
    @patch("teuthology.packaging.config")
    def test_get_koji_build_info_success(self, m_config):
        m_config.kojihub_url = "http://kojihub.com"
        m_proc = Mock()
        expected = dict(foo="bar")
        m_proc.exitstatus = 0
        # The remote command prints the build dict; get_koji_build_info
        # presumably parses stdout back into a dict — confirm in packaging.
        m_proc.stdout.getvalue.return_value = str(expected)
        m_remote = Mock()
        m_remote.run.return_value = m_proc
        result = packaging.get_koji_build_info(1, m_remote, dict())
        assert result == expected
        args, kwargs = m_remote.run.call_args
        expected_args = [
            'python', '-c',
            'import koji; '
            'hub = koji.ClientSession("http://kojihub.com"); '
            'print hub.getBuild(1)',
        ]
        assert expected_args == kwargs['args']
    @patch("teuthology.packaging.config")
    def test_get_koji_build_info_fail(self, m_config):
        m_config.kojihub_url = "http://kojihub.com"
        m_proc = Mock()
        m_proc.exitstatus = 1
        m_remote = Mock()
        m_remote.run.return_value = m_proc
        m_ctx = Mock()
        m_ctx.summary = dict()
        # A non-zero exit status on the remote must surface as RuntimeError.
        with pytest.raises(RuntimeError):
            packaging.get_koji_build_info(1, m_remote, m_ctx)
| [
"aschoen@redhat.com"
] | aschoen@redhat.com |
cb915a83c326ed9358735e7e6a6123656ae20d18 | f00ae2cb4709539e8a78247678d9bb51913e0373 | /oacids/schedules/schedule.py | 76499b213fe0838b48b11e39aed9eecb971f06d3 | [
"MIT"
] | permissive | openaps/oacids | 576351d34d51c62492fc0ed8be5e786273f27aee | ed8d6414171f45ac0c33636b5b00013e462e89fb | refs/heads/master | 2021-01-10T06:03:53.395357 | 2016-03-21T04:02:47 | 2016-03-21T04:02:47 | 51,559,470 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 343 | py |
from openaps.configurable import Configurable
import recurrent
class Schedule (Configurable):
    """A Configurable describing a recurring schedule of phases.

    The recurrence is given as natural language (e.g. "every day at 9am")
    and parsed into an RFC 5545 RRULE via the `recurrent` package.
    """
    # Config-section prefix used by the Configurable machinery.
    prefix = 'schedule'
    # Fields that must be present in the config for this section.
    required = [ 'phases', 'rrule' ]
    url_template = "schedule://{name:s}/{rrule:s}"
    @classmethod
    def parse_rrule (Klass, rrule):
        # Parse a natural-language recurrence string; returns whatever
        # recurrent.RecurringEvent.parse yields (RRULE string, datetime,
        # or None when it cannot parse).
        parser = recurrent.RecurringEvent( )
        rule = parser.parse(rrule)
        return rule
| [
"bewest@gmail.com"
] | bewest@gmail.com |
2f02d0e8afa68349157c88c3ed9678c3edd16833 | 23c9552b39b4b840e54fcc58155c219e5a8e202d | /modules/units.py | 31fd2c3e5e1884b6f152ae07117a15c3f795b0df | [
"EFL-2.0"
] | permissive | jfriedly/jenni | cd26dd5be76378a540f740cd3bb9a122ad6f12db | 41c42a7ba13eaf57915a81d6aa6cdd188cfd7f8a | refs/heads/master | 2021-01-18T08:52:16.931326 | 2013-06-07T22:56:26 | 2013-06-07T22:56:26 | 2,134,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #!/usr/bin/env python
'''
units.py - jenni Units Module
Copyright 2013, Michael Yanovich (yanovich.net)
Licensed under the Eiffel Forum License 2.
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
'''
import datetime as dt
import json
import re
import web
exchange_rates = dict()
last_check = dt.datetime.now()
exchanges = ['mtgox', 'btc24', 'bitfloor', 'vcx', 'btce', 'rock', 'bitme',
'ripple', 'lybit']
def btc_page():
    """Fetch the bitcoincharts market-summary JSON feed.

    Returns (True, body) on success, or (False, error message) when the
    HTTP fetch raises.
    """
    try:
        page = web.get('http://bitcoincharts.com/t/markets.json')
    except Exception as e:
        # Bug fix: the old handler did `print time.time(), btc, e`, which
        # itself raised NameError (`time` is never imported in this module)
        # and used Python 2-only syntax. Log with the imported `dt` module
        # in a form that works on both Python 2 and 3.
        print('%s btc_page error: %s' % (dt.datetime.now(), e))
        return False, 'Failed to reach bitcoincharts.com'
    return True, page
def ppnum(num):
    """Pretty-print a number with thousands separators and two decimals.

    e.g. 1234567.891 -> '1,234,567.89'
    """
    return '{0:,.2f}'.format(num)
def btc(jenni, input):
    '''.btc -- display the current prices for Bitcoins'''
    # Module-level cache: refreshed at most every 15 minutes.
    global exchange_rates
    global last_check
    now = dt.datetime.now()
    print 'now: ', now
    print 'last: ', last_check
    if (not exchange_rates) or (now - last_check > dt.timedelta(minutes=15)):
        #if now - last_check > 900:
        status, page = btc_page()
        if status:
            json_page = json.loads(page)
        else:
            return jenni.reply(page)
        ## build internal state of exchange
        # NOTE(review): if the very first fetch fails we return above, so
        # exchange_rates['USD'] below can only KeyError if the feed had no
        # USD markets at all.
        for each in json_page:
            if each['currency'] == 'USD':
                if 'USD' not in exchange_rates:
                    exchange_rates['USD'] = dict()
                # Keyed by exchange symbol with the 'USD' suffix stripped.
                exchange_rates['USD'][each['symbol'].replace('USD', '')] = each['close']
        last_check = dt.datetime.now()
    response = '1 BTC (in USD) = '
    # NOTE(review): Python 2 idiom — dict.keys() returns a view on Python 3
    # and .sort() would fail there; this module uses py2 print syntax too.
    symbols = exchange_rates['USD'].keys()
    symbols.sort()
    for each in symbols:
        # Only show the whitelisted exchanges (module-level `exchanges`).
        if each.replace('USD', '') in exchanges:
            response += '%s: %s | ' % (each, exchange_rates['USD'][each])
    # Joke metric: mtgox close scaled by 160.
    response += 'lolcat (mtgox) index: %s | ' % (ppnum(float(exchange_rates['USD']['mtgox']) * 160))
    response += 'last updated at: ' + str(last_check)
    jenni.reply(response)
btc.commands = ['btc']
btc.example = '.btc'
btc.rate = 5
if __name__ == '__main__':
print __doc__.strip()
| [
"michael@yanovich.net"
] | michael@yanovich.net |
101b641690e7cda59c300f207ef57d7b4d613baa | ac10ccaf44a7610d2230dbe223336cd64f8c0972 | /ms2ldaviz/basicviz/migrations/0033_auto_20160920_0859.py | b74d76b496f5d8f05e297caac658ce76fd904faf | [] | no_license | ymcdull/ms2ldaviz | db27d3f49f43928dcdd715f4a290ee3040d27b83 | bd5290496af44b3996c4118c6ac2385a5a459926 | refs/heads/master | 2020-05-21T03:04:29.939563 | 2017-03-14T11:44:42 | 2017-03-14T11:44:42 | 84,564,829 | 0 | 0 | null | 2017-03-10T13:54:23 | 2017-03-10T13:54:22 | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the AlphaCorrOptions model.

    The foreign key to MultiFileExperiment is removed first so the model
    can be deleted cleanly.
    """
    dependencies = [
        ('basicviz', '0032_auto_20160920_0857'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='alphacorroptions',
            name='multifileexperiment',
        ),
        migrations.DeleteModel(
            name='AlphaCorrOptions',
        ),
    ]
| [
"="
] | = |
0be6afb5db488b9ad36be9deefede6211c4a3f37 | a02bd3d7ad77d0994a495da9870113591db13444 | /Part2/calc2.py | fe6dc22c6479f17cc025dfa8ec864419294ee75f | [] | no_license | devjunhong/simpleInterpreter | df7807f3460adb5898b7ce2b38c1ee6041e6eb42 | 8793711eaef02a9f29f201393d88c03f866d3512 | refs/heads/master | 2020-04-01T16:15:10.126792 | 2019-02-13T02:52:00 | 2019-02-13T02:52:00 | 153,372,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | # Token types
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
INTEGER, PLUS, MINUS, EOF = 'INTEGER', 'PLUS', 'MINUS', 'EOF'
class Token(object):
    """A lexical token: a (type, value) pair produced by the tokenizer."""
    def __init__(self, type, value):
        self.type = type    # one of INTEGER, PLUS, MINUS, EOF
        self.value = value  # int payload, '+', '-', or None
    def __str__(self):
        """String representation, e.g. Token(INTEGER, 3) or Token(PLUS, '+')."""
        return 'Token({0}, {1})'.format(self.type, repr(self.value))
    def __repr__(self):
        return self.__str__()
class Interpreter(object):
    """Combined lexer + parser/interpreter for `INTEGER (+|-) INTEGER`.

    Note: the constructor indexes text[0], so empty input raises IndexError.
    """
    def __init__(self, text):
        # client string input, e.g. "3 + 5", "12 - 5", etc
        self.text = text
        # self.pos is an index into self.text
        self.pos = 0
        # current token instance
        self.current_token = None
        self.current_char = self.text[self.pos]
    def error(self):
        # Single error path for both lexing and parsing failures.
        raise Exception('Error parsing input')
    def advance(self):
        """Advance the 'pos' pointer and set the 'current_char' variable."""
        self.pos += 1
        if self.pos > len(self.text) - 1:
            self.current_char = None  # Indicates end of input
        else:
            self.current_char = self.text[self.pos]
    def skip_whitespace(self):
        # Consume consecutive whitespace between tokens.
        while self.current_char is not None and self.current_char.isspace():
            self.advance()
    def integer(self):
        """Return a (multidigit) integer consumed from the input."""
        result = ''
        while self.current_char is not None and self.current_char.isdigit():
            result += self.current_char
            self.advance()
        return int(result)
    def get_next_token(self):
        """Lexical analyzer (also known as scanner or tokenizer)
        This method is responsible for breaking a sentence
        apart into tokens.
        """
        while self.current_char is not None:
            if self.current_char.isspace():
                self.skip_whitespace()
                continue
            if self.current_char.isdigit():
                return Token(INTEGER, self.integer())
            if self.current_char == '+':
                self.advance()
                return Token(PLUS, '+')
            if self.current_char == '-':
                self.advance()
                return Token(MINUS, '-')
            # Any other character is a lexing error.
            self.error()
        return Token(EOF, None)
    def eat(self, token_type):
        # compare the current token type with the passed token
        # type and if they match then "eat" the current token
        # and assign the next token to the self.current_token,
        # otherwise raise an exception.
        if self.current_token.type == token_type:
            self.current_token = self.get_next_token()
        else:
            self.error()
    def expr(self):
        """Parser / Interpreter
        expr -> INTEGER PLUS INTEGER
        expr -> INTEGER MINUS INTEGER
        """
        # set current token to the first token taken from the input
        self.current_token = self.get_next_token()
        # we expect the current token to be an integer
        left = self.current_token
        self.eat(INTEGER)
        # we expect the current token to be either a '+' or '-'
        op = self.current_token
        if op.type == PLUS:
            self.eat(PLUS)
        else:
            self.eat(MINUS)
        # we expect the current token to be an integer
        right = self.current_token
        self.eat(INTEGER)
        # after the above call the self.current_token is set to
        # EOF token
        # at this point either the INTEGER PLUS INTEGER or
        # the INTEGER MINUS INTEGER sequence of tokens
        # has been successfully found and the method can just
        # return the result of adding or subtracting two integers,
        # thus effectively interpreting client input
        if op.type == PLUS:
            result = left.value + right.value
        else:
            result = left.value - right.value
        return result
def main():
    """Interactive calculator REPL: reads 'calc> ' lines until EOF."""
    # Improvement: bind the line-reading function once so the same loop
    # runs under Python 2 (raw_input) and Python 3 (input) — the original
    # only carried a comment telling the reader to edit the call by hand.
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input      # Python 3
    while True:
        try:
            text = read_line('calc> ')
        except EOFError:
            break
        if not text:
            continue
        interpreter = Interpreter(text)
        result = interpreter.expr()
        print(result)
if __name__ == '__main__':
main() | [
"junhong.kim@milliman.com"
] | junhong.kim@milliman.com |
558a06e956fb8762b99ef627a77d6403aff05be4 | 9b0195fd5ffd407f6e5edf4955058299a361ca25 | /leetcode-python/huoshui/wechat_security/problem.py | 06451c59ced16a246d7992e7dfd2d0e85e9ce1c0 | [] | no_license | moqiguzhu/Online-Judge | b232dbbca8e513eb14620259c44b15b60b54e005 | 3ca0c407ffff775167b031d00fc7376c25d69e48 | refs/heads/master | 2022-01-27T05:42:51.813437 | 2022-01-18T02:17:04 | 2022-01-18T02:17:04 | 42,443,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from collections import defaultdict, deque, Counter
from typing import List
import math
import copy
import random
import numpy as np
import bisect
import inspect
import unittest
def problem1(file_path):
    """Return the wxid that appears most often in the file (one wxid per line).

    Prints a notice and returns '' when the file is empty.
    """
    # Bug fix: the original used defaultdict(lambda x: 0) — the default
    # factory is called with *no* arguments, so the first `+= 1` raised
    # TypeError. A Counter is the direct frequency map.
    wxid_cnt = Counter()
    with open(file_path, encoding='utf8') as f:
        for line in f:  # stream instead of readlines()
            wxid_cnt[line.strip()] += 1
    # 边界: empty file
    if not wxid_cnt:
        print('文件为空')
        return ''
    # Bug fix: most_common(1) returns a one-element list of (wxid, count)
    # pairs; the original unpacked the list itself and raised ValueError.
    mostapp_wxid, _freq = wxid_cnt.most_common(1)[0]
    return mostapp_wxid
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, val):
        self.val = val     # payload
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
def problem2(root):
    # TODO: unimplemented stub; `root` is presumably a TreeNode.
    pass
| [
"moqiguzhu@163.com"
] | moqiguzhu@163.com |
e34b085b9bbc988d0ccd664acc4c3a15e17ae509 | 9eb9a74b323aa5e55c344cef9bea3df26c8901fc | /euler4.py | 3f27e5420724e669f8854a4950e36523c43988df | [] | no_license | BCarley/Euler | 6e60df448a0508fa55b0c16e55c87763ddd5e236 | a0029ab348c4388e824cf2e1d374b4a902b915e4 | refs/heads/master | 2016-09-05T15:56:12.097080 | 2015-09-01T08:36:44 | 2015-09-01T09:26:28 | 38,889,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | """euler question 4
Trying to find the largest palindromic product of 2 three digit numbers.
attempting to use the quadratic sieve method for this"""
import math
import numpy as np
def is_palindromic(num):
    """Return True when num's decimal digits read the same both ways."""
    digits = str(num)
    return digits == digits[::-1]
def generate_primes(end=10000000):
    """Sieve-of-Eratosthenes generator: yields every prime p with 2 <= p <= end.

    The default cap of 10**7 keeps the sieve list within typical memory.
    Raises RuntimeError when the sieve cannot be allocated.
    """
    np1 = end + 1
    # Bug fixes vs the original: `range` objects cannot be slice-assigned
    # on Python 3 (a real list is needed), `xrange` does not exist there,
    # and `raise "message"` raises a *string*, which is itself a TypeError
    # on every modern interpreter.
    try:
        sieve = list(range(np1))
    except MemoryError:
        raise RuntimeError('sieve up to %d is too large for memory' % end)
    sieve[1] = 0
    for i in range(2, np1):
        if sieve[i]:
            # i survived: it is prime. Cross off its multiples from i*i up.
            sieve[i * i: np1: i] = [0] * len(range(i * i, np1, i))
            yield i
def get_quadratic_residues(num, no_primes):
    """Build a factor base: primes p for which num is a quadratic residue mod p.

    Uses Euler's criterion num**((p-1)/2) == 1 (mod p).
    NOTE: the `>` test below means no_primes + 1 primes are returned; kept
    as-is so downstream sieve behaviour is unchanged.
    """
    factor_base = []
    for prime in generate_primes():
        # Bug fix: `(prime - 1) / 2` is *float* division on Python 3, which
        # made the modular test inexact. Use integer division, and the
        # three-argument pow() for an exact (and fast) modular exponent.
        if pow(num, (prime - 1) // 2, prime) == 1:
            factor_base.append(prime)
        if len(factor_base) > no_primes:
            break
    return factor_base
def sieve_values(num, values, factor_base):
    """performs a sieve on the values to return a list of numbers,
    numbers that are returned as 1 are smooth numbers"""
    # NOTE(review): `values` is mutated in place and also returned. The
    # sieve position uses index + ceil(sqrt(num)), while factorise() builds
    # `values` from x in range(-100, 100) — i.e. offset by -100; confirm
    # whether the offset is intentional.
    for prime in factor_base:
        cnt = 0
        for (index, i) in enumerate(values):
            if ((index + int(math.ceil(num ** 0.5))) ** 2 - num) % prime == 0:
                cnt += 1
                #print "divided by %i" % (prime), values
                # NOTE(review): value/prime is float division on Python 3;
                # int() truncates, which is exact only while floats stay
                # within 2**53 — confirm for large num.
                values[index::prime] = [int(value/prime) for value in values[index::prime]]
                #print "Divided by %i at index %i, count is %i:" % (prime, index, cnt), values, "\n"
                # Only the first root is used for 2; two roots otherwise.
                if prime == 2 or cnt == 2:
                    break
    return values
def construct_matrix(num, values, factor_base):
    """Return a list of exponent vectors mod 2, one row per smooth value.

    (The original docstring said "dictionary", but a list of lists is
    returned.) Indices whose sieved value reduced to 1 are the smooth
    positions; each row holds the parity of each factor-base prime's
    multiplicity in the corresponding Q(x) value.
    """
    smooth_x = []
    for (index, value) in enumerate(values):
        if value == 1:
            smooth_x.append(index)
    # NOTE(review): same index-offset question as sieve_values — factorise
    # built values from x in range(-100, 100), but here x is the raw index.
    smooth_y = [((x + math.ceil(num ** 0.5))**2 - num) for x in smooth_x]
    matrish = []
    for y in smooth_y:
        matrish.append([div_into(y, prime) % 2 for prime in factor_base])
    m = matrish
    return m
def div_into(x, y):
    """Return the multiplicity of y in x, i.e. how many times y divides x.

    Note: x == 0 never terminates, since 0 % y == 0 for every y.
    """
    count = 0
    while x % y == 0:
        x = x / y
        count += 1
    return count
def factorise(num):
    """perform a quadratic sieve to find the largest factors of num"""
    # NOTE: Python 2 only — `xrange` and the statement-form `print` below
    # do not exist on Python 3.
    # Q(x) = (x + ceil(sqrt(num)))**2 - num for x in [-100, 100).
    values = [(i + math.ceil(num ** 0.5)) ** 2 - num for i in xrange(-100, 100)]
    factor_base = get_quadratic_residues(num, no_primes=100)
    print "Factor Base:", factor_base
    sieved_values = sieve_values(num, values, factor_base)
    return construct_matrix(num, sieved_values, factor_base)
x = factorise(977779)
print "Sieved Values:", x
| [
"ben.carley@bipb.com"
] | ben.carley@bipb.com |
19f9791df898df635f27bb0f7a40bc6647087b8f | 771d0def5238a2b311639853d784841ccd57ce4e | /BTRNN-release/BT_RNN/BTRNN.py | 5e5230ffc5a5a06f0fa51622b6dbecc39b5d1e03 | [] | no_license | TripleEss/Block-term-Tensor-Neural-Networks | 4869c7d786425803b29698615000ccbd686e7664 | 9c0f4a64b4a3eb76a56f272feccbcca7469faa23 | refs/heads/master | 2023-01-11T10:28:39.117990 | 2020-02-19T10:11:17 | 2020-02-19T10:11:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,597 | py | # Created by ay27 at 2017/10/6
from keras.layers.recurrent import Recurrent
from keras.engine import InputSpec
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from .BT_mul_Keras import *
class BT_RNN(Recurrent):
    """Vanilla RNN whose input-to-hidden weight is stored in Block-Term
    (BT) tensor-decomposition form.

    The dense kernel of shape (prod(bt_input_shape), prod(bt_output_shape))
    is replaced by `block_ranks` Tucker blocks, each made of one core tensor
    of shape `core_ranks` plus one factor matrix per tensor mode.  Only the
    input->hidden weight is compressed; the recurrent (hidden->hidden)
    kernel stays dense.  `compress_ratio` reports the resulting parameter
    saving on the input->hidden weight only.
    """

    def __init__(self,
                 bt_input_shape, bt_output_shape, core_ranks, block_ranks,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 debug=False,
                 init_seed=11111986,
                 **kwargs):
        super(BT_RNN, self).__init__(**kwargs)
        # Tensorised shapes: prod(bt_input_shape) must equal the flat input
        # dim, prod(bt_output_shape) becomes the number of hidden units.
        self.bt_input_shape = np.array(bt_input_shape)
        self.bt_output_shape = np.array(bt_output_shape)
        self.core_ranks = np.array(core_ranks)
        self.block_ranks = int(block_ranks)
        self.debug = debug
        self.units = np.prod(self.bt_output_shape)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_spec = InputSpec(shape=(None, self.units))
        self.states = None
        self.kernel = None
        self.recurrent_kernel = None
        self.cores = [None]
        self.factors = [None]
        self.bias = None
        # NOTE(review): self.debug was already assigned above — this second
        # assignment is redundant.
        self.debug = debug
        self.init_seed = init_seed
        self.input_dim = np.prod(self.bt_input_shape)
        # Parameter counts of the dense kernel vs. its BT replacement:
        # block_ranks * (sum of factor sizes + core size).
        self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
        self.params_bt = self.block_ranks * \
                         (np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
                             self.core_ranks))
        self.batch_size = None
        # reported compress ratio in input->hidden weight
        self.compress_ratio = self.params_original / self.params_bt
        if self.debug:
            print('bt_input_shape  = ' + str(self.bt_input_shape))
            print('bt_output_shape = ' + str(self.bt_output_shape))
            print('core_ranks      = ' + str(self.core_ranks))
            print('block_ranks     = ' + str(self.block_ranks))
            print('compress_ratio  = ' + str(self.compress_ratio))
        # NOTE(review): this compares the ndim of the three 1-D arrays (all
        # equal to 1), so it can never fail; presumably it was meant to
        # compare len(core_ranks) == len(bt_input_shape) == len(bt_output_shape).
        assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)

    def build(self, input_shape):
        """Create the BT-decomposed input->hidden kernel, the dense
        recurrent kernel and (optionally) the bias."""
        # input shape: `(batch, time (padded with zeros), input_dim)`
        # input_shape is a tuple
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        assert len(input_shape) == 3
        assert input_shape[2] == self.input_dim
        self.batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
        self.states = [None]
        if self.stateful:
            self.reset_states()
        ################################################################################################################
        # input -> hidden state
        # the kernel layout is : [[core, factor0, factor1, factor2, ...],
        #                         [core, factor0, factor1, factor2, ...],
        #                         ...]
        # One flat weight vector holds all cores and factors; it is split
        # into views below so gradients still flow through a single tensor.
        self.kernel = self.add_weight((self.params_bt,),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
                                                                      self.bt_input_shape, self.bt_output_shape,
                                                                      self.core_ranks, self.block_ranks)
        ################################################################################################################
        # hidden -> hidden
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight((self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def preprocess_input(self, inputs, training=None):
        # input shape: `(batch, time (padded with zeros), input_dim)`
        # No time-distributed precomputation — the BT product happens per step.
        return inputs

    def step(self, inputs, states):
        """One recurrence step: h_t = act(BT(x_t) + h_{t-1} . U + b)."""
        # inputs shape: [batch, input_dim]
        if 0. < self.dropout < 1.:
            inputs = inputs * states[1]
        ################################################################################################################
        # NOTE: we now just substitute the `W_{xh}`
        # Dispatch on tensor order; BT_mulN contracts the input with the
        # block-term factors/cores instead of a dense matmul.
        if len(self.core_ranks) == 2:
            h = BT_mul2(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 3:
            h = BT_mul3(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 4:
            h = BT_mul4(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 5:
            h = BT_mul5(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        else:
            # NOTE(review): the assignment below is dead code — the raise on
            # the next line always fires first.
            h = None
            raise ValueError('error in len(core_ranks)')
        if self.bias is not None:
            h = K.bias_add(h, self.bias)
        ################################################################################################################
        prev_output = states[0]
        if 0. < self.recurrent_dropout < 1.:
            prev_output *= states[2]
        output = h + K.dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)

        # Properly set learning phase on output tensor.
        if 0. < self.dropout + self.recurrent_dropout:
            output._uses_learning_phase = True
        return output, [output]

    def get_constants(self, inputs, training=None):
        # this is totally same as the Keras API
        # Builds the (optional) dropout masks passed to step() via `states`.
        constants = []
        if self.implementation != 0 and 0. < self.dropout < 1.:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants

    def get_config(self):
        # NOTE(review): the BT-specific constructor arguments
        # (bt_input_shape, bt_output_shape, core_ranks, block_ranks) are not
        # serialised here, and 'units' is not an __init__ argument, so
        # from_config() cannot rebuild this layer — compare
        # BT_LSTM.get_config(), which does save them.
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(BT_RNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class BT_GRU(Recurrent):
    """GRU whose input-to-hidden weights (for all three gates) are stored in
    Block-Term tensor-decomposition form.

    The first mode of `bt_output_shape` is tripled in __init__ so a single
    BT product yields the stacked z/r/h pre-activations; the recurrent
    (hidden->hidden) kernel stays dense.
    """

    def __init__(self,
                 bt_input_shape, bt_output_shape, core_ranks, block_ranks,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 debug=False,
                 init_seed=11111986,
                 **kwargs):
        super(BT_GRU, self).__init__(**kwargs)
        self.bt_input_shape = np.array(bt_input_shape)
        self.bt_output_shape = np.array(bt_output_shape)
        self.core_ranks = np.array(core_ranks)
        self.block_ranks = int(block_ranks)
        self.debug = debug
        # `units` is the hidden size *before* the x3 gate expansion below.
        self.units = np.prod(self.bt_output_shape)
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_spec = InputSpec(shape=(None, self.units))
        self.states = None
        self.kernel = None
        self.recurrent_kernel = None
        self.cores = [None]
        self.factors = [None]
        self.bias = None
        # NOTE(review): self.debug was already assigned above.
        self.debug = debug
        self.init_seed = init_seed

        # store r, z, h
        # Triple the first output mode so one BT product produces all three
        # gate pre-activations at once.
        self.bt_output_shape[0] *= 3

        self.input_dim = np.prod(self.bt_input_shape)
        self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
        self.params_bt = self.block_ranks * \
                         (np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
                             self.core_ranks))
        self.batch_size = None
        # reported compress ratio in input->hidden weight
        self.compress_ratio = self.params_original / self.params_bt
        if self.debug:
            print('bt_input_shape  = ' + str(self.bt_input_shape))
            print('bt_output_shape = ' + str(self.bt_output_shape))
            print('core_ranks      = ' + str(self.core_ranks))
            print('block_ranks     = ' + str(self.block_ranks))
            print('compress_ratio  = ' + str(self.compress_ratio))
        # NOTE(review): compares ndim of 1-D arrays (always 1) — vacuous;
        # see the same check in BT_RNN.
        assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)

    def build(self, input_shape):
        """Create the BT input->hidden kernel (3 gates fused), the dense
        recurrent kernel and (optionally) the fused bias."""
        # input shape: `(batch, time (padded with zeros), input_dim)`
        # input_shape is a tuple
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        assert len(input_shape) == 3
        assert input_shape[2] == self.input_dim
        self.batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
        self.states = [None]
        if self.stateful:
            self.reset_states()
        ################################################################################################################
        # input -> hidden state
        self.kernel = self.add_weight((self.params_bt,),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
                                                                      self.bt_input_shape, self.bt_output_shape,
                                                                      self.core_ranks, self.block_ranks)
        ################################################################################################################
        # hidden -> hidden
        # store r, z, h
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 3),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            # np.prod(bt_output_shape) == 3 * units here, because the first
            # output mode was tripled in __init__.
            self.bias = self.add_weight((np.prod(self.bt_output_shape),),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def preprocess_input(self, x, training=None):
        # No time-distributed precomputation — the BT product happens per step.
        return x

    def get_constants(self, inputs, training=None):
        # this is totally same as the Keras API
        # NOTE(review): the first constant (the input dropout masks) is
        # always a list of ones here, so the `dropout` argument has no
        # effect in this implementation — confirm whether that is intended.
        constants = [[K.cast_to_floatx(1.) for _ in range(3)]]
        if 0. < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants

    def step(self, x, states):
        """One GRU step; the fused z/r/h input projection comes from the
        BT product instead of a dense matmul."""
        h_tm1 = states[0]  # previous memory
        dp_mask = states[1]  # dropout matrices for recurrent units
        rec_dp_mask = states[2]

        x1 = x * dp_mask[0]
        ################################################################################################################
        # NOTE: we now just substitute the `W_{xh}`
        if len(self.core_ranks) == 2:
            matrix_x = BT_mul2(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 3:
            matrix_x = BT_mul3(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 4:
            matrix_x = BT_mul4(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 5:
            matrix_x = BT_mul5(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        else:
            # NOTE(review): dead assignment — the raise always fires first.
            matrix_x = None
            raise ValueError('error in len(core_ranks)')

        # following is same as Keras API
        if self.use_bias:
            matrix_x = K.bias_add(matrix_x, self.bias)
        # Recurrent contribution for the z and r gates only; the candidate
        # h gate is computed separately below (it is gated by r).
        matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
                             self.recurrent_kernel[:, :2 * self.units])

        x_z = matrix_x[:, :self.units]
        x_r = matrix_x[:, self.units: 2 * self.units]
        recurrent_z = matrix_inner[:, :self.units]
        recurrent_r = matrix_inner[:, self.units: 2 * self.units]

        z = self.recurrent_activation(x_z + recurrent_z)
        r = self.recurrent_activation(x_r + recurrent_r)

        x_h = matrix_x[:, 2 * self.units:]
        recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
                            self.recurrent_kernel[:, 2 * self.units:])
        hh = self.activation(x_h + recurrent_h)
        h = z * h_tm1 + (1 - z) * hh
        if 0. < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True
        return h, [h]

    def get_config(self):
        # NOTE(review): BT-specific constructor arguments are not serialised
        # (see the same issue in BT_RNN.get_config), so from_config() cannot
        # rebuild this layer.
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(BT_GRU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class BT_LSTM(Recurrent):
    """LSTM whose input-to-hidden weights (for all four gates) are stored in
    Block-Term tensor-decomposition form.

    The first mode of `bt_output_shape` is quadrupled so a single BT product
    yields the stacked i/f/c/o pre-activations; the recurrent
    (hidden->hidden) kernel stays dense.
    """

    def __init__(self,
                 bt_input_shape, bt_output_shape, core_ranks, block_ranks,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 debug=False,
                 init_seed=11111986,
                 **kwargs):
        super(BT_LSTM, self).__init__(**kwargs)
        self.bt_input_shape = np.array(bt_input_shape)
        self.bt_output_shape = np.array(bt_output_shape)
        self.core_ranks = np.array(core_ranks)
        self.block_ranks = int(block_ranks)
        self.debug = debug
        self.units = np.prod(self.bt_output_shape)
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # LSTM carries two states: hidden h and cell c.
        self.state_spec = [InputSpec(shape=(None, self.units)),
                           InputSpec(shape=(None, self.units))]
        self.states = None
        self.kernel = None
        self.recurrent_kernel = None
        self.cores = [None]
        self.factors = [None]
        self.bias = None
        # NOTE(review): self.debug was already assigned above.
        self.debug = debug
        self.init_seed = init_seed

        # store i, f, c, o
        # NOTE(review): `go_backwards` (a Keras flag for reversed-time
        # processing) is being reused here to decide whether bt_output_shape
        # already includes the x4 gate expansion — this looks like a hack;
        # confirm the intended contract before changing either branch.
        if not self.go_backwards:
            self.bt_output_shape[0] *= 4
        else:
            self.units = int(self.units / 4)

        self.input_dim = np.prod(self.bt_input_shape)
        self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
        self.params_bt = self.block_ranks * \
                         (np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
                             self.core_ranks))
        self.batch_size = None
        # reported compress ratio in input->hidden weight
        self.compress_ratio = self.params_original / self.params_bt
        if self.debug:
            print('bt_input_shape  = ' + str(self.bt_input_shape))
            print('bt_output_shape = ' + str(self.bt_output_shape))
            print('core_ranks      = ' + str(self.core_ranks))
            print('block_ranks     = ' + str(self.block_ranks))
            print('compress_ratio  = ' + str(self.compress_ratio))
        # NOTE(review): compares ndim of 1-D arrays (always 1) — vacuous;
        # see the same check in BT_RNN.
        assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)

    def build(self, input_shape):
        """Create the BT input->hidden kernel (4 gates fused), the dense
        recurrent kernel and the (optionally forget-biased) bias."""
        # NOTE(review): unconditional debug print — consider guarding with
        # self.debug.
        print('BT-LSTM input shape = ' + str(input_shape))
        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        self.batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
        self.states = [None, None]
        if self.stateful:
            self.reset_states()

        ################################################################################################################
        # input -> hidden state
        self.kernel = self.add_weight((self.params_bt,),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
                                                                      self.bt_input_shape, self.bt_output_shape,
                                                                      self.core_ranks, self.block_ranks)
        ################################################################################################################
        # hidden -> hidden
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.use_bias:
            if self.unit_forget_bias:
                # Standard LSTM trick: initialise the forget-gate bias to 1
                # so the cell remembers by default early in training.
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def preprocess_input(self, x, training=None):
        # No time-distributed precomputation — the BT product happens per step.
        return x

    def get_constants(self, inputs, training=None):
        # this is totally same as the Keras API
        # Builds the per-gate input and recurrent dropout masks for step().
        constants = []
        if self.implementation != 0 and 0. < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0. < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants

    def step(self, inputs, states):
        """One LSTM step; the fused i/f/c/o input projection comes from the
        BT product instead of a dense matmul."""
        h_tm1 = states[0]
        c_tm1 = states[1]
        dp_mask = states[2]
        rec_dp_mask = states[3]

        if len(self.core_ranks) == 2:
            z = BT_mul2(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 3:
            z = BT_mul3(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 4:
            z = BT_mul4(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 5:
            z = BT_mul5(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        else:
            raise ValueError('error in len(core_ranks)')

        z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
        if self.use_bias:
            z = K.bias_add(z, self.bias)

        # Slice the fused projection into the four gate pre-activations.
        z0 = z[:, :self.units]
        z1 = z[:, self.units: 2 * self.units]
        z2 = z[:, 2 * self.units: 3 * self.units]
        z3 = z[:, 3 * self.units:]

        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.recurrent_activation(z3)

        h = o * self.activation(c)
        if 0. < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True
        return h, [h, c]

    def get_config(self):
        # Unlike BT_RNN/BT_GRU, this config does include the BT arguments.
        # NOTE(review): the values are numpy arrays, which are not
        # JSON-serialisable — model.to_json() may fail; confirm and convert
        # to plain lists if round-tripping is needed.
        config = {'bt_input_shape': self.bt_input_shape,
                  'bt_output_shape': self.bt_output_shape,
                  'core_ranks': self.core_ranks,
                  'block_ranks': self.block_ranks,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(BT_LSTM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| [
"chendi1995@sohu.com"
] | chendi1995@sohu.com |
f07fe830ae79276ded6e7b048e9d60d425affc20 | dff51e4a3bbcc464c0069a16f1394d36c31e2372 | /omaha_server/omaha/migrations/0021_auto_20150917_1028.py | aafbc492fed9e17886ba9b145e2bda1949d378ef | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | tuladhar/omaha-server | 3264de057221322038c7af704ea268c9e267d7da | 6cfd86e4319e03af0eb319fae6c867691ffc2c36 | refs/heads/master | 2022-11-21T19:38:50.335963 | 2020-06-09T14:14:03 | 2020-06-09T14:14:03 | 281,736,223 | 1 | 0 | NOASSERTION | 2020-07-22T17:02:48 | 2020-07-22T17:02:47 | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import omaha.models
class Migration(migrations.Migration):
    """Make Version.file nullable while keeping the custom upload path
    (omaha.models._version_upload_to)."""

    dependencies = [
        ('omaha', '0020_auto_20150710_0913'),
    ]

    operations = [
        migrations.AlterField(
            model_name='version',
            name='file',
            field=models.FileField(null=True, upload_to=omaha.models._version_upload_to),
        ),
    ]
| [
"amekin@crystalnix.com"
] | amekin@crystalnix.com |
d34f105c69e1b5bc0a5ad34388dba471f066c4b5 | 4734fd79ebc8c10b6bec3d2e1995bc2534799f2e | /school_attendance/models/__init__.py | 31613f97af3bd621fb997d7d91cff7ec16320881 | [] | no_license | tringuyenhashmicro/Boston | 46da227957c996e674b9d56097f7967a77cfb274 | 8697a373da479e4f5b25681c0d551affdc83194a | refs/heads/master | 2021-04-12T09:57:42.480082 | 2017-09-13T03:19:21 | 2017-09-13T03:19:21 | 94,525,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | # -*- coding: utf-8 -*-
import attendance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"huutringuen88@gmail.com"
] | huutringuen88@gmail.com |
77396e5d3c927e5543de2e57216c4e90e4e2a686 | 6c47debab1dda6ca5c10ff450785152d5a5551c0 | /CSV_Data_Process.py | 3425804ad7df230217bcea2e7f138572e61ea1ee | [] | no_license | bishalkunwar/Data_Science_Python | 339402d11ba7b0e1056efc2c244ac33972c9a61a | 1c2ab694f76c8ec56f985bf8309901a93234f136 | refs/heads/main | 2023-03-29T23:15:38.451774 | 2021-04-05T18:48:16 | 2021-04-05T18:48:16 | 354,936,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import pandas as pd
data = pd.read_csv('try_data.csv')
#for json data pd.read_json and extension of data format is .json
# for excel data pd.read_excel and the extension of data format is .xlsx
print(data)
print()
print (data.loc[[1,2,5],['name','salary']])
#Outputs Serially
''' name department salary remarks
0 Bishal It 10000 G:N
1 Ram Manu 12000 G:G
2 Shyam Serv 13000 G:G
3 Hari food 14000 G:G
4 Gita Pantry 15000 B:G
5 Sita no 16000 B:B
name salary
1 Ram 12000
2 Shyam 13000
5 Sita 16000''' | [
"noreply@github.com"
] | bishalkunwar.noreply@github.com |
34d40f192d76b051fd22025ecf2e80bfce435750 | 68b76ff5dea447653d0988eb551ca2ee0e1bcd58 | /omniROS_ws/src/wcomputation/src/wcomputation_node.py | d3f5c73f860a56a8ea8594e62698e3ad432e8dfd | [
"MIT"
] | permissive | RobinBaruffa/omniROS | b671fce445824f4295daa58135c02376f37ecc88 | b7393f1b0f0dea447b6daa509169acf3fd56260b | refs/heads/master | 2021-06-19T01:17:54.138655 | 2021-02-09T13:38:39 | 2021-02-09T13:38:39 | 177,563,483 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | #!/usr/bin/env python
import rospy
from math import *
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Point
from geometry_msgs.msg import Twist
def compute(msg):
    """ROS callback: convert a body-frame velocity command (Twist) into the
    three wheel angular velocities of a 3-wheel omnidirectional base and
    publish them on /omniROS/vel_w.

    Inverse kinematics for wheels spaced 120 degrees apart; the commented
    block below is an alternative axis convention kept for reference.
    """
    result = Vector3()
    r= 0.029   # wheel radius [m] — presumably; confirm against hardware
    R=0.125    # robot centre-to-wheel distance [m] — presumably; confirm
    sinpi3 = 0.86602  # sin(pi/3), precomputed
    result.x = (-msg.linear.x + R * msg.angular.z) / r;
    result.y = (-sinpi3 * msg.linear.y + 0.5 * (msg.linear.x) + R *msg.angular.z) / r;
    result.z = (sinpi3 * msg.linear.y + 0.5 * (msg.linear.x) + R * msg.angular.z) / r;
    '''
    result.x = (msg.linear.y +R *msg.angular.z) / r;
    result.y = (-sinpi3 * msg.linear.x - 0.5 * msg.linear.y + R * msg.angular.z) / r;
    result.z = (sinpi3 * msg.linear.x - 0.5 * msg.linear.y + R * msg.angular.z) / r;
    '''
    pub.publish(result)
    rospy.loginfo(result)
    # Return value is ignored by rospy subscriber callbacks.
    return 0
def main():
    """Initialise the node, wire the cmd_vel subscriber to the wheel-speed
    publisher, and spin until shutdown."""
    rospy.init_node('wcomputation_node')
    # pub is a module-level global so the compute() callback can publish.
    global pub
    pub= rospy.Publisher('/omniROS/vel_w', Vector3, queue_size=10)
    rospy.Subscriber("/omniROS/cmd_vel",Twist ,compute)
    rospy.spin()
# Entry point; swallow the interrupt raised by rospy on shutdown.
if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| [
"robin.baruffa@gmail.com"
] | robin.baruffa@gmail.com |
f1b53459b83dcd24505c18cb42c5cb2963030d4d | e8532a8d86d98eeb32ab15b0410ec15d904ea39f | /profiles/settings.py | 3f32d625bb4d03babd929836ab0343e429b02cb0 | [
"MIT"
] | permissive | yans0o0o0/profiles-api | 87ae4261865a3de25a85f95cd7d3d69cba850912 | 07a11c1c37a87e22b9684d43c7f10e40626848f4 | refs/heads/master | 2022-12-19T03:24:03.714899 | 2020-09-28T21:52:27 | 2020-09-28T21:52:27 | 298,104,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | """
Django settings for profiles project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'i15az6n0b%2(@&klir&qy@upz--3h%qx_#_80js(pdfwijn)@_'

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG defaults to on ("1") when the env var is unset — safe default for
# dev, but must be set to 0 in production.
DEBUG = bool(int(os.environ.get('DEBUG',1)))

ALLOWED_HOSTS = ['ec2-3-19-229-167.us-east-2.compute.amazonaws.com','127.0.0.1']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third parts apps
    'rest_framework',
    'rest_framework.authtoken',
    # Our Apps
    'profile_api',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'profiles.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'profiles.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

# SQLite file next to the project — fine for dev, not for multi-process prod.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = 'static/'

# By default Django uses "User" as the User Model
# We gonna modify this to put as the user model our UserProfile
AUTH_USER_MODEL='profile_api.UserProfile'
| [
"yansiverio@gmail.com"
] | yansiverio@gmail.com |
9b53ae79a71c0ca8f4401dd807a2369dca415e84 | 90736089f21562da1fb189aa80e6ba1012682aa5 | /gs-lab-manual/game-asteroids/asteroids.py | f0c3a8dd4a7eda803c89623d67a4ac3a484ebd4c | [] | no_license | CapaFenLisesi/Physics-For-Video-Games | e9e1a1d924f867e9bee05fae0d4557fc2abe97ad | 8ca7dda24977407055eba7d29d9da5970432ff77 | refs/heads/master | 2021-01-22T15:16:13.635568 | 2016-06-11T02:24:52 | 2016-06-11T02:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,309 | py | from visual import *
import random
#converts an angle in degrees to an angle in radians
def rad(degrees):
    """Convert an angle from degrees to radians."""
    return degrees * pi / 180
#pause and wait for mouse or keyboard event, then continue
def pause():
    """Block until the user left-clicks or presses any key.

    Non-left mouse events are consumed and ignored, so the wait continues.
    """
    while True:
        rate(50)
        if scene.mouse.events:
            if scene.mouse.getevent().click == 'left':
                return
        elif scene.kb.keys:
            scene.kb.getkey()   # consume the key event
            return
#checks for a collision between two spheres
def collisionSpheres(sphere1, sphere2):
    """Return True when the two spheres overlap (centers closer than the
    sum of their radii), False otherwise."""
    separation = mag(sphere1.pos - sphere2.pos)
    return separation < (sphere1.radius + sphere2.radius)
#checks for a collision between a cone and a sphere
def collisionConeSphere(c, s):
    """Approximate cone/sphere collision test for cone c against sphere s.

    The cone is sampled at four points — its base center, its tip, and the
    two edges of its base circle in the x-y plane — and a hit is reported
    when any of them lies inside the sphere.

    Bug fixed: the original reset ``result = False`` after testing the base
    center, so a collision detected at ``c.pos`` was always discarded.
    """
    # base center of the cone
    if collisionSphereAndPoint(s, c.pos):
        return True
    # tip of the cone
    tip = c.pos + c.axis
    if collisionSphereAndPoint(s, tip):
        return True
    # edge of the base circle in the x-y plane, side 1
    r1 = c.radius * cross(vector(0, 0, 1), norm(c.axis))
    if collisionSphereAndPoint(s, r1 + c.pos):
        return True
    # edge of the base circle in the x-y plane, side 2
    r2 = -c.radius * cross(vector(0, 0, 1), norm(c.axis))
    return collisionSphereAndPoint(s, r2 + c.pos)
#determines whether a point is within a sphere or not
#returns boolean
def collisionSphereAndPoint(sphereObj, targetVector):
    """Return True when targetVector lies strictly inside sphereObj."""
    return mag(sphereObj.pos - targetVector) < sphereObj.radius
#creates four asteroids, one on each side of the scene
def createAsteroids():
    """Spawn four asteroids, one at each edge of the scene, each moving
    back toward the play area, and append them to the global asteroidList.

    The original repeated the same seven-line spawn sequence four times;
    it is factored into a local helper here.
    """
    def spawn(x, y, vx, vy):
        # one asteroid with unit mass/radius at (x, y) moving at (vx, vy)
        a = sphere(pos=vector(x, y, 0), radius=1, color=color.cyan)
        a.m = 1
        a.v = vector(vx, vy, 0)
        asteroidList.append(a)

    def speed():
        return random.randint(1, 5)

    def sign():
        return random.choice((1, -1))

    def edge_coord():
        return random.randrange(-20, 20, 5)

    # right edge, moving left
    spawn(20, edge_coord(), -speed(), sign() * speed())
    # left edge, moving right
    spawn(-20, edge_coord(), speed(), sign() * speed())
    # top edge, moving down
    spawn(edge_coord(), 20, sign() * speed(), -speed())
    # bottom edge, moving up
    spawn(edge_coord(), -20, sign() * speed(), speed())
def createFragments(asteroid):
    """Split a destroyed asteroid into two half-mass fragments.

    The first fragment gets a random velocity; the second gets
    2*asteroid.v - v1 so that total momentum (two masses of 0.5 vs one
    mass of 1) is conserved. Both are appended to the global fragmentList.
    """
    v1 = vector(random.choice((1, -1)) * random.randint(1, 5),
                random.choice((1, -1)) * random.randint(1, 5),
                0)
    v2 = 2 * asteroid.v - v1
    for velocity in (v1, v2):
        frag = sphere(pos=asteroid.pos, radius=0.5, color=color.magenta)
        frag.m = 0.5
        frag.v = velocity
        fragmentList.append(frag)
#scene size
scene.range=20
scene.width=700
scene.height=700
#create the spaceship as a cone
spaceship = cone(pos=(0,0,0), axis=(2,0,0), radius=1, color=color.white)
# exhaust flame: a smaller cone pointing backwards out of the ship
fire = cone(pos=(0,0,0), axis=-spaceship.axis/2, radius=spaceship.radius/2, color=color.orange)
#initial values for mass, velocity, thrust, and net force
spaceship.m=1
spaceship.v=vector(0,0,0)
thrust=0
Fnet=vector(0,0,0)
#bullets
bulletspeed=10  # bullet speed relative to the ship at firing time
bulletsList=[]  # all bullets ever fired; destroyed ones keep visible=0
#angle to rotate
dtheta=rad(10)  # rotation step per key press, in radians
#clock
t=0
dt=0.005  # integration time step for the Euler updates below
#asteroids
Nleft=0 #counter for number of asteroids left in the scene
asteroidList=[]
createAsteroids()
#fragments
fragmentList=[]
# ---------------------------------------------------------------------------
# Main game loop: runs until the spaceship is destroyed (visible set to 0).
# Each pass: handle one key event, Euler-integrate the ship, move bullets,
# asteroids and fragments, resolve collisions, wrap everything at the screen
# edges, and respawn asteroids when the field is cleared.
# Bug fixed: bullet collision checks now also require the target asteroid/
# fragment to still be visible; previously two bullets in one frame could
# both "destroy" the same asteroid and spawn a duplicate fragment pair.
# NOTE(review): hidden bullets/asteroids/fragments are never removed from
# their lists, so the lists grow for the life of the program.
# ---------------------------------------------------------------------------
while spaceship.visible==1:
    rate(200)   # cap the loop at 200 iterations per second
    if scene.kb.keys:
        k = scene.kb.getkey()
        if k == "up": #turn thruster on
            thrust=6
        elif k=="left": #rotate left
            spaceship.rotate(angle=-dtheta, axis=(0,0,-1));
        elif k=="right": #rotate right
            spaceship.rotate(angle=dtheta, axis=(0,0,-1));
        elif k==" ": #fire a bullet
            # bullet starts at the ship's nose and inherits the ship's velocity
            bullet=sphere(pos=spaceship.pos+spaceship.axis, radius=0.1, color=color.yellow)
            bullet.v=bulletspeed*norm(spaceship.axis)+spaceship.v
            bulletsList.append(bullet)
        elif k=="q": #pause the game
            pause()
        else: #any other key turns the thruster off
            thrust=0
    # Euler step for the ship: thrust acts along the ship's axis
    Fnet=thrust*norm(spaceship.axis)
    spaceship.v=spaceship.v+Fnet/spaceship.m*dt
    spaceship.pos=spaceship.pos+spaceship.v*dt
    # keep the exhaust flame glued to the back of the ship
    fire.pos=spaceship.pos
    fire.axis=-spaceship.axis/2
    #check if the spaceship goes off screen and wrap
    # (undo the last move, then mirror the offending coordinate)
    if spaceship.pos.x>20 or spaceship.pos.x<-20:
        spaceship.pos=spaceship.pos-spaceship.v*dt
        spaceship.pos.x=-spaceship.pos.x
    if spaceship.pos.y>20 or spaceship.pos.y<-20:
        spaceship.pos=spaceship.pos-spaceship.v*dt
        spaceship.pos.y=-spaceship.pos.y
    #update positions of bullets and check if bullets go off screen
    for thisbullet in bulletsList:
        if thisbullet.pos.x>20 or thisbullet.pos.x<-20:
            thisbullet.visible=0
        if thisbullet.pos.y>20 or thisbullet.pos.y<-20:
            thisbullet.visible=0
        if thisbullet.visible != 0:
            thisbullet.pos=thisbullet.pos+thisbullet.v*dt
    #update positions of asteroids
    for thisasteroid in asteroidList:
        if thisasteroid.visible==1:
            thisasteroid.pos=thisasteroid.pos+thisasteroid.v*dt
            #check for collision with spaceship
            if(collisionConeSphere(spaceship,thisasteroid)):
                spaceship.visible=0
                fire.visible=0
            #wrap at edge of screen
            if thisasteroid.pos.x>20 or thisasteroid.pos.x<-20:
                thisasteroid.pos=thisasteroid.pos-thisasteroid.v*dt
                thisasteroid.pos.x=-thisasteroid.pos.x
            if thisasteroid.pos.y>20 or thisasteroid.pos.y<-20:
                thisasteroid.pos=thisasteroid.pos-thisasteroid.v*dt
                thisasteroid.pos.y=-thisasteroid.pos.y
            #check for collision with bullets
            for thisbullet in bulletsList:
                # require the asteroid to still be visible so a second
                # bullet cannot destroy it again and duplicate fragments
                if(collisionSpheres(thisbullet,thisasteroid) and thisbullet.visible==1 and thisasteroid.visible==1):
                    thisasteroid.visible=0
                    thisbullet.visible=0
                    createFragments(thisasteroid)
    #update positions of fragments
    for thisfragment in fragmentList:
        if thisfragment.visible==1:
            thisfragment.pos=thisfragment.pos+thisfragment.v*dt
            #check for collision with spaceship
            if(collisionConeSphere(spaceship,thisfragment)):
                spaceship.visible=0
                fire.visible=0
            #wrap at edge of screen
            if thisfragment.pos.x>20 or thisfragment.pos.x<-20:
                thisfragment.pos=thisfragment.pos-thisfragment.v*dt
                thisfragment.pos.x=-thisfragment.pos.x
            if thisfragment.pos.y>20 or thisfragment.pos.y<-20:
                thisfragment.pos=thisfragment.pos-thisfragment.v*dt
                thisfragment.pos.y=-thisfragment.pos.y
            #check for collision with bullets
            for thisbullet in bulletsList:
                # same visibility guard as for asteroids above
                if(collisionSpheres(thisbullet,thisfragment) and thisbullet.visible==1 and thisfragment.visible==1):
                    thisfragment.visible=0
                    thisbullet.visible=0
    Nleft=0 #have to reset this before counting asteroids and fragments
    for thisasteroid in asteroidList:
        if thisasteroid.visible:
            Nleft=Nleft+1
    for thisfragment in fragmentList:
        if thisfragment.visible:
            Nleft=Nleft+1
    #create more asteroids if all are gone
    if Nleft==0:
        createAsteroids()
    #update fire: only shown while thrusting and while the ship is alive
    if thrust==0 or spaceship.visible==0:
        fire.visible=0
    else:
        fire.visible=1
    t=t+dt
| [
"atitus@highpoint.edu"
] | atitus@highpoint.edu |
fc01c6fb812fe78ca04496494d68fcc90ae706f5 | 9701d7b29f9b6961331af18ad7fe7ea6f9ee51bd | /shufflelists.py | 5d516ca06ce8628a2cd5e34a81de21bee844f6fd | [] | no_license | asouxuning/trans | 25fd784c67728c130b188ef74255676828b3fc5a | 1ea05f785b5c8d54411ca4350abbec37015fb387 | refs/heads/master | 2021-05-04T17:50:12.467873 | 2018-02-09T15:31:00 | 2018-02-09T15:31:00 | 120,280,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import numpy as np
def shufflelists(lists):
li = np.random.permutation(len(lists[0])
lo = []
for i in range(len(li)):
| [
"asouxuning@163.com"
] | asouxuning@163.com |
472e796961fcde83ad76ffe05488280d21018b71 | a6706f3fcfc8904d4b05d2588ed1fd62112a7c1d | /UMS_Project/wsgi.py | ac72404d2a26d892ea7b7ae713c7f10caf1e0449 | [] | no_license | sravanmandava/Assignment | c1652013e44b6bd8132a4de71dbef1870e34df99 | bd9222ad2691afe31e3905c3f71b7872260ab74f | refs/heads/master | 2023-07-15T01:26:34.773949 | 2021-08-17T06:34:22 | 2021-08-17T06:34:22 | 396,706,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for UMS_Project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'UMS_Project.settings')
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
| [
"sravanmandava8@gmail.com"
] | sravanmandava8@gmail.com |
bf9a630f0c0d863fa65a6a23ed870c0df9bd8388 | de4245b9d88711e39bdc2676a2583e91aec94fde | /post/migrations/0026_auto_20210226_2135.py | 341013c69ffbf4cef5474b4c126894ce6a99198c | [] | no_license | GopinaathV/Instagram_clone | 6bc779d312bf689dd64c3da3f81c4c078128d04e | d06f97f1e38bd08c11baec87c3ef9edb594d5f9b | refs/heads/main | 2023-06-10T11:24:05.465927 | 2021-06-30T11:36:28 | 2021-06-30T11:36:28 | 381,617,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 2.2 on 2021-02-26 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Notification.sender_pic to an
    ImageField that allows NULL and uploads to the media root
    (upload_to='')."""
    dependencies = [
        ('post', '0025_auto_20210226_2130'),
    ]
    operations = [
        migrations.AlterField(
            model_name='notification',
            name='sender_pic',
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
| [
"gopinaath16@gamil.com"
] | gopinaath16@gamil.com |
591136ecd2269e61855177299ef373f586107e09 | 19fc974a62cc2c7863e2dff0ff6e784c961cd2ef | /gerritssh/internal/__init__.py | 022f407c155e9ba9b1add831b227ccec053d98fe | [] | no_license | hieulq/Auto-Make-Deliver-Report | 9b6e0698b8ab55894c76536a18a71aeb86422453 | 57a5e7de8d4bad47bf6514202516ee88ee78af88 | refs/heads/master | 2021-01-09T06:58:41.500772 | 2018-02-27T09:38:57 | 2018-02-27T09:38:57 | 56,847,220 | 1 | 4 | null | 2018-02-27T09:38:58 | 2016-04-22T10:21:33 | Python | UTF-8 | Python | false | false | 34 | py | from .cmdoptions import * # noqa
| [
"hieulq@vn.fujitsu.com"
] | hieulq@vn.fujitsu.com |
9394abdf60ce16f65a8a9c354b6dc08b0a490f42 | 3dd0172a975a7048edb6e0e3ea13961f94ead149 | /src/mscgfm_check.py | 083f31012f9e7755dbb953eed40c7fb1e93240f4 | [] | no_license | uchicago-voth/mscg_regression_tests | 9656c26f7299eead0a926d7981ec6b0b82df21fc | 825266138b6c591886f6c345c8bf3aeba1fab46d | refs/heads/master | 2021-06-03T18:25:42.360348 | 2020-06-30T22:58:05 | 2020-06-30T22:58:05 | 97,156,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | import numpy as np
def _component_matches(ref, test, prefix, plural, label, abs_tol):
    """Compare one trajectory component (coordinates or forces) of two
    data sets and print residual diagnostics when they differ.

    Parameters:
        ref, test -- numpy arrays of matching shape to compare
        prefix    -- string prepended to every printed line
        plural    -- capitalized plural for the header line (e.g. "Forces")
        label     -- lowercase singular used in the residual lines
        abs_tol   -- tolerance; failure when the mean relative residual
                     exceeds it

    Returns True when the arrays match bit for bit or are within tolerance.
    """
    if np.array_equal(ref, test):
        return True
    print(prefix+"Warning: {} don't match bit for bit.".format(plural))
    diff_mat = ref - test
    sqdiff_mat = diff_mat**2
    # element-wise smaller magnitude: denominator of the relative residual
    min_mat = np.minimum(abs(ref), abs(test))
    rms_residual = sqdiff_mat.mean()**0.5
    max_abs_residual = sqdiff_mat.max()**0.5
    print(prefix+"Warning: RMS {} residual: {}".format(label, rms_residual))
    print(prefix+"Warning: Max {} residual: {}".format(label, max_abs_residual))
    rel_residual_mat = sqdiff_mat**0.5/min_mat
    mean_rel_residual = rel_residual_mat.mean()
    max_rel_residual = rel_residual_mat.max()
    print(prefix+"Warning: Mean relative {} residual: {}".format(label, mean_rel_residual))
    print(prefix+"Warning: Max relative {} residual: {}".format(label, max_rel_residual))
    print(prefix+"First sqdiff {} frame: {}".format(label, diff_mat))
    # Use the magnitude of the difference; the original compared the signed
    # difference, so negative deviations were never flagged here.
    violations = np.nonzero(abs(diff_mat) > abs_tol)
    print(prefix+"Indices violating residual ({}): {}".format(abs_tol, violations))
    # Pass/fail is decided by the mean *relative* residual.
    return mean_rel_residual <= abs_tol

def mscg_content_equality(dat_1, dat_2, prefix="Data File equality: ", xyz_abs_tol=1e-8):
    """Check coordinate (index 0) and force (index 1) arrays of two data
    sets for equality within xyz_abs_tol, printing diagnostics for any
    component that is not bit-for-bit identical.

    The original duplicated the whole comparison for each component (and
    mislabeled the force diff frame as "coordinate"); both components now
    go through one helper.

    Returns True when both components pass, False otherwise.
    """
    coords_ok = _component_matches(dat_1[0], dat_2[0], prefix,
                                   "Coordinates", "coordinate", xyz_abs_tol)
    forces_ok = _component_matches(dat_1[1], dat_2[1], prefix,
                                   "Forces", "force", xyz_abs_tol)
    return coords_ok and forces_ok
def check_result_to_exitval(result):
    '''Transforms boolean to command line exit value.
    True -> 0, False -> 1. No guard logic.
    '''
    return 0 if result else 1
| [
"mocohen@uchicago.edu"
] | mocohen@uchicago.edu |
8926cbe8d1538cbbd04bf86bf0af6e92ec04783c | adb295bf248ded84d2c126d73c58b570af440dc6 | /scripts/providers.py | 13d8d431cf8b25bd62662d5e17425d61e6862069 | [] | no_license | sshveta/cfme_tests | eaeaf0076e87dd6c2c960887b242cb435cab5151 | 51bb86fda7d897e90444a6a0380a5aa2c61be6ff | refs/heads/master | 2021-03-30T22:30:12.476326 | 2017-04-26T22:47:25 | 2017-04-26T22:47:25 | 17,754,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | #!/usr/bin/env python
"""
Given the name of a provider from cfme_data and using credentials from
the credentials stash, call the corresponding action on that provider, along
with any additional action arguments.
See cfme_pages/common/mgmt_system.py for documentation on the callable methods
themselves.
Example usage:
scripts/providers.py providername stop_vm vm-name
Note that attempts to be clever will likely be successful, but fruitless.
For example, this will work but not do anyhting helpful:
scripts/providers.py providername __init__ username password
"""
import argparse
import os
import sys
# Make sure the parent dir is on the path before importing provider_factory
# (inserted at position 0 so the repo-local utils package wins over any
# installed package of the same name)
cfme_tests_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, cfme_tests_path)
from utils.providers import provider_factory
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('provider_name',
help='provider name in cfme_data')
parser.add_argument('action',
help='action to take (list_vm, stop_vm, delete_vm, etc.)')
parser.add_argument('action_args', nargs='*',
help='foo')
args = parser.parse_args()
try:
result = call_provider(args.provider_name, args.action, *args.action_args)
if isinstance(result, list):
exit = 0
for entry in result:
print entry
elif isinstance(result, str):
exit = 0
print result
elif isinstance(result, bool):
# 'True' result becomes flipped exit 0, and vice versa for False
exit = int(not result)
else:
# Unknown type, explode
raise Exception('Unknown return type for "%s"' % args.action)
except Exception as e:
exit = 1
exc_type = type(e).__name__
if e.message:
sys.stderr.write('%s: %s\n' % (exc_type, e.message))
else:
sys.stderr.write('%s\n' % exc_type)
return exit
def call_provider(provider_name, action, *args):
    """Instantiate the named provider and call its `action` method with
    *args. Raises Exception when the provider has no such method.
    This could possibly be generalized for other CLI tools.
    """
    provider = provider_factory(provider_name)
    if not hasattr(provider, action):
        raise Exception('Action "%s" not found' % action)
    return getattr(provider, action)(*args)
# Script entry point: process exit status comes from main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
| [
"sean.myers@redhat.com"
] | sean.myers@redhat.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.