| max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
|---|---|---|---|---|---|---|
alchemy.py
|
temple-geography/gis-application-development
| 3
|
12784551
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 22:12:27 2021
@author: tum98420
"""
#%%
###Connect to the database###
from sqlalchemy import create_engine
db = 'connection_string'
engine = create_engine(db)
#print table names
print(engine.table_names())
#establish connection to access and manipulate database
conn = engine.connect()
#%%
###Table Reflection###
from sqlalchemy import MetaData, Table
#Initialize metadata
metadata=MetaData()
#Reflect Table
customer = Table('customer', metadata, autoload=True, autoload_with=engine)
print(repr(customer))
print(customer.columns.keys())
#%%
### Select ###
from sqlalchemy import select
#build a statement to query all records in the customer table using the select() function
stmt = select([customer])
#execute the selection and store in the result proxy
results = conn.execute(stmt).fetchmany(size=10)
print(results)
stmt = select([customer])
results = conn.execute(stmt).fetchall()
first_row = results[0]
first_row['first_name']
#%%
### Filtering and Targeting ###
from sqlalchemy import and_
film = Table('film', metadata, autoload=True, autoload_with=engine)
print(film.columns.keys())
stmt = select([film])
stmt = stmt.where(film.columns.rating=='G')
results = conn.execute(stmt).fetchmany(size=10)
for result in results:
    print(result.title, result.release_year)
stmt = select([film])
stmt = stmt.where(
and_(film.columns.release_year == 2006,
film.columns.rating != "NC-17"))
results = conn.execute(stmt).fetchall()
for result in results:
    print(result.title)
#%%
### Grouping, Ordering, and Built-in Functions ###
from sqlalchemy import desc, Float, case, cast, func
stmt = select([film.columns.title, film.columns.length, film.columns.rating])
stmt = stmt.order_by(film.columns.rating, desc(film.columns.length), film.columns.title)
results = conn.execute(stmt).fetchall()
print(results)
#Calculate the average length of film for each rating
stmt = select([film.columns.rating, func.avg(film.columns.length)])
stmt = stmt.group_by(film.columns.rating)
results = conn.execute(stmt).fetchall()
print(results)
#What percent of the entire rental duration is R-rated films
r_length = func.sum(
    case([(film.columns.rating == 'R', film.columns.rental_duration)],
         else_=0))
total_dur = cast(func.sum(film.columns.rental_duration),Float)
stmt = select([r_length/total_dur*100])
percent_r = conn.execute(stmt).scalar() # Use .scalar() for getting just the value of a query that returns only one row and column.
print(percent_r)
#%%
### Display with Pandas ###
import pandas as pd
stmt = select([film.columns.rating, func.avg(film.columns.length).label("average length")])
stmt = stmt.group_by(film.columns.rating)
results = conn.execute(stmt).fetchall()
df = pd.DataFrame(results)
df.columns = results[0].keys()
print(df)
#%%
### Practice ###
#1. Calculate the average amount per customer_id in the
# payment table of the database.
#2. Display the result as a dataframe showing only the
# customer_id and average. Label the average column
# as 'average amount'.
#3. Fetch 10 records.
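#%%
### Practice - one possible solution (sketch) ###
# Illustrative answer only: it assumes the database also has a 'payment' table
# with 'customer_id' and 'amount' columns, as implied by the exercise, and
# reuses the engine/conn/metadata objects created above.
payment = Table('payment', metadata, autoload=True, autoload_with=engine)
stmt = select([payment.columns.customer_id,
               func.avg(payment.columns.amount).label('average amount')])
stmt = stmt.group_by(payment.columns.customer_id)
results = conn.execute(stmt).fetchmany(size=10)
df = pd.DataFrame(results)
df.columns = results[0].keys()
print(df)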
| 3.171875
| 3
|
fastfunc/divide.py
|
nschloe/fastfunc
| 10
|
12784552
|
<gh_stars>1-10
from _fastfunc import _divide_at
from .helpers import _operator_at
def at(a, k, vals):
    _operator_at(_divide_at, a, k, vals)
    return
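# Example usage (sketch): this module appears to wrap a compiled divide-at kernel,
# so `at(a, k, vals)` is assumed to mirror numpy.divide.at(a, k, vals), i.e. an
# unbuffered in-place a[k] /= vals.
if __name__ == "__main__":
    import numpy
    a = numpy.ones(5)
    at(a, [0, 0, 2], 2.0)  # a[0] is divided twice, a[2] once -> [0.25, 1, 0.5, 1, 1]
    print(a)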
| 1.460938
| 1
|
sdk/python/pulumi_vault/aws/secret_backend_role.py
|
pulumi/pulumi-vault
| 10
|
12784553
|
<gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecretBackendRoleArgs', 'SecretBackendRole']
@pulumi.input_type
class SecretBackendRoleArgs:
def __init__(__self__, *,
backend: pulumi.Input[str],
credential_type: pulumi.Input[str],
default_sts_ttl: Optional[pulumi.Input[int]] = None,
iam_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_sts_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_document: Optional[pulumi.Input[str]] = None,
role_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SecretBackendRole resource.
:param pulumi.Input[str] backend: The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
:param pulumi.Input[str] credential_type: Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
:param pulumi.Input[int] default_sts_ttl: The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] iam_groups: A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
:param pulumi.Input[int] max_sts_ttl: The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
:param pulumi.Input[str] name: The name to identify this role within the backend.
Must be unique within the backend.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policy_arns: Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
:param pulumi.Input[str] policy_document: The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_arns: Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
pulumi.set(__self__, "backend", backend)
pulumi.set(__self__, "credential_type", credential_type)
if default_sts_ttl is not None:
pulumi.set(__self__, "default_sts_ttl", default_sts_ttl)
if iam_groups is not None:
pulumi.set(__self__, "iam_groups", iam_groups)
if max_sts_ttl is not None:
pulumi.set(__self__, "max_sts_ttl", max_sts_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if policy_arns is not None:
pulumi.set(__self__, "policy_arns", policy_arns)
if policy_document is not None:
pulumi.set(__self__, "policy_document", policy_document)
if role_arns is not None:
pulumi.set(__self__, "role_arns", role_arns)
@property
@pulumi.getter
def backend(self) -> pulumi.Input[str]:
"""
The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: pulumi.Input[str]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="credentialType")
def credential_type(self) -> pulumi.Input[str]:
"""
Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
"""
return pulumi.get(self, "credential_type")
@credential_type.setter
def credential_type(self, value: pulumi.Input[str]):
pulumi.set(self, "credential_type", value)
@property
@pulumi.getter(name="defaultStsTtl")
def default_sts_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
"""
return pulumi.get(self, "default_sts_ttl")
@default_sts_ttl.setter
def default_sts_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_sts_ttl", value)
@property
@pulumi.getter(name="iamGroups")
def iam_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
"""
return pulumi.get(self, "iam_groups")
@iam_groups.setter
def iam_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "iam_groups", value)
@property
@pulumi.getter(name="maxStsTtl")
def max_sts_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
"""
return pulumi.get(self, "max_sts_ttl")
@max_sts_ttl.setter
def max_sts_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_sts_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name to identify this role within the backend.
Must be unique within the backend.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="policyArns")
def policy_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
"""
return pulumi.get(self, "policy_arns")
@policy_arns.setter
def policy_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policy_arns", value)
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> Optional[pulumi.Input[str]]:
"""
The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
"""
return pulumi.get(self, "policy_document")
@policy_document.setter
def policy_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_document", value)
@property
@pulumi.getter(name="roleArns")
def role_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
return pulumi.get(self, "role_arns")
@role_arns.setter
def role_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "role_arns", value)
@pulumi.input_type
class _SecretBackendRoleState:
def __init__(__self__, *,
backend: Optional[pulumi.Input[str]] = None,
credential_type: Optional[pulumi.Input[str]] = None,
default_sts_ttl: Optional[pulumi.Input[int]] = None,
iam_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_sts_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_document: Optional[pulumi.Input[str]] = None,
role_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering SecretBackendRole resources.
:param pulumi.Input[str] backend: The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
:param pulumi.Input[str] credential_type: Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
:param pulumi.Input[int] default_sts_ttl: The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] iam_groups: A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
:param pulumi.Input[int] max_sts_ttl: The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
:param pulumi.Input[str] name: The name to identify this role within the backend.
Must be unique within the backend.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policy_arns: Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
:param pulumi.Input[str] policy_document: The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_arns: Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
if backend is not None:
pulumi.set(__self__, "backend", backend)
if credential_type is not None:
pulumi.set(__self__, "credential_type", credential_type)
if default_sts_ttl is not None:
pulumi.set(__self__, "default_sts_ttl", default_sts_ttl)
if iam_groups is not None:
pulumi.set(__self__, "iam_groups", iam_groups)
if max_sts_ttl is not None:
pulumi.set(__self__, "max_sts_ttl", max_sts_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if policy_arns is not None:
pulumi.set(__self__, "policy_arns", policy_arns)
if policy_document is not None:
pulumi.set(__self__, "policy_document", policy_document)
if role_arns is not None:
pulumi.set(__self__, "role_arns", role_arns)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="credentialType")
def credential_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
"""
return pulumi.get(self, "credential_type")
@credential_type.setter
def credential_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credential_type", value)
@property
@pulumi.getter(name="defaultStsTtl")
def default_sts_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
"""
return pulumi.get(self, "default_sts_ttl")
@default_sts_ttl.setter
def default_sts_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_sts_ttl", value)
@property
@pulumi.getter(name="iamGroups")
def iam_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
"""
return pulumi.get(self, "iam_groups")
@iam_groups.setter
def iam_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "iam_groups", value)
@property
@pulumi.getter(name="maxStsTtl")
def max_sts_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
"""
return pulumi.get(self, "max_sts_ttl")
@max_sts_ttl.setter
def max_sts_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_sts_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name to identify this role within the backend.
Must be unique within the backend.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="policyArns")
def policy_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
"""
return pulumi.get(self, "policy_arns")
@policy_arns.setter
def policy_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policy_arns", value)
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> Optional[pulumi.Input[str]]:
"""
The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
"""
return pulumi.get(self, "policy_document")
@policy_document.setter
def policy_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_document", value)
@property
@pulumi.getter(name="roleArns")
def role_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
return pulumi.get(self, "role_arns")
@role_arns.setter
def role_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "role_arns", value)
class SecretBackendRole(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
credential_type: Optional[pulumi.Input[str]] = None,
default_sts_ttl: Optional[pulumi.Input[int]] = None,
iam_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_sts_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_document: Optional[pulumi.Input[str]] = None,
role_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
## Import
AWS secret backend roles can be imported using the `path`, e.g.
```sh
$ pulumi import vault:aws/secretBackendRole:SecretBackendRole role aws/roles/deploy
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
:param pulumi.Input[str] credential_type: Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
:param pulumi.Input[int] default_sts_ttl: The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] iam_groups: A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
:param pulumi.Input[int] max_sts_ttl: The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
:param pulumi.Input[str] name: The name to identify this role within the backend.
Must be unique within the backend.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policy_arns: Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
:param pulumi.Input[str] policy_document: The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_arns: Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecretBackendRoleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
AWS secret backend roles can be imported using the `path`, e.g.
```sh
$ pulumi import vault:aws/secretBackendRole:SecretBackendRole role aws/roles/deploy
```
:param str resource_name: The name of the resource.
:param SecretBackendRoleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretBackendRoleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
credential_type: Optional[pulumi.Input[str]] = None,
default_sts_ttl: Optional[pulumi.Input[int]] = None,
iam_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_sts_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_document: Optional[pulumi.Input[str]] = None,
role_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretBackendRoleArgs.__new__(SecretBackendRoleArgs)
if backend is None and not opts.urn:
raise TypeError("Missing required property 'backend'")
__props__.__dict__["backend"] = backend
if credential_type is None and not opts.urn:
raise TypeError("Missing required property 'credential_type'")
__props__.__dict__["credential_type"] = credential_type
__props__.__dict__["default_sts_ttl"] = default_sts_ttl
__props__.__dict__["iam_groups"] = iam_groups
__props__.__dict__["max_sts_ttl"] = max_sts_ttl
__props__.__dict__["name"] = name
__props__.__dict__["policy_arns"] = policy_arns
__props__.__dict__["policy_document"] = policy_document
__props__.__dict__["role_arns"] = role_arns
super(SecretBackendRole, __self__).__init__(
'vault:aws/secretBackendRole:SecretBackendRole',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
credential_type: Optional[pulumi.Input[str]] = None,
default_sts_ttl: Optional[pulumi.Input[int]] = None,
iam_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_sts_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
policy_document: Optional[pulumi.Input[str]] = None,
role_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SecretBackendRole':
"""
Get an existing SecretBackendRole resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
:param pulumi.Input[str] credential_type: Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
:param pulumi.Input[int] default_sts_ttl: The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] iam_groups: A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
:param pulumi.Input[int] max_sts_ttl: The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
:param pulumi.Input[str] name: The name to identify this role within the backend.
Must be unique within the backend.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policy_arns: Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
:param pulumi.Input[str] policy_document: The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_arns: Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecretBackendRoleState.__new__(_SecretBackendRoleState)
__props__.__dict__["backend"] = backend
__props__.__dict__["credential_type"] = credential_type
__props__.__dict__["default_sts_ttl"] = default_sts_ttl
__props__.__dict__["iam_groups"] = iam_groups
__props__.__dict__["max_sts_ttl"] = max_sts_ttl
__props__.__dict__["name"] = name
__props__.__dict__["policy_arns"] = policy_arns
__props__.__dict__["policy_document"] = policy_document
__props__.__dict__["role_arns"] = role_arns
return SecretBackendRole(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def backend(self) -> pulumi.Output[str]:
"""
The path the AWS secret backend is mounted at,
with no leading or trailing `/`s.
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter(name="credentialType")
def credential_type(self) -> pulumi.Output[str]:
"""
Specifies the type of credential to be used when
retrieving credentials from the role. Must be one of `iam_user`, `assumed_role`, or
`federation_token`.
"""
return pulumi.get(self, "credential_type")
@property
@pulumi.getter(name="defaultStsTtl")
def default_sts_ttl(self) -> pulumi.Output[int]:
"""
The default TTL in seconds for STS credentials.
When a TTL is not specified when STS credentials are requested,
and a default TTL is specified on the role,
then this default TTL will be used. Valid only when `credential_type` is one of
`assumed_role` or `federation_token`.
"""
return pulumi.get(self, "default_sts_ttl")
@property
@pulumi.getter(name="iamGroups")
def iam_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of IAM group names. IAM users generated
against this vault role will be added to these IAM Groups. For a credential
type of `assumed_role` or `federation_token`, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in `iam_groups` combined with the `policy_document`
and `policy_arns` parameters.
"""
return pulumi.get(self, "iam_groups")
@property
@pulumi.getter(name="maxStsTtl")
def max_sts_ttl(self) -> pulumi.Output[int]:
"""
The max allowed TTL in seconds for STS credentials
(credentials TTL are capped to `max_sts_ttl`). Valid only when `credential_type` is
one of `assumed_role` or `federation_token`.
"""
return pulumi.get(self, "max_sts_ttl")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name to identify this role within the backend.
Must be unique within the backend.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyArns")
def policy_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Specifies a list of AWS managed policy ARNs. The
behavior depends on the credential type. With `iam_user`, the policies will be
attached to IAM users when they are requested. With `assumed_role` and
`federation_token`, the policy ARNs will act as a filter on what the credentials
can do, similar to `policy_document`. When `credential_type` is `iam_user` or
`federation_token`, at least one of `policy_document` or `policy_arns` must
be specified.
"""
return pulumi.get(self, "policy_arns")
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> pulumi.Output[Optional[str]]:
"""
The IAM policy document for the role. The
behavior depends on the credential type. With `iam_user`, the policy document
will be attached to the IAM user generated and augment the permissions the IAM
user has. With `assumed_role` and `federation_token`, the policy document will
act as a filter on what the credentials can do, similar to `policy_arns`.
"""
return pulumi.get(self, "policy_document")
@property
@pulumi.getter(name="roleArns")
def role_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Specifies the ARNs of the AWS roles this Vault role
is allowed to assume. Required when `credential_type` is `assumed_role` and
prohibited otherwise.
"""
return pulumi.get(self, "role_arns")
| 1.664063
| 2
|
src/charma/streaming/decorators.py
|
mononobi/charma-server
| 1
|
12784554
|
# -*- coding: utf-8 -*-
"""
streaming decorators module.
"""
import charma.streaming.services as streaming_services
def stream(*args, **kwargs):
    """
    decorator to register a stream provider.
    :param object args: stream provider class constructor arguments.
    :param object kwargs: stream provider class constructor keyword arguments.
    :raises InvalidStreamProviderTypeError: invalid stream provider type error.
    :raises DuplicateStreamProviderError: duplicate stream provider error.
    :returns: stream provider class.
    :rtype: type
    """
    def decorator(cls):
        """
        decorates the given class and registers an instance
        of it into available stream providers.
        :param type cls: stream provider class.
        :returns: stream provider class.
        :rtype: type
        """
        instance = cls(*args, **kwargs)
        streaming_services.register_stream_provider(instance, **kwargs)
        return cls
    return decorator
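# Example usage (sketch): registering a stream provider class with the decorator
# above. `TorrentStreamProvider` and its base class are hypothetical; a real
# provider must satisfy whatever checks register_stream_provider performs
# (otherwise InvalidStreamProviderTypeError is raised, as documented).
#
#   @stream()
#   class TorrentStreamProvider(StreamProviderBase):
#       """Hypothetical provider; an instance is registered at import time."""
#       ...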
| 2.96875
| 3
|
hotdog/show_image.py
|
rparkin1/inceptionV3_hotdog
| 0
|
12784555
|
<gh_stars>0
#!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from IPython.display import Image, HTML, display
root = "images"
butions = dict(attributions)
def show_image(image_path):
display(Image(image_path))
image_rel = image_path.replace(root,'')
caption = "Image " + ' - '.join(attributions[image_rel].split(' - ')[:-1])
display(HTML("<div>%s</div>" % caption))
| 2.515625
| 3
|
lmsapi/api_netdevice/apps.py
|
orkasolutions-develop/lms-api-new
| 0
|
12784556
|
from django.apps import AppConfig
class ApiNetdeviceConfig(AppConfig):
    name = 'api_netdevice'
| 1.109375
| 1
|
src/pyaxl/__init__.py
|
TeaObvious/pyaxl
| 7
|
12784557
|
from pyaxl.configuration import registry
from pyaxl.configuration import AXLClientSettings
| 1.171875
| 1
|
blog/models.py
|
bebutler1/SoloQ-DTC-Website-writtten-with-Django-
| 0
|
12784558
|
from django.db import models
# Create your models here. Database stuff
class Post(models.Model):  # creates a table called posts
    title = models.CharField(max_length=140)  # Syntax: name = datatype(constraints)
    body = models.TextField()
    date = models.DateTimeField()
    def __str__(self):
        return self.title  # returns the post's title
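# Example usage (sketch, e.g. from `python manage.py shell`); field values are
# illustrative only:
#
#   from django.utils import timezone
#   from blog.models import Post
#
#   Post.objects.create(title="First post", body="Hello, world!", date=timezone.now())
#   print(Post.objects.filter(title__icontains="first"))  # -> <QuerySet [<Post: First post>]>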
| 2.734375
| 3
|
sql/views.py
|
Galo1117/archer
| 0
|
12784559
|
<filename>sql/views.py
# -*- coding: UTF-8 -*-
import re, time
import simplejson as json
from threading import Thread
from collections import OrderedDict
from django.db.models import Q, F
from django.db import connection, transaction
from django.utils import timezone
from django.conf import settings
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from .dao import Dao
from .api import ServerError, pages
from .const import Const, WorkflowDict
from .inception import InceptionDao
from .aes_decryptor import Prpcrypt
from .models import users, UserGroup, master_config, AliyunRdsConfig, workflow, slave_config, QueryPrivileges, Group, \
QueryPrivilegesApply, ProjectResource, GroupQueryPrivileges
from .workflow import Workflow
from .permission import role_required, superuser_required
from .sqlreview import getDetailUrl, execute_call_back, execute_skipinc_call_back
from .jobs import job_info, del_sqlcronjob
from .pycrypt import MyCrypt
from .projectresource import integration_resource, get_resource , PermissionVerification, get_query_permisshion
from .query import get_query_clustername
from archer.settings import HASH_KEY
import logging
logger = logging.getLogger('default')
dao = Dao()
inceptionDao = InceptionDao()
prpCryptor = Prpcrypt()
workflowOb = Workflow()
# Login
def login(request):
access_itom_addr = settings.ACCESS_ITOM_ADDR
return HttpResponseRedirect('%s/login/'%(access_itom_addr))
# return render(request, 'login.html')
# Log out
def logout(request):
access_itom_addr = settings.ACCESS_ITOM_ADDR
if request.session.get('login_username', False):
del request.session['login_username']
if request.session.get('resource_status', False):
del request.session['resource_status']
return HttpResponseRedirect('%s/logout/'%(access_itom_addr))
# return render(request, 'login.html')
# SQL release ticket page
def allworkflow(request):
context = {'currentMenu': 'allworkflow'}
return render(request, 'allWorkflow.html', context)
# Page for submitting SQL
def submitSql(request):
# Get database connection info
masters = master_config.objects.all().order_by('cluster_name')
if len(masters) == 0:
return HttpResponseRedirect('/admin/sql/master_config/add/')
# Get user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
pv = PermissionVerification(loginUser, loginUserOb)
# Get the project groups the user belongs to
context = pv.get_group_info()
if context["status"] == 1:
group_list = context["data"]
else:
errMsg = context["msg"]
return render(request, 'error.html', {'errMsg': errMsg})
# Get the instances the user's project groups are authorized for
context = pv.get_cluster_info(masters)
if context["status"] == 1:
listAllClusterName = context["data"]
else:
errMsg = context["msg"]
return render(request, 'error.html', {'errMsg': errMsg})
# Get all active users as notification recipients
active_user = users.objects.filter(is_active=1)
context = {'currentMenu': 'allworkflow', 'listAllClusterName': listAllClusterName,
'active_user': active_user, 'group_list': group_list}
return render(request, 'submitSql.html', context)
# Submit the SQL to Inception for parsing/review
def autoreview(request):
workflowid = request.POST.get('workflowid')
sqlContent = request.POST['sql_content']
workflowName = request.POST['workflow_name']
group_name = request.POST['group_name']
group_id = Group.objects.get(group_name=group_name).group_id
clusterName = request.POST['cluster_name']
db_name = request.POST.get('db_name')
isBackup = request.POST['is_backup']
reviewMan = request.POST.get('workflow_auditors')
notify_users = request.POST.getlist('notify_users')
# Server-side parameter validation
if sqlContent is None or workflowName is None or clusterName is None or db_name is None or isBackup is None or reviewMan is None:
context = {'errMsg': '页面提交参数可能为空'}
return render(request, 'error.html', context)
# Strip comment statements
sqlContent = ''.join(
map(lambda x: re.compile(r'(^--.*|^/\*.*\*/;[\f\n\r\t\v\s]*$)').sub('', x, count=1),
sqlContent.splitlines(1))).strip()
# Remove blank lines
sqlContent = re.sub('[\r\n\f]{2,}', '\n', sqlContent)
if sqlContent[-1] != ";":
context = {'errMsg': "SQL语句结尾没有以;结尾,请后退重新修改并提交!"}
return render(request, 'error.html', context)
# Get user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
pv = PermissionVerification(loginUser, loginUserOb)
# Check the user's resource permissions
if loginUserOb.is_superuser:
reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 1)
else:
reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 0)
result = reviewResult["data"]
if reviewResult["status"] == 1:
context = {'errMsg': reviewResult["msg"]}
return render(request, 'error.html', context)
if result is None or len(result) == 0:
context = {'errMsg': 'inception返回的结果集为空!可能是SQL语句有语法错误'}
return render(request, 'error.html', context)
# Serialize result to JSON and store it in the database so the ticket detail page can display it
jsonResult = json.dumps(result)
# Walk through result: if anything fails auto-review, mark the ticket as auto-review failed; otherwise leave it waiting for manual review
workflowStatus = Const.workflowStatus['manreviewing']
for row in result:
if row[2] == 2:
# errlevel 2 means a critical error that must be fixed
workflowStatus = Const.workflowStatus['autoreviewwrong']
break
elif re.match(r"\w*comments\w*", row[4]):
workflowStatus = Const.workflowStatus['autoreviewwrong']
break
# Call the workflow module to create the ticket
# Use a transaction to keep the data consistent
try:
with transaction.atomic():
# Save to the database
engineer = request.session.get('login_username', False)
if not workflowid:
Workflow = workflow()
Workflow.create_time = timezone.now()
else:
Workflow = workflow.objects.get(id=int(workflowid))
Workflow.workflow_name = workflowName
Workflow.group_id = group_id
Workflow.group_name = group_name
Workflow.engineer = engineer
Workflow.review_man = reviewMan
Workflow.status = workflowStatus
Workflow.is_backup = isBackup
Workflow.review_content = jsonResult
Workflow.cluster_name = clusterName
Workflow.db_name = db_name
Workflow.sql_content = sqlContent
Workflow.execute_result = ''
Workflow.audit_remark = ''
Workflow.save()
workflowId = Workflow.id
# Only invoke the workflow if auto-review passed
if workflowStatus == Const.workflowStatus['manreviewing']:
# Call the workflow to insert the audit record; query permission requests use workflow_type=2
# CC the notification recipients
listCcAddr = [email['email'] for email in
users.objects.filter(username__in=notify_users).values('email')]
workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['sqlreview'], workflowId,
listCcAddr=listCcAddr)
except Exception as msg:
context = {'errMsg': msg}
return render(request, 'error.html', context)
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':0}))
# Show the SQL ticket details; it can be manually reviewed here and executed once approved
def detail(request, workflowId, workflowType):
workflowDetail = get_object_or_404(workflow, pk=workflowId)
if workflowDetail.status in (Const.workflowStatus['finish'], Const.workflowStatus['exception']) \
and workflowDetail.is_manual == 0:
listContent = json.loads(workflowDetail.execute_result)
else:
listContent = json.loads(workflowDetail.review_content)
# Get the reviewers
reviewMan = workflowDetail.review_man
reviewMan = reviewMan.split(',')
# Get the current reviewer
try:
current_audit_user = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
workflow_type=WorkflowDict.workflow_type['sqlreview']
).current_audit_user
except Exception:
current_audit_user = None
# Get user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get scheduled-execution job info
if workflowDetail.status == Const.workflowStatus['tasktiming']:
job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
job = job_info(job_id)
if job:
run_date = job.next_run_time
else:
run_date = ''
else:
run_date = ''
# SQL result rows
column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
'backup_dbname', 'execute_time', 'sqlsha1']
rows = []
for row_index, row_item in enumerate(listContent):
row = {}
row['ID'] = row_index + 1
row['stage'] = row_item[1]
row['errlevel'] = row_item[2]
row['stagestatus'] = row_item[3]
row['errormessage'] = row_item[4]
row['SQL'] = row_item[5]
row['Affected_rows'] = row_item[6]
row['sequence'] = row_item[7]
row['backup_dbname'] = row_item[8]
row['execute_time'] = row_item[9]
row['sqlsha1'] = row_item[10]
rows.append(row)
if workflowDetail.status == '执行中':
row['stagestatus'] = ''.join(
["<div id=\"td_" + str(row['ID']) + "\" class=\"form-inline\">",
" <div class=\"progress form-group\" style=\"width: 80%; height: 18px; float: left;\">",
" <div id=\"div_" + str(row['ID']) + "\" class=\"progress-bar\" role=\"progressbar\"",
" aria-valuenow=\"60\"",
" aria-valuemin=\"0\" aria-valuemax=\"100\">",
" <span id=\"span_" + str(row['ID']) + "\"></span>",
" </div>",
" </div>",
" <div class=\"form-group\" style=\"width: 10%; height: 18px; float: right;\">",
" <form method=\"post\">",
" <input type=\"hidden\" name=\"workflowid\" value=\"" + str(workflowDetail.id) + "\">",
" <button id=\"btnstop_" + str(row['ID']) + "\" value=\"" + str(row['ID']) + "\"",
" type=\"button\" class=\"close\" style=\"display: none\" title=\"停止pt-OSC进程\">",
" <span class=\"glyphicons glyphicons-stop\">×</span>",
" </button>",
" </form>",
" </div>",
"</div>"])
context = {'currentMenu': 'allworkflow', 'workflowDetail': workflowDetail, 'column_list': column_list, 'rows': rows,
'reviewMan': reviewMan, 'current_audit_user': current_audit_user, 'loginUserOb': loginUserOb,
'run_date': run_date}
if int(workflowType) == 1:
return render(request, 'detailhash.html', context)
else:
return render(request, 'detail.html', context)
# Approve only, without executing
def passonly(request):
workflowId = request.POST['workflowid']
workflowType = request.POST.get('workflowtype',0)
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
workflowDetail = workflow.objects.get(id=workflowId)
# Get the reviewers
reviewMan = workflowDetail.review_man
reviewMan = reviewMan.split(',')
# Server-side double-check: the logged-in user performing the manual review must be a reviewer, to prevent bypassing via crafted requests or API testing tools
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
if loginUser is None or (loginUser not in reviewMan and loginUserOb.is_superuser != 1):
context = {'errMsg': '当前登录用户不是审核人,请重新登录.'}
return render(request, 'error.html', context)
# Server-side double-check: the ticket must currently be waiting for manual review
if workflowDetail.status != Const.workflowStatus['manreviewing']:
context = {'errMsg': '当前工单状态不是等待人工审核中,请刷新当前页面!'}
return render(request, 'error.html', context)
# Use a transaction to keep the data consistent
try:
with transaction.atomic():
# Call the workflow API to review
# Get audit_id
audit_id = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
auditresult = workflowOb.auditworkflow(request, audit_id, WorkflowDict.workflow_status['audit_success'],
loginUser, '')
# Update the review status in the business table according to the audit result
if auditresult['data']['workflow_status'] == WorkflowDict.workflow_status['audit_success']:
# Set the workflow status to approved and update the reviewok_time field
workflowDetail.status = Const.workflowStatus['pass']
workflowDetail.reviewok_time = timezone.now()
workflowDetail.audit_remark = ''
workflowDetail.save()
except Exception as msg:
context = {'errMsg': msg}
if int(workflowType) == 1:
return HttpResponse(context['errMsg'])
else:
return render(request, 'error.html', context)
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))
# Execute the SQL only
def executeonly(request):
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
workflowDetail = workflow.objects.get(id=workflowId)
clusterName = workflowDetail.cluster_name
db_name = workflowDetail.db_name
url = getDetailUrl(request) + str(workflowId) + '/'
# Get the reviewers
reviewMan = workflowDetail.review_man
reviewMan = reviewMan.split(',')
# Server-side double-check: the logged-in user must be a reviewer or the submitter, to prevent bypassing via crafted requests or API testing tools
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
if loginUser is None or (loginUser not in reviewMan and loginUser != workflowDetail.engineer and loginUserOb.role != 'DBA'):
context = {'errMsg': '当前登录用户不是审核人或者提交人,请重新登录.'}
return render(request, 'error.html', context)
# Server-side double-check: the ticket must be in the approved state
if workflowDetail.status != Const.workflowStatus['pass']:
context = {'errMsg': '当前工单状态不是审核通过,请刷新当前页面!'}
return render(request, 'error.html', context)
# Set the workflow status to executing and update the reviewok_time field
workflowDetail.status = Const.workflowStatus['executing']
workflowDetail.reviewok_time = timezone.now()
# Re-split and re-check before executing to refresh the SHA1 cache; if another process does this while execution is in progress, Inception will core dump
try:
splitReviewResult = inceptionDao.sqlautoReview(workflowDetail.sql_content, workflowDetail.cluster_name, db_name,
isSplit='yes')
except Exception as msg:
context = {'errMsg': msg}
return render(request, 'error.html', context)
workflowDetail.review_content = json.dumps(splitReviewResult)
try:
workflowDetail.save()
except Exception:
# Close and reacquire the connection to avoid timeouts
connection.close()
workflowDetail.save()
# Execute the statements via an asynchronous callback to avoid the ticket getting stuck in the executing state
t = Thread(target=execute_call_back, args=(workflowId, clusterName, url))
t.start()
return HttpResponseRedirect(reverse('sql:detail', kwargs={ 'workflowId':workflowId, 'workflowType':0 }))
# Skip Inception and execute the SQL directly; only for syntax Inception does not support, use with caution
@role_required(('DBA',))
def execute_skipinc(request):
workflowId = request.POST['workflowid']
# Get the ticket info
workflowId = int(workflowId)
workflowDetail = workflow.objects.get(id=workflowId)
sql_content = workflowDetail.sql_content
clusterName = workflowDetail.cluster_name
url = getDetailUrl(request) + str(workflowId) + '/'
# Server-side double-check: the ticket must be in the auto-review-failed state
if workflowDetail.status not in [Const.workflowStatus['manreviewing'], Const.workflowStatus['pass'],
Const.workflowStatus['autoreviewwrong']]:
context = {'errMsg': '当前工单状态不是自动审核不通过,请刷新当前页面!'}
return render(request, 'error.html', context)
# Update the ticket status to executing
workflowDetail = workflow.objects.get(id=workflowId)
workflowDetail.status = Const.workflowStatus['executing']
workflowDetail.reviewok_time = timezone.now()
workflowDetail.save()
# Execute the statements via an asynchronous callback to avoid the ticket getting stuck in the executing state
t = Thread(target=execute_skipinc_call_back, args=(workflowId, clusterName, sql_content, url))
t.start()
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':0}))
# Abort the workflow
def cancel(request):
workflowId = request.POST['workflowid']
workflowType = request.POST.get('workflowtype', 0)
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
workflowDetail = workflow.objects.get(id=workflowId)
# Get the reviewers
reviewMan = workflowDetail.review_man
reviewMan = reviewMan.split(',')
audit_remark = request.POST.get('audit_remark')
if audit_remark is None:
context = {'errMsg': '驳回原因不能为空'}
return render(request, 'error.html', context)
# Server-side double-check: if the logged-in user performing the abort is neither the submitter nor a reviewer, reject it
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
if loginUser is None or (loginUser not in reviewMan and loginUser != workflowDetail.engineer and loginUserOb.role != 'DBA'):
context = {'errMsg': '当前登录用户不是审核人也不是提交人,请重新登录.'}
return render(request, 'error.html', context)
# Server-side double-check: a ticket already in a finished state cannot be aborted
if workflowDetail.status in (
Const.workflowStatus['abort'], Const.workflowStatus['finish'], Const.workflowStatus['autoreviewwrong'],
Const.workflowStatus['exception']):
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))
# Use a transaction to keep the data consistent
try:
with transaction.atomic():
# Call the workflow API to cancel or reject
# Get audit_id
audit_id = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
if loginUser == workflowDetail.engineer:
auditresult = workflowOb.auditworkflow(request, audit_id, WorkflowDict.workflow_status['audit_abort'],
loginUser, audit_remark)
else:
auditresult = workflowOb.auditworkflow(request, audit_id, WorkflowDict.workflow_status['audit_reject'],
loginUser, audit_remark)
# Delete the scheduled execution job
if workflowDetail.status == Const.workflowStatus['tasktiming']:
job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
del_sqlcronjob(job_id)
# Update the review status in the business table according to the audit result
if auditresult['data']['workflow_status'] in (
WorkflowDict.workflow_status['audit_abort'], WorkflowDict.workflow_status['audit_reject']):
# Set the workflow status to manually aborted
workflowDetail.status = Const.workflowStatus['abort']
workflowDetail.audit_remark = audit_remark
workflowDetail.save()
except Exception as msg:
context = {'errMsg': msg}
if int(workflowType) == 1:
return HttpResponse(context['errMsg'])
else:
return render(request, 'error.html', context)
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))
# Show the rollback SQL
def rollback(request):
workflowId = request.GET['workflowid']
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
try:
listBackupSql = inceptionDao.getRollbackSqlList(workflowId)
except Exception as msg:
context = {'errMsg': msg}
return render(request, 'error.html', context)
workflowDetail = workflow.objects.get(id=workflowId)
workflowName = workflowDetail.workflow_name
rollbackWorkflowName = "【回滚工单】原工单Id:%s ,%s" % (workflowId, workflowName)
context = {'listBackupSql': listBackupSql, 'currentMenu': 'sqlworkflow', 'workflowDetail': workflowDetail,
'rollbackWorkflowName': rollbackWorkflowName}
return render(request, 'rollback.html', context)
# SQL review guidelines (must-read)
def dbaprinciples(request):
context = {'currentMenu': 'dbaprinciples'}
return render(request, 'dbaprinciples.html', context)
# Charts
def charts(request):
context = {'currentMenu': 'charts'}
return render(request, 'charts.html', context)
# Online SQL query
def sqlquery(request):
# Get user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get all slave instance names
slaves = slave_config.objects.all().order_by('cluster_name')
if len(slaves) == 0:
return HttpResponseRedirect('/admin/sql/slave_config/add/')
# Check whether the user is an administrator
if loginUserOb.is_superuser:
listAllClusterName = [ slave.cluster_name for slave in slaves ]
else:
listAllClusterName = get_query_clustername(loginUser)
context = {'currentMenu': 'sqlquery', 'listAllClusterName': listAllClusterName}
return render(request, 'sqlquery.html', context)
# SQL slow query log
def slowquery(request):
# Get all master instance names
masters = master_config.objects.all().order_by('cluster_name')
if len(masters) == 0:
return HttpResponseRedirect('/admin/sql/master_config/add/')
cluster_name_list = [master.cluster_name for master in masters]
context = {'currentMenu': 'slowquery', 'tab': 'slowquery', 'cluster_name_list': cluster_name_list}
return render(request, 'slowquery.html', context)
# SQL optimization tool
def sqladvisor(request):
# Get all master instance names
masters = master_config.objects.all().order_by('cluster_name')
if len(masters) == 0:
return HttpResponseRedirect('/admin/sql/master_config/add/')
cluster_name_list = [master.cluster_name for master in masters]
context = {'currentMenu': 'sqladvisor', 'listAllClusterName': cluster_name_list}
return render(request, 'sqladvisor.html', context)
# Query permission request list
def queryapplylist(request):
slaves = slave_config.objects.all().order_by('cluster_name')
# Get the project groups the user belongs to
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
groupname_list = [ group['group_name'] for group in UserGroup.objects.filter(user_name=loginUser).values('group_name') ]
# Get all slave instance names
listAllClusterName = [slave.cluster_name for slave in slaves]
if len(slaves) == 0:
return HttpResponseRedirect('/admin/sql/slave_config/add/')
# Get all project group names
# group_list = Group.objects.all().annotate(id=F('group_id'),
# name=F('group_name'),
# parent=F('group_parent_id'),
# level=F('group_level')
# ).values('id', 'name', 'parent', 'level')
group_list = Group.objects.filter(group_name__in=groupname_list).annotate(id=F('group_id'),
name=F('group_name'),
parent=F('group_parent_id'),
level=F('group_level')
).values('id', 'name', 'parent', 'level')
group_list = [group for group in group_list]
if len(group_list) == 0 and loginUserOb.is_superuser == False:
errMsg = '您尚未属于任何项目组,请与管理员联系.'
return render(request, 'error.html', {'errMsg': errMsg})
# elif len(group_list) == 0 and loginUserOb.is_superuser == True:
# return HttpResponseRedirect('/config/')
context = {'currentMenu': 'queryapply', 'listAllClusterName': listAllClusterName,
'group_list': group_list}
return render(request, 'queryapplylist.html', context)
# Query privilege application detail
def queryapplydetail(request, apply_id, audit_type):
workflowDetail = QueryPrivilegesApply.objects.get(apply_id=apply_id)
# Get the current auditor
audit_info = workflowOb.auditinfobyworkflow_id(workflow_id=apply_id,
workflow_type=WorkflowDict.workflow_type['query'])
context = {'currentMenu': 'queryapply', 'workflowDetail': workflowDetail, 'audit_info': audit_info}
if int(audit_type) == 1:
return render(request, 'queryapplydetailhash.html', context)
else:
return render(request, 'queryapplydetail.html', context)
# Manage users' query privileges
def queryuserprivileges(request):
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get all users
user_list_person = [ user['user_name'] for user in QueryPrivileges.objects.filter(is_deleted=0).values('user_name').distinct() ]
group_name_list = [ group['group_name'] for group in GroupQueryPrivileges.objects.all().values('group_name').distinct() ]
user_list_group = [ user['user_name'] for user in UserGroup.objects.filter(group_name__in=group_name_list).values('user_name').distinct() ]
user_list = user_list_person + user_list_group
# Sort and deduplicate
user_list = sorted(list(set(user_list)))
context = {'currentMenu': 'queryapply', 'user_list': user_list, 'loginUserOb': loginUserOb}
return render(request, 'queryuserprivileges.html', context)
# Manage users' execution privileges
def executeuserprivileges(request):
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get all users
user_list = users.objects.all().values("username").distinct()
context = {'currentMenu': 'queryapply', 'user_list': user_list, 'loginUserOb': loginUserOb}
return render(request, 'executeuserprivileges.html', context)
# Diagnostics -- processes
def diagnosis_process(request):
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get all instance names
masters = AliyunRdsConfig.objects.all().order_by('cluster_name')
cluster_name_list = [master.cluster_name for master in masters]
context = {'currentMenu': 'diagnosis', 'tab': 'process', 'cluster_name_list': cluster_name_list,
'loginUserOb': loginUserOb}
return render(request, 'diagnosis.html', context)
# Diagnostics -- disk space
def diagnosis_sapce(request):
# Get all instance names
masters = AliyunRdsConfig.objects.all().order_by('cluster_name')
cluster_name_list = [master.cluster_name for master in masters]
context = {'currentMenu': 'diagnosis', 'tab': 'space', 'cluster_name_list': cluster_name_list}
return render(request, 'diagnosis.html', context)
# Get the workflow audit list
def workflows(request):
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
context = {'currentMenu': 'workflow', "loginUserOb": loginUserOb}
return render(request, "workflow.html", context)
# Workflow audit detail
def workflowsdetail(request, audit_id):
# Return different detail pages depending on workflow_type
auditInfo = workflowOb.auditinfo(audit_id)
if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
return HttpResponseRedirect(reverse('sql:queryapplydetail', kwargs={'apply_id':auditInfo.workflow_id, 'audit_type':0}))
elif auditInfo.workflow_type == WorkflowDict.workflow_type['sqlreview']:
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':auditInfo.workflow_id, 'workflowType':0}))
# Workflow audit via HASH authentication
def workflowsdetailhash(request):
# Login-free quick audit for users, based on HASH authentication
# http://192.168.123.110:8080/workflowshash/?timestamp=454545&hash=kkkkkkkk
timestamp, uuid, audit_id = None, None, None
dbom_host = request.scheme + "://" + request.get_host() + "/login/"
timestamp_before = request.GET.get('timestamp', '')
hash_encode = request.GET.get('hash', '')
timestamp_after = int(time.time())
# Decrypt the hash
try:
crypter = MyCrypt(HASH_KEY)
hash_text = crypter.decrypt(hash_encode)
hash_text_list = hash_text.split(',')
timestamp = hash_text_list[0]
uuid = hash_text_list[1]
audit_id = hash_text_list[2]
except Exception as e:
errMsg = "HASH鉴权失败,请确保HASH值正常。"
return HttpResponse(errMsg)
if int(timestamp_before) != int(timestamp) or (int(timestamp_after) - int(timestamp)) > 3600:
errMsg = "链接已经超过1小时或TIMESTAMP被修改,请登录DBOM(%s)进行审核。" % (dbom_host)
return HttpResponse(errMsg)
# Get the user info
loginUserOb = users.objects.get(uuid=uuid)
login_username = loginUserOb.username
if not loginUserOb:
errMsg = "用户鉴权失败,请登录DBOM(%s)进行审核。" % (dbom_host)
return HttpResponse(errMsg)
else:
request.session['login_username'] = login_username
request.session.set_expiry(300)
# Return different detail pages depending on workflow_type
auditInfo = workflowOb.auditinfo(audit_id)
if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
return HttpResponseRedirect(reverse('sql:queryapplydetail', kwargs={'apply_id':auditInfo.workflow_id, 'audit_type':1}))
elif auditInfo.workflow_type == WorkflowDict.workflow_type['sqlreview']:
return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':auditInfo.workflow_id, 'workflowType':1}))
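# Added illustration (not part of the original code): how the hash parameter in the quick-audit
# URL above could be generated. This assumes MyCrypt also exposes an encrypt() that mirrors the
# decrypt() used above; that method name and the helper below are assumptions, not confirmed here.
def _build_workflowshash_url(base_url, user_uuid, audit_id):
    ts = int(time.time())
    crypter = MyCrypt(HASH_KEY)
    hash_encode = crypter.encrypt("%s,%s,%s" % (ts, user_uuid, audit_id))  # assumed API
    return "%s/workflowshash/?timestamp=%s&hash=%s" % (base_url, ts, hash_encode)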
# Configuration management
@superuser_required
def config(request):
# Get all project group names
group_list = Group.objects.all().annotate(id=F('group_id'),
name=F('group_name'),
parent=F('group_parent_id'),
level=F('group_level'),
leader=F('group_leader')
).values('id', 'name', 'parent', 'level', 'leader')
# Get the member count of each group
for group_name in group_list:
members_num = UserGroup.objects.filter(group_name=group_name['name']).count()
group_name['members_num'] = members_num
group_list = [group for group in group_list]
# Get all users
user_list = users.objects.filter(is_active=1).values('username', 'display')
context = {'currentMenu': 'config', 'group_list': group_list, 'user_list': user_list,
'WorkflowDict': WorkflowDict}
group_list, p, groups, page_range, current_page, show_first, show_end, contacts = pages(group_list, request)
return render(request, 'config.html', locals())
# Configure project group info
@csrf_exempt
def configGroup(request):
context = { 'status': 1, 'msg':'', 'data': {}} # 1 means success, 0 means failure
if request.method == "POST":
operation_type = request.POST.get('operation_type', None)
project_name = request.POST.get('project_name', None)
project_auditors = request.POST.get('project_auditors', None)
if operation_type == "project_add":
try:
if not project_name or len(project_name) == 0:
msg = u'项目名称不能为空'
raise ServerError(msg)
elif not project_auditors or len(project_auditors) == 0:
msg = u'请选择项目负责人'
raise ServerError(msg)
except ServerError as e:
context['status'] = 0
context['msg'] = e.message
logger.error('项目添加出错:%s'%e.message)
else:
try:
# Add the group record
group_default_dict = { 'group_name': project_name, 'group_leader': project_auditors }
group_obj, group_created = Group.objects.get_or_create(group_name=project_name, group_leader=project_auditors, defaults=group_default_dict)
logger.info('project add obj: %s created: %s' % (group_obj, group_created))
# Add the user-group relationship record
usergroup_default_dict = { 'group_name': project_name, 'user_name': project_auditors }
usergroup_obj, usergroup_created = UserGroup.objects.get_or_create(group_name=project_name, user_name=project_auditors, defaults=usergroup_default_dict)
logger.info('Relationship between the project and the user add obj: %s created: %s' % (usergroup_obj, usergroup_created))
# Configure project members
users_list_select_web = request.POST.getlist('users_selected', [])
configGroupMembers(project_name, users_list_select_web)
context['status'] = 1
context['msg'] = '项目组添加成功'
logger.info('Project add %s is success.'%project_name)
except Exception as e:
context['status'] = 0
search_result = re.search('Duplicate entry',str(e))
if search_result:
context['msg'] = '项目组已经存在'
else:
context['msg'] = '项目组添加失败'
logger.info('Project add %s is failed. { %s }'%(project_name, e))
elif operation_type == "project_del":
project_id = request.POST.get('project_id', None)
project_name = Group.objects.get(group_id=project_id)
try:
# Delete the group record
Group.objects.filter(group_id=project_id).delete()
# Delete the users associated with the group
UserGroup.objects.filter(group_name=project_name.group_name).delete()
context['status'] = 1
context['msg'] = '项目组删除成功'
logger.info('Project %s delete success.' % project_name.group_name)
except Exception as e:
context['status'] = 0
context['msg'] = '项目组删除失败'
logger.info('Project %s delete failed. { %s }' %(project_name.group_name, e))
elif operation_type == "get_project":
project_dic = {}
get_type = request.POST.get('get_type', None)
project_id = request.POST.get('project_id', None)
try:
if get_type == 'edit':
# Project group info
project_info = Group.objects.get(group_id=project_id)
group_name = project_info.group_name
user_list = list(users.objects.filter(is_active=1).values('username'))
project_dic["group_id"] = project_info.group_id
project_dic["group_name"] = group_name
project_dic["group_leader"] = project_info.group_leader
project_dic["user_list"] = user_list
else:
group_name = ''
# Project group member info
user_list_all = [user['username'] for user in list(users.objects.values('username'))]
user_list_select = [user['user_name'] for user in list(UserGroup.objects.filter(group_name=group_name).values('user_name'))]
user_list_noselect = [user for user in user_list_all if user not in user_list_select]
project_dic["user_list_select"] = user_list_select
project_dic["user_list_noselect"] = user_list_noselect
context['data'] = project_dic
context['status'] = 1
context['msg'] = '获取项目信息成功'
logger.info('Get project %s info success.' %group_name)
except Exception as e:
context['status'] = 0
context['msg'] = '获取项目信息失败'
logger.info('Get project info failed. { %s }' %e)
elif operation_type == "project_edit":
edit_group_id = request.POST.get('edit_group_id', None)
edit_project_name = request.POST.get('edit_project_name', None)
edit_project_auditors = request.POST.get('edit_project_auditors', None)
try:
if not edit_project_name or len(edit_project_name) == 0:
msg = u'项目名称不能为空'
raise ServerError(msg)
elif not edit_project_auditors or len(edit_project_auditors) == 0:
msg = u'请选择项目负责人'
raise ServerError(msg)
except ServerError as e:
context['status'] = 0
context['msg'] = e.message
logger.error('项目更新出错:%s'%e.message)
else:
try:
# Update the group record
obj, created = Group.objects.update_or_create(group_id=edit_group_id, defaults={"group_name":edit_project_name, "group_leader":edit_project_auditors})
logger.info('project update obj: %s created: %s' % (obj, created))
# Configure project members
users_list_select_web = request.POST.getlist('users_selected', [])
configGroupMembers(edit_project_name, users_list_select_web)
context['status'] = 1
context['msg'] = '项目组更新成功'
logger.info('Project ID %s update success.' % edit_group_id)
except Exception as e:
context['status'] = 0
search_result = re.search('Duplicate entry', str(e))
if search_result:
context['msg'] = '项目组已经存在'
else:
context['msg'] = '项目组更新失败'
logger.info('Project ID %s update failed. { %s }' %(edit_group_id, e))
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
# Configure project members
@csrf_exempt
def configGroupMembers(group_name, users_list_select_web):
user_list_select = [ user['user_name'] for user in list(UserGroup.objects.filter(group_name=group_name).values('user_name')) ]
insert_users_list = [ user for user in users_list_select_web if user not in user_list_select ]
del_users_list = [ user for user in user_list_select if user not in users_list_select_web ]
# Insert newly added members
for user in insert_users_list:
obj, created = UserGroup.objects.get_or_create(group_name=group_name, user_name=user, defaults={'group_name':group_name, 'user_name':user})
logger.info('group members insert obj: %s created: %s'%(obj, created))
logger.info('group members insert data %s'%insert_users_list)
# Delete removed members
for user in del_users_list:
UserGroup.objects.filter(group_name=group_name, user_name=user).delete()
logger.info('group members delete data %s' % del_users_list)
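# Worked example (added for clarity; the names are hypothetical): if the group currently contains
# ['alice', 'bob'] and the form submits ['bob', 'carol'], then insert_users_list == ['carol'] and
# del_users_list == ['alice'], so 'carol' is added to the group and 'alice' is removed from it.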
# Get project resources
@csrf_exempt
def projectresource(request):
currentMenu = 'projectresource'
context = {'status': 1, 'msg': '', 'data': {}} # 1 means success, 0 means failure
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get project clusters
listAllCluster = slave_config.objects.all().order_by('cluster_name')
listAllClusterName = [ str(cluster.cluster_name) for cluster in listAllCluster ]
if request.session.get('resource_status', 0) == 0:
logger.debug('异步整合现网表资源信息中...')
# Integrate resources in an asynchronous background thread so a long-running integration does not block or break the request
t = Thread(target=integration_resource, args=(listAllClusterName,))
t.start()
request.session['resource_status'] = 1
# Get the list of projects managed by the current user
if loginUserOb.is_superuser:
user_project_list = [ group["group_name"] for group in Group.objects.all().values("group_name").distinct() ]
else:
user_project_list = [ group["group_name"] for group in Group.objects.filter(group_leader=loginUser).values("group_name").distinct() ]
if request.method == "POST":
limitStart = int(request.POST.get('offset',0))
pageSize = int(request.POST.get('pageSize',0))
project_name = request.POST.get('project_name',None)
cluster_name = request.POST.get('cluster_name',None)
db_name = request.POST.get('db_name',None)
search = request.POST.get('search',None)
config_type = request.POST.get('config_type',None)
if config_type == "change_cluster":
listDatabase = []
if cluster_name:
# Get all databases of the instance
listDatabase = [ row['db_name'] for row in list(ProjectResource.objects.filter(cluster_name=cluster_name).values('db_name').distinct()) ]
return HttpResponse(json.dumps(listDatabase), content_type="application/x-www-form-urlencoded")
elif config_type == "get_resource":
resource_id = request.POST.get('resource_id',None)
project_name = request.POST.get('project_name',None)
if not project_name or len(project_name) == 0:
context['status'] = 0
context['msg'] = '请选择需要获取权限的项目'
else:
try:
group_list_str = ProjectResource.objects.get(id=resource_id).group_list
if len(group_list_str) > 0:
group_list_tmp = group_list_str.split(",")
else:
group_list_tmp = []
group_list_tmp.append(project_name)
group_list = ','.join(group_list_tmp)
# Update the resource record
ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list})
context['status'] = 1
context['data'] = group_list
logger.info('Get resource ID %s is success.'%resource_id)
except Exception as e:
context['status'] = 0
context['msg'] = '资源获取失败'
logger.error('Get resource ID %s failed. { %s }' %(resource_id, e))
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
elif config_type == "get_db_all_resource":
group_name = request.POST.get('group_name', None)
cluster_name = request.POST.get('cluster_name',None)
db_name = request.POST.get('db_name', None)
if not group_name or len(group_name) == 0:
context['status'] = 0
context['msg'] = '请选择项目组'
elif not cluster_name or len(cluster_name) == 0:
context['status'] = 0
context['msg'] = '请选择数据库实例'
elif not db_name or len(db_name) == 0:
context['status'] = 0
context['msg'] = '请选择数据库'
else:
try:
group_info_list = list(ProjectResource.objects.filter(cluster_name=cluster_name, db_name=db_name).values('id', 'group_list'))
for group_info in group_info_list:
resource_id = group_info['id']
group_list_str = group_info['group_list']
if len(group_list_str) > 0:
group_list_tmp = group_list_str.split(",")
else:
group_list_tmp = []
if group_name not in group_list_tmp:
group_list_tmp.append(group_name)
group_list = ','.join(group_list_tmp)
# Update the resource record
ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list})
context['status'] = 1
context['data'] = group_list
logger.info('Get resource ID %s is success.'%resource_id)
logger.info('Get whole database %s resource is success.' % db_name)
except Exception as e:
context['status'] = 0
context['msg'] = '整库资源获取失败'
logger.error('Get whole database %s resource failed. { %s }' %(db_name, e))
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
elif config_type == "del_resource":
resource_id = request.POST.get('resource_id',None)
project_name = request.POST.get('project_name',None)
if not project_name or len(project_name) == 0:
context['status'] = 0
context['msg'] = '请先选择项目'
else:
try:
group_list_tmp = (ProjectResource.objects.get(id=resource_id).group_list).split(",")
group_list_tmp.remove(project_name)
group_list = ','.join(group_list_tmp)
ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list})
context['status'] = 1
context['data'] = group_list
logger.info('Delete resource ID %s is success.'%resource_id)
except Exception as e:
context['status'] = 0
context['msg'] = '资源清除失败'
logger.error('Delete resource ID %s failed. { %s }' %(resource_id, e))
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
elif config_type == "del_db_all_resource":
group_name = request.POST.get('group_name', None)
cluster_name = request.POST.get('cluster_name', None)
db_name = request.POST.get('db_name', None)
if not group_name or len(group_name) == 0:
context['status'] = 0
context['msg'] = '请选择项目组'
elif not cluster_name or len(cluster_name) == 0:
context['status'] = 0
context['msg'] = '请选择数据库实例'
elif not db_name or len(db_name) == 0:
context['status'] = 0
context['msg'] = '请选择数据库'
else:
try:
group_info_list = list(ProjectResource.objects.filter(cluster_name=cluster_name, db_name=db_name).values('id','group_list'))
for group_info in group_info_list:
resource_id = group_info['id']
group_list_str = group_info['group_list']
if len(group_list_str) > 0:
group_list_tmp = group_list_str.split(",")
else:
group_list_tmp = []
if group_name in group_list_tmp:
group_list_tmp.remove(group_name)
group_list = ','.join(group_list_tmp)
# Update the resource record
ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list': group_list})
context['status'] = 1
context['data'] = group_list
logger.info('Delete resource ID %s is success.' % resource_id)
logger.info('Delete whole database %s resource is success.' % db_name)
except Exception as e:
context['status'] = 0
context['msg'] = '整库资源清除失败'
logger.error('Delete whole database %s resource failed. { %s }' % (db_name, e))
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
else:
where_list = ['1=1']
if cluster_name:
where_list.append('AND cluster_name="%s"'%cluster_name)
if db_name:
where_list.append('AND db_name="%s"'%db_name)
if search:
where_list.append('AND ( table_name LIKE "%%%s%%" OR group_list LIKE "%%%s%%" )'%(search, search))
if len(where_list) > 0:
where_value = ' '.join(where_list)
table = 'project_resource'
count_sql = "SELECT COUNT(1) AS rowcount FROM %s WHERE %s;"%(table, where_value)
row_sql = "SELECT id,cluster_name,db_name,table_name,group_list FROM %s WHERE %s ORDER by id ASC LIMIT %s,%s;"%(table, where_value, limitStart, pageSize)
# Get resource info
resource_data = get_resource(count_sql, row_sql, project_name)
else:
table = 'project_resource'
count_sql = "SELECT COUNT(1) AS rowcount FROM %s;"%(table)
row_sql = "SELECT id,cluster_name,db_name,table_name,group_list FROM %s ORDER by id ASC LIMIT %s,%s;"%(table, limitStart, pageSize)
# Get resource info
resource_data = get_resource(count_sql, row_sql , project_name)
return HttpResponse(json.dumps(resource_data), content_type="application/x-www-form-urlencoded")
group_list = Group.objects.all().annotate(id=F('group_id'),
name=F('group_name'),
parent=F('group_parent_id'),
level=F('group_level')
).values('id', 'name', 'parent', 'level')
group_list = [group for group in group_list]
return render(request, 'project_config/get_project_group_resource.html', locals())
# Set query permissions for project groups
@csrf_exempt
def groupQueryPermission(request):
currentMenu = 'projectresource'
context = {'status': 1, 'msg': '', 'data': {}} # 1 means success, 0 means failure
# Get the user info
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# Get project clusters
listAllCluster = slave_config.objects.all().order_by('cluster_name')
listAllClusterName = [ str(cluster.cluster_name) for cluster in listAllCluster ]
# Get the list of projects managed by the current user
if loginUserOb.is_superuser:
user_group_list = [ group["group_name"] for group in Group.objects.all().values("group_name").distinct() ]
else:
user_group_list = [ group["group_name"] for group in Group.objects.filter(group_leader=loginUser).values("group_name").distinct() ]
if request.method == "POST":
limitStart = int(request.POST.get('offset',0))
pageSize = int(request.POST.get('pageSize',0))
group_name = request.POST.get('group_name',None)
cluster_name = request.POST.get('cluster_name',None)
db_name = request.POST.get('db_name',None)
search = request.POST.get('search',None)
user_group_text = '\"' + '\",\"'.join(user_group_list) + '\"'
where_list = ['1=1']
if group_name:
where_list.append('AND group_name="%s"' % group_name)
else:
where_list.append('AND group_name IN (%s)' % user_group_text)
if cluster_name:
where_list.append('AND cluster_name="%s"' % cluster_name)
if db_name:
where_list.append('AND db_name="%s"' % db_name)
if search:
where_list.append('AND ( table_name LIKE "%%%s%%" OR group_name LIKE "%%%s%%" )' % (search, search))
where_value = ' '.join(where_list)
table = 'group_query_privileges'
count_sql = "SELECT COUNT(1) AS rowcount FROM %s WHERE %s;" % (table, where_value)
row_sql = "SELECT privilege_id,group_name,cluster_name,db_name,table_name,valid_date,limit_num FROM %s WHERE %s ORDER by privilege_id ASC LIMIT %s,%s;" % (
table, where_value, limitStart, pageSize)
# Get resource info
resource_data = get_query_permisshion(count_sql, row_sql)
# logger.debug('获取权限资源信息:%s.'%resource_data)
return HttpResponse(json.dumps(resource_data), content_type="application/x-www-form-urlencoded")
return render(request, 'project_config/set_group_query_permission.html', locals())
# Get and update query permissions for a project group
@csrf_exempt
def getGroupQueryPermission(request):
context = {'status': 1, 'msg': '', 'data': {}} # 1 means success, 0 means failure
group_name = request.POST.get('group_name', None)
cluster_name = request.POST.get('cluster_name', None)
db_name = request.POST.get('db_name', None)
operation_type = request.POST.get('operation_type', None)
valid_date = request.POST.get('valid_date', None)
limit_num = request.POST.get('limit_num', 1000)
table_resource_list = [ table['table_name'] for table in ProjectResource.objects.filter(cluster_name=cluster_name,db_name=db_name).values('table_name') ]
permission_table_list = [ table['table_name'] for table in GroupQueryPrivileges.objects.filter(group_name=group_name,cluster_name=cluster_name,db_name=db_name).values('table_name') ]
no_permission_table_list = [ table_name for table_name in table_resource_list if table_name not in permission_table_list ]
if operation_type == 'resource_save':
try:
if not group_name or len(group_name) == 0:
msg = u'请选择项目组'
raise ServerError(msg)
elif not cluster_name or len(cluster_name) == 0:
msg = u'请选择数据库实例'
raise ServerError(msg)
elif not db_name or len(db_name) == 0:
msg = u'请选择数据库'
raise ServerError(msg)
elif not valid_date or len(valid_date) == 0:
msg = u'请选择授权时间'
raise ServerError(msg)
elif not limit_num or len(limit_num) == 0:
msg = u'请选择查询限制行数'
raise ServerError(msg)
except ServerError as e:
context['status'] = 0
context['msg'] = e.message
logger.error('Group permission set error: %s' % e.message)
else:
try:
web_permission_table_list = request.POST.getlist('tables_selected', [])
new_permission_table_list = [ table_name for table_name in web_permission_table_list if table_name not in permission_table_list ]
del_permission_table_list = [ table_name for table_name in permission_table_list if table_name not in web_permission_table_list ]
defaults_data = {'group_name': group_name, 'cluster_name': cluster_name, 'db_name': db_name, 'valid_date': valid_date, 'limit_num': limit_num}
# Add new permission rows
for table_name in new_permission_table_list:
defaults_data['table_name'] = table_name
# Insert the row
GroupQueryPrivileges.objects.create(**defaults_data)
logger.debug('Insert group query permission %s.' % new_permission_table_list)
# Delete the excluded rows
for table_name in del_permission_table_list:
# Delete the row
GroupQueryPrivileges.objects.filter(group_name=group_name,cluster_name=cluster_name,db_name=db_name,table_name=table_name).delete()
logger.debug('Delete group query permission %s.' % del_permission_table_list)
logger.debug('Save group query permission success.%s'%web_permission_table_list)
except Exception as e:
context['status'] = 0
context['msg'] = e
logger.error('Save group query permission error {%s}.'%e)
elif operation_type == 'del_premission':
privilege_id = request.POST.get('privilege_id', None)
try:
# Delete the corresponding permission row
GroupQueryPrivileges.objects.filter(privilege_id=privilege_id).delete()
logger.info("Delete group query permission success.")
except Exception as e:
context['status'] = 0
context['msg'] = e
logger.error('Group permission delete error: %s' % e)
table_resource = {}
table_resource['permission_table_list'] = permission_table_list
table_resource['no_permission_table_list'] = no_permission_table_list
context['data'] = table_resource
return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
| 1.757813
| 2
|
graphene/types/tests/test_mutation.py
|
bcb/graphene
| 0
|
12784560
|
import pytest
from ..mutation import Mutation
from ..objecttype import ObjectType
from ..schema import Schema
from ..scalars import String
from ..dynamic import Dynamic
def test_generate_mutation_no_args():
class MyMutation(Mutation):
'''Documentation'''
@classmethod
def mutate(cls, *args, **kwargs):
pass
assert issubclass(MyMutation, ObjectType)
assert MyMutation._meta.name == "MyMutation"
assert MyMutation._meta.description == "Documentation"
assert MyMutation.Field().resolver == MyMutation.mutate
def test_generate_mutation_with_meta():
class MyMutation(Mutation):
class Meta:
name = 'MyOtherMutation'
description = 'Documentation'
@classmethod
def mutate(cls, *args, **kwargs):
pass
assert MyMutation._meta.name == "MyOtherMutation"
assert MyMutation._meta.description == "Documentation"
assert MyMutation.Field().resolver == MyMutation.mutate
def test_mutation_raises_exception_if_no_mutate():
with pytest.raises(AssertionError) as excinfo:
class MyMutation(Mutation):
pass
assert "All mutations must define a mutate method in it" == str(excinfo.value)
def test_mutation_execution():
class CreateUser(Mutation):
class Input:
name = String()
dynamic = Dynamic(lambda: String())
dynamic_none = Dynamic(lambda: None)
name = String()
dynamic = Dynamic(lambda: String())
def mutate(self, args, context, info):
name = args.get('name')
dynamic = args.get('dynamic')
return CreateUser(name=name, dynamic=dynamic)
class Query(ObjectType):
a = String()
class MyMutation(ObjectType):
create_user = CreateUser.Field()
schema = Schema(query=Query, mutation=MyMutation)
result = schema.execute(''' mutation mymutation {
createUser(name:"Peter", dynamic: "dynamic") {
name
dynamic
}
}
''')
assert not result.errors
assert result.data == {
'createUser': {
'name': 'Peter',
'dynamic': 'dynamic',
}
}
| 2.34375
| 2
|
planck_viewer.py
|
heyfaraday/CMB_test
| 0
|
12784561
|
import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
map_I = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits')
hp.mollview(map_I, norm='hist', min=-0.1, max=0.1, xsize=2000)
plt.show()
map_Q = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits', field=1)
hp.mollview(map_Q, norm='hist', min=-0.01, max=0.01, xsize=2000)
plt.show()
map_U = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits', field=2)
hp.mollview(map_U, norm='hist', min=-0.01, max=0.01, xsize=2000)
plt.show()
cl_I = hp.anafast(map_I, lmax=2048)
plt.show()
cl_Q = hp.anafast(map_Q, lmax=2048)
plt.show()
cl_U = hp.anafast(map_U, lmax=2048)
plt.show()
ell = np.arange(len(cl_I))
plt.figure(figsize=(5, 5))
plt.plot(ell, ell * (ell + 1) * cl_I)
plt.xlabel('ell')
plt.ylabel('ell(ell+1)cl_I')
plt.grid()
plt.show()
plt.figure(figsize=(5, 5))
plt.plot(ell, ell * (ell + 1) * cl_Q)
plt.xlabel('ell')
plt.ylabel('ell(ell+1)cl_Q')
plt.grid()
plt.show()
plt.figure(figsize=(5, 5))
plt.plot(ell, ell * (ell + 1) * cl_U)
plt.xlabel('ell')
plt.ylabel('ell(ell+1)cl_U')
plt.grid()
plt.show()
| 1.945313
| 2
|
src/std/rfc4566.py
|
ojimary/titus
| 108
|
12784562
|
<reponame>ojimary/titus<gh_stars>100-1000
# Copyright (c) 2007, <NAME>. All rights reserved. See LICENSING for details.
# @implements RFC4566 (SDP)
import socket, time
class attrs(object):
'''A generic class that allows uniformly accessing the attribute and items,
and returns None for a missing attribute instead of throwing an exception.'''
def __init__(self, **kwargs):
for n,v in kwargs.items(): self[n] = v
# attribute access: use container if not found
def __getattr__(self, name): return self.__getitem__(name)
# container access: use key in __dict__
def __getitem__(self, name): return self.__dict__.get(name, None)
def __setitem__(self, name, value): self.__dict__[name] = value
def __contains__(self, name): return name in self.__dict__
#def __repr__(self): return repr(self.__dict__)
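# Usage sketch (added for illustration; the attribute names below are made up):
#   a = attrs(foo=1)
#   a.foo        -> 1     (attribute access)
#   a['foo']     -> 1     (item access)
#   a.missing    -> None  (no AttributeError is raised)
#   'foo' in a   -> True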
# @implements RFC4566 P3L3-P3L21
class SDP(attrs):
'''A SDP packet with dynamic properties.
The header names can be accessed as attributes or items.
Accessing an unavailable header gives None instead of exception.
'''
# header names that can appear multiple times.
_multiple = 'tramb'
def __init__(self, value=None):
if value:
self._parse(value)
# @implements RFC4566 P11L1-P12L10
class originator(attrs):
'''Represents a o= line with attributes username (str), sessionid (long),
version (long), nettype (str), addrtype (str), address (str).'''
def __init__(self, value=None):
if value:
self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = value.split(' ')
self.sessionid = int(self.sessionid)
self.version = int(self.version)
else:
hostname = socket.gethostname()
self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = \
'-', int(time.time()), int(time.time()), 'IN', 'IP4', (hostname.find('.')>0 and hostname or socket.gethostbyname(hostname))
def __repr__(self):
return ' '.join(map(lambda x: str(x), [self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address]))
# @implements RFC4566 P14L7-P16L9
class connection(attrs):
'''Represents a c= line with attributes nettype (str), addrtype (str), address (str)
and optionally ttl (int) and count (int).'''
def __init__(self, value=None, **kwargs):
if value:
self.nettype, self.addrtype, rest = value.split(' ')
rest = rest.split('/')
if len(rest) == 1: self.address = rest[0]
elif len(rest) == 2: self.address, self.ttl = rest[0], int(rest[1])
else: self.address, self.ttl, self.count = rest[0], int(rest[1]), int(rest[2])
elif 'address' in kwargs:
self.address = kwargs.get('address')
self.nettype = kwargs.get('nettype', 'IN')
self.addrtype = kwargs.get('addrtype', 'IP4')
if 'ttl' in kwargs: self.ttl = int(kwargs.get('ttl'))
if 'count' in kwargs: self.count = int(kwargs.get('count'))
def __repr__(self):
return self.nettype + ' ' + self.addrtype + ' ' + self.address + ('/' + str(self.ttl) if self.ttl else '') + ('/' + str(self.count) if self.count else '')
# @implements RFC4566 P22L17-P24L33
class media(attrs):
'''Represents a m= line and all subsequent lines until next m= or end.
It has attributes such as media (str), port (int), proto (str), fmt (list).'''
def __init__(self, value=None, **kwargs):
if value:
self.media, self.port, self.proto, rest = value.split(' ', 3)
self.port = int(self.port)
self.fmt = []
for f in rest.split(' '):
a = attrs()
try: a.pt = int(f) # if payload type is numeric
except: a.pt = f
self.fmt.append(a)
elif 'media' in kwargs:
self.media = kwargs.get('media')
self.port = int(kwargs.get('port', 0))
self.proto = kwargs.get('proto', 'RTP/AVP')
self.fmt = kwargs.get('fmt', [])
def __repr__(self):
result = self.media + ' ' + str(self.port) + ' ' + self.proto + ' ' + ' '.join(map(lambda x: str(x.pt), self.fmt))
for k in filter(lambda x: x in self, 'icbka'): # order is important
if k not in SDP._multiple: # single header
result += '\r\n' + k + '=' + str(self[k])
else:
for v in self[k]:
result += '\r\n' + k + '=' + str(v)
for f in self.fmt:
if f.name:
result += '\r\n' + 'a=rtpmap:' + str(f.pt) + ' ' + f.name + '/' + str(f.rate) + (f.params and ('/'+f.params) or '')
return result
def dup(self): # use this method instead of SDP.media(str(m)) to duplicate m. Otherwise, fmt will be incomplete
result = SDP.media(media=self.media, port=self.port, proto=self.proto, fmt=map(lambda f: attrs(pt=f.pt, name=f.name, rate=f.rate, params=f.params), self.fmt))
for k in filter(lambda x: x in self, 'icbka'):
result[k] = self[k][:] if isinstance(self[k], list) else self[k]
return result
# @implements RFC4566 P8L17-P10L5
def _parse(self, text):
g = True # whether we are in global line or per media line?
for line in text.replace('\r\n', '\n').split('\n'):
k, sep, v = line.partition('=')
if k == 'o': v = SDP.originator(v)
elif k == 'c': v = SDP.connection(v)
elif k == 'm': v = SDP.media(v)
if k == 'm': # new m= line
if not self['m']:
self['m'] = []
self['m'].append(v)
obj = self['m'][-1]
elif self['m']: # not in global
obj = self['m'][-1]
# @implements RFC4566 P25L41-P27L7
if k == 'a' and v.startswith('rtpmap:'):
pt, rest = v[7:].split(' ', 1)
name, sep, rest = rest.partition('/')
rate, sep, params = rest.partition('/')
for f in filter(lambda x: str(x.pt) == str(pt), obj.fmt):
f.name = name; f.rate = int(rate); f.params = params or None
else:
obj[k] = (k in SDP._multiple and ((k in obj) and (obj[k]+[v]) or [v])) or v
else: # global
obj = self
obj[k] = ((k in SDP._multiple) and ((k in obj) and (obj[k]+[v]) or [v])) or v
def __repr__(self):
result = ''
for k in filter(lambda x: x in self, 'vosiuepcbtam'): # order is important
if k not in SDP._multiple: # single header
result += k + '=' + str(self[k]) + '\r\n'
else:
for v in self[k]:
result += k + '=' + str(v) + '\r\n'
return result
#--------------------------- Testing --------------------------------------
# @implements RFC4566 P10L7-P10L21
def testSDP():
s = '''v=0\r
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\r
s=SDP Seminar\r
i=A Seminar on the session description protocol\r
u=http://www.example.com/seminars/sdp.pdf\r
e=<EMAIL> (<NAME>)\r
c=IN IP4 172.16.31.10/127\r
t=2873397496 2873404696\r
a=recvonly\r
m=audio 49170 RTP/AVP 0\r
m=video 51372 RTP/AVP 99\r
a=rtpmap:99 h263-1998/90000\r
'''
sdp = SDP(s)
assert str(sdp) == s
if __name__ == '__main__':
import doctest
doctest.testmod()
testSDP()
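# Added illustration (not part of the original module): minimal parsing and field access.
# The session values are made up; the function is defined but never called, so importing or
# running this module is unaffected. Call _demoSDP() manually to try it.
def _demoSDP():
    sdp = SDP('v=0\no=alice 1 1 IN IP4 127.0.0.1\ns=-\nc=IN IP4 127.0.0.1\nt=0 0\nm=audio 9 RTP/AVP 0')
    print(sdp.o.address)      # originator address: 127.0.0.1
    print(sdp['m'][0].media)  # first media stream: audio
    print(sdp['m'][0].port)   # its port: 9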
| 2.4375
| 2
|
python/visu_res.py
|
RTOS-Team-2/team2
| 1
|
12784563
|
import os
import cv2
import random
import logging
from tkinter import Tk
from car import Car, CarSpecs
from HTCSPythonUtil import config
if os.name == "nt":
# https://github.com/opencv/opencv/issues/11360
import ctypes
# Set DPI Awareness (Windows 10 and 8)
_ = ctypes.windll.shcore.SetProcessDpiAwareness(2)
# the argument is the awareness level, which can be 0, 1 or 2:
# for 1-to-1 pixel control I seem to need it to be non-zero (I'm using level 2)
logger = logging.getLogger(__name__)
tk = Tk()
window_width = tk.winfo_screenwidth()
black_region_height = 100
# image resources
WINDOW_NAME = "Highway Traffic Control System Visualization"
im_bigmap = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/map.png")
im_minimap = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/minimap.png")
red_car_straight = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car1.png")
red_car_left = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car1left.png")
red_car_right = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car1right.png")
blue_car_straight = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car2.png")
blue_car_left = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car2left.png")
blue_car_right = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/car2right.png")
truck = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/truck.png")
explosion = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/explosion.png")
title = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + "/res/title.png")
try:
_ = [im_bigmap.shape[0], im_minimap.shape[0], red_car_straight.shape[0], red_car_left.shape[0],
red_car_right.shape[0], blue_car_straight.shape[0], blue_car_left.shape[0], blue_car_right.shape[0],
truck.shape[0], explosion.shape[0], title.shape[0]]
except AttributeError:
logger.critical("Some image resources were not found.")
# to fit screen
im_minimap = cv2.resize(im_minimap, (window_width, im_minimap.shape[0]))
title = cv2.resize(title, (window_width, black_region_height))
logger.info(f"Window width will be set to {window_width} pixels.")
# measure
minimap_length_pixel = im_minimap.shape[1]
minimap_height_pixel = im_minimap.shape[0]
bigmap_length_pixel = im_bigmap.shape[1]
# fix parameters
region_width_meter_start = 200
map_height_meter = 16
map_length_meter = config["position_bound"]
center_fast_lane_mini = 32
center_slow_lane_mini = 80
center_merge_lane_mini = 130
detail_height = int(window_width * map_height_meter / region_width_meter_start)
y_stretch = detail_height / im_bigmap.shape[0]
center_fast_lane = 42.5 * y_stretch
center_slow_lane = 103.5 * y_stretch
center_merge_lane = 164 * y_stretch
car_height = int((center_slow_lane - center_fast_lane) * 0.8)
x_scale_minimap = minimap_length_pixel / map_length_meter
x_scale_bigmap = bigmap_length_pixel / map_length_meter
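# Worked example (added; the numbers are hypothetical because map_length_meter comes from config):
# with map_length_meter = 1000 and a 2000-pixel minimap, x_scale_minimap = 2 px per meter, so a
# car with distance_taken = 250 m is drawn at minimap x = 500 px.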
class CarImage(Car):
def __init__(self, car_id, specs: CarSpecs, state):
# Create Car
super().__init__(car_id, specs, state)
if specs.size > 7.5:
self.straight = truck
self.left = truck
self.right = truck
self.color = (11, 195, 255)
self.text_color = self.color
# Red or Blue
elif bool(random.getrandbits(1)):
self.straight = red_car_straight
self.left = red_car_left
self.right = red_car_right
self.color = (0, 0, 255) # BGR
self.text_color = self.color
else:
self.straight = blue_car_straight
self.left = blue_car_left
self.right = blue_car_right
self.color = (255, 0, 0) # BGR
self.text_color = (253, 177, 0) # BGR
# At least we set the height, but width will be dependent on the region's width in meter
self.straight = cv2.resize(self.straight, (self.straight.shape[0], car_height))
self.left = cv2.resize(self.left, (self.left.shape[0], car_height))
self.right = cv2.resize(self.right, (self.right.shape[0], car_height))
self.exploded = False
def __str__(self):
return super().__str__()
def __repr__(self):
return super().__repr__()
def get_point_on_minimap(self):
cy = 0
if self.lane == 0:
cy = center_merge_lane_mini
elif self.lane == 1:
cy = int((center_merge_lane_mini + center_slow_lane_mini) / 2)
elif self.lane == 2:
cy = center_slow_lane_mini
elif self.lane in [3, 4]:
cy = int((center_slow_lane_mini + center_fast_lane_mini) / 2)
elif self.lane == 5:
cy = center_fast_lane_mini
cx = int(self.distance_taken * x_scale_minimap)
return cx, cy
def is_in_region(self, region_offset, region_width):
return self.distance_taken > region_offset and \
self.distance_taken - self.specs.size < region_offset + region_width
def get_y_slice(self):
start = 0
if self.lane == 0:
start = int(center_merge_lane - self.straight.shape[0] / 2)
elif self.lane == 1:
start = int((center_merge_lane + center_slow_lane) / 2 - self.straight.shape[0] / 2)
elif self.lane == 2:
start = int(center_slow_lane - self.straight.shape[0] / 2)
elif self.lane in [3, 4]:
start = int((center_slow_lane + center_fast_lane) / 2 - self.straight.shape[0] / 2)
elif self.lane == 5:
start = int(center_fast_lane - self.straight.shape[0] / 2)
return slice(start, start + self.straight.shape[0])
def width_pixel(self, region_width_meter):
return int(self.specs.size / region_width_meter * window_width)
def get_x_slice_and_image(self, offset_region, width_region):
w_px_car = self.width_pixel(width_region)
on_vis_slice_x_end = int((self.distance_taken - offset_region) / width_region * window_width)
on_vis_slice_x_start = on_vis_slice_x_end - w_px_car
on_car_slice_x_start = 0
on_car_slice_x_end = w_px_car
if on_vis_slice_x_end > window_width:
on_car_slice_x_end -= on_vis_slice_x_end - window_width
on_vis_slice_x_end = window_width
elif on_vis_slice_x_start < 0:
on_car_slice_x_start -= on_vis_slice_x_start
on_vis_slice_x_start = 0
car_x_slice = slice(on_car_slice_x_start, on_car_slice_x_end)
return slice(on_vis_slice_x_start, on_vis_slice_x_end), self.get_image(w_px_car, car_x_slice)
def get_image(self, car_width_pixel, x_slice):
if self.distance_taken > map_length_meter - 30 or self.exploded:
im = explosion
elif self.lane in [1, 3]:
im = self.left
elif self.lane == 4:
im = self.right
else:
im = self.straight
return cv2.resize(im, (car_width_pixel, car_height))[:, x_slice, :]
| 2.546875
| 3
|
gmail/envio_email.py
|
guilalves/Automacao-Python
| 0
|
12784564
|
import emoji
from config import config_email
def enviar_email():
envio_email = emoji.emojize('E-mail enviado com sucesso! :wink:', use_aliases=True)
try:
config_email()
except Exception as e:
print(f'Erro ao chamar a funcao de disparo de emails! {e}')
else:
print(f'{envio_email}')
enviar_email()
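# Added sketch (assumption): config_email() lives in config.py, which is not shown in this file.
# A minimal standard-library implementation might look like the helper below; the subject, the
# addresses, the SMTP host and the credentials are all placeholders, not values from the project.
import smtplib
from email.message import EmailMessage

def _config_email_sketch():
    msg = EmailMessage()
    msg['Subject'] = 'Automacao-Python'       # placeholder subject
    msg['From'] = 'sender@example.com'        # placeholder sender
    msg['To'] = 'recipient@example.com'       # placeholder recipient
    msg.set_content('Hello from the automation script.')
    with smtplib.SMTP('smtp.example.com', 587) as server:  # placeholder host/port
        server.starttls()
        server.login('sender@example.com', 'app-password')  # placeholder credentials
        server.send_message(msg)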
| 2.921875
| 3
|
Labs/12_snake.py
|
bgoldstone/Computer_Science_I
| 0
|
12784565
|
<reponame>bgoldstone/Computer_Science_I
#################################
# 12_snake.py
#
# Name: <NAME>
# Date: 11/16/2001
#
# The classic arcade game "Snake"
#
#################################
import pygame
from random import randint
pygame.init()
# Constants:
WIDTH = 900
HEIGHT = 600
CENTER = (WIDTH//2, HEIGHT//2)
INIT_TICK = 5 # Clock ticks per second (5 is slow)
SEG_SIZE = 17 # Size of a (square) snake segment
SEG_MARGIN = 3 # Blank space in between segments
STEP_SIZE = SEG_SIZE + SEG_MARGIN # Spacing of segments
INIT_SNAKE_LEN = 3 # Number of segments in a baby snake
WIN_SNAKE_LEN = 25 # What does it take to win?
# Some basic color names
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
MAGENTA = (255,0,255)
CYAN = (0,255,255)
DARKCYAN = (0, 107, 98)
WHITE = (255,255,255)
MAROON = (128,0,0)
ORANGE = (255,123,0)
BROWN = (181, 140, 83)
PURPLE =(31, 0, 74)
# Background fill colors for the various screens
TITLE_BG = (110,255,100)
REPLAY_BG = (0,0,127)
GAME_BG = BLACK
END_BG = DARKCYAN
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption("<NAME>") #### Don't forget this!
#####################################################################################################
# A snake is made up of a series of Segment sprites
class Segment(pygame.sprite.Sprite):
def __init__(self, location):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((SEG_SIZE, SEG_SIZE))
self.image.fill(RED)
self.rect = self.image.get_rect()
self.rect.center = location
#####################################################################################################
# An Apple sprite is a target that the snake wants to eat
class Apple(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((SEG_SIZE, SEG_SIZE)).convert()
self.image.fill(BLACK)
halfBox = SEG_SIZE//2
self.apple = pygame.draw.circle(self.image, GREEN, (halfBox, halfBox), halfBox, 0)
self.rect = self.image.get_rect()
self.image.set_colorkey(self.image.get_at((1, 1)))
self.rect.center = (( randint(0,screen.get_width()), randint(0,screen.get_height()) ))
def reposition(self, segGroup):
self.rect.centerx = randint(3, (screen.get_width()//STEP_SIZE) - 3) * STEP_SIZE
self.rect.centery = randint(3, (screen.get_height()//STEP_SIZE) - 3) * STEP_SIZE
while( pygame.sprite.spritecollide(self, segGroup, False) ) :
self.rect.centerx = randint(3, (screen.get_width()//STEP_SIZE) - 3) * STEP_SIZE
self.rect.centery = randint(3, (screen.get_height()//STEP_SIZE) - 3) * STEP_SIZE
#####################################################################################################
# Label sprites are used for the scoreboard, the title screen, etc.
# Creating a Label sprite requires 5 parameters:
# msg - a string
# center - an (x,y) pair of the center point of the Label object
# fontFile - name of a .ttf font file in the current folder (or "None")
# textSize - height of the text, in pixels
# textColor - an (r,g,b) triple of the color of the text
class Label(pygame.sprite.Sprite):
def __init__(self, msg, center, fontFile, textSize, textColor):
pygame.sprite.Sprite.__init__(self)
self.font = pygame.font.Font(fontFile, textSize)
self.text = msg
self.center = center
self.txtColor = textColor
def update(self):
self.image = self.font.render(self.text, 1, self.txtColor)
self.rect = self.image.get_rect() # get a new rect after any text change
self.rect.center = self.center
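# Example (added for illustration), using constants defined above:
#   scoreboard = Label("Score: 0", (WIDTH//2, 50), None, 30, WHITE)
# Passing None as fontFile selects pygame's default font; the text can be changed later by
# assigning to the .text attribute, as the scoreboard in game() does.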
#####################################################################################################
# TitleScreen puts up an inital welcome screen and waits for the user to click the mouse
def titleScreen():
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill( TITLE_BG ) # Fill the background
screen.blit(background, (0,0)) # Blit background to screen
#### Fill in here to construct labels for a title and game instructions.
#### Use multiple Label sprites to do this.
#### Add your Label sprites to labelGroup.
title = Label("SNAKE GAME", (screen.get_width()//2,125),"12_fonts/DejaVuSans-Bold.ttf", 56, PURPLE)
instructions = Label("Goal: get snake to length of " + str(WIN_SNAKE_LEN) + " boxes long by eating green apples", (screen.get_width()//2,200),"12_fonts/DejaVuSans.ttf", 20, PURPLE)
instructions2 = Label("Keyboard: hit \"q\" or \"esc\" to quit", (screen.get_width()//2,225),"12_fonts/DejaVuSans.ttf", 20, PURPLE)
instructions3 = Label("Use \"up/down/left/right\" arrow keys to move", (screen.get_width()//2,250),"12_fonts/DejaVuSans.ttf", 20, PURPLE)
instructions4 = Label("If snake touches tail game over!", (screen.get_width()//2,275),"12_fonts/DejaVuSans.ttf", 20, PURPLE)
instructions5 = Label("press Left Mouse Button to Start!", (screen.get_width()//2,300),"12_fonts/DejaVuSans.ttf", 20, PURPLE)
labelGroup = pygame.sprite.Group([title,instructions,instructions2,instructions3,instructions4,instructions5])
clock = pygame.time.Clock()
keepGoing = True
while keepGoing:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT or event.type == pygame.MOUSEBUTTONDOWN:
keepGoing = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE or event.key == pygame.K_q:
keepGoing = False
labelGroup.clear(screen, background)
labelGroup.update()
labelGroup.draw(screen)
pygame.display.flip()
#####################################################################################################
# The game() function performs the actual gameplay. Returns a boolean
def game():
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill( GAME_BG )
#image = pygame.image.load("external-content.duckduckgo.com.jpg")
# Fill the background
screen.blit(background, (0,0)) # Blit background to screen
# Create sprites and sprite groups
scoreboard = Label("Snake Length = " + str(INIT_SNAKE_LEN),
(screen.get_width()//2, 50), None, 30, WHITE)
# A snake is a group of Segment sprites, evenly spaced,
# based on a grid of STEP_SIZE pixels per grid position.
# The snake's head is the sprite at position [0] in the list,
# and the tail is the last sprite in the list.
# The first segment is placed at grid position (5,5), and each
# subsequent segment is placed one grid position farther to the right.
snakeSegs = []
for i in range(INIT_SNAKE_LEN) :
seg = Segment( (STEP_SIZE*(5+i), (STEP_SIZE*5)) )
snakeSegs.insert(0, seg) # insert each new segment to the beginning of the list.
snakeGroup = pygame.sprite.Group(snakeSegs)
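# Worked example of the layout above (added): with STEP_SIZE = 20 and INIT_SNAKE_LEN = 3 the
# segments are created at pixel centers (100,100), (120,100) and (140,100); because each new
# segment is inserted at index 0, snakeSegs[0] (the head) is the rightmost one at (140,100).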
# Once the snake has been made, create an Apple sprite, and choose a random position
# that does not collide with the snake
apple = Apple()
apple.reposition(snakeGroup)
otherSprites = pygame.sprite.Group([scoreboard,apple])
# Set initial snake movement
dx = STEP_SIZE
dy = 0
clock = pygame.time.Clock()
# Initial clock speed is pretty slow, but could be increased as game progresses (higher levels?)
clockSpeed = INIT_TICK
keepGoing = True
paused = False
win = False
# The game loop:
while (keepGoing) :
clock.tick(clockSpeed) # Slow tick speed used (snake moves one segment per clock tick)
# The event loop:
for event in pygame.event.get() :
if event.type == pygame.QUIT :
keepGoing = False
elif event.type == pygame.KEYDOWN :
if event.key == pygame.K_ESCAPE or event.key == pygame.K_q :
keepGoing = False
elif event.key == pygame.K_p : # Pause
paused = not paused
# Arrow keys dictate where the next snake segment will appear on next clock tick
elif event.key == pygame.K_LEFT :
dx = -STEP_SIZE
dy = 0
elif event.key == pygame.K_RIGHT :
dx = STEP_SIZE
dy = 0
elif event.key == pygame.K_UP :
dx = 0
dy = -STEP_SIZE
elif event.key == pygame.K_DOWN :
dx = 0
dy = STEP_SIZE
if not paused :
# Make the snake "move" by adding a new first segment and deleting the last segment
head = Segment( ((snakeSegs[0].rect.centerx + dx), (snakeSegs[0].rect.centery + dy)) )
# Check to see if we have lost:
if pygame.sprite.spritecollide(head,snakeGroup, False):
keepGoing = False
else :
# It's not colliding, so insert the new head segment at the front of the snake (position [0]).
snakeSegs.insert(0, head) # snakeSegs is a Python list
snakeGroup.add(head) # snakeGroup is a Pygame group
if head.rect.centerx >= screen.get_width() or head.rect.centerx <= 0 or head.rect.centery >= screen.get_height() or head.rect.centery <= 0:
keepGoing = False
if (pygame.sprite.spritecollide(apple, snakeGroup, False)) : # Ate an apple!
apple.reposition(snakeGroup) # Move apple and let snake keep its tail
scoreboard.text = "Snake Length = " + str(len(snakeSegs)) # Snake is one seg longer
else :
tail = snakeSegs.pop() # Regular move; remove the tail segment
snakeGroup.remove(tail)
if len(snakeSegs) >= WIN_SNAKE_LEN : # Did we reach the goal?
keepGoing = False
win = True
snakeGroup.clear(screen,background)
otherSprites.clear(screen,background)
snakeGroup.update()
otherSprites.update()
snakeGroup.draw(screen)
otherSprites.draw(screen)
pygame.display.flip()
return win
#####################################################################################################
# playAgain asks the obvious question. Returns a boolean.
def playAgain(winLose):
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill( REPLAY_BG ) # Fill the background
screen.blit(background, (0,0)) # Blit background to screen
#### Add code here to construct Label sprites that:
#### Display a message about whether the player won or lost
#### Ask the player if they want to play again
#### Then add your Label sprites to labelGroup
if winLose:
won = Label("You WIN!",(screen.get_width()//2,screen.get_height()//2),"fonts/DejaVuSans-Bold.ttf",24,GREEN)
won2 = Label("Do you want to play again (y/n)",(screen.get_width()//2,screen.get_height()//2 + 50),"fonts/DejaVuSans-Bold.ttf",24,ORANGE)
label = [won,won2]
else:
lost = Label("You Lost :( you were "+ str(WIN_SNAKE_LEN-INIT_SNAKE_LEN) + " Apples away from winning!", (screen.get_width() // 2, screen.get_height() // 2),"fonts/DejaVuSans-Bold.ttf", 24, RED)
lost2 = Label("Do you want to play again (y/n)",(screen.get_width() // 2, screen.get_height() // 2 + 50),"fonts/DejaVuSans-Bold.ttf", 24, ORANGE)
label =[lost,lost2]
labelGroup = pygame.sprite.Group(label)
clock = pygame.time.Clock()
keepGoing = True
replay = False
while keepGoing:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
keepGoing = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q or event.key == pygame.K_ESCAPE:
keepGoing = False
elif event.key == pygame.K_y:
replay = True
keepGoing = False
elif event.key == pygame.K_n:
keepGoing = False
if pygame.mouse.get_pressed()[0]:
keepGoing = False
labelGroup.clear(screen, background)
labelGroup.update()
labelGroup.draw(screen)
pygame.display.flip()
return replay
#####################################################################################################
# endScreen puts up a final thankyou or credits screen for a short time, and then closes.
def endScreen():
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill( END_BG ) # Fill the background
screen.blit(background, (0,0)) # Blit background to screen
#### Add code here:
#### Construct Label sprites to display two messages and add them to the labelGroup:
#### 1: a "Good bye" or "Thanks for playing" message
#### 2: your name
#### (Use at least two label sprites for the two messages on this screen)
thanks = Label("Thanks For Playing!", (screen.get_width()//2,screen.get_height()//2), "fonts/DejaVuSans-Bold.ttf", 24, BROWN)
creator = Label("A game brought to you by <NAME>!", (screen.get_width()//2, screen.get_height()//2 + 50),"fonts/DejaVuSans-Bold.ttf", 24, BROWN)
labelGroup = pygame.sprite.Group(thanks,creator)
clock = pygame.time.Clock()
keepGoing = True
frames = 0
while keepGoing:
clock.tick(30) # Frame rate 30 frames per second.
frames = frames + 1 # Count the number of frames displayed
if frames == 60: # After 2 seconds (= 60 frames) end the message display
keepGoing = False
for event in pygame.event.get():
# Impatient people can quit earlier by clicking the mouse or pressing any key
if ( event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN ):
keepGoing = False
if event.type == pygame.QUIT:
keepGoing = False
labelGroup.clear(screen, background)
labelGroup.update()
labelGroup.draw(screen)
pygame.display.flip()
#####################################################################################################
# main coordinates everything
def main():
titleScreen()
replay = True
while(replay):
outcome = game()
replay = playAgain(outcome)
endScreen()
# Kick it off!
main()
# Clean it up
pygame.quit()
| 3.71875
| 4
|
omnirob/omnibot/nengo/omnibot_network_test.py
|
caxenie/neuromorphic-sensorimotor-adaptation
| 0
|
12784566
|
import show_omnibot
import numpy as np
import nengo
model = nengo.Network()
with model:
bot = show_omnibot.OmniBotNetwork(
show_omnibot.connection.Serial('/dev/ttyUSB2', baud=2000000),
motor=True, arm=True, retina=False, # freqs=[100, 200, 300],
wheel=True, servo=True, load=True, msg_period=0.1)
motor = nengo.Node([0, 0, 0])
arm = nengo.Node([0]*5)
nengo.Connection(motor, bot.motor)
nengo.Connection(arm, bot.arm)
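# Note (added): this script only builds the network; nothing here runs it. With the robot attached
# it could be run headlessly, e.g. (standard Nengo API, offered as a suggestion rather than the
# authors' workflow):
#   with nengo.Simulator(model) as sim:
#       sim.run(1.0)
# or opened in nengo_gui so the motor/arm input Nodes can be driven interactively with sliders.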
| 2.578125
| 3
|
src/my_project/easy_problems/from101to150/to_lower_case.py
|
ivan1016017/LeetCodeAlgorithmProblems
| 0
|
12784567
|
from typing import List
class Solution:
def toLowerCase(self, s: str) -> str:
answer: str = ""
x = ""
for letter in s:
if ord(letter) <= 90 and ord(letter) >= 65:
x = chr(ord(letter) + 32)
answer += x
else:
answer += letter
return answer
solution = Solution()
print(solution.toLowerCase(s = "Hello"))
print(solution.toLowerCase(s = "here"))
print(solution.toLowerCase(s = "LOVELY"))
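# Added note: the ord()/chr() shift of 32 works because ASCII 'A'-'Z' occupy codes 65-90 and the
# lowercase letters sit exactly 32 higher at 97-122. For ASCII input this matches str.lower():
assert Solution().toLowerCase("LOVELY") == "LOVELY".lower()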
| 3.5625
| 4
|
alipay/aop/api/response/AlipayFundTransAacollectBatchQueryResponse.py
|
snowxmas/alipay-sdk-python-all
| 213
|
12784568
|
<filename>alipay/aop/api/response/AlipayFundTransAacollectBatchQueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BatchDetailInfo import BatchDetailInfo
class AlipayFundTransAacollectBatchQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFundTransAacollectBatchQueryResponse, self).__init__()
self._batch_memo = None
self._batch_no = None
self._batch_status = None
self._biz_type = None
self._create_date = None
self._creater_user_id = None
self._detail_list = None
self._ext_param = None
self._paid_detail_list = None
self._pay_amount_single = None
self._pay_amount_total = None
self._pay_amount_total_for_receiver = None
self._real_items_total = None
self._show_items_total = None
self._success_amount_total = None
self._time_out_value = None
self._unpaid_detail_list = None
@property
def batch_memo(self):
return self._batch_memo
@batch_memo.setter
def batch_memo(self, value):
self._batch_memo = value
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def batch_status(self):
return self._batch_status
@batch_status.setter
def batch_status(self, value):
self._batch_status = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def creater_user_id(self):
return self._creater_user_id
@creater_user_id.setter
def creater_user_id(self, value):
self._creater_user_id = value
@property
def detail_list(self):
return self._detail_list
@detail_list.setter
def detail_list(self, value):
if isinstance(value, list):
self._detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._detail_list.append(i)
else:
self._detail_list.append(BatchDetailInfo.from_alipay_dict(i))
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
self._ext_param = value
@property
def paid_detail_list(self):
return self._paid_detail_list
@paid_detail_list.setter
def paid_detail_list(self, value):
if isinstance(value, list):
self._paid_detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._paid_detail_list.append(i)
else:
self._paid_detail_list.append(BatchDetailInfo.from_alipay_dict(i))
@property
def pay_amount_single(self):
return self._pay_amount_single
@pay_amount_single.setter
def pay_amount_single(self, value):
self._pay_amount_single = value
@property
def pay_amount_total(self):
return self._pay_amount_total
@pay_amount_total.setter
def pay_amount_total(self, value):
self._pay_amount_total = value
@property
def pay_amount_total_for_receiver(self):
return self._pay_amount_total_for_receiver
@pay_amount_total_for_receiver.setter
def pay_amount_total_for_receiver(self, value):
self._pay_amount_total_for_receiver = value
@property
def real_items_total(self):
return self._real_items_total
@real_items_total.setter
def real_items_total(self, value):
self._real_items_total = value
@property
def show_items_total(self):
return self._show_items_total
@show_items_total.setter
def show_items_total(self, value):
self._show_items_total = value
@property
def success_amount_total(self):
return self._success_amount_total
@success_amount_total.setter
def success_amount_total(self, value):
self._success_amount_total = value
@property
def time_out_value(self):
return self._time_out_value
@time_out_value.setter
def time_out_value(self, value):
self._time_out_value = value
@property
def unpaid_detail_list(self):
return self._unpaid_detail_list
@unpaid_detail_list.setter
def unpaid_detail_list(self, value):
if isinstance(value, list):
self._unpaid_detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._unpaid_detail_list.append(i)
else:
self._unpaid_detail_list.append(BatchDetailInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayFundTransAacollectBatchQueryResponse, self).parse_response_content(response_content)
if 'batch_memo' in response:
self.batch_memo = response['batch_memo']
if 'batch_no' in response:
self.batch_no = response['batch_no']
if 'batch_status' in response:
self.batch_status = response['batch_status']
if 'biz_type' in response:
self.biz_type = response['biz_type']
if 'create_date' in response:
self.create_date = response['create_date']
if 'creater_user_id' in response:
self.creater_user_id = response['creater_user_id']
if 'detail_list' in response:
self.detail_list = response['detail_list']
if 'ext_param' in response:
self.ext_param = response['ext_param']
if 'paid_detail_list' in response:
self.paid_detail_list = response['paid_detail_list']
if 'pay_amount_single' in response:
self.pay_amount_single = response['pay_amount_single']
if 'pay_amount_total' in response:
self.pay_amount_total = response['pay_amount_total']
if 'pay_amount_total_for_receiver' in response:
self.pay_amount_total_for_receiver = response['pay_amount_total_for_receiver']
if 'real_items_total' in response:
self.real_items_total = response['real_items_total']
if 'show_items_total' in response:
self.show_items_total = response['show_items_total']
if 'success_amount_total' in response:
self.success_amount_total = response['success_amount_total']
if 'time_out_value' in response:
self.time_out_value = response['time_out_value']
if 'unpaid_detail_list' in response:
self.unpaid_detail_list = response['unpaid_detail_list']
| 1.789063
| 2
|
pytorch/train.py
|
renyiryry/natural-gradients
| 1
|
12784569
|
<filename>pytorch/train.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import numpy as np
import input_data
from sklearn.utils import shuffle
import warnings
warnings.filterwarnings('error')
from utils import *
import argparse
import sys
import time
import copy
np.random.seed(9999)
torch.manual_seed(9999)
class Model_3(nn.Module):
def __init__(self, activation, name_dataset):
super(Model_3, self).__init__()
# self.layersizes = [784, 200, 100, 10]
self.activation = activation
# self.layersizes = [784, 400, 400, 10]
if name_dataset == 'MNIST':
self.layersizes = [784, 500, 10]
elif name_dataset == 'CIFAR':
self.layersizes = [3072, 400, 400, 10]
elif name_dataset == 'webspam':
self.layersizes = [254, 400, 400, 2]
elif name_dataset == 'MNIST-autoencoder':
# reference: https://arxiv.org/pdf/1301.3641.pdf
self.layersizes = [784, 1000, 500, 250, 30, 250, 500, 1000, 784]
else:
            print('Dataset not supported!')
sys.exit()
self.numlayers = len(self.layersizes) - 1
self.fc = list(range(self.numlayers))
for l in range(self.numlayers):
self.fc[l] = nn.Linear(self.layersizes[l], self.layersizes[l+1], bias=False)
self.fc = tuple(self.fc)
self.W = list(range(self.numlayers))
for l in range(self.numlayers):
self.W[l] = self.fc[l].weight
# self.W[1] = self.fc[1].weight
# self.W[2] = self.fc[2].weight
self.W = tuple(self.W)
def forward(self, x):
# a = (self.numlayers - 1) * [0]
# h = (self.numlayers - 1) * [0]
# for l in range(self.numlayers - 1):
# if l == 0:
# a[l] = self.fc[l](x)
# else:
# a[l] = self.fc[l](h[l-1])
# h[l] = F.relu(a[l])
# a = []
# h = []
# for l in range(self.numlayers - 1):
# if l == 0:
# a.append(self.fc[l](x))
# else:
# a.append(self.fc[l](h[l-1]))
# h.append(F.relu(a[l]))
a = list(range(self.numlayers - 1))
h = list(range(self.numlayers - 1))
for l in range(self.numlayers - 1):
if l == 0:
a[l] = self.fc[l](x)
else:
a[l] = self.fc[l](h[l-1])
if self.activation == 'relu':
h[l] = F.relu(a[l])
elif self.activation == 'sigmoid':
h[l] = torch.sigmoid(a[l])
z = self.fc[-1](h[-1])
# loss = F.cross_entropy(z, t, reduction = 'none')
# weighted_loss = torch.dot(loss, v)
z.retain_grad()
# for c in cache:
# c.retain_grad()
for c in a:
c.retain_grad()
for c in h:
c.retain_grad()
h = [x] + h
a = a + [z]
return z, a, h
params = {}
parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', type=str)
parser.add_argument('--matrix_name', type=str)
parser.add_argument('--momentum_gradient', type=int)
parser.add_argument('--max_epoch', type=float)
parser.add_argument('--record_epoch', type=float)
parser.add_argument('--N1', type=int)
parser.add_argument('--N2', type=int)
parser.add_argument('--alpha', type=float)
parser.add_argument('--lambda_', type=float)
parser.add_argument('--tau', type=float)
parser.add_argument('--inverse_update_freq', type=int)
parser.add_argument('--inverse_update_freq_D_t', type=int)
parser.add_argument('--rho_kfac', type=float)
parser.add_argument('--activation', type=str)
parser.add_argument('--dataset', type=str)
args = parser.parse_args()
# print args.accumulate(args.algorithm)
algorithm = args.algorithm
matrix_name = args.matrix_name
params['algorithm'] = algorithm
max_epoch = args.max_epoch
record_epoch = args.record_epoch
inverse_update_freq = args.inverse_update_freq
inverse_update_freq_D_t = args.inverse_update_freq_D_t
rho_kfac = args.rho_kfac
activation = args.activation
name_dataset = args.dataset
if_momentum_gradient = args.momentum_gradient
if if_momentum_gradient != 0 and if_momentum_gradient != 1:
print('if_momentum_gradient')
print(if_momentum_gradient)
print('Error!')
sys.exit()
if_momentum_gradient = bool(if_momentum_gradient)
dataset = input_data.read_data_sets(name_dataset, one_hot=False)
X_test = dataset.test.images
t_test = dataset.test.labels
X_train = dataset.train.images
t_train = dataset.train.labels
X_train, t_train = torch.from_numpy(X_train), torch.from_numpy(t_train).long()
print('X_train.shape')
print(X_train.shape)
print('t_train.shape')
print(t_train.shape)
print('np.min(t_test)')
print(np.min(t_test))
print('np.max(t_test)')
print(np.max(t_test))
# Model
model = Model_3(activation, name_dataset)
print('Model created.')
# print('model.W[1] when initialize: ', model.W[1])
params['layersizes'] = model.layersizes
if algorithm == 'kfac' or algorithm == 'SMW-Fisher' or algorithm == 'SMW-Fisher-momentum' or algorithm == 'SMW-GN'\
or algorithm == 'Fisher-block' or algorithm == 'SMW-Fisher-D_t-momentum' or algorithm == 'SGD' \
or algorithm == 'SMW-Fisher-momentum-D_t-momentum':
init_lambda_ = args.lambda_
params['lambda_'] = init_lambda_
params['tau'] = args.tau
boost = 1.01
drop = 1 / 1.01
params['boost'] = boost
params['drop'] = drop
# elif algorithm == 'SGD':
# init_lambda_ = args.lambda_
# params['lambda_'] = init_lambda_
else:
print('Error: algorithm not defined.')
sys.exit()
# N1 = 128 # mini-batch size (for gradient)
N1 = args.N1
N2 = args.N2
if N2 > N1:
print('Error! 1432')
sys.exit()
alpha = args.alpha
# eps = 1e-2
params['N1'] = N1
params['N2'] = N2
params['inverse_update_freq'] = inverse_update_freq
params['inverse_update_freq_D_t'] = inverse_update_freq_D_t
params['rho_kfac'] = rho_kfac
params['alpha'] = alpha
params['numlayers'] = model.numlayers
data_ = {}
data_['model'] = model
if params['algorithm'] == 'kfac' or algorithm == 'Fisher-block':
A = [] # KFAC A
G = [] # KFAC G
for Wi in model.W:
A.append(torch.zeros(Wi.size(1)))
G.append(torch.zeros(Wi.size(0)))
# print('A[0].size(): ', A[0].size())
A_inv, G_inv = 3*[0], 3*[0]
data_['A'] = A
data_['G'] = G
data_['A_inv'] = A_inv
data_['G_inv'] = G_inv
elif params['algorithm'] == 'SMW-Fisher-momentum':
a_grad_momentum = []
h_momentum = []
layersizes = model.layersizes
for l in range(model.numlayers):
a_grad_momentum.append(torch.zeros(N2, layersizes[l+1]))
h_momentum.append(torch.zeros(N2, layersizes[l]))
data_['a_grad_momentum'] = a_grad_momentum
data_['h_momentum'] = h_momentum
D_t_inv = np.zeros((N2, N2))
data_['D_t_inv'] = D_t_inv
elif params['algorithm'] == 'SMW-Fisher-D_t-momentum':
data_['J_J_transpose'] = np.float32(np.zeros((N2, N2)))
elif params['algorithm'] == 'SMW-Fisher-momentum-D_t-momentum':
a_grad_momentum = []
h_momentum = []
layersizes = model.layersizes
for l in range(model.numlayers):
a_grad_momentum.append(torch.zeros(N2, layersizes[l+1]))
h_momentum.append(torch.zeros(N2, layersizes[l]))
data_['a_grad_momentum'] = a_grad_momentum
data_['h_momentum'] = h_momentum
data_['J_J_transpose'] = np.float32(np.zeros((N2, N2)))
elif algorithm == 'SMW-Fisher' or algorithm == 'SGD' or algorithm == 'SMW-GN':
    pass
else:
print('Error: algorithm not defined.')
sys.exit()
if if_momentum_gradient:
# print('model.W[1].size')
# print(model.W[1].size())
# print(params['layersizes'])
data_['model_grad'] = get_zero(params)
# Visualization stuffs
len_record = int(max_epoch / record_epoch)
losses = np.zeros(len_record + 1)
acces = np.zeros(len_record + 1)
times = np.zeros(len_record + 1)
epochs = np.zeros(len_record + 1)
lambdas = np.zeros(len_record + 1)
acces[0] = get_acc(model, X_test, t_test)
losses[0] = get_loss(model, X_train, t_train)
times[0] = 0
epochs[0] = 0
lambdas[0] = params['lambda_']
# times[0] = 0
iter_per_epoch = int(len(t_train) / N1)
iter_per_record = int(len(t_train) * record_epoch / N1)
# Training
print('Begin training...')
epoch = -1
for i in range(int(max_epoch * iter_per_epoch)):
if i % iter_per_record == 0:
start_time = time.time()
epoch += 1
# get minibatch
X_mb, t_mb = dataset.train.next_batch(N1)
X_mb, t_mb = torch.from_numpy(X_mb), torch.from_numpy(t_mb).long()
# print('t_mb.size()')
# print(t_mb.size())
# Forward
z, a, h = model.forward(X_mb)
# print('z.size()')
# print(z.size())
loss = F.cross_entropy(z, t_mb, reduction = 'mean')
# print('loss')
# print(loss)
# print('torch.sum(a[-1], dim=0).size():', torch.sum(a[-1], dim=0).size())
# print('torch.sum(a[-1], dim=0):', torch.sum(a[-1], dim=0))
# print('torch.sum(a[-1], dim=1).size():', torch.sum(a[-1], dim=1).size())
# print('torch.sum(a[-1], dim=1):', torch.sum(a[-1], dim=1))
# print('loss: ', loss)
# backward and gradient
model = get_model_grad_zerod(model)
# test_start_time = time.time()
loss.backward()
# print('time of loss:', time.time() - test_start_time)
# print('a[0].grad')
# print(a[0].grad)
model_grad = []
for l in range(model.numlayers):
model_grad.append(copy.deepcopy(model.W[l].grad))
if if_momentum_gradient:
rho = min(1-1/(i+1), 0.9)
data_['model_grad'] = get_plus(\
get_multiply(rho, data_['model_grad'], params),
get_multiply(1 - rho, model_grad, params),
params)
else:
data_['model_grad'] = model_grad
# print('test')
# data_['model_grad'] = get_minus(data_['model_grad'], params)
# print('X_mb.grad: ', X_mb.grad)
# print('X_mb: ', X_mb)
# print('a2.grad.size: ', cache[2].grad.size())
# print('h1.size: ',cache[1].size())
# print('1/m * cache[1].t() @ cache[2].grad: ', 1/m * cache[1].t() @ cache[2].grad)
# print('cache[2].grad.t() @ cache[1] in train: ', cache[2].grad.t() @ cache[1])
# print('model.W[1].grad.data: ', model.W[1].grad.data)
# print('model.W[1].grad in train: ', model.W[1].grad)
# print('model.fc[1].weight.grad: ', model.fc[1].weight.grad)
# get second order caches
data_['X_mb'] = X_mb
data_['loss'] = loss
data_['t_mb'] = t_mb
if matrix_name == 'EF':
data_['t_mb_pred'] = t_mb
data_['a'] = a
data_['h'] = h
elif matrix_name == 'Fisher':
from torch.utils.data import WeightedRandomSampler
# print('F.softmax(z, dim=0)')
# print(F.softmax(z, dim=0))
# print('torch.sum(F.softmax(z, dim=0), dim=0)')
# print(torch.sum(F.softmax(z, dim=0), dim=0))
# print('torch.sum(F.softmax(z, dim=0), dim=1)')
# print(torch.sum(F.softmax(z, dim=0), dim=1))
pred_dist = F.softmax(z, dim=1)
# print('pred_dist')
# print(pred_dist)
# print('WeightedRandomSampler(pred_dist, 1)')
# print(len(list(WeightedRandomSampler(pred_dist, 1))))
t_mb_pred = list(WeightedRandomSampler(pred_dist, 1))
t_mb_pred = np.asarray(t_mb_pred)
t_mb_pred = np.squeeze(t_mb_pred)
# print(t_mb_pred)
t_mb_pred = torch.from_numpy(t_mb_pred).long()
# print('np.sum(t_mb_pred == t_mb) / len(t_mb)')
# print(np.sum(t_mb_pred.data.numpy() == t_mb.data.numpy()) / len(t_mb))
data_['t_mb_pred'] = t_mb_pred
# print('t_mb_pred.size()')
# print(t_mb_pred.size())
# print('t_mb.size()')
# print(t_mb.size())
# print('t_mb_pred')
# print(t_mb_pred)
z, a, h = model.forward(X_mb)
loss = F.cross_entropy(z, t_mb_pred, reduction = 'mean')
model = get_model_grad_zerod(model)
loss.backward()
# print('a[0].grad')
# print(a[0].grad)
data_['a'] = a
data_['h'] = h
else:
print('Error.')
sys.exit()
# i = epoch * iter_per_epoch + iter_
params['i'] = i
model = data_['model']
# p = []
# for l in range(model.numlayers):
# p.append(copy.deepcopy(model.W[l].grad))
# lambda_ = update_lambda(p, data_, params)
# print('test')
# test_start_time = time.time()
# print('time first half: ', time.time() - start_time)
lambda_minus_tau = params['lambda_']
params['lambda_'] = params['lambda_'] + params['tau']
if algorithm == 'kfac' or algorithm == 'Fisher-block':
data_, params = kfac_update(data_, params)
elif params['algorithm'] == 'SMW-Fisher' or params['algorithm'] == 'SMW-Fisher-momentum'\
or params['algorithm'] == 'SMW-Fisher-D_t-momentum'\
or params['algorithm'] == 'SMW-Fisher-momentum-D_t-momentum':
data_, params = SMW_Fisher_update(data_, params)
elif algorithm == 'SGD':
data_ = SGD_update(data_, params)
elif algorithm == 'SMW-GN':
data_ = SMW_GN_update(data_, params)
else:
print('Error: algorithm not defined.')
sys.exit()
params['lambda_'] = lambda_minus_tau
# params['lambda_'] = params['lambda_'] + params['tau']
# print('time of second order:', time.time() - test_start_time)
# print('time 3/4: ', time.time() - start_time)
p = data_['p']
# print('get_dot_product(p, model_grad, params)')
# print(get_dot_product(p, model_grad, params))
# print('-get_dot_product(p, model_grad, params) / get_dot_product(p, p, params)')
# print(-get_dot_product(p, model_grad, params) / get_dot_product(p, p, params))
# print('params[lambda_]')
# print(params['lambda_'])
# print('p[0]: ', p[0])
# print('p[1]: ', p[1])
# print('p[2]: ', p[2])
if algorithm == 'kfac' or algorithm == 'SMW-Fisher' or algorithm == 'SMW-Fisher-momentum' or algorithm == 'SMW-GN'\
or algorithm == 'Fisher-block' or algorithm == 'SMW-Fisher-D_t-momentum'\
or algorithm == 'SMW-Fisher-momentum-D_t-momentum':
lambda_ = update_lambda(p, data_, params)
# lambda_ = init_lambda_
# print('test')
params['lambda_'] = lambda_
elif algorithm == 'SGD':
        pass
else:
print('Error! 1435')
sys.exit()
# print('no update lambda')
model = update_parameter(p, model, params)
# print('time 7/8: ', time.time() - start_time)
if (i+1) % iter_per_record == 0:
times[epoch+1] = time.time() - start_time
# print('time this iter: ', times[i-1])
if epoch > 0:
times[epoch+1] = times[epoch+1] + times[epoch]
losses[epoch+1] = get_loss(model, X_train, t_train)
acces[epoch+1] = get_acc(model, X_test, t_test)
epochs[epoch+1] = (epoch + 1) * record_epoch
lambdas[epoch+1] = params['lambda_']
# print(z)
# print(t_mb)
# loss = F.cross_entropy(z, t_train)
print(f'Iter-{(epoch+1) * record_epoch:.3f}; Loss: {losses[epoch+1]:.3f}')
print(f'Accuracy: {acces[epoch+1]:.3f}')
if epoch > 0:
print('elapsed time: ', times[epoch+1] - times[epoch])
else:
print('elapsed time: ', times[epoch+1])
if algorithm == 'SMW-Fisher' or algorithm == 'SMW-Fisher-momentum' or algorithm == 'kfac'\
or algorithm == 'SMW-GN'\
or algorithm == 'SMW-Fisher-momentum-D_t-momentum':
# lambda_ = params['lambda_']
print('lambda = ', params['lambda_'])
elif algorithm == 'SGD':
            pass
else:
print('Error: algorithm not defined.')
            sys.exit()
print('\n')
model = get_model_grad_zerod(model)
# times = np.asarray([0] + [times])
# times = np.insert(times, 0, 0)
# losses = np.insert(losses, init_loss.data, 0)
# np.save('temp/kfac_losses.npy', losses)
# np.save('/content/logs/temp/kfac_losses.npy', losses)
print('Begin saving results...')
name_result = name_dataset + '_' + algorithm +\
'_matrix_name_' + matrix_name +\
'_momentum_gradient_' + str(int(if_momentum_gradient)) +\
'_alpha_' + str(alpha)
np.save('/content/logs/temp/' + name_result + '_losses.npy', losses)
np.save('/content/logs/temp/' + name_result + '_acces.npy', acces)
np.save('/content/logs/temp/' + name_result + '_lambdas.npy', lambdas)
np.save('/content/logs/temp/' + name_result + '_times.npy', times)
np.save('/content/logs/temp/' + name_result + '_epochs.npy', epochs)
np.save('/content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '_losses.npy', losses)
np.save('/content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '_acces.npy', acces)
np.save('/content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '_lambdas.npy', lambdas)
np.save('/content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '_times.npy', times)
np.save('/content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '_epochs.npy', epochs)
print('Saved at /content/gdrive/My Drive/Gauss_Newton/result/' + name_result + '.')
"""
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layersizes = [784, 200, 100, 10]
self.numlayers = len(self.layersizes) - 1
# self.fc = self.numlayers * [0]
self.fc = list(range(self.numlayers))
for l in range(self.numlayers):
self.fc[l] = nn.Linear(self.layersizes[l], self.layersizes[l+1], bias=False)
# self.fc[0] = nn.Linear(self.layersizes[0], self.layersizes[0+1], bias=False)
# self.fc[1] = nn.Linear(self.layersizes[1], self.layersizes[1+1], bias=False)
# self.fc[2] = nn.Linear(self.layersizes[2], self.layersizes[2+1], bias=False)
self.fc = tuple(self.fc)
# self.fc1 = nn.Linear(784, 200, bias=False)
# self.fc2 = nn.Linear(200, 100, bias=False)
# self.fc3 = nn.Linear(100, 10, bias=False)
# self.W = [self.fc1.weight, self.fc2.weight, self.fc3.weight]
# self.W = self.numlayers * [0]
# for l in range(self.numlayers):
# self.W[l] = self.fc[l].weight
# self.W = [fci.weight for fci in self.fc]
# self.W = []
# for l in range(self.numlayers):
# self.W.append(self.fc[l].weight)
self.W = list(range(3))
for l in range(self.numlayers):
self.W[l] = self.fc[l].weight
# self.W[1] = self.fc[1].weight
# self.W[2] = self.fc[2].weight
self.W = tuple(self.W)
# print('size(self.W[0]): ', self.W[0].numel())
# print('size(self.W[1]): ', self.W[1].numel())
# print('size(self.W[2]): ', self.W[2].numel())
def forward(self, x):
# a = (self.numlayers - 1) * [0]
# h = (self.numlayers - 1) * [0]
# for l in range(self.numlayers - 1):
# if l == 0:
# a[l] = self.fc[l](x)
# else:
# a[l] = self.fc[l](h[l-1])
# h[l] = F.relu(a[l])
a = list(range(self.numlayers - 1))
h = list(range(self.numlayers - 1))
for l in range(self.numlayers - 1):
if l == 0:
a[l] = self.fc[l](x)
else:
a[l] = self.fc[l](h[l-1])
h[l] = F.relu(a[l])
# a[0] = self.fc[0](x)
# h[0] = F.relu(a[0])
# a[1] = self.fc[1](h[0])
# h[1] = F.relu(a[1])
z = self.fc[-1](h[-1])
# cache = (a1, h1, a2, h2)
# cache = ((self.numlayers - 1)) * 2 * [0]
# for l in range(0, self.numlayers - 1):
# cache[2*l] = a[l]
# cache[2*l+1] = h[l]
# cache = tuple(cache)
# print('len(cache): ', len(cache))
z.retain_grad()
# for c in cache:
# c.retain_grad()
for c in a:
c.retain_grad()
for c in h:
c.retain_grad()
h = [x] + h
a = a + [z]
return z, a, h
"""
"""
class Model_2(nn.Module):
def __init__(self):
super(Model_2, self).__init__()
self.layersizes = [784, 200, 100, 10]
self.numlayers = len(self.layersizes) - 1
self.fc = list(range(self.numlayers))
for l in range(self.numlayers):
self.fc[l] = nn.Linear(self.layersizes[l], self.layersizes[l+1], bias=False)
self.fc = tuple(self.fc)
self.W = list(range(3))
for l in range(self.numlayers):
self.W[l] = self.fc[l].weight
# self.W[1] = self.fc[1].weight
# self.W[2] = self.fc[2].weight
self.W = tuple(self.W)
def forward(self, x, t, v):
# a1 = self.fc1(x)
# h1 = F.relu(a1)
# a2 = self.fc2(h1)
# h2 = F.relu(a2)
# z = self.fc3(h2)
# a = (self.numlayers - 1) * [0]
# h = (self.numlayers - 1) * [0]
# for l in range(self.numlayers - 1):
# if l == 0:
# a[l] = self.fc[l](x)
# else:
# a[l] = self.fc[l](h[l-1])
# h[l] = F.relu(a[l])
# a = []
# h = []
# for l in range(self.numlayers - 1):
# if l == 0:
# a.append(self.fc[l](x))
# else:
# a.append(self.fc[l](h[l-1]))
# h.append(F.relu(a[l]))
a = list(range(self.numlayers - 1))
h = list(range(self.numlayers - 1))
for l in range(self.numlayers - 1):
if l == 0:
a[l] = self.fc[l](x)
else:
a[l] = self.fc[l](h[l-1])
h[l] = F.relu(a[l])
# a = tuple(a)
# h = tuple(h)
z = self.fc[-1](h[-1])
loss = F.cross_entropy(z, t, reduction = 'none')
weighted_loss = torch.dot(loss, v)
# cache = ((self.numlayers - 1)) * 2 * [0]
# for l in range(0, self.numlayers - 1):
# cache[2*l] = a[l]
# cache[2*l+1] = h[l]
# cache = tuple(cache)
# print('len(cache): ', len(cache))
z.retain_grad()
# for c in cache:
# c.retain_grad()
for c in a:
c.retain_grad()
for c in h:
c.retain_grad()
h = [x] + h
a = a + [z]
return weighted_loss, a, h
"""
| 2.671875
| 3
|
jaad/renderers.py
|
AmadeusITGroup/jaad
| 0
|
12784570
|
from rest_framework.renderers import JSONRenderer, BaseRenderer
from rest_framework_csv.renderers import CSVRenderer as BaseCSVRenderer
class TextRenderer(BaseRenderer):
media_type = "text/plain"
format = "text"
format_description = "text"
def render(self, data, *args, **kwargs):
        if isinstance(data, dict):  # used, for instance, if the API returns an exception
return "\n".join(f"{k}: {v}" for k, v in data.items())
return str(data)
class PrettyJsonRenderer(JSONRenderer):
format = "json"
format_description = "JSON"
def render(self, data, *args, **kwargs):
if str(data.__class__) == "<class 'pandas.core.frame.DataFrame'>":
data = {
"data": [
{k: v for k, v in zip(data.columns, row)} for row in data.values
]
}
return super().render(data, *args, **kwargs)
def get_indent(self, accepted_media_type, renderer_context):
return 4
class JsonStatRenderer(PrettyJsonRenderer):
format = "json-stat"
format_description = "JSON-stat"
def render(self, data, *args, **kwargs):
        # We handle pandas DataFrames but do not want to add a hard dependency on them,
        # hence it's up to the user to provide access to pyjstat
if str(data.__class__) == "<class 'pandas.core.frame.DataFrame'>":
from pyjstat import pyjstat
import pandas
def flatten_metrics_data_frame(data):
json_stat_data = []
# noqa: B301
for _index, row in data.iterrows():
# noinspection PyCompatibility
# IDEs detect row.iteritems as a call
# to dict.iteritems which is not supported in py3,
# whereas it is pandas.Series.iteritems()
group_data = {
key: value
for key, value in row.iteritems() # noqa: B301
if key != "metrics"
}
for metric, metric_value in row.metrics.items():
metric_data = {"metric": metric, "value": metric_value}
metric_data.update(group_data)
json_stat_data.append(metric_data)
return pandas.DataFrame(json_stat_data)
flatten_data_frame = flatten_metrics_data_frame(data)
if len(flatten_data_frame.index) > 0:
data = {"data": pyjstat.Dataset.read(flatten_data_frame)}
else:
data = {"data": []}
return super().render(data, *args, **kwargs)
class CSVRenderer(BaseCSVRenderer):
def __init__(self):
pass
def render(self, data, *args, **kwargs):
if str(data.__class__) == "<class 'pandas.core.frame.DataFrame'>":
data = [{k: v for k, v in zip(data.columns, row)} for row in data.values]
return super().render(data, *args, **kwargs)
media_type = "text"
format = "csv"
format_description = "CSV"
def renderer(*renderers):
def add_renderers(view):
view.renderer_classes = renderers
return view
return add_renderers
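# A minimal usage sketch of the renderer() decorator, assuming a DRF-style view;
# MyView and its payload are hypothetical and only illustrate how the decorator
# attaches renderer classes to a view.
#
# from rest_framework.views import APIView
# from rest_framework.response import Response
#
# @renderer(PrettyJsonRenderer, CSVRenderer)
# class MyView(APIView):
#     def get(self, request, format=None):
#         return Response({"greeting": "hello"})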
| 2.59375
| 3
|
graph_tools/data_gen.py
|
manon643/causal_lasso
| 3
|
12784571
|
import networkx as nx
import numpy as np
def gen_graph(graph_type, n, mean_deg):
"""Generates and returns a nx.Digraph and its adjacency matrix. Nodes are randomly permutated.
Arguments:
graph_type (string): type of graph Erdos-Renyi, scale-free, sachs or any graph in BNRepo
n (int): number of nodes
mean_deg (float): average degree of nodes
"""
# beta is the unpermutated adjacency matrix
if graph_type == "erdos-renyi":
beta = gen_random_graph(n, mean_deg)
elif graph_type == "scale-free":
# select
import igraph as ig
G_ig = ig.Graph.Barabasi(n=n, m=int(round(mean_deg / 2)), directed=True)
beta = np.array(G_ig.get_adjacency().data)
else:
raise NotImplementedError
# Randomly permute nodes
perm_mat = np.random.permutation(np.eye(n))
adj_matrix = perm_mat.T @ beta @ perm_mat
# Sanity check, is the graph acyclic?
assert np.trace(np.linalg.matrix_power(np.eye(n) + adj_matrix, n)) == n
# Create and return directed graph
graph = nx.from_numpy_array(adj_matrix, create_using=nx.DiGraph)
return graph, adj_matrix
def gen_random_graph(n, mean_deg):
"""Returns the adjacency matrix of an Erdos Renyi DAG
Args:
n (int): number of nodes
mean_deg (float): average degree of a node
"""
assert mean_deg <= n - 1
prob_one_edge = mean_deg / (n - 1)
beta = np.triu(np.random.random((n, n)) < prob_one_edge, k=1)
return np.float32(beta)
def simulate_parameter(adj_matrix, w_ranges):
"""Simulate SEM parameters for a DAG.
Args:
adj_matrix (np.array): [n, n] binary adj matrix of DAG
w_ranges (tuple): disjoint weight ranges
Returns:
weighted_adj_matrix (np.array): [n, n] weighted adj matrix of DAG
"""
weighted_adj_matrix = np.zeros(adj_matrix.shape)
range_choice = np.random.randint(len(w_ranges), size=adj_matrix.shape) # which range
for i, (low, high) in enumerate(w_ranges):
weights = np.random.uniform(low=low, high=high, size=adj_matrix.shape)
weighted_adj_matrix += adj_matrix * (range_choice == i) * weights
return weighted_adj_matrix
def sample_lin_scms(graph_type, noise_type, adj_matrix, nb_samples=1000,
weighted=False,
w_ranges=((-2.0, -.5), (.5, 2.0))):
""" Given a directed graph and a particular noise type, generates edge weights and samples
Args:
graph_type (string): type of graph
        noise_type (string): type of random noise, one of "gaussian", "exp" or "gumbel"
adj_matrix (np.array): [n, n] binary adjacency matrix
nb_samples (int): number of samples to generate
        weighted (bool): whether to sample edge weights uniformly from w_ranges (True) or keep all edge weights equal to 1 (False)
w_ranges (tuple): negative and positive ranges to sample edge weights (if weighted)
Returns:
X (np.array): [nb_samples, n] sample matrix
beta (np.array): [n, n] weighted adjacency matrix
sigma_n (np.array): [n, n] sample covariance matrix
"""
n = adj_matrix.shape[0]
# Sample edge weights
if weighted:
beta = simulate_parameter(adj_matrix, w_ranges)
else:
beta = adj_matrix
aux_inv = np.linalg.inv(np.eye(n) - beta)
# Sample noise
if noise_type == "gaussian":
epsilon = np.random.normal(size=(nb_samples, n))
elif noise_type == "exp":
epsilon = np.random.exponential(size=(nb_samples, n))
elif noise_type == "gumbel":
epsilon = np.random.gumbel(size=(nb_samples, n))
else:
raise NotImplementedError
X = epsilon @ aux_inv
sigma_n = np.cov(X.T, bias=True)
return X, beta, sigma_n
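# A minimal end-to-end sketch; the graph size, mean degree and sample count below
# are illustrative choices only.
if __name__ == "__main__":
    graph, adj = gen_graph("erdos-renyi", n=10, mean_deg=2)
    X, beta, sigma_n = sample_lin_scms("erdos-renyi", "gaussian", adj,
                                       nb_samples=500, weighted=True)
    print(X.shape, beta.shape, sigma_n.shape)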
| 3.25
| 3
|
asymmetric_cryptography/asymmetric.py
|
elishahyousaf/Awesome-Python-Scripts
| 1,026
|
12784572
|
from Crypto import Random
from Crypto.PublicKey import RSA
import base64
def generate_keys(modulus_length=256*4):
privatekey = RSA.generate(modulus_length, Random.new().read)
publickey = privatekey.publickey()
return privatekey, publickey
def encryptit(message , publickey):
encrypted_msg = publickey.encrypt(message, 32)[0]
encoded_encrypted_msg = base64.b64encode(encrypted_msg)
return encoded_encrypted_msg
def decryptit(message, privatekey):
decoded_encrypted_msg = base64.b64decode(message)
decoded_decrypted_msg = privatekey.decrypt(decoded_encrypted_msg)
return decoded_decrypted_msg
if __name__ == '__main__':
    message = "This is an awesome message!"
privatekey , publickey = generate_keys()
encrypted_msg = encryptit(message.encode("utf-8"), publickey)
decrypted_msg = decryptit(encrypted_msg, privatekey)
print(f'{privatekey.exportKey()} - ({len(privatekey.exportKey())})')
print(f'{publickey.exportKey()} - ({len(publickey.exportKey())})')
print(f'Original: {message} - ({len(message)})')
print(f'Encrypted: {encrypted_msg} - ({len(encrypted_msg)})')
print(f'Decrypted: {decrypted_msg} - ({len(decrypted_msg)})')
| 3.09375
| 3
|
pyof/v0x04/controller2switch/set_async.py
|
smythtech/python-openflow-legacy
| 0
|
12784573
|
<filename>pyof/v0x04/controller2switch/set_async.py<gh_stars>0
"""Define SetAsync message.
Sets whether a controller should receive a given asynchronous message that is
generated by the switch.
"""
# System imports
# Third-party imports
# Local imports
from pyof.v0x04.common.header import Type
from pyof.v0x04.controller2switch.common import AsyncConfig
__all__ = ('SetAsync',)
class SetAsync(AsyncConfig):
"""SetAsync message.
Sets whether a controller should receive a given asynchronous message that
is generated by the switch.
"""
def __init__(self, xid=None, packet_in_mask1=None, packet_in_mask2=None,
port_status_mask1=None, port_status_mask2=None,
flow_removed_mask1=None, flow_removed_mask2=None):
"""SetAsync message.
Args:
xid (int): xid to be used on the message header.
            packet_in_mask1 (int): bitmap of packet-in reasons enabled for the master/equal role.
            packet_in_mask2 (int): bitmap of packet-in reasons enabled for the slave role.
            port_status_mask1 (int): bitmap of port-status reasons enabled for the master/equal role.
            port_status_mask2 (int): bitmap of port-status reasons enabled for the slave role.
            flow_removed_mask1 (int): bitmap of flow-removed reasons enabled for the master/equal role.
            flow_removed_mask2 (int): bitmap of flow-removed reasons enabled for the slave role.
"""
self.__ordered__ = super().__ordered__ # pylint: disable=no-member
super().__init__(xid, packet_in_mask1, packet_in_mask2,
port_status_mask1, port_status_mask2,
flow_removed_mask1, flow_removed_mask2)
self.header.message_type = Type.OFPT_SET_ASYNC
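# A minimal construction sketch, assuming plain integers are acceptable mask values;
# the xid and the all-zero masks below are illustrative only.
#
# msg = SetAsync(xid=1,
#                packet_in_mask1=0, packet_in_mask2=0,
#                port_status_mask1=0, port_status_mask2=0,
#                flow_removed_mask1=0, flow_removed_mask2=0)
# assert msg.header.message_type == Type.OFPT_SET_ASYNC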
| 2.5
| 2
|
14 Sound generation with VAE/code/train.py
|
aishifugi/generating-sound-with-neural-networks
| 81
|
12784574
|
<reponame>aishifugi/generating-sound-with-neural-networks
import os
import numpy as np
from autoencoder import VAE
LEARNING_RATE = 0.0005
BATCH_SIZE = 64
EPOCHS = 150
SPECTROGRAMS_PATH = "/home/valerio/datasets/fsdd/spectrograms/"
def load_fsdd(spectrograms_path):
x_train = []
for root, _, file_names in os.walk(spectrograms_path):
for file_name in file_names:
file_path = os.path.join(root, file_name)
spectrogram = np.load(file_path) # (n_bins, n_frames, 1)
x_train.append(spectrogram)
x_train = np.array(x_train)
x_train = x_train[..., np.newaxis] # -> (3000, 256, 64, 1)
return x_train
def train(x_train, learning_rate, batch_size, epochs):
autoencoder = VAE(
input_shape=(256, 64, 1),
conv_filters=(512, 256, 128, 64, 32),
conv_kernels=(3, 3, 3, 3, 3),
conv_strides=(2, 2, 2, 2, (2, 1)),
latent_space_dim=128
)
autoencoder.summary()
autoencoder.compile(learning_rate)
autoencoder.train(x_train, batch_size, epochs)
return autoencoder
if __name__ == "__main__":
x_train = load_fsdd(SPECTROGRAMS_PATH)
autoencoder = train(x_train, LEARNING_RATE, BATCH_SIZE, EPOCHS)
autoencoder.save("model")
| 2.734375
| 3
|
py/strato/racktest/infra/handlekill.py
|
eyal-stratoscale/pyracktest
| 0
|
12784575
|
<filename>py/strato/racktest/infra/handlekill.py<gh_stars>0
import signal
import sys
import logging
def _informIntSignalCaughtAndExit(* args):
logging.info("Caught Ctrl-C, exiting from process")
sys.exit()
def _informTermSignalCaughtAndExit(* args):
logging.info("SIGTERM received, exiting from process")
sys.exit(143)
def _register():
signal.signal(signal.SIGTERM, _informTermSignalCaughtAndExit)
signal.signal(signal.SIGINT, _informIntSignalCaughtAndExit)
_register()
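# A minimal usage sketch: importing this module is enough to install both handlers.
# The import path and sleep duration below are illustrative only.
#
# from strato.racktest.infra import handlekill  # noqa: F401
# import time
# time.sleep(3600)  # Ctrl-C or SIGTERM now logs a message and exits cleanly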
| 1.695313
| 2
|
analyzer/commandline_wrapper.py
|
FlxB2/Constraint-Based-Automated-Updating-of-Application-Deployment-Models
| 2
|
12784576
|
<filename>analyzer/commandline_wrapper.py
import fileinput
import re
from output_reader import Output_Reader
from fast_downward_output_reader import Fast_Downward_Output_Reader
from jasper_output_reader import Jasper_Output_Reader
def parse_planner_output(output):
# split outputs by delimiter e.g. <<<<FD>>>> or <<<<JASPER>>>>
fd_output_list = []
jasper_output_list = []
current_output = None
for line in output.splitlines():
if line.startswith("<<<<FD>>>>"):
current_output = "FD"
fd_output_list.append("")
elif line.startswith("<<<<JASPER>>>>"):
current_output = "JASPER"
jasper_output_list.append("")
if current_output == "FD":
fd_output_list[len(fd_output_list)-1] += line + "\n"
elif current_output == "JASPER":
jasper_output_list[len(jasper_output_list)-1] += line + "\n"
print("number fd outputs: " + str(len(fd_output_list)))
print("number jasper outputs: " + str(len(jasper_output_list)))
reader = Fast_Downward_Output_Reader()
for output in fd_output_list:
report = reader.parse_output(output)
print(report)
reader = Jasper_Output_Reader()
for output in jasper_output_list:
report = reader.parse_output(output)
print(report)
content = ""
for line in fileinput.input():
content += line
parse_planner_output(content)
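# A minimal invocation sketch; the log file name below is illustrative only.
# fileinput reads planner output from files given as arguments or from stdin:
#
#   python commandline_wrapper.py combined_planner_output.log
#   some_planner_command | python commandline_wrapper.py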
| 2.921875
| 3
|
script/entrypoint.py
|
riedan/postgres
| 0
|
12784577
|
#!/usr/bin/python3
import os
import shutil
import sys
from entrypoint_helpers import env, gen_cfg, gen_container_id, str2bool, start_app, set_perms, set_ownership
RUN_USER = env['sys_user']
RUN_GROUP = env['sys_group']
PG_DATA = env['pgdata']
PG_CONFIG_DIR = env['pg_config_dir']
try:
PG_SSL_KEY_FILE = env['pg_ssl_key_file']
PG_SSL_CERT_FILE = env['pg_ssl_cert_file']
PG_SSL_CA_FILE = env['pg_ssl_ca_file']
shutil.copyfile(PG_SSL_KEY_FILE, f'{PG_CONFIG_DIR}/server.key')
shutil.copyfile(PG_SSL_CERT_FILE, f'{PG_CONFIG_DIR}/server.crt')
shutil.copyfile(PG_SSL_CA_FILE, f'{PG_CONFIG_DIR}/root.crt')
set_perms(f'{PG_CONFIG_DIR}/server.key', user=RUN_USER, group=RUN_GROUP, mode=0o600 )
set_perms(f'{PG_CONFIG_DIR}/server.crt', user=RUN_USER, group=RUN_GROUP, mode=0o600 )
set_perms(f'{PG_CONFIG_DIR}/root.crt', user=RUN_USER, group=RUN_GROUP, mode=0o600 )
except Exception:
print("no certificate")
set_ownership(f'{PG_CONFIG_DIR}', user=RUN_USER, group=RUN_GROUP)
set_ownership(f'{PG_DATA}', user=RUN_USER, group=RUN_GROUP)
set_ownership('/var/log/patroni', user=RUN_USER, group=RUN_GROUP)
gen_cfg('patroni.yml.j2', f'{PG_CONFIG_DIR}/patroni.yml' , user=RUN_USER, group=RUN_GROUP,mode=0o640 , overwrite=False)
start_app(f'patroni {PG_CONFIG_DIR}/patroni.yml', PG_DATA, 'patroni')
| 1.960938
| 2
|
falconcv/models/__init__.py
|
haruiz/FalconCV
| 16
|
12784578
|
from .api_installer import ApiInstaller
from .model_builder import ModelBuilder
from .api_model import ApiModel
| 1.046875
| 1
|
maoyan/maoyan/spiders/weibo.py
|
hellending/-requests-selenium-
| 0
|
12784579
|
<filename>maoyan/maoyan/spiders/weibo.py
from selenium import webdriver
import time,re,requests,csv,os,socket
from lxml import etree
import os
import sys
from selenium.webdriver.remote.webelement import WebElement
socket.setdefaulttimeout(7)
os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(os.path.dirname(sys.argv[0]), 'cacert.pem')
from selenium.webdriver.common.action_chains import ActionChains
options = webdriver.ChromeOptions()
browser = webdriver.Chrome()
browser.maximize_window()
browser.get('https://weibo.com/u/6718757082/home?wvr=5')
print('您将有1分钟时间登陆......')
# time.sleep(7)
# browser.find_element_by_xpath('//*[@id="loginname"]').send_keys('1<PASSWORD>')
# browser.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3in]/div[2]/div/put').send_keys('1<PASSWORD>')
time.sleep(60)
f = open(r'.\1.txt','r',encoding='utf-8-sig')
s = f.readlines()[0]
if os.path.exists(r'.\weibo_data_财经.csv'):
os.remove(r'.\weibo_data_财经.csv')
f = open(r'.\weibo_data_财经.csv', 'w', encoding='utf-8-sig')
csv_writer = csv.writer(f)
csv_writer.writerow(['微博名', '性别', '所在地', '粉丝数', '联系方式', '简介'])
f.flush()
browser.find_element_by_xpath('//*[@id="plc_top"]/div/div/div[2]/input').send_keys(s)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="plc_top"]/div/div/div[2]/a').click()
browser.find_element_by_xpath('//*[@id="pl_feedtop_top"]/div[3]/a').click()
time.sleep(1)
list_history = []
# browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[1.txt]/div/dl[2]/dd/input').clear()
# browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[1.txt]/div/dl[2]/dd/input').send_keys(s)
browser.find_element_by_xpath('//*[@id="radio05"]').click()
# move = browser.find_element_by_xpath('//*[@id="pl_user_filtertab"]/div[1.txt]/ul/li[2]/span')
# ActionChains(browser).move_to_element(move).perform()
browser.find_element_by_xpath('/html/body/div[7]/div[2]/div/div[2]/a[1]').click()
browser.find_element_by_xpath('/html/body/div[1]/div[2]/ul/li[2]/a').click()
proxies = {'http':'http://192.168.127.12'}
m = 1
while m<=50:
html = browser.execute_script('return document.documentElement.outerHTML')
parse_html = etree.HTML(html)
people_src_list = parse_html.xpath('//div[@class="avator"]/a/@href')
print(people_src_list)
cookies = browser.get_cookies()
url = "http:"
session = requests.session()
cookieJar = requests.cookies.RequestsCookieJar()
for i in cookies:
cookieJar.set(i["name"],i["value"])
session.cookies.update(cookieJar)
for i in people_src_list:
try:
            url1 = str(url)+str(i)+"?ishot=1"
print("url1: ",url1)
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'}
html = session.get(url=url1,headers=headers,timeout=(5,5)).text
socket.setdefaulttimeout(7)
pattern = re.compile(r"oid']='(.*?)';")
src = pattern.findall(html,re.S)[0]
pattern = re.compile(r'class=\\"username\\">(.*?)<\\/h1>')
result1 = pattern.findall(html,re.S)[0]
print(result1)
            # Users that have already been collected will not be collected again
if result1 in list_history:
continue
try:
k = 2
flag = 0
while True:
if(k>=20):
flag = 1
break
pattern = re.compile(r'<strong class=\\"W_f1{}\\">(.*?)<\\/strong>'.format(k))
list = pattern.findall(html, re.S)
if len(list)>=2:
result2 = list[1]
print(result2)
break
k+=2
if flag==1 or int(result2)<1000:
continue
except:
pass
pattern_sex = re.compile(r'\\"icon_bed\\"><a><i class=\\"W_icon icon_pf_(.*?)\\"><\\/i>')
try:
text_sex = pattern_sex.findall(html,re.S)[0]
except:
print('no sex')
continue
if text_sex=='male':
result_sex = '男'
else:
result_sex = '女'
url2 = str(url)+"//weibo.com/"+str(src)+"/about"
pattern1 = re.compile(r"page_id']='(.*?)';")
src1 = pattern1.findall(html,re.S)[0]
html = session.get(url2,timeout=(5,5)).text
socket.setdefaulttimeout(7)
pattern = re.compile('<title>(.*?)</title>')
t = pattern.findall(html,re.S)[0]
if(t=='404错误'):
url2 = str(url)+"//weibo.com/p/"+str(src1)+"/info?mod=pedit_more"
html = session.get(url2,timeout=(5,5)).text
socket.setdefaulttimeout(7)
print("url2: ",url2)
# print(html)
# browser.find_element_by_xpath('//*[@id="pl_user_feedList"]/div[1.txt]/div[1.txt]/a').click()
# windows = browser.window_handles
# time.sleep(5)
# browser.switch_to.window(windows[-1.txt])
# js = 'var q=document.documentElement.scrollTop={}'.format(500)
# browser.execute_script(js)
# html = browser.execute_script('return document.documentElement.outerHTML')
# print(html)
# browser.find_element_by_css_selector("[class='WB_cardmore S_txt1 S_line1 clearfix']").click() #还要泛化
# time.sleep(2)
# js = 'var q=document.documentElement.scrollTop={}'.format(500)
# browser.execute_script(js)
# html = browser.execute_script('return document.documentElement.outerHTML')
            # Need a data-cleaning function and a workable regular expression here
# print(html)
pattern = re.compile(r'<span class=\\"pt_title S_txt2\\">(.*?)<\\/span>.*?<span class=\\"pt_detail\\">(.*?)<\\/span>')
ss = pattern.findall(html,re.S)
result3 = ''
result_location = ''
result_intro = ''
for z in range(len(ss)):
if ('QQ' in ss[z][0]) or ('电话' in ss[z][0]) or ('微信' in ss[z][0]) or ('邮箱' in ss[z][0]):
result3+=str(ss[z][0])+str(ss[z][1])+" "
elif '所在地' in ss[z][0]:
result_location+=str(ss[z][0])+str(ss[z][1])
elif '简介' in ss[z][0]:
result_intro += str(ss[z][0]) + str(ss[z][1])
if result3=='':
result3 = '无'
if result_location=='':
result_location = '无'
if result_intro=='':
result_intro = '无'
print(result3)
# result_intro = ''
# pattern_intro = re.compile(r'<p class=\\"p_txt\\">(.*?)<\\/p>')
# try:
# result_intro = pattern_intro.findall(html,re.S)[0]
# except:
# result_intro = '无'
csv_writer.writerow([result1,result_sex,result_location,result2,result3,result_intro])
f.flush()
list_history.append(result1)
# time.sleep(1.txt)
except:
continue
browser.find_element_by_class_name('next').click()
m+=1
# time.sleep(1)
# if os.path.exists(r'.\weibo_data_金融.csv'):
# os.remove(r'.\weibo_data_金融.csv')
# f = open(r'.\weibo_data_金融.csv', 'w', encoding='utf-8-sig')
# csv_writer = csv.writer(f)
# csv_writer.writerow(['微博名', '性别', '所在地', '粉丝数', '联系方式', '简介'])
# f.flush()
# browser.find_element_by_xpath('//*[@id="pl_feedtop_top"]/div[3]/a').click()
# browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[1.txt]/div/dl[2]/dd/input').clear()
# browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[1.txt]/div/dl[2]/dd/input').send_keys('金融')
# browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/a[1.txt]').click()
# time.sleep(2)
# proxies = {'http':'http://192.168.127.12'}
# m = 1.txt
# while m<=50:
# html = browser.execute_script('return document.documentElement.outerHTML')
# parse_html = etree.HTML(html)
# people_src_list = parse_html.xpath('//div[@class="avator"]/a/@href')
# print(people_src_list)
# cookies = browser.get_cookies()
# url = "http:"
# session = requests.session()
# cookieJar = requests.cookies.RequestsCookieJar()
# for i in cookies:
# cookieJar.set(i["name"],i["value"])
# session.cookies.update(cookieJar)
# for i in people_src_list:
# try:
# url1 = str(url)+str(i)+"?ishot=1.txt"
# print("url1: ",url1)
# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'}
# html = session.get(url=url1,headers=headers,timeout=(5,5)).text
# socket.setdefaulttimeout(7)
# pattern = re.compile(r"oid']='(.*?)';")
# src = pattern.findall(html,re.S)[0]
# pattern = re.compile(r'class=\\"username\\">(.*?)<\\/h1>')
# result1 = pattern.findall(html,re.S)[0]
# print(result1)
# #已经收录过的用户不会再被收录
# if result1 in list_history:
# continue
# try:
# k = 2
# flag = 0
# while True:
# if(k>=20):
# flag = 1.txt
# break
# pattern = re.compile(r'<strong class=\\"W_f1{}\\">(.*?)<\\/strong>'.format(k))
# list = pattern.findall(html, re.S)
# if len(list)>=2:
# result2 = list[1.txt]
# print(result2)
# break
# k+=2
# if flag==1.txt or int(result2)<1000 or int(result2)>10000:
# continue
# except:
# pass
# pattern_sex = re.compile(r'\\"icon_bed\\"><a><i class=\\"W_icon icon_pf_(.*?)\\"><\\/i>')
# try:
# text_sex = pattern_sex.findall(html,re.S)[0]
# except:
# print('no sex')
# continue
# if text_sex=='male':
# result_sex = '男'
# else:
# result_sex = '女'
# url2 = str(url)+"//weibo.com/"+str(src)+"/about"
# pattern1 = re.compile(r"page_id']='(.*?)';")
# src1 = pattern1.findall(html,re.S)[0]
# html = session.get(url2,timeout=(5,5)).text
# socket.setdefaulttimeout(7)
# pattern = re.compile('<title>(.*?)</title>')
# t = pattern.findall(html,re.S)[0]
# if(t=='404错误'):
# url2 = str(url)+"//weibo.com/p/"+str(src1)+"/info?mod=pedit_more"
# html = session.get(url2,timeout=(5,5)).text
# socket.setdefaulttimeout(7)
# print("url2: ",url2)
# pattern = re.compile(r'<span class=\\"pt_title S_txt2\\">(.*?)<\\/span>.*?<span class=\\"pt_detail\\">(.*?)<\\/span>')
# ss = pattern.findall(html,re.S)
# result3 = ''
# result_location = ''
# result_intro = ''
# for z in range(len(ss)):
# if ('QQ' in ss[z][0]) or ('电话' in ss[z][0]) or ('微信' in ss[z][0]) or ('邮箱' in ss[z][0]):
# result3+=str(ss[z][0])+str(ss[z][1.txt])+" "
# elif '所在地' in ss[z][0]:
# result_location+=str(ss[z][0])+str(ss[z][1.txt])
# elif '简介' in ss[z][0]:
# result_intro+=str(ss[z][0])+str(ss[z][1.txt])
#
# if result3=='':
# result3 = '无'
# if result_location=='':
# result_location = '无'
# if result_intro=='':
# result_intro = '无'
# print(result3)
# # result_intro = ''
# # pattern_intro = re.compile(r'<p class=\\"p_txt\\">(.*?)<\\/p>')
# # try:
# # result_intro = pattern_intro.findall(html,re.S)[0]
# # except:
# # result_intro = '无'
# csv_writer.writerow([result1,result_sex,result_location,result2,result3,result_intro])
# f.flush()
# list_history.append(result1)
# time.sleep(1.txt)
# except:
# continue
# browser.find_element_by_class_name('next').click()
# m+=1.txt
# time.sleep(1.txt)
print('数据收录完毕。。。。。')
| 2.53125
| 3
|
lib/crypto/receipt.py
|
clouserw/zamboni
| 0
|
12784580
|
<filename>lib/crypto/receipt.py
import json
from django.conf import settings
from django_statsd.clients import statsd
import commonware.log
import jwt
import requests
log = commonware.log.getLogger('z.crypto')
class SigningError(Exception):
pass
def sign(receipt):
"""
Send the receipt to the signing service.
This could possibly be made async via celery.
"""
    # If no destination is set, just ignore this request.
if not settings.SIGNING_SERVER:
return ValueError('Invalid config. SIGNING_SERVER empty.')
destination = settings.SIGNING_SERVER + '/1.0/sign'
timeout = settings.SIGNING_SERVER_TIMEOUT
receipt_json = json.dumps(receipt)
log.info('Calling service: %s' % destination)
log.info('Receipt contents: %s' % receipt_json)
headers = {'Content-Type': 'application/json'}
data = receipt if isinstance(receipt, basestring) else receipt_json
try:
with statsd.timer('services.sign.receipt'):
req = requests.post(destination, data=data, headers=headers,
timeout=timeout)
except requests.Timeout:
statsd.incr('services.sign.receipt.timeout')
log.error('Posting to receipt signing timed out')
raise SigningError('Posting to receipt signing timed out')
except requests.RequestException:
# Will occur when some other error occurs.
statsd.incr('services.sign.receipt.error')
log.error('Posting to receipt signing failed', exc_info=True)
raise SigningError('Posting to receipt signing failed')
if req.status_code != 200:
statsd.incr('services.sign.receipt.error')
log.error('Posting to signing failed: %s' % req.status_code)
raise SigningError('Posting to signing failed: %s'
% req.status_code)
return json.loads(req.content)['receipt']
def decode(receipt):
"""
Decode and verify that the receipt is sound from a crypto point of view.
Will raise errors if the receipt is not valid, returns receipt contents
if it is valid.
"""
raise NotImplementedError
def crack(receipt):
"""
Crack open the receipt, without checking that the crypto is valid.
Returns a list of all the elements of a receipt, which by default is
cert, receipt.
"""
return map(lambda x: jwt.decode(x.encode('ascii'), verify=False),
receipt.split('~'))
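# A minimal sketch of crack(); the value below stands for a hypothetical
# "cert~receipt" bundle of two JWTs, not a real signed receipt.
#
# cert_data, receipt_data = crack(u'<cert-jwt>~<receipt-jwt>')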
| 2.21875
| 2
|
theano_utils.py
|
Alexzhuqch001/temprnn
| 0
|
12784581
|
<reponame>Alexzhuqch001/temprnn<filename>theano_utils.py
# Copyright (c) 2016, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import config
import os, sys, warnings
import theano
#----------------------------------------------------------------------------
# Check for common problems in a compiled Theano function.
def analyze_function(func, verbose = False):
assert isinstance(func, theano.compile.Function)
topo = func.maker.fgraph.toposort()
# Print stats.
if verbose:
op_names = [type(apply.op).__name__ for apply in topo]
op_dict = {op: 0 for op in op_names}
for op in op_names:
op_dict[op] += 1
op_list = op_dict.items()
op_list.sort(key = lambda x: -x[1])
print
for op, num in op_list:
print " %-8d%s" % (num, op)
print
# Check for float64 use.
for apply in topo:
dtype = getattr(apply.outputs[0].type, 'dtype', '')
acc_dtype = getattr(apply.op, 'acc_dtype', '')
if dtype == 'float64' or acc_dtype == 'float64':
print 'WARNING: Theano float64:', apply
if verbose:
print
theano.printing.debugprint(apply)
print
# Check for excess GPU=>CPU transfers.
for apply in topo:
op = type(apply.op).__name__
if op == 'HostFromGpu':
for parent in topo:
parent_inputs = [var.owner for var in parent.inputs]
if apply in parent_inputs:
print 'WARNING: Theano CPU fallback:', parent
if verbose:
print
theano.printing.debugprint(parent)
print
#----------------------------------------------------------------------------
# Compile and check Theano function.
def function(*args, **kwargs):
func = theano.function(*args, **kwargs)
analyze_function(func, verbose = False)
return func
#----------------------------------------------------------------------------
| 1.460938
| 1
|
src/tweet.py
|
fesanlu/Datos-COVID19
| 2
|
12784582
|
import sys
import tweepy
def tweeting(consumer_key, consumer_secret, my_access_token, my_access_token_secret, message):
# Authentication
my_auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
my_auth.set_access_token(my_access_token, my_access_token_secret)
my_api = tweepy.API(my_auth)
my_api.update_status(message)
if __name__ == '__main__':
if len(sys.argv) == 6:
consumer_key = sys.argv[1]
consumer_secret_key = sys.argv[2]
my_access_token = sys.argv[3]
my_access_token_secret = sys.argv[4]
message = sys.argv[5]
tweeting(consumer_key, consumer_secret_key, my_access_token, my_access_token_secret, message)
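# A minimal invocation sketch; every credential below is a placeholder, not a real key:
#
#   python tweet.py CONSUMER_KEY CONSUMER_SECRET ACCESS_TOKEN ACCESS_TOKEN_SECRET "Report published"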
| 2.875
| 3
|
days/day101/Bite 92. Humanize a datetime/test_humanize_date.py
|
alex-vegan/100daysofcode-with-python-course
| 2
|
12784583
|
<filename>days/day101/Bite 92. Humanize a datetime/test_humanize_date.py
from datetime import timedelta
import pytest
from humanize_date import pretty_date, NOW
def n_days_ago_str(days):
return (NOW - timedelta(days=days)).strftime('%m/%d/%y')
@pytest.mark.parametrize("arg, expected", [
(NOW - timedelta(seconds=2), 'just now'),
(NOW - timedelta(seconds=9), 'just now'),
(NOW - timedelta(seconds=10), '10 seconds ago'),
(NOW - timedelta(seconds=59), '59 seconds ago'),
(NOW - timedelta(minutes=1), 'a minute ago'),
(NOW - timedelta(minutes=1, seconds=40), 'a minute ago'),
(NOW - timedelta(minutes=2), '2 minutes ago'),
(NOW - timedelta(minutes=59), '59 minutes ago'),
(NOW - timedelta(hours=1), 'an hour ago'),
(NOW - timedelta(hours=2), '2 hours ago'),
(NOW - timedelta(hours=23), '23 hours ago'),
(NOW - timedelta(hours=24), 'yesterday'),
(NOW - timedelta(hours=47), 'yesterday'),
(NOW - timedelta(days=1), 'yesterday'),
(NOW - timedelta(days=2), n_days_ago_str(2)),
(NOW - timedelta(days=7), n_days_ago_str(7)),
(NOW - timedelta(days=100), n_days_ago_str(100)),
(NOW - timedelta(days=365), n_days_ago_str(365)),
])
def test_pretty_date(arg, expected):
assert pretty_date(arg) == expected
def test_input_variable_of_wrong_type():
with pytest.raises(ValueError):
pretty_date(123)
def test_input_variable_future_date():
with pytest.raises(ValueError):
pretty_date(NOW + timedelta(days=1))
| 3
| 3
|
src/raritan/rpc/cert/__init__.py
|
vhirtzel/apc_reboot
| 1
|
12784584
|
<filename>src/raritan/rpc/cert/__init__.py
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright 2020 Raritan Inc. All rights reserved.
#
# This is an auto-generated file.
#
# Section generated by IdlC from "ServerSSLCert.idl"
#
import raritan.rpc
from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException
import raritan.rpc.cert
# interface
class ServerSSLCert(Interface):
idlType = "cert.ServerSSLCert:2.0.0"
SUCCESS = 0
ERR_GEN_KEY_LEN_INVALID = 100
ERR_GEN_CSR_OR_CERT_PENDING = 101
ERR_GEN_KEY_GEN_FAILED = 102
ERR_INSTALL_KEY_MISSING = 200
ERR_INSTALL_CERT_MISSING = 201
ERR_INSTALL_CERT_FORMAT_INVALID = 202
ERR_INSTALL_CERT_KEY_MISMATCH = 203
# structure
class CommonAttributes(Structure):
idlType = "cert.ServerSSLCert_2_0_0.CommonAttributes:1.0.0"
elements = ["country", "stateOrProvince", "locality", "organization", "organizationalUnit", "commonName", "emailAddress"]
def __init__(self, country, stateOrProvince, locality, organization, organizationalUnit, commonName, emailAddress):
typecheck.is_string(country, AssertionError)
typecheck.is_string(stateOrProvince, AssertionError)
typecheck.is_string(locality, AssertionError)
typecheck.is_string(organization, AssertionError)
typecheck.is_string(organizationalUnit, AssertionError)
typecheck.is_string(commonName, AssertionError)
typecheck.is_string(emailAddress, AssertionError)
self.country = country
self.stateOrProvince = stateOrProvince
self.locality = locality
self.organization = organization
self.organizationalUnit = organizationalUnit
self.commonName = commonName
self.emailAddress = emailAddress
@classmethod
def decode(cls, json, agent):
obj = cls(
country = json['country'],
stateOrProvince = json['stateOrProvince'],
locality = json['locality'],
organization = json['organization'],
organizationalUnit = json['organizationalUnit'],
commonName = json['commonName'],
emailAddress = json['emailAddress'],
)
return obj
def encode(self):
json = {}
json['country'] = self.country
json['stateOrProvince'] = self.stateOrProvince
json['locality'] = self.locality
json['organization'] = self.organization
json['organizationalUnit'] = self.organizationalUnit
json['commonName'] = self.commonName
json['emailAddress'] = self.emailAddress
return json
# structure
class ReqInfo(Structure):
idlType = "cert.ServerSSLCert_2_0_0.ReqInfo:1.0.0"
elements = ["subject", "names", "keyLength"]
def __init__(self, subject, names, keyLength):
typecheck.is_struct(subject, raritan.rpc.cert.ServerSSLCert.CommonAttributes, AssertionError)
for x0 in names:
typecheck.is_string(x0, AssertionError)
typecheck.is_int(keyLength, AssertionError)
self.subject = subject
self.names = names
self.keyLength = keyLength
@classmethod
def decode(cls, json, agent):
obj = cls(
subject = raritan.rpc.cert.ServerSSLCert.CommonAttributes.decode(json['subject'], agent),
names = [x0 for x0 in json['names']],
keyLength = json['keyLength'],
)
return obj
def encode(self):
json = {}
json['subject'] = raritan.rpc.cert.ServerSSLCert.CommonAttributes.encode(self.subject)
json['names'] = [x0 for x0 in self.names]
json['keyLength'] = self.keyLength
return json
# structure
class CertInfo(Structure):
idlType = "cert.ServerSSLCert_2_0_0.CertInfo:1.0.0"
elements = ["subject", "issuer", "names", "invalidBefore", "invalidAfter", "serialNumber", "keyLength"]
def __init__(self, subject, issuer, names, invalidBefore, invalidAfter, serialNumber, keyLength):
typecheck.is_struct(subject, raritan.rpc.cert.ServerSSLCert.CommonAttributes, AssertionError)
typecheck.is_struct(issuer, raritan.rpc.cert.ServerSSLCert.CommonAttributes, AssertionError)
for x0 in names:
typecheck.is_string(x0, AssertionError)
typecheck.is_string(invalidBefore, AssertionError)
typecheck.is_string(invalidAfter, AssertionError)
typecheck.is_string(serialNumber, AssertionError)
typecheck.is_int(keyLength, AssertionError)
self.subject = subject
self.issuer = issuer
self.names = names
self.invalidBefore = invalidBefore
self.invalidAfter = invalidAfter
self.serialNumber = serialNumber
self.keyLength = keyLength
@classmethod
def decode(cls, json, agent):
obj = cls(
subject = raritan.rpc.cert.ServerSSLCert.CommonAttributes.decode(json['subject'], agent),
issuer = raritan.rpc.cert.ServerSSLCert.CommonAttributes.decode(json['issuer'], agent),
names = [x0 for x0 in json['names']],
invalidBefore = json['invalidBefore'],
invalidAfter = json['invalidAfter'],
serialNumber = json['serialNumber'],
keyLength = json['keyLength'],
)
return obj
def encode(self):
json = {}
json['subject'] = raritan.rpc.cert.ServerSSLCert.CommonAttributes.encode(self.subject)
json['issuer'] = raritan.rpc.cert.ServerSSLCert.CommonAttributes.encode(self.issuer)
json['names'] = [x0 for x0 in self.names]
json['invalidBefore'] = self.invalidBefore
json['invalidAfter'] = self.invalidAfter
json['serialNumber'] = self.serialNumber
json['keyLength'] = self.keyLength
return json
# structure
class Info(Structure):
idlType = "cert.ServerSSLCert_2_0_0.Info:1.0.0"
elements = ["havePendingReq", "havePendingCert", "pendingReqInfo", "pendingCertInfo", "activeCertInfo", "maxSignDays"]
def __init__(self, havePendingReq, havePendingCert, pendingReqInfo, pendingCertInfo, activeCertInfo, maxSignDays):
typecheck.is_bool(havePendingReq, AssertionError)
typecheck.is_bool(havePendingCert, AssertionError)
typecheck.is_struct(pendingReqInfo, raritan.rpc.cert.ServerSSLCert.ReqInfo, AssertionError)
typecheck.is_struct(pendingCertInfo, raritan.rpc.cert.ServerSSLCert.CertInfo, AssertionError)
typecheck.is_struct(activeCertInfo, raritan.rpc.cert.ServerSSLCert.CertInfo, AssertionError)
typecheck.is_int(maxSignDays, AssertionError)
self.havePendingReq = havePendingReq
self.havePendingCert = havePendingCert
self.pendingReqInfo = pendingReqInfo
self.pendingCertInfo = pendingCertInfo
self.activeCertInfo = activeCertInfo
self.maxSignDays = maxSignDays
@classmethod
def decode(cls, json, agent):
obj = cls(
havePendingReq = json['havePendingReq'],
havePendingCert = json['havePendingCert'],
pendingReqInfo = raritan.rpc.cert.ServerSSLCert.ReqInfo.decode(json['pendingReqInfo'], agent),
pendingCertInfo = raritan.rpc.cert.ServerSSLCert.CertInfo.decode(json['pendingCertInfo'], agent),
activeCertInfo = raritan.rpc.cert.ServerSSLCert.CertInfo.decode(json['activeCertInfo'], agent),
maxSignDays = json['maxSignDays'],
)
return obj
def encode(self):
json = {}
json['havePendingReq'] = self.havePendingReq
json['havePendingCert'] = self.havePendingCert
json['pendingReqInfo'] = raritan.rpc.cert.ServerSSLCert.ReqInfo.encode(self.pendingReqInfo)
json['pendingCertInfo'] = raritan.rpc.cert.ServerSSLCert.CertInfo.encode(self.pendingCertInfo)
json['activeCertInfo'] = raritan.rpc.cert.ServerSSLCert.CertInfo.encode(self.activeCertInfo)
json['maxSignDays'] = self.maxSignDays
return json
class _generateUnsignedKeyPair(Interface.Method):
name = 'generateUnsignedKeyPair'
@staticmethod
def encode(reqInfo, challenge):
typecheck.is_struct(reqInfo, raritan.rpc.cert.ServerSSLCert.ReqInfo, AssertionError)
typecheck.is_string(challenge, AssertionError)
args = {}
args['reqInfo'] = raritan.rpc.cert.ServerSSLCert.ReqInfo.encode(reqInfo)
args['challenge'] = challenge
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _generateSelfSignedKeyPair(Interface.Method):
name = 'generateSelfSignedKeyPair'
@staticmethod
def encode(reqInfo, days):
typecheck.is_struct(reqInfo, raritan.rpc.cert.ServerSSLCert.ReqInfo, AssertionError)
typecheck.is_int(days, AssertionError)
args = {}
args['reqInfo'] = raritan.rpc.cert.ServerSSLCert.ReqInfo.encode(reqInfo)
args['days'] = days
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _deletePending(Interface.Method):
name = 'deletePending'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
return None
class _getInfo(Interface.Method):
name = 'getInfo'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
info = raritan.rpc.cert.ServerSSLCert.Info.decode(rsp['info'], agent)
typecheck.is_struct(info, raritan.rpc.cert.ServerSSLCert.Info, DecodeException)
return info
class _installPendingKeyPair(Interface.Method):
name = 'installPendingKeyPair'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
def __init__(self, target, agent):
super(ServerSSLCert, self).__init__(target, agent)
self.generateUnsignedKeyPair = ServerSSLCert._generateUnsignedKeyPair(self)
self.generateSelfSignedKeyPair = ServerSSLCert._generateSelfSignedKeyPair(self)
self.deletePending = ServerSSLCert._deletePending(self)
self.getInfo = ServerSSLCert._getInfo(self)
self.installPendingKeyPair = ServerSSLCert._installPendingKeyPair(self)
# from raritan/rpc/cert/__extend__.py
def upload(agent, certData, keyData):
"""
Method to upload certificates
- **parameters**, **return**
:param agent: An agent instance for the device where the certificate should be uploaded
:param certData: The binary data of the certificate
:param keyData: The binary data of the key
- **Example**
:Example:
from raritan import rpc
from raritan.rpc import cert
agent = rpc.Agent("https", "my-pdu.example.com", "admin", "raritan")
ssl_proxy = cert.ServerSSLCert("/server_ssl_cert", agent)
# read files in binary mode
certFile = open("my-cert.crt", "rb")
keyFile = open("my-key.key", "rb")
# upload
cert.upload(agent, certFile.read(), keyFile.read())
"""
target = "cgi-bin/server_ssl_cert_upload.cgi"
formnames = ["cert_file", "key_file"]
filenames = ["cert-file.crt", "key-file.key"]
agent.form_data_file(target, [certData, keyData], filenames, formnames, ["application/octet-stream", "application/octet-stream"])
def download(agent):
"""
Method to download the server cert
**parameters**
:param agent: An agent instance from the device where the certificate file should be downloaded
:return: returns the certificate data
**Example**
:Example:
from raritan import rpc
from raritan.rpc import cert
agent = rpc.Agent("https", "my-pdu.example.com", "admin", "raritan")
# download
cert = cert.download(agent)
print(cert)
"""
target = "cgi-bin/server_ssl_cert_download.cgi"
return agent.get(target)
| 1.945313
| 2
|
naampy/in_rolls_fn.py
|
appeler/naampy
| 4
|
12784585
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import pandas as pd
from pkg_resources import resource_filename
from .utils import column_exists, fixup_columns, get_app_file_path, download_file
IN_ROLLS_DATA = {'v1': 'https://dataverse.harvard.edu/api/v1/access/datafile/4967581',
'v2': 'https://dataverse.harvard.edu/api/v1/access/datafile/4965696',
'v2_1k': 'https://dataverse.harvard.edu/api/v1/access/datafile/4965695',
}
IN_ROLLS_COLS = ['n_male', 'n_female', 'n_third_gender', 'prop_female', 'prop_male', 'prop_third_gender']
class InRollsFnData():
__df = None
__state = None
__year = None
@staticmethod
def load_naampy_data(dataset):
data_fn = 'naampy_{0:s}.csv.gz'.format(dataset)
data_path = get_app_file_path('naampy', data_fn)
if not os.path.exists(data_path):
print("Downloading naampy data from the server ({0!s})..."
.format(data_fn))
if not download_file(IN_ROLLS_DATA[dataset], data_path):
print("ERROR: Cannot download naampy data file")
return None
else:
print("Using cached naampy data from local ({0!s})...".format(data_path))
return data_path
@classmethod
def in_rolls_fn_gender(cls, df, namecol, state=None, year=None, dataset='v2_1k'):
"""Appends additional columns from Female ratio data to the input DataFrame
based on the first name.
        Removes extra space. Checks if the name is in the Indian electoral rolls data.
If it is, outputs data from that row.
Args:
df (:obj:`DataFrame`): Pandas DataFrame containing the first name
column.
namecol (str or int): Column's name or location of the name in
DataFrame.
state (str): The state name of Indian electoral rolls data to be used.
(default is None for all states)
year (int): The year of Indian electoral rolls to be used.
(default is None for all years)
Returns:
DataFrame: Pandas DataFrame with additional columns:-
'n_female', 'n_male', 'n_third_gender',
'prop_female', 'prop_male', 'prop_third_gender' by first name
"""
if namecol not in df.columns:
print("No column `{0!s}` in the DataFrame".format(namecol))
return df
df['__first_name'] = df[namecol].str.strip()
df['__first_name'] = df['__first_name'].str.lower()
if cls.__df is None or cls.__state != state or cls.__year != year:
data_path = InRollsFnData.load_naampy_data(dataset)
adf = pd.read_csv(data_path, usecols=['state', 'birth_year',
'first_name', 'n_female', 'n_male', 'n_third_gender'])
agg_dict = {'n_female': 'sum', 'n_male': 'sum', 'n_third_gender': 'sum'}
if state and year:
adf = adf[(adf.state==state) & (adf.birth_year==year)].copy()
del adf['birth_year']
del adf['state']
elif state:
adf = adf.groupby(['state', 'first_name']).agg(agg_dict).reset_index()
adf = adf[adf.state==state].copy()
del adf['state']
elif year:
adf = adf.groupby(['birth_year', 'first_name']).agg(agg_dict).reset_index()
adf = adf[adf.birth_year==year].copy()
del adf['birth_year']
else:
adf = adf.groupby(['first_name']).agg(agg_dict).reset_index()
n = adf['n_female'] + adf['n_male'] + adf['n_third_gender']
adf['prop_female'] = adf['n_female'] / n
adf['prop_male'] = adf['n_male'] / n
adf['prop_third_gender'] = adf['n_third_gender'] / n
cls.__df = adf
cls.__df = cls.__df[['first_name'] + IN_ROLLS_COLS]
cls.__df.rename(columns={'first_name': '__first_name'}, inplace=True)
rdf = pd.merge(df, cls.__df, how='left', on='__first_name')
del rdf['__first_name']
return rdf
@staticmethod
def list_states(dataset='v2_1k'):
data_path = InRollsFnData.load_naampy_data(dataset)
adf = pd.read_csv(data_path, usecols=['state'])
return adf.state.unique()
in_rolls_fn_gender = InRollsFnData.in_rolls_fn_gender
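

# Hedged usage sketch (not executed on import). The column name and the sample
# first names below are illustrative assumptions, not taken from the naampy package.
def _example_in_rolls_fn_gender():
    sample = pd.DataFrame({'first_name': ['priya', 'rahul', 'asha']})
    # Left-joins the n_* and prop_* columns onto the input by first name;
    # unmatched names simply end up with NaN values.
    return in_rolls_fn_gender(sample, 'first_name')
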
def main(argv=sys.argv[1:]):
title = ('Appends Electoral roll columns for prop_female, n_female, '
             'n_male, n_third_gender by first name')
parser = argparse.ArgumentParser(description=title)
parser.add_argument('input', default=None,
help='Input file')
parser.add_argument('-f', '--first-name', required=True,
                        help='Name or index location of the column that contains '
'the first name')
parser.add_argument('-s', '--state', default=None,
choices=InRollsFnData.list_states(),
help='State name of Indian electoral rolls data '
'(default=all)')
parser.add_argument('-y', '--year', type=int, default=None,
help='Birth year in Indian electoral rolls data (default=all)')
parser.add_argument('-o', '--output', default='in-rolls-output.csv',
help='Output file with Indian electoral rolls data columns')
parser.add_argument('-d', '--dataset', default='v2_1k',
choices=['v1', 'v2', 'v2_1k'],
help='Select the dataset v1 is 12 states,\n' +
'v2 and v2_1k for 30 states with 100 and 1,000\n' +
' first name occurrences respectively'
'(default=v2_1k)')
args = parser.parse_args(argv)
print(args)
if not args.first_name.isdigit():
df = pd.read_csv(args.input)
else:
df = pd.read_csv(args.input, header=None)
args.first_name = int(args.first_name)
if not column_exists(df, args.first_name):
return -1
rdf = in_rolls_fn_gender(df, args.first_name, args.state, args.year, args.dataset)
print("Saving output to file: `{0:s}`".format(args.output))
rdf.columns = fixup_columns(rdf.columns)
rdf.to_csv(args.output, index=False)
return 0
if __name__ == "__main__":
sys.exit(main())
| 2.96875
| 3
|
itez/users/urls.py
|
Digital-Prophets/itez
| 1
|
12784586
|
from django.urls import path
from itez.users.views import (
update_view,
# UserUpdateView,
user_delete,
user_detail_view,
user_redirect_view,
UserCreateView,
user_profile,
user_profile_photo_update,
)
app_name = "users"
urlpatterns = [
path("user/create/", UserCreateView.as_view(), name="user_create"),
path("user/delete/<int:user_id>", user_delete, name="delete"),
path("user/profile/photo/upload/", user_profile_photo_update, name="profile_photo"),
path("user/profile/", user_profile, name="profile"),
path("~redirect/", view=user_redirect_view, name="redirect"),
# path("update/<int:pk>/", view=UserUpdateView.as_view(), name="update"),
path("update/<int:pk>/", view=update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| 2.078125
| 2
|
fltk/nets/fashion_mnist_cnn.py
|
AbeleMM/fltk-testbed
| 0
|
12784587
|
<reponame>AbeleMM/fltk-testbed
# pylint: disable=missing-function-docstring,missing-class-docstring,invalid-name
import torch
class FashionMNISTCNN(torch.nn.Module):
def __init__(self):
super(FashionMNISTCNN, self).__init__()
self.layer1 = torch.nn.Sequential(
torch.nn.Conv2d(1, 16, kernel_size=5, padding=2),
torch.nn.BatchNorm2d(16),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2))
self.layer2 = torch.nn.Sequential(
torch.nn.Conv2d(16, 32, kernel_size=5, padding=2),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2))
self.flatten = torch.nn.Flatten()
self.fc = torch.nn.Linear(7 * 7 * 32, 10)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.fc(self.flatten(x))
return x
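

# Hedged shape check (illustrative only, not part of the original training code):
# one 1x28x28 Fashion-MNIST-sized input should yield 10 class logits.
if __name__ == "__main__":
    model = FashionMNISTCNN()
    dummy = torch.randn(1, 1, 28, 28)  # batch of one grayscale 28x28 image
    print(model(dummy).shape)          # expected: torch.Size([1, 10])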
| 2.359375
| 2
|
tensorflow_developer2/13_loading_preprocessing_data/homl_chap13_00_data_processing.py
|
swilliams11/machine-learning
| 0
|
12784588
|
<reponame>swilliams11/machine-learning<filename>tensorflow_developer2/13_loading_preprocessing_data/homl_chap13_00_data_processing.py<gh_stars>0
import tensorflow as tf
"""
This is chapter 13 of hands on machine learning revision 2.
Data processing
"""
"""Creating and modifying a dataset"""
def dataset_from_tensor_slices():
"""Creates a dataset using from_tensor_slices."""
X = tf.range(10)
print(X)
dataset = tf.data.Dataset.from_tensor_slices(X)
print(dataset)
return dataset
def dataset_with_range():
"""Creates a dataset with the Dataset class Which is equivalent to the one above."""
dataset = tf.data.Dataset.range(10)
print(dataset)
return dataset
def print_dataset(ds):
# iterate over a dataset
for item in ds:
print(item)
def dataset_transformation(ds):
# repeat the dataset 3 times and then group into batch of 7
return ds.repeat(3).batch(7)
def dataset_modification(ds):
return ds.map(lambda x: x * 2)
def dataset_unbatch(ds):
    return ds.unbatch()  # Dataset.apply expects a transformation function, so unbatch() is called directly
ds = dataset_from_tensor_slices()
"""
print("dataset from_tensor_slices")
print_dataset(dataset_from_tensor_slices())
print("dataset with Dataset.range()")
print_dataset(dataset_with_range())
print("\ndataset transformation")
ds2 = dataset_from_tensor_slices()
print_dataset(dataset_transformation(ds2))
print("\ndataset modification")
print_dataset(dataset_modification(ds))
"""
print("\ndataset unbatched")
elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]
dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)
dataset = dataset.unbatch()
print(list(dataset.as_numpy_iterator()))
print("\nDataset apply function")
dataset = tf.data.Dataset.range(100)
def dataset_fn(ds):
return ds.filter(lambda x: x < 5)
dataset = dataset.apply(dataset_fn)
print(list(dataset.as_numpy_iterator()))
print("\nDataset take function")
for item in dataset.take(3):
print(item)
"""Shuffling a dataset"""
print("\nShuffling a dataset")
dataset = tf.data.Dataset.range(10).repeat(3)
dataset = dataset.shuffle(buffer_size=5, seed=42).batch(7)
for item in dataset:
print(item)
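
# Hedged follow-up sketch: chains the helper functions defined above on a fresh
# dataset purely to illustrate the call order; the printed values are not asserted.
print("\nChaining the helpers above")
ds3 = dataset_with_range()          # 0..9
ds3 = dataset_modification(ds3)     # values doubled
ds3 = dataset_transformation(ds3)   # repeat(3).batch(7)
ds3 = dataset_unbatch(ds3)          # flatten the batches back out
print(list(ds3.as_numpy_iterator()))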
| 3.359375
| 3
|
loops/digits.py
|
fa-alvarez/python-examples
| 0
|
12784589
|
#!/usr/bin/env python3
def digits(n):
"""The digits function returns how many digits a number has."""
count = 0
if n == 0:
return 1
while (n >= 1):
count += 1
n = n / 10
return count
print(digits(25)) # Should print 2
print(digits(144)) # Should print 3
print(digits(1000)) # Should print 4
print(digits(0)) # Should print 1
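
# Hedged cross-check (an illustrative alternative, not part of the original exercise):
# counting characters of the decimal representation should agree with digits().
def digits_via_str(n):
    return len(str(abs(n)))

print(digits_via_str(144) == digits(144))  # Should print True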
| 4.0625
| 4
|
Palindrome.py
|
shakirmahmood/Tasks
| 0
|
12784590
|
<gh_stars>0
#q = int(input("Enter number of queries: "))
s = input("Enter a string: ")
if s == s[::-1]:
print("Entered string is a palindrome.")
else:
# dL = list(s) #Dummy List
for i in range(len(s)):
x = s[i]
s = s[:i]+s[i+1:]
#print(s)
if s == s[::-1]:
print("index: ", i, "character: ", x)
break
else:
s = s[:i] + x + s[i:]
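
# Hedged alternative sketch (not part of the original script): a two-pointer helper
# that returns the index whose removal makes the text a palindrome, or -1 if the
# text is already a palindrome or cannot be fixed by removing a single character.
def removable_index(text):
    lo, hi = 0, len(text) - 1
    while lo < hi:
        if text[lo] != text[hi]:
            if text[lo + 1:hi + 1] == text[lo + 1:hi + 1][::-1]:
                return lo
            if text[lo:hi] == text[lo:hi][::-1]:
                return hi
            return -1
        lo += 1
        hi -= 1
    return -1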
| 3.53125
| 4
|
stash.py
|
okahilak/xcoord
| 0
|
12784591
|
<reponame>okahilak/xcoord
# Note: Numba target needs to be set here and cannot be set after importing the library
NUMBA_TARGET = "parallel" # Available targets: "cpu", "parallel", and "cuda"

import math

import numba as nb
import numpy as np
@nb.jit(nopython=True, parallel=True)
def example_func(a, b):
return a**2 + b**2
@nb.jit(nopython=True, parallel=True)
def compute_direction_numba(trk_list):
# out = np.zeros(length_of_output)
out = [None]*len(trk_list)
for i in nb.prange(len(trk_list)):
# independent and parallel loop
# out[i] = compute_direction(trk_list[i])
trk = np.transpose(np.asarray(trk_list[i]))
# numb_points = trk.shape[0]
direction_rescale = [None]*trk.shape[0]
for j in range(trk.shape[0] - 1):
direction = trk[j + 1, :] - trk[j, :]
direction = direction / np.linalg.norm(direction)
# direction_rescale.append([int(255 * abs(s)) for s in direction])
direction_rescale.append([int(255 * abs(s)) for s in direction])
# repeat color for last point
direction_rescale.append([int(255 * abs(s)) for s in direction])
out[i] = direction_rescale
return out
@nb.jit(nopython=True, parallel=True)
def normalize_numba(x):
ret = np.empty_like(x)
for i in nb.prange(x.shape[0]):
acc = 0.0
for j in range(x.shape[1]):
acc += x[i, j]**2
norm = np.sqrt(acc)
for j in range(x.shape[1]):
ret[i, j] = x[i, j] / norm
return ret
def numba3(vec_obj, vec_ps, cos_maxsep):
nps = len(vec_ps)
nobj = len(vec_obj)
out = np.zeros(nobj, bool)
numba3_helper(vec_obj, vec_ps, cos_maxsep, out, nps, nobj)
return np.flatnonzero(out)
@nb.jit(nopython=True)
def numba3_helper(vec_obj, vec_ps, cos_maxsep, out, nps, nobj):
for i in range(nobj):
for j in range(nps):
cos = (vec_obj[i,0]*vec_ps[j,0] +
vec_obj[i,1]*vec_ps[j,1] +
vec_obj[i,2]*vec_ps[j,2])
if cos > cos_maxsep:
out[i] = True
break
return out
@nb.guvectorize([nb.void(nb.double[:], nb.double[:], nb.double[:])],
'(n),(n)->(n)', target=NUMBA_TARGET, nopython=True)
def _gprocess_point(a, b, out_b):
"""Substracts 'b' from 'a' and stores result in 'out_V'. Then takes cross product of 'c' and 'out_V' and stores result in 'out_p'.
Parameters
----------
a, b ,c: np.ndarray
One-dimensional arrays to process.
out_b : np.ndarray
Output where to accumulate the results
"""
    # Subtract
a1, a2, a3 = a[0], a[1], a[2]
b1, b2, b3 = b[0], b[1], b[2]
V1 = a1 - b1
V2 = a2 - b2
V3 = a3 - b3
# Length of V
v_norm = math.sqrt(V1*V1 + V2*V2 + V3*V3)
# Cross product
if v_norm != 0:
p1 = V1/v_norm
p2 = V2/v_norm
p3 = V3/v_norm
# Store result in out_b
#np.add(b, p, out=b)
out_b[0] = 255*p1
out_b[1] = 255*p2
out_b[2] = 255*p3
@nb.jit()
def _gprocess_points(xyz, wire, dwire, out_b):
"""Processes 'xyz' coordinates and calculates b-field due to current in wire 'w', 'dw'. Stores outputs to 'out_V', 'out_p', and 'out_b'.
Parameters
----------
xyz : np.ndarray
One-dimensional array to process.
wire : np.ndarray
Wire coordinates as 3-dimensional vectors.
dwire : np.ndarray
Wire length vectors
out_b : np.ndarray
Output where to accumulate the results
"""
for i in range(len(wire)):
w = wire[i]
dw = dwire[i]
_gprocess_point(xyz, w, dw, out_b) # V = xyz - w; p = dw x V
# def calculate_points():
#
# B = np.zeros_like(xyz, dtype=np.double)
#
# return B
#condition_tut.py
import random, time
from threading import Condition, Thread
"""
'condition' variable will be used to represent the availability of a produced
item.
"""
condition = Condition()
box = []
def producer(box, nitems):
for i in range(nitems):
time.sleep(random.randrange(2, 5)) # Sleeps for some time.
condition.acquire()
num = random.randint(1, 10)
box.append(num) # Puts an item into box for consumption.
condition.notify() # Notifies the consumer about the availability.
print("Produced:", num)
condition.release()
def consumer(box, nitems):
for i in range(nitems):
condition.acquire()
condition.wait() # Blocks until an item is available for consumption.
print("%s: Acquired: %s" % (time.ctime(), box.pop()))
condition.release()
threads = []
"""
'nloops' is the number of times an item will be produced and
consumed.
"""
nloops = random.randrange(3, 6)
for func in [producer, consumer]:
threads.append(Thread(target=func, args=(box, nloops)))
threads[-1].start() # Starts the thread.
for thread in threads:
"""Waits for the threads to complete before moving on
with the main script.
"""
thread.join()
print("All done.")
| 2.25
| 2
|
src/decorators/overrides.py
|
ZelphirKaltstahl/QuestionsAndAnswers
| 0
|
12784592
|
<filename>src/decorators/overrides.py
def overrides(interface_class):
def overrider(method):
assert(method.__name__ in dir(interface_class))
return method
return overrider
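

# Hedged usage sketch (the classes below are illustrative, not part of the module):
# the decorator asserts at class-definition time that the overridden method name
# really exists on the interface class, catching renames and typos early.
class _Base:
    def run(self):
        raise NotImplementedError


class _Child(_Base):
    @overrides(_Base)
    def run(self):
        return "ok"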
| 2.53125
| 3
|
src/Shared/GMAO_Shared/GMAO_pyobs3/pyobs3/naaps.py
|
GEOS-ESM/AeroApps
| 2
|
12784593
|
#!/bin/env python
"""
Implements Python interface to NRL NAAPS files
"""
import os
import sys
from types import *
from pyhdf import SD
from glob import glob
from numpy import ones, concatenate, array,linspace,arange, transpose
from datetime import date, datetime, timedelta
from .config import strTemplate
MISSING = -9999.99
ALIAS = dict (latitude = 'lat' ,
longitude = 'lon' ,
elevation = 'zs' ,
time = 'Time')
ALIAS['532_attenuated_backscatter'] = 'taback'
ALIAS['532_attenuated_backscatter_error'] = 'taback_err'
ALIAS['532_attenuated_molecular_backscatter'] = 'mol_aback'
SDS = list(ALIAS.keys())
#.........................................................................................
class NAAPS(object):
"""
Base class for NAAPS object.
"""
def __init__ (self,Path,keep=None,Verbose=0,only_good=True):
"""
Creates an NAAPS object defining the attributes corresponding
to the SDS's on input.
The optional parameter *keep* is used to specify the number of scan
lines (from the left of the swath) to keep. This is needed for
coping with the row anomaly problem.
"""
# Initially are lists of numpy arrays for each granule
# ----------------------------------------------------
self.verb = Verbose
self.keep = keep
self.SDS = SDS
# Variable names
# --------------
self.Names = []
for name in SDS:
self.Names.append(name)
self.Names += ['nymd','nhms']
# Create empty lists for SDS to be read from orbit file;
# each element of the list contains data for one orbit
# ------------------------------------------------------
for name in self.Names:
self.__dict__[name] = []
self.time_ = [] # to hold datetime objects
# Read each orbit, appending them to the list
# -------------------------------------------
        if isinstance(Path, list):  # types.ListType no longer exists in Python 3
if len(Path) == 0:
self.nobs = 0
print("WARNING: Empty NAAPS object created")
return
else:
Path = [Path, ]
self._readList(Path)
# Make each attribute a single numpy array
# ----------------------------------------
for name in self.Names:
# print 'name',name, 'donnees',self.__dict__[name]
try:
self.__dict__[name] = concatenate((self.__dict__[name]))
except:
print("Failed concatenating "+name)
# Make aliases for compatibility with older code
# ----------------------------------------------
# Alias = ALIAS.keys()
for name in self.Names:
if name in SDS:
self.__dict__[ALIAS[name]] = self.__dict__[name]
#---
def _readList(self,List):
"""
Recursively, look for files in list; list items can
be files or directories.
"""
for item in List:
if os.path.isdir(item): self._readDir(item)
elif os.path.isfile(item): self._readOrbit(item)
else:
print("%s is not a valid file or directory, ignoring it"%item)
#---
def _readDir(self,dir):
"""Recursively, look for files in directory."""
for item in os.listdir(dir):
path = dir + os.sep + item
if os.path.isdir(path): self._readDir(path)
elif os.path.isfile(path): self._readOrbit(path)
else:
print("%s is not a valid file or directory, ignoring it"%item)
#---
def _readOrbit(self,filename):
"""Reads one CALIPSO orbit with Level 1.5 data."""
# Reference time
# --------------
REF_DATE = datetime(1993,1,1,0,0,0)
# Open the CALIPSO file and loop over the datasets,
# extracting GEOLOCATION and Data fields
# ----------------------------------------------
if self.verb:
print("[] working on <%s>"%filename)
f = SD.SD(filename)
# for group in self.SDS.keys():
for name in self.SDS:
v = name
print('v', v)
if v == 'time':
sd = f.select(v)
Time = sd.get()
nobs = len(Time)
nymd = ones(nobs).astype('int')
nhms = ones(nobs).astype('int')
self.__dict__[v].append(Time) # time as on file
for i in range(nobs):
yymmdd = Time[i]
nymd0 = int(Time[i])
nd = Time[i] - nymd0
nd0 = nd * 24.0
hh = int(nd0)
nd1 = nd0 - hh
nd2 = nd1 * 60
mm = int(nd2)
nd3 = nd2 - mm
nd4 = nd3 * 60
ss = int(nd4)
nymd[i] = 20000000 + nymd0
nhms[i] = ((hh * 100) + mm) * 100 + ss
self.nymd.append(nymd)
self.nhms.append(nhms)
year = int(nymd[i]/10000)
month = int((nymd[i] - 10000*year)/100)
day = nymd[i] - (year*10000 + month * 100)
self.time_.append(datetime(year,month,day,hh,mm,ss))
else:
sd = f.select(v)
data = sd.get() # most of parameter : data = (nobs) or (nobs,km) except L2 feature type(nobs,km,4)
data = transpose(data)
print('data', data.shape)
if self.keep != None:
self.__dict__[v].append(data[0:self.keep,:])
else:
self.__dict__[v].append(data)
#---
def writeg(self,g5,syn_time,nsyn=8,g5_h=None,g5_ab=None,filename=None,dir='.',expid='NAAPS',Verb=1):
"""
Writes gridded CALIPSO measurements to file (same grid as GEOS-5 file).
Verb -- Verbose level:
0 - really quiet (default)
1 - Warns if invalid file is found
2 - Prints out non-zero number of fires in each file.
"""
from gfio import GFIO
from binObs_ import binobs3dh
# Determine synoptic time range
# -----------------------------
dt = timedelta(seconds = 12. * 60. * 60. / nsyn)
t1, t2 = (syn_time-dt,syn_time+dt)
# Lat lon grid from GEOS-5 file
# ------------
im = 360
jm = 181
print('im,jm', im, jm)
glon = linspace(-180.,180.,im,endpoint=False)
glat = linspace(-90.,90.,jm)
print('glon', glon, glat)
dLon = 360. / im
dLat = 180. / ( jm - 1.)
print('dlon', dLon, dLat)
nymd = 10000 * syn_time.year + 100 * syn_time.month + syn_time.day
nhms = 10000 * syn_time.hour + 100 * syn_time.minute + syn_time.second
print('nymd=',nymd, 'nhms=',nhms)
        na_height = arange(0,8100,400) # height above sea level for NAAPS: 100 m for night, 400 m for day
print('na_height shape', na_height.shape, g5_h.shape)
g5_height = g5_h
km = g5_height.shape[0] # because it is at the edge
print('km', km, g5_height.shape, g5_height[:,0])
nobs = self.lon.shape
vtitle = [ 'taback',
'taback_err',
'mol_aback',
'height' ]
vname = ['taback','taback_err', 'mol_aback']
vunits = [ 'km-1 sr-1','km-1 sr-1', 'km-1 sr-1' ]
kmvar = [km, km, km]
title = 'Gridded NAAPS attenuated backscatter coeff lev Geos5'
source = 'NASA/GSFC/GMAO GEOS-5 Aerosol Group'
contact = 'Virginie'
if filename is None:
filename = '%s/%s.day.calipso_l3a.%d_%02dz.nc4'%(dir,expid,nymd,nhms/10000)
# QA filtering
# ------------
I_bad = ones(self.taback.shape) # bad data
I_bad = False
# Time filter of data
# -------------------
lon = self.lon
lat = self.lat
taback = _timefilter(self.time_,t1,t2,self.taback,I_bad)
taback_err = _timefilter(self.time_,t1,t2,self.taback_err,I_bad)
mol_aback = _timefilter(self.time_,t1,t2,self.mol_aback,I_bad)
# height = _timefilter(self.time_,t1,t2,na_height,I_bad)
print('taback', taback.shape)
# Create the file
# ---------------
f = GFIO()
glevs=arange(km)
f.create(filename, vname, nymd, nhms,
lon=glon, lat=glat, levs=glevs, levunits='m',
vtitle=vtitle, vunits=vunits,kmvar=kmvar,amiss=MISSING,
title=title, source=source, contact=contact)
# gObs=binobs3dh(lon[13:14],lat[13:14],taback[13:14,:],na_height,g5_height[:,13:14],im,jm,MISSING)
print('test', lon[10:11],lat[10:11],taback[10:11,:],na_height,g5_height[:,10:11])
gObs=binobs3dh(lon[10:11],lat[10:11],taback[10:11,:],na_height,g5_height[:,10:11],im,jm,MISSING)
print('gobs', gObs[357:358,101:102,:])
# Grid variable and write to file
# -------------------------------
f.write('taback', nymd, nhms, binobs3dh(lon,lat,taback,na_height,g5_height,im,jm,MISSING) )
f.write('taback_err', nymd, nhms, binobs3dh(lon,lat,taback_err,na_height,g5_height,im,jm,MISSING) )
f.write('mol_aback', nymd, nhms, binobs3dh(lon,lat,mol_aback,na_height,g5_height,im,jm,MISSING) )
# f.write('height', nymd, nhms, g5_height)
if Verb >=1:
print("[w] Wrote file "+filename)
#....................................................................
def _timefilter ( t, t1, t2, a, I_bad ):
filler = MISSING * ones(a.shape[1:])
b = a.copy()
for i in range(len(t)):
if (t[i]<t1) or (t[i]>=t2):
b[i] = filler
if len(b.shape) == 3:
b[I_bad,:] = MISSING
elif len(b.shape) == 2:
b[I_bad] = MISSING
else:
raise IndexError("Invalid rank=%d for time filtering"%len(b.shape))
return b
#---
def orbits (path, syn_time, nsyn=8, period='night', Verbose=0 ):
"""
Returns a list of CALIPSO orbits for a given product at given synoptic time.
On input,
path --- mounting point for the CALIPSO Level 1.5 files
syn_time --- synoptic time (timedate format)
nsyn --- number of synoptic times per day (optional)
"""
# Determine synoptic time range
# -----------------------------
dt = timedelta(seconds = 12. * 60. * 60. / nsyn)
t1, t2 = (syn_time-dt,syn_time+dt)
print("[*] ", t1,"|", t2)
today = syn_time
yesterday = today - timedelta(hours=24)
Files = []
for t in (yesterday,today):
yy, mm, dd = (t.year,t.month,t.day)
dirn = "%s/%02d/%s"%(path,mm,period)
Files += glob("%s/naaps_caliop_assim_*.cdf"%(dirn))
# print 'Files', dirn, Files
Orbits = []
for f in Files:
dirn, filen = os.path.split(f)
tokens = filen.split('_')
beg_yy = int(tokens[3][0:4])
beg_mm = int(tokens[3][4:6])
beg_dd = int(tokens[3][6:8])
beg_h = int(tokens[3][8:10])
beg_m = int(tokens[3][10:12])
t_beg = datetime(beg_yy,beg_mm,beg_dd,beg_h,beg_m,0)
t_end = t_beg + timedelta(minutes=90)
# t_end = datetime(end_yy,end_mm,end_dd,end_h,end_m,0)
# print 'year', beg_yy, 'month', beg_mm, 'day', beg_dd, 'hour', beg_h, 'min', beg_m
if (t_beg>=t1 and t_beg<t2) or (t_end>=t1 and t_end<t2):
print("[x] ", t_beg, '|', t_end)
Orbits += [f,]
if Verbose:
print("[] ", f)
return Orbits
#............................................................................
if __name__ == "__main__":
# syn_time = datetime(2008,6,30,0,0,0)
    # Time interval and time step
# ---------------------------
t_beg = datetime(2007,4,1,0)
t_end = datetime(2007,4,1,21)
dt = timedelta(seconds=3*60*60) # 3-hourly
t = t_beg - dt
while t < t_end:
t += dt
syn_time = t
Files = orbits('/nobackup/2/vbuchard/CALIPSO_L15/NAAPS/',syn_time,period='day',Verbose=1)
print('files',Files)
#def hold():
# NAAPS files
naap = NAAPS(Files,Verbose=1)
# GEOS-5 file
g_template = "/nobackup/2/vbuchard/CALIPSO_L15/GEOS-5/aback_63lay/Y%y4/M%m2/dR_MERRA-AA-r2_ext532nm_Nv_63layers.%y4%m2%d2_%h200z.nc4"
g_fn = strTemplate(g_template,dtime=syn_time)
lon=naap.lon
lat=naap.lat
        from gfio import GFIO  # imported locally (as in writeg) so the module can be parsed without gfio installed
        g = GFIO(g_fn)
g5_height = g.interp('h',lon,lat)
g5_aback = g.read('taback')
naap.writeg(g,syn_time,nsyn=8,g5_h=g5_height,g5_ab=g5_aback,filename=None,dir='/nobackup/2/vbuchard/CALIPSO_L15/',expid='NAAPS',Verb=1)
| 2.46875
| 2
|
setup.py
|
themantalope/spongemock
| 11
|
12784594
|
<filename>setup.py
import setuptools
from distutils.core import setup
from os import path
# Helper functions
# ----------------
def readFile(file):
with open(file) as f:
return f.read()
# Arguments
# ------------
CLASSIFIERS = []
# Dynamic info
# ------------
VERSION = '0.3.4'
CLASSIFIERS += [
'Development Status :: 3 - Alpha',
]
# Package/Dependency info
# ---------------
PACKAGES = [ 'spongemock' ]
PACKAGE_DIR = { 'spongemock': 'src' }
DATA_FILES = [ ('', ['README.rst','LICENSE']), ]
INSTALL_REQUIRES = [ 'pyperclip>=1.5.27' ]
# Static info
# -----------
NAME = 'spongemock'
DESCRIPTION = 'Mock some text like spongebob would. mOCk SoMe TexT lIKe SpONGebOb wOuLd.'
LONG_DESCRIPTION = readFile(path.join(path.dirname(path.abspath(__file__)), 'README.rst'))
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
LICENSE = 'MIT License'
URL = 'https://github.com/nkrim/spongemock'
KEYWORDS = 'spongemock spongebob squarepants meme mock text random'
ENTRY_POINTS = { 'console_scripts': [ 'spongemock = spongemock.__main__:main' ] }
CLASSIFIERS += [
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Text Processing',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: General',
'Topic :: Utilities',
]
ZIP_SAFE = True
# Setup call
# ----------
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
keywords=KEYWORDS,
entry_points=ENTRY_POINTS,
packages=PACKAGES,
package_dir=PACKAGE_DIR,
data_files=DATA_FILES,
install_requires=INSTALL_REQUIRES,
classifiers=CLASSIFIERS,
zip_safe=ZIP_SAFE )
| 1.960938
| 2
|
leetcode/24.swap-nodes-in-pairs.py
|
schio/algorithm_test
| 0
|
12784595
|
#
# @lc app=leetcode id=24 lang=python3
#
# [24] Swap Nodes in Pairs
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if head and head.next:
point = head.next
head.next = self.swapPairs(point.next)
point.next = head
return point
return head
# @lc code=end
| 3.609375
| 4
|
wrappers/arlexecute/calibration/__init__.py
|
ska-telescope/algorithm-reference-library
| 22
|
12784596
|
__all__ = ['calibration', 'calibration_control', 'iterators', 'operations', 'pointing', 'rcal']
| 1.09375
| 1
|
601-700/681-690/684-redundantConnection/redundantConnection.py
|
xuychen/Leetcode
| 0
|
12784597
|
class UnionFind(object):
def __init__(self):
self.table = {}
def union(self, x, y):
self.table[self.find(x)] = self.find(y)
def find(self, x):
self.table.setdefault(x, x)
return x if self.table[x] == x else self.find(self.table[x])
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
uf = UnionFind()
result = None
for edge in edges:
x, y = edge
if uf.find(x) != uf.find(y):
uf.union(x, y)
elif not result:
result = edge
else:
result = edges[-1]
break
return result
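

# Hedged usage sketch (the edge list is illustrative, not a LeetCode test case):
# in this 3-node cycle the edge that closes the cycle, [2, 3], is the redundant one.
if __name__ == "__main__":
    print(Solution().findRedundantConnection([[1, 2], [1, 3], [2, 3]]))  # [2, 3]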
| 3.4375
| 3
|
Beginner/URI_1962.py
|
rbshadow/Python_URI
| 3
|
12784598
|
<reponame>rbshadow/Python_URI
def math():
test_case = int(input())
for i in range(test_case):
i_put = int(input())
if i_put == 2015:
print('1 A.C.')
else:
if i_put > 2015:
print((i_put - 2015) + 1, 'A.C.')
elif i_put < 2015:
print(2015 - i_put, 'D.C.')
if __name__ == '__main__':
math()
| 3.5
| 4
|
colight-master/run_batch.py
|
utkachenko/Con-MATSCo
| 1
|
12784599
|
import runexp
import testexp
import summary
memo = "multi_phase/sumo/pipeline"
runexp.main(memo)
print("****************************** runexp ends (generate, train, test)!! ******************************")
summary.main(memo)
print("****************************** summary_detail ends ******************************")
| 1.539063
| 2
|
examples/NeuroML/FN.py
|
29riyasaxena/MDF
| 12
|
12784600
|
from neuromllite import (
Network,
Cell,
Population,
Synapse,
RectangularRegion,
RandomLayout,
)
from neuromllite import (
Projection,
RandomConnectivity,
OneToOneConnector,
Simulation,
InputSource,
Input,
)
from neuromllite.NetworkGenerator import check_to_generate_or_run
import sys
def generate():
dt = 0.05
simtime = 100
################################################################################
### Build new network
net = Network(id="FN")
net.notes = "<NAME>umo cell model - originally specified in NeuroML/LEMS"
net.parameters = {
"initial_w": 0.0,
"initial_v": -1,
"a_v": -0.3333333333333333,
"b_v": 0.0,
"c_v": 1.0,
"d_v": 1,
"e_v": -1.0,
"f_v": 1.0,
"time_constant_v": 1.0,
"a_w": 1.0,
"b_w": -0.8,
"c_w": 0.7,
"time_constant_w": 12.5,
"threshold": -1.0,
"mode": 1.0,
"uncorrelated_activity": 0.0,
"Iext": 0,
}
cellInput = Cell(id="fn", lems_source_file="FN_Definitions.xml", parameters={})
for p in net.parameters:
cellInput.parameters[p] = p
net.cells.append(cellInput)
r1 = RectangularRegion(
id="region1", x=0, y=0, z=0, width=1000, height=100, depth=1000
)
net.regions.append(r1)
pop = Population(
id="FNpop",
size="1",
component=cellInput.id,
properties={"color": "0.2 0.2 0.2", "radius": 3},
random_layout=RandomLayout(region=r1.id),
)
net.populations.append(pop)
new_file = net.to_json_file("%s.json" % net.id)
################################################################################
### Build Simulation object & save as JSON
sim = Simulation(
id="Sim%s" % net.id,
network=new_file,
duration=simtime,
dt=dt,
seed=123,
recordVariables={"V": {"all": "*"}, "W": {"all": "*"}},
plots2D={
"VW": {"x_axis": "%s/0/fn/V" % pop.id, "y_axis": "%s/0/fn/W" % pop.id}
},
)
sim.to_json_file()
return sim, net
if __name__ == "__main__":
sim, net = generate()
################################################################################
### Run in some simulators
import sys
check_to_generate_or_run(sys.argv, sim)
| 2.6875
| 3
|
spotify-lyrics.py
|
justinqle/spotify-lyrics
| 0
|
12784601
|
<filename>spotify-lyrics.py
#!/usr/bin/env python3.7
import os
import subprocess
import sys
import spotipy
import spotipy.util as util
import lyricsgenius
scope = 'user-read-currently-playing'
username = os.getenv('SPOTIFY_USERNAME') # environment variable specifying Spotify username
if username is None:
print("Please specify Spotify username in an environment variable called SPOTIFY_USERNAME.")
sys.exit(1)
# Put Spotify developer credentials here
spotify_id = ""
spotify_secret = ""
# Put Genius developer credentials here
genius_access_token = ""
try:
token = util.prompt_for_user_token(username=username,
scope=scope,
client_id=spotify_id,
client_secret=spotify_secret,
redirect_uri='http://localhost/')
except Exception:
sys.exit(1)
spotify = spotipy.Spotify(auth=token) # Spotipy API wrapper
try:
playing = spotify.currently_playing()
except Exception:
print("Network error, please verify connection.")
sys.exit(1)
if playing is None:
print("No song is currently playing.")
sys.exit(1)
name = playing['item']['name']
# Song names with parentheses
name = name.split('(')[0].rstrip()
artist = playing['item']['artists'][0]['name']
genius = lyricsgenius.Genius(genius_access_token) # Genius API wrapper
song = genius.search_song(name, artist)
if song is not None:
rows, columns = subprocess.check_output(['stty', 'size']).decode().split()
columns = int(columns)
print('-' * columns)
print(song.lyrics)
| 2.984375
| 3
|
deploy/httpd/dump.py
|
vmlaker/wabbit
| 2
|
12784602
|
"""
Dump Apache HTTPD configuration to stdout.
"""
from os import chdir
from os.path import dirname, join, normpath, realpath
import sys
import coils
www_directory = sys.argv[1]
config_fname = sys.argv[2] if len(sys.argv)>=3 else 'wabbit.conf'
# Load the configuration, and add to it path to www.
config = coils.Config(config_fname)
config['www_directory'] = normpath(www_directory)
# Go into the directory of this file.
this_dir = dirname(realpath(__file__))
chdir(this_dir)
# Create a httpd.conf file with text replacement as per the configuration.
with open('httpd.conf.in') as inf:
for line in inf.readlines():
line = line.rstrip()
for key, val in config.items():
line = line.replace('@{}@'.format(key), val)
print(line)
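

def _example_substitution():
    """Hedged illustration with a hypothetical key/value pair (not read from any
    real wabbit.conf): shows how the @key@ placeholders above get replaced."""
    demo_config = {'port': '8080'}
    line = 'Listen @port@'
    for key, val in demo_config.items():
        line = line.replace('@{}@'.format(key), val)
    return line  # 'Listen 8080'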
| 2.96875
| 3
|
test/test_generator.py
|
jrimyak/CSE-5525-Semantic-Parsing
| 0
|
12784603
|
<filename>test/test_generator.py
# coding: utf-8
import os
import unittest
from gpsr_command_understanding.generation import generate_sentence_parse_pairs
from gpsr_command_understanding.generator import Generator
from gpsr_command_understanding.grammar import NonTerminal, tree_printer, expand_shorthand
from gpsr_command_understanding.loading_helpers import load_all_2018_by_cat, load_all_2019
from gpsr_command_understanding.parser import GrammarBasedParser
GRAMMAR_DIR_2018 = os.path.abspath(os.path.dirname(__file__) + "/../resources/generator2018")
GRAMMAR_DIR_2019 = os.path.abspath(os.path.dirname(__file__) + "/../resources/generator2019")
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
class TestGenerator(unittest.TestCase):
def setUp(self) -> None:
self.generator = Generator(grammar_format_version=2019)
def test_parse_rule(self):
rules = {}
test = self.generator.lambda_parser.parse("(test (lambda $1 :e .(yo 1)))")
print(test.pretty())
#parse_rule("$vbgopl to the {room 1}, $vbfind (someone | a person), and answer a {question} = (say (answer {question}) (lambda $1:e (person $1) (at $1 {room 1})))", rules)
def test_ignore_comment(self):
test = self.generator.generator_grammar_parser.parse("")
test = self.generator.generator_grammar_parser.parse("# this is a comment")
test = self.generator.generator_grammar_parser.parse("; this is a comment")
test = self.generator.generator_grammar_parser.parse("// this is a comment")
test = self.generator.generator_grammar_parser.parse('; grammar name Category I')
test = self.generator.lambda_parser.parse("# test")
def test_parse_basic(self):
test = self.generator.generator_grammar_parser.parse("$test = {pron} went to the mall and {location} $go $home")
print(test.pretty())
def test_parse_wildcards(self):
test = self.generator.generator_sequence_parser.parse(
"Go to the {location placement 1} and get the {kobject}. Then give it to {name 1} who is next to {name 2} at the {location beacon 1} in the {location room}")
print(test.pretty())
self.assertEqual(self.generator.generator_sequence_parser.parse("{location room}"),
self.generator.generator_sequence_parser.parse("{room}"))
self.assertEqual(self.generator.generator_sequence_parser.parse("{location beacon}"),
self.generator.generator_sequence_parser.parse("{beacon}"))
self.assertEqual(self.generator.generator_sequence_parser.parse("{kobject}"),
self.generator.generator_sequence_parser.parse("{object known}"))
def test_expand_shorthand(self):
test = self.generator.generator_grammar_parser.parse("$test = top choice | (level one (level two alpha | level two beta))")
result = expand_shorthand(test)
self.assertEqual(len(result), 3)
def test_parse_choice(self):
test = self.generator.generator_grammar_parser.parse("$test = ( oneword | two words)")
print(test.pretty())
test = self.generator.generator_grammar_parser.parse("$test = ( front | back | main | rear ) $ndoor")
print(test.pretty())
top_choice = self.generator.generator_grammar_parser.parse("$test = front | back")
top_choice_short = self.generator.generator_grammar_parser.parse("$test = (front | back)")
self.assertEqual(top_choice, top_choice_short)
short_mix_choice = self.generator.generator_grammar_parser.parse("$test = aa | aa ba")
print(short_mix_choice.pretty())
complex_choice = self.generator.generator_grammar_parser.parse("$phpeople = everyone | all the (people | men | women | guests | elders | children)")
print(complex_choice.pretty())
print(tree_printer(complex_choice))
def test_generate(self):
generator = Generator(grammar_format_version=2018)
grammar = generator.load_rules(os.path.join(FIXTURE_DIR, "grammar.txt"))
semantics = generator.load_semantics_rules(os.path.join(FIXTURE_DIR, "semantics.txt"))
pairs = list(generate_sentence_parse_pairs(NonTerminal("Main"),grammar, semantics))
self.assertEqual(len(pairs), 6)
def test_load_2018(self):
generator = Generator(grammar_format_version=2018)
all_2018= load_all_2018_by_cat(generator, GRAMMAR_DIR_2018, expand_shorthand=False)
# To manually inspect correctness for now...
"""for nonterm, rules in all_2018[0].items():
print(nonterm)
print("")
for rule in rules:
print(rule.pretty())
print("---")"""
def test_load_2019(self):
generator = Generator(grammar_format_version=2019)
all_2019 = load_all_2019(generator, GRAMMAR_DIR_2019, expand_shorthand=False)
# To manually inspect correctness for now...
"""for nonterm, rules in all_2019[0].items():
print(nonterm)
print("")
for rule in rules:
print(rule.pretty())
print("---")"""
| 2.75
| 3
|
src/titanic.py
|
santoshilam/titanic_reproducibility
| 0
|
12784604
|
<reponame>santoshilam/titanic_reproducibility
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 09:03:52 2020
@author: santo
"""
# linear algebra
import numpy as np
# data processing
import pandas as pd
# data visualization
import seaborn as sns
"""%matplotlib inline"""
from matplotlib import pyplot as plt
from matplotlib import style
# Algorithms
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
test_df = pd.read_csv("..\\data\\processed\\test.csv")
train_df = pd.read_csv("..\\data\\processed\\train.csv")
survived = 'survived'
not_survived = 'not survived'
fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(10, 4))
women = train_df[train_df['Sex']=='female']
men = train_df[train_df['Sex']=='male']
ax = sns.distplot(women[women['Survived']==1].Age.dropna(), bins=18, label = survived, ax = axes[0], kde =False)
ax = sns.distplot(women[women['Survived']==0].Age.dropna(), bins=40, label = not_survived, ax = axes[0], kde =False)
ax.legend()
ax.set_title('Female')
ax = sns.distplot(men[men['Survived']==1].Age.dropna(), bins=18, label = survived, ax = axes[1], kde = False)
ax = sns.distplot(men[men['Survived']==0].Age.dropna(), bins=40, label = not_survived, ax = axes[1], kde = False)
ax.legend()
_ = ax.set_title('Male')
"""Data visualization, this is a saved fig of the above visualization
plt.savefig('male and female.png')
reproduce visualization and compare if necessary"""
| 2.5625
| 3
|
afs/orm/__init__.py
|
chanke/afspy
| 0
|
12784605
|
<filename>afs/orm/__init__.py
"""
module dealing with the object-realational mapping
"""
__all__ = ["DBMapper", ]
from afs.orm import DBMapper
from afs.orm import Historic
def setup_options():
"""
add logging options to cmd-line,
but surpress them, so that they don't clobber up the help-messages
"""
import argparse
my_argparser = argparse.ArgumentParser(add_help = False)
# setup DB_CACHE options
my_argparser.add_argument("--DB_CACHE", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_TIME_TO_CACHE", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_HISTORY", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_HISTORY_NUM_DAYS", \
type = int, help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_HISTORY_MIN_INTERVAL_MINUTES", \
type = int, help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_SID" , default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_TYPE" , default = "", \
help = argparse.SUPPRESS)
# mysql options
my_argparser.add_argument("--DB_HOST", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_PORT", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_USER", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_PASSWD" , default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--DB_FLUSH", default = "", \
help = argparse.SUPPRESS)
# for logging, but don't show up in --help
my_argparser.add_argument("--LogLevel_sqlalchemy", default = "", \
help = argparse.SUPPRESS)
my_argparser.add_argument("--LogLevel_DB_CACHE", default = "", \
help = argparse.SUPPRESS)
return my_argparser
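

def _example_parent_parser():
    """Hedged usage sketch: setup_options() is meant to be passed as a parent parser
    so the suppressed DB/logging flags are accepted but hidden from --help.
    The program name is illustrative only."""
    import argparse
    cli = argparse.ArgumentParser(prog="afs-tool", parents=[setup_options()])
    args = cli.parse_args(["--DB_TYPE", "mysql"])
    return args.DB_TYPE  # 'mysql'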
| 2.40625
| 2
|
server/scry.py
|
Shuzhengz/Scry
| 0
|
12784606
|
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from database import Database
app = FastAPI()
database = Database()
@app.get("/")
def read_root():
return {
"Collections": ["ports", "ssh_logins", "user_connections", "network_traffic", "storage"]
}
@app.get("/collection/{collection_name}")
def read_item(collection_name: str):
headers = {'Access-Control-Allow-Origin': '*'}
documents = []
for doc in database.database[collection_name].find({}):
doc.pop("_id")
documents.append(doc)
return JSONResponse(content=documents, headers=headers)
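

# Hedged local-run sketch (assumes uvicorn is installed; host and port are
# illustrative defaults, not taken from the original deployment):
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)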
| 2.859375
| 3
|
galois_field/GFp.py
|
syakoo/galois-field
| 8
|
12784607
|
from __future__ import annotations
from .core.ElementInGFp import ElementInGFp
from .core import validator, primitive_roots
class GFp:
"""Galois Field: GF(p)
Args:
p (int): A prime number.
Examples:
>>> from galois_field import GFp
In this case, p = 11.
>>> gf = GFp(11)
Generate the element in GF(11).
>>> gf.elm(5) # 5 (mod 11)
ElementInGFp(5, 11)
>>> gf.elm(13) # 2 (mod 11)
ElementInGFp(2, 11)
"""
def __init__(self, p: int):
self.__p = p
@property
def p(self):
"""A prime number. Read-only."""
return self.__p
def __str__(self) -> str:
return f'GF({self.p})'
def is_valid(self) -> bool:
"""Determine if this field is valid.
Returns:
bool: Is valid ?
"""
return validator.is_prime(self.__p)
def elm(self, value: int) -> ElementInGFp:
"""Generate the element from a value in GF(p).
Args:
value (int): An input value.
Returns:
ElementInGFp: The element in GF(p).
"""
return ElementInGFp(value, self.p)
def random_primitive_elm(self) -> ElementInGFp:
"""Return a primitive element in GF(p) randomly.
Returns:
ElementInGFp: A primitive root in GF(p)
"""
return primitive_roots.random_primitive_root_over_Fp(self.__p)
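

# Hedged usage sketch mirroring the class docstring (p=11); the concrete element
# returned by random_primitive_elm() depends on the core modules, so only the
# calls themselves are illustrated here.
def _example_gfp():
    gf = GFp(11)
    assert gf.is_valid()              # 11 is prime
    return gf.elm(13), gf.random_primitive_elm()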
| 3.296875
| 3
|
models.py
|
linda-huang/SEAL_OGB
| 0
|
12784608
|
<reponame>linda-huang/SEAL_OGB
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from torch.nn import (ModuleList, Linear, Conv1d, MaxPool1d, Embedding, ReLU,
Sequential, BatchNorm1d as BN)
import torch.nn.functional as F
from torch_geometric.nn import (GCNConv, SAGEConv, GINConv,
                                global_sort_pool, global_add_pool, global_mean_pool)
from torch_geometric.utils import to_dense_batch  # used by global_random_pool below
import pdb
def global_random_pool(x, batch, k):
r"""The global pooling operator from the `"An End-to-End Deep Learning
Architecture for Graph Classification"
<https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf>`_ paper,
where node features are sorted in descending order based on their last
feature channel. The first :math:`k` nodes form the output of the layer.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
batch (LongTensor): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots,
B-1\}}^N`, which assigns each node to a specific example.
k (int): The number of nodes to hold for each graph.
:rtype: :class:`Tensor`
let's change this to random pooling!
"""
fill_value = x.min().item() - 1
batch_x, _ = to_dense_batch(x, batch, fill_value)
B, N, D = batch_x.size()
# _, perm = batch_x[:, :, -1].sort(dim=-1, descending=True)
perm = torch.stack([torch.randperm(N) for i in range(B)])
arange = torch.arange(B, dtype=torch.long, device=perm.device) * N
perm = perm + arange.view(-1, 1)
batch_x = batch_x.view(B * N, D)
batch_x = batch_x[perm]
batch_x = batch_x.view(B, N, D)
if N >= k:
batch_x = batch_x[:, :k].contiguous()
else:
expand_batch_x = batch_x.new_full((B, k - N, D), fill_value)
batch_x = torch.cat([batch_x, expand_batch_x], dim=1)
batch_x[batch_x == fill_value] = 0
x = batch_x.view(B, k * D)
return x
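

# Hedged shape check with synthetic tensors (illustrative only, not part of SEAL):
# two graphs with 3 and 2 nodes pooled to k=2 nodes each give a [2, 2 * D] output.
def _example_global_random_pool():
    x = torch.randn(5, 4)                            # 5 nodes, D=4 features
    batch = torch.tensor([0, 0, 0, 1, 1])            # graph id per node
    return global_random_pool(x, batch, k=2).shape   # torch.Size([2, 8])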
class GCN(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, max_z, train_dataset,
use_feature=False, node_embedding=None, dropout=0.5):
super(GCN, self).__init__()
self.use_feature = use_feature
self.node_embedding = node_embedding
self.max_z = max_z
self.z_embedding = Embedding(self.max_z, hidden_channels)
self.convs = ModuleList()
initial_channels = hidden_channels
if self.use_feature:
initial_channels += train_dataset.num_features
if self.node_embedding is not None:
initial_channels += node_embedding.embedding_dim
self.convs.append(GCNConv(initial_channels, hidden_channels))
for _ in range(num_layers - 1):
self.convs.append(GCNConv(hidden_channels, hidden_channels))
self.dropout = dropout
self.lin1 = Linear(hidden_channels, hidden_channels)
self.lin2 = Linear(hidden_channels, 1)
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, z, edge_index, batch, x=None, edge_weight=None, node_id=None):
z_emb = self.z_embedding(z)
if z_emb.ndim == 3: # in case z has multiple integer labels
z_emb = z_emb.sum(dim=1)
if self.use_feature and x is not None:
x = torch.cat([z_emb, x.to(torch.float)], 1)
else:
x = z_emb
if self.node_embedding is not None and node_id is not None:
n_emb = self.node_embedding(node_id)
x = torch.cat([x, n_emb], 1)
for conv in self.convs[:-1]:
x = conv(x, edge_index, edge_weight)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[-1](x, edge_index, edge_weight)
if True: # center pooling
_, center_indices = np.unique(batch.cpu().numpy(), return_index=True)
x_src = x[center_indices]
x_dst = x[center_indices + 1]
x = (x_src * x_dst)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
else: # sum pooling
x = global_add_pool(x, batch)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
return x
class SAGE(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, max_z, train_dataset=None,
use_feature=False, node_embedding=None, dropout=0.5):
super(SAGE, self).__init__()
self.use_feature = use_feature
self.node_embedding = node_embedding
self.max_z = max_z
self.z_embedding = Embedding(self.max_z, hidden_channels)
self.convs = ModuleList()
initial_channels = hidden_channels
if self.use_feature:
initial_channels += train_dataset.num_features
if self.node_embedding is not None:
initial_channels += node_embedding.embedding_dim
self.convs.append(SAGEConv(initial_channels, hidden_channels))
for _ in range(num_layers - 1):
self.convs.append(SAGEConv(hidden_channels, hidden_channels))
self.dropout = dropout
self.lin1 = Linear(hidden_channels, hidden_channels)
self.lin2 = Linear(hidden_channels, 1)
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, z, edge_index, batch, x=None, edge_weight=None, node_id=None):
z_emb = self.z_embedding(z)
if z_emb.ndim == 3: # in case z has multiple integer labels
z_emb = z_emb.sum(dim=1)
if self.use_feature and x is not None:
x = torch.cat([z_emb, x.to(torch.float)], 1)
else:
x = z_emb
if self.node_embedding is not None and node_id is not None:
n_emb = self.node_embedding(node_id)
x = torch.cat([x, n_emb], 1)
for conv in self.convs[:-1]:
x = conv(x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[-1](x, edge_index)
if True: # center pooling
_, center_indices = np.unique(batch.cpu().numpy(), return_index=True)
x_src = x[center_indices]
x_dst = x[center_indices + 1]
x = (x_src * x_dst)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
else: # sum pooling
x = global_add_pool(x, batch)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
return x
# An end-to-end deep learning architecture for graph classification, AAAI-18.
class DGCNN(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, max_z, k=0.6, train_dataset=None,
dynamic_train=False, GNN=GCNConv, use_feature=False,
node_embedding=None, random_pool=False):
super(DGCNN, self).__init__()
self.use_feature = use_feature
self.node_embedding = node_embedding
if k <= 1: # Transform percentile to number.
if train_dataset is None:
k = 30
else:
if dynamic_train:
sampled_train = train_dataset[:1000]
else:
sampled_train = train_dataset
num_nodes = sorted([g.num_nodes for g in sampled_train])
k = num_nodes[int(math.ceil(k * len(num_nodes))) - 1]
k = max(10, k)
self.k = int(k)
self.max_z = max_z
self.z_embedding = Embedding(self.max_z, hidden_channels)
self.convs = ModuleList()
initial_channels = hidden_channels
if self.use_feature:
initial_channels += train_dataset.num_features
if self.node_embedding is not None:
initial_channels += node_embedding.embedding_dim
self.convs.append(GNN(initial_channels, hidden_channels))
for i in range(0, num_layers-1):
self.convs.append(GNN(hidden_channels, hidden_channels))
self.convs.append(GNN(hidden_channels, 1))
conv1d_channels = [16, 32]
total_latent_dim = hidden_channels * num_layers + 1
conv1d_kws = [total_latent_dim, 5]
self.conv1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0],
conv1d_kws[0])
self.maxpool1d = MaxPool1d(2, 2)
self.conv2 = Conv1d(conv1d_channels[0], conv1d_channels[1],
conv1d_kws[1], 1)
dense_dim = int((self.k - 2) / 2 + 1)
dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
self.lin1 = Linear(dense_dim, 128)
self.lin2 = Linear(128, 1)
self.random_pool = random_pool
def forward(self, z, edge_index, batch, x=None, edge_weight=None, node_id=None):
z_emb = self.z_embedding(z)
if z_emb.ndim == 3: # in case z has multiple integer labels
z_emb = z_emb.sum(dim=1)
if self.use_feature and x is not None:
x = torch.cat([z_emb, x.to(torch.float)], 1)
else:
x = z_emb
if self.node_embedding is not None and node_id is not None:
n_emb = self.node_embedding(node_id)
x = torch.cat([x, n_emb], 1)
xs = [x]
for conv in self.convs:
xs += [torch.tanh(conv(xs[-1], edge_index, edge_weight))]
x = torch.cat(xs[1:], dim=-1)
# Global pooling.
if self.random_pool:
x = global_random_pool(x, batch, self.k)
else:
x = global_sort_pool(x, batch, self.k)
x = x.unsqueeze(1) # [num_graphs, 1, k * hidden]
x = F.relu(self.conv1(x))
x = self.maxpool1d(x)
x = F.relu(self.conv2(x))
x = x.view(x.size(0), -1) # [num_graphs, dense_dim]
# MLP.
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin2(x)
return x
class GIN(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, max_z, train_dataset,
use_feature=False, node_embedding=None, dropout=0.5,
jk=True, train_eps=False):
super(GIN, self).__init__()
self.use_feature = use_feature
self.node_embedding = node_embedding
self.max_z = max_z
self.z_embedding = Embedding(self.max_z, hidden_channels)
self.jk = jk
initial_channels = hidden_channels
if self.use_feature:
initial_channels += train_dataset.num_features
if self.node_embedding is not None:
initial_channels += node_embedding.embedding_dim
self.conv1 = GINConv(
Sequential(
Linear(initial_channels, hidden_channels),
ReLU(),
Linear(hidden_channels, hidden_channels),
ReLU(),
BN(hidden_channels),
),
train_eps=train_eps)
self.convs = torch.nn.ModuleList()
for i in range(num_layers - 1):
self.convs.append(
GINConv(
Sequential(
Linear(hidden_channels, hidden_channels),
ReLU(),
Linear(hidden_channels, hidden_channels),
ReLU(),
BN(hidden_channels),
),
train_eps=train_eps))
self.dropout = dropout
if self.jk:
self.lin1 = Linear(num_layers * hidden_channels, hidden_channels)
else:
self.lin1 = Linear(hidden_channels, hidden_channels)
self.lin2 = Linear(hidden_channels, 1)
def forward(self, z, edge_index, batch, x=None, edge_weight=None, node_id=None):
z_emb = self.z_embedding(z)
if z_emb.ndim == 3: # in case z has multiple integer labels
z_emb = z_emb.sum(dim=1)
if self.use_feature and x is not None:
x = torch.cat([z_emb, x.to(torch.float)], 1)
else:
x = z_emb
if self.node_embedding is not None and node_id is not None:
n_emb = self.node_embedding(node_id)
x = torch.cat([x, n_emb], 1)
x = self.conv1(x, edge_index)
xs = [x]
for conv in self.convs:
x = conv(x, edge_index)
xs += [x]
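        # Jumping-knowledge style readout: concatenate every layer's node embeddings
        # before mean pooling when jk=True, otherwise pool only the last layer's output.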
if self.jk:
x = global_mean_pool(torch.cat(xs, dim=1), batch)
else:
x = global_mean_pool(xs[-1], batch)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lin2(x)
return x
| 2.734375
| 3
|
tools/evaluate.py
|
anonymous202201/fast_transferable_blackbox_attack
| 0
|
12784609
|
""" Script for evaluating AE examples.
"""
import argparse
import importlib
import os
import shutil
import sys
import torch
import torchvision
from tqdm import tqdm
from fta.utils.dataset_utils import imagenet_utils
from fta.utils.torch_utils import image_utils, model_utils
import pdb
# Sample Usage:
# CUDA_VISIBLE_DEVICES=0 python tools/evaluate.py
def evaluate(args):
imagenet_label_dict = imagenet_utils.load_imagenet_label_dict()
target_model_type = args.target_model
model_class = getattr(torchvision.models, args.target_model)
model = model_class(pretrained=True).cuda()
model.eval()
img_mean, img_std = imagenet_utils.get_imagenet_normalize()
torch_normalize = model_utils.Normalize(img_mean, img_std)
img_names = os.listdir(args.benign_dir)
acc_count = 0
total_count = 0
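    # A pair counts towards accuracy when the model's top-1 prediction on the
    # adversarial image matches its prediction on the benign image, so the attack
    # success rate (ASR) printed below is simply 1 - accuracy.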
for img_name in tqdm(img_names):
img_name_noext = os.path.splitext(img_name)[0]
img_path_benign = os.path.join(args.benign_dir, img_name)
img_benign_var = image_utils.load_img(
img_path_benign, expand_batch_dim=True).cuda()
img_benign_var = torch_normalize(img_benign_var)
pred_benign = torch.argmax(model(img_benign_var), axis=1)
pred_benign_id = pred_benign.cpu().numpy()[0]
img_path_adv = os.path.join(
args.adv_dir,
img_name_noext + "_adv.png")
if not os.path.exists(img_path_adv):
print("adv image not found.")
continue
img_adv_var = image_utils.load_img(
img_path_adv, expand_batch_dim=True).cuda()
img_adv_var = torch_normalize(img_adv_var)
pred_adv = torch.argmax(model(img_adv_var), axis=1)
pred_adv_id = pred_adv.cpu().numpy()[0]
print("ID: {0}, ori: {1}, adv: {2}".format(
img_name_noext,
imagenet_label_dict[pred_benign_id],
imagenet_label_dict[pred_adv_id]))
if pred_benign_id == pred_adv_id:
acc_count += 1
total_count += 1
accuracy = float(acc_count) / float(total_count)
print("Evaluate path: ", args.adv_dir)
print("Target Model: ", args.target_model)
print("ASR: ", 1.0 - accuracy)
print("{} over {}".format(total_count - acc_count, total_count))
return
def parse_args(args):
parser = argparse.ArgumentParser(
description="PyTorch AE evaluator.")
parser.add_argument(
'--benign_dir',
default="./sample_images",
type=str)
parser.add_argument(
'--adv_dir', default="./temp_outputs", type=str)
parser.add_argument(
'--target_model', default="resnet152", type=str)
return parser.parse_args(args)
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
args_dic = vars(args)
evaluate(args)
if __name__ == "__main__":
main()
| 2.421875
| 2
|
PoliBot/plugins/guilds/data.py
|
PoliticalHangouts/PoliticalHangout
| 0
|
12784610
|
<reponame>PoliticalHangouts/PoliticalHangout<gh_stars>0
import typing
import discord
from replit import db
class Colour:
def __init__(
self,
hex: int
) -> None:
self.hex=hex
class Guild:
def __init__(
self,
colour: Colour,
name: str
) -> None:
self.colour=colour.hex
self.name=name
        self._members: typing.List[discord.Member] = []  # backing list for the read-only "members" property
@property
def members(
self
) -> typing.List[discord.Member]:
        return self._members
def get_total_points(
self
) -> int:
total = 0
for member in self.members:
if str(member.id) in db["users"]:
total+=db["users"][str(member.id)]
else:
total+=500
return total
    def not_yet_guild(
        func
    ) -> typing.Any:
        # Decorator: run the wrapped method only when the member is not yet in the guild.
        def wrapper(self, member, *args, **kwargs):
            if member not in self.members:
                return func(self, member, *args, **kwargs)
            return None
        return wrapper
    def in_guild(
        func
    ) -> typing.Any:
        # Decorator: run the wrapped method only when the member is already in the guild.
        def wrapper(self, member, *args, **kwargs):
            if member in self.members:
                return func(self, member, *args, **kwargs)
            return None
        return wrapper
@not_yet_guild
def add_member(
self,
member: discord.Member
) -> None:
        self._members.append(member)
@in_guild
def remove_member(
self,
member: discord.Member
) -> None:
        self._members.remove(member)
| 2.890625
| 3
|
sknano/setup.py
|
haidi-ustc/scikit-nano
| 21
|
12784611
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from __future__ import unicode_literals
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('sknano', parent_package, top_path)
config.add_subpackage('apps')
config.add_subpackage('core')
config.add_subpackage('generators')
config.add_subpackage('io')
config.add_subpackage('scripts')
config.add_subpackage('structures')
config.add_subpackage('testing')
config.add_subpackage('utils')
config.add_data_dir('data')
#config.make_config_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1.953125
| 2
|
data_processing/CSV_Filtration.py
|
BrigitaPetk/support_department_dashboard
| 0
|
12784612
|
import pandas as pd
import datetime
from pathlib import Path
import numpy as np
def plus3h(data):
columns = ['Created','First Lock', 'FirstResponse', 'Close Time']
columns_to_3 = {column: f"{column}+3" for column in columns}
for col in columns:
for index, row in data.iterrows():
row = str(row[col])
if row == str(np.nan):
data.loc[index, columns_to_3[col]] = 'NaN'
else:
time = datetime.datetime.fromisoformat(row)
plus = time + datetime.timedelta(hours=3)
data.loc[index, columns_to_3[col]] = plus
return columns_to_3.values(), data
def weekend_filtration(day):
weekday = day.weekday()
if weekday == 5:
modified_date = day.replace(hour= 8, minute= 0, second=0)
return modified_date + datetime.timedelta(days=2)
elif weekday == 6:
modified_date = day.replace(hour= 8, minute= 0, second=0)
return modified_date + datetime.timedelta(days=1)
else:
return day
def working_hours_filtration(day):
start_of_working_hours = datetime.datetime(1900, 1, 1, 8, 0, 0)
end_of_working_hours = datetime.datetime(1900, 1, 1, 17, 0, 0)
if day.time() < start_of_working_hours.time():
modifies_date2 = day.replace(hour= 8, minute= 0, second=0)
return modifies_date2
elif day.time() > end_of_working_hours.time():
modifies_date2 = day.replace(hour= 8, minute= 0, second=0)
modifies_date2 = modifies_date2 + datetime.timedelta(days=1)
return modifies_date2
else:
return day
def holiday_filtration(day):
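    # Lithuanian public holidays: any timestamp landing on one of these dates is pushed
    # to 08:00 of the following day. Note that New Year is pinned to 2022 while the
    # other holidays follow the year of `day`.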
naujieji = datetime.datetime(2022, 1, 1)
atkurimas = datetime.datetime(day.year, 2, 16)
nepriklausomybes = datetime.datetime(day.year, 3, 11)
velykos = datetime.datetime(day.year, 4, 17)
velykos2 = datetime.datetime(day.year, 4, 18)
darbininku = datetime.datetime(day.year, 5, 1)
jonines = datetime.datetime(day.year, 6, 24)
mindaugines = datetime.datetime(day.year, 7, 6)
zolines = datetime.datetime(day.year, 8, 15)
velines = datetime.datetime(day.year, 11, 1)
velines2 = datetime.datetime(day.year, 11, 2)
kucios = datetime.datetime(day.year, 12, 24)
kaledos = datetime.datetime(day.year, 12, 25)
kaledos2 = datetime.datetime(day.year, 12, 26)
holidays_list = [naujieji, atkurimas, nepriklausomybes, velykos, velykos2, darbininku, jonines,
mindaugines, zolines, velines, velines2, kucios, kaledos, kaledos2]
for holiday in holidays_list:
while day.date() == holiday.date():
day = day + datetime.timedelta(days=1)
day = day.replace(hour= 8, minute= 0, second=0)
return day
def final_time_modification(columns_list, data):
columns_mod = {column: f"{column}mod" for column in columns_list}
for column in columns_list:
for index, row in data.iterrows():
r = str(row[column])
if r == "NaN" or r == "NaT":
data.loc[index, columns_mod[column]] = r
else:
formated_date = datetime.datetime.fromisoformat(r)
not_holiday = holiday_filtration(formated_date)
not_weekend = weekend_filtration(not_holiday)
working_hours = working_hours_filtration(not_weekend)
data.loc[index, columns_mod[column]] = working_hours
return columns_mod.values(), data
# Solution Time % created - first response
# INtake Time % created - first lock
def delta_counter(index, final_data, first, name, created, close):
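    # Business-hours duration between `created` and the first response/lock (falling
    # back to the close time when the former is missing): same-day pairs use the plain
    # difference; otherwise the remainder of the first day until 17:00, a fixed 8 hours
    # per full intermediate working day, and the part of the final day after 08:00 are
    # summed, skipping weekends and holidays via the filters above.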
if (first == 'NaN' or first == 'NaT') and (close == 'NaN' or close == 'NaT'):
final_data.loc[index, name] = first
    elif (first == 'NaN' or first == 'NaT') and (close != 'NaN' and close != 'NaT'):
creat = datetime.datetime.fromisoformat(created)
clo = datetime.datetime.fromisoformat(close)
if creat.date() == clo.date():
rezult = clo - creat
final_data.loc[index, name] = rezult
else:
sum = datetime.timedelta()
creat2 = creat
end_of_working_hours = datetime.datetime(year=creat2.year, month=creat2.month, day = creat2.day, hour= 17, minute = 0, second = 0)
delta_creat2 = end_of_working_hours - creat2
sum = sum + delta_creat2
while creat2.date() < clo.date() and creat2.date() + datetime.timedelta(days=1) < clo.date():
creat2 = creat2 + datetime.timedelta(days=1)
not_holiday = holiday_filtration(creat2)
not_weekend = weekend_filtration(not_holiday)
creat2 = not_weekend
if creat2.date() + datetime.timedelta(days=1) > clo.date():
break
sum = sum + datetime.timedelta(hours=8)
start_of_working_hours = datetime.datetime(year=clo.year, month=clo.month, day = clo.day, hour= 8, minute = 0, second = 0)
delta_closed = clo - start_of_working_hours
sum = sum + delta_closed
final_data.loc[index, name] = sum
else:
creat = datetime.datetime.fromisoformat(created)
first = datetime.datetime.fromisoformat(first)
if creat.date() == first.date():
rezult = first - creat
final_data.loc[index, name] = rezult
else:
sum = datetime.timedelta()
creat2 = creat
end_of_working_hours = datetime.datetime(year=creat2.year, month=creat2.month, day = creat2.day, hour= 17, minute = 0, second = 0)
delta_creat2 = end_of_working_hours - creat2
sum = sum + delta_creat2
while creat2.date() < first.date() and creat2.date() + datetime.timedelta(days=1) < first.date():
creat2 = creat2 + datetime.timedelta(days=1)
not_holiday = holiday_filtration(creat2)
not_weekend = weekend_filtration(not_holiday)
creat2 = not_weekend
if creat2.date() + datetime.timedelta(days=1) > first.date():
break
sum = sum + datetime.timedelta(hours=8)
start_of_working_hours = datetime.datetime(year=first.year, month=first.month, day = first.day, hour= 8, minute = 0, second = 0)
delta_closed = first - start_of_working_hours
sum = sum + delta_closed
final_data.loc[index, name] = sum
return final_data
def sol_int_counter(final_data):
for index, row in final_data.iterrows():
created = str(row['Created+3mod'])
close = str(row['Close Time+3mod'])
first_restonse = str(row['FirstResponse+3mod'])
first_lock = str(row['First Lock+3mod'])
delta_counter(index, final_data, first_restonse,'First Response - Created', created, close)
delta_counter(index, final_data, first_lock, 'First Lock - Created', created, close)
return final_data
def date(data3):
for index, row in data3.iterrows():
r = str(row['Created'])
x = datetime.datetime.fromisoformat(r)
date_numbers = x.isocalendar()
month = x.month
data3.loc[index,'Month'] = month
data3.loc[index,'Year'] = int(date_numbers[0])
data3.loc[index,'Week'] = int(date_numbers[1])
return data3
class CsvFiltration():
def first_filtration_GB(self, extract_direktorija, filter_direktorija_GB):
base = pd.read_csv(f'{filter_direktorija_GB}/base_GB.csv', delimiter=';', engine='python').set_index('TecReq#')
file_location = extract_direktorija
files = list(file_location.glob("*.csv*"))
for x in files:
data = pd.read_csv(x, delimiter=';', engine='python')
df1 = data[['TecReq#', 'Created', 'First Lock', 'FirstResponse', 'Close Time', 'Queue', 'Owner Country Code', 'State', 'Number of Articles', 'Needed knowledge level', 'Case CV']]
df2 = df1.loc[df1['State'] != "merged"]
final_data = df2.loc[df2['Queue'].str.contains("TH_GB|TH_IE")]
colums_list, data1 = plus3h(final_data)
mod_list, data2 = final_time_modification(colums_list, data1)
data3 = sol_int_counter(data2)
final_data = date(data3)
new_data = final_data[['TecReq#', 'Year','Month', 'Week', 'Created+3mod', 'First Lock+3mod', 'FirstResponse+3mod', 'Close Time+3mod',
'First Response - Created', 'First Lock - Created', 'Queue', 'Owner Country Code', 'State', 'Number of Articles',
'Needed knowledge level', 'Case CV']]
base = base.combine_first(new_data.set_index('TecReq#'))
base.to_csv(f"{filter_direktorija_GB}/base_GB.csv", sep=';')
print("base was updated")
def first_filtration(self, extract_direktorija, filter_direktorija):
base = pd.read_csv(f'{filter_direktorija}/base.csv', delimiter=';', engine='python').set_index('TecReq#')
file_location = extract_direktorija
files = list(file_location.glob("*.csv*"))
for x in files:
data = pd.read_csv(x, delimiter=';', engine='python')
df1 = data[['TecReq#', 'Created', 'First Lock', 'FirstResponse', 'Close Time', 'Queue', 'Owner Country Code', 'State', 'Number of Articles', 'Needed knowledge level', 'Case CV']]
df2 = df1.loc[df1['State'] != "merged"]
final_data = df2.loc[df2['Queue'].str.contains("TH_DE|TH_AT|TH_CH|TH_IT")]
colums_list, data1 = plus3h(final_data)
mod_list, data2 = final_time_modification(colums_list, data1)
data3 = sol_int_counter(data2)
final_data = date(data3)
new_data = final_data[['TecReq#', 'Year','Month', 'Week', 'Created+3mod', 'First Lock+3mod', 'FirstResponse+3mod', 'Close Time+3mod',
'First Response - Created', 'First Lock - Created', 'Queue', 'Owner Country Code', 'State', 'Number of Articles',
'Needed knowledge level', 'Case CV']]
base = base.combine_first(new_data.set_index('TecReq#'))
base.to_csv(f"{filter_direktorija}/base.csv", sep=';')
print("base was updated")
# len(set(baze.index.values)) == len(baze.index.values)
# len(set(baze.index.values))
# len(baze.index.values)
| 3.0625
| 3
|
tourney/achievements/reporter_achievement.py
|
netromdk/tourney
| 1
|
12784613
|
from .tiered_achievement import TieredAchievement
from .behavior import REPORT_SCORE_BEHAVIOR
class ReporterAchievement(TieredAchievement):
def __init__(self):
tiers = (
(1, "Reporter", "Reported a score."),
(10, "Journalist", "Reported 10 scores."),
(25, "Correspondent", "Reported 25 scores."),
(30, "<NAME>", "Reported 30 scores."),
(40, "<NAME>", "Reported 40 scores."),
(50, "Anchorman", "Reported 50 scores."),
(75, "<NAME>", "Reported 75 scores."),
(100, "<NAME>", "Reported 100 scores."),
(500, "<NAME>", "Reported 500 scores."),
(1000, "<NAME>", "Reported 1000 scores."),
)
super(ReporterAchievement, self).__init__("Reporter", tiers)
def accepted_behaviors(self):
return [REPORT_SCORE_BEHAVIOR]
def update(self, behavior):
user_id = behavior.user_id()
self.check_init(user_id)
self.data[user_id][0] += 1
amount = self.data[user_id][0]
if amount == self.next_tier(user_id):
self.data[user_id][1] += 1
return True
return False
| 2.59375
| 3
|
appviews/test.py
|
johnderm/remote
| 0
|
12784614
|
<gh_stars>0
dict = {'1':2, '2':3}
print(dict['1'])
| 2.578125
| 3
|
algorithm/impl/iterable/__init__.py
|
alpavlenko/EvoGuess
| 1
|
12784615
|
from .tabu_search import TabuSearch
algorithms = {
TabuSearch.slug: TabuSearch,
}
__all__ = [
'TabuSearch',
]
| 1.304688
| 1
|
test/conftest.py
|
roeeiit1/sqlalchemy-prometheus
| 8
|
12784616
|
import os
import pytest
@pytest.fixture(scope="session")
def settings():
return {
"HOST": "solr",
"PORT": 8983,
"PROTO": "http://",
"SOLR_USER": os.environ["SOLR_USER"],
"SOLR_PASS": os.environ["SOLR_PASS"],
"SERVER_PATH": "solr",
"SOLR_BASE_URL": "http://solr:8983/solr",
"SOLR_CONNECTION_URI": "solr://solr:8983/solr",
"SOLR_WORKER_COLLECTION_NAME": "sales_test_",
"SUPERSET_URI": "http://superset:8088",
"SUPERSET_USER": os.environ["SUPERSET_USER"],
"SUPERSET_PASS": os.environ["SUPERSET_PASS"],
"SUPERSET_DATABASE_NAME": "sales_test_",
}
| 1.835938
| 2
|
choice/companion/recruit.py
|
xelrach/DASaveReader
| 1
|
12784617
|
<reponame>xelrach/DASaveReader<gh_stars>1-10
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recruitment choices"""
import choice.quest_guid as quest_guid
from choice.utils import has_flag, get_plot
ALISTAIR_FLAG = 0
DOG_FLAG = 1
LELIANA_FLAG = 4
OGHREN_FLAG = 6
SHALE_FLAG = 7
WYNNE_FLAG = 8
ZEVRAN_FLAG = 9
LOGHAIN_FLAG = 10
STEN_FLAG = 13
def alistair_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, ALISTAIR_FLAG)
def dog_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, DOG_FLAG)
def leliana_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, LELIANA_FLAG)
def oghren_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, OGHREN_FLAG)
def shale_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, SHALE_FLAG)
def wynne_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, WYNNE_FLAG)
def zevran_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, ZEVRAN_FLAG)
def loghain_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, LOGHAIN_FLAG)
def sten_recruited(data):
quest_data = get_plot(data, quest_guid.PARTY)
return has_flag(quest_data, STEN_FLAG)
| 1.773438
| 2
|
ktane/modules/mazes.py
|
mattvperry/ktane-py
| 0
|
12784618
|
<gh_stars>0
from ktane import Module
from itertools import chain
from .maze_data import maze_data
class Mazes(Module):
def run(self):
mazes = map(Maze, maze_data)
identifier_coords = self.get_coord_or_quit('identifier')
if identifier_coords is None: return
mazes = [x for x in mazes if x.is_identifier(identifier_coords)]
if not mazes:
self.output_and_wait('No maze has that identifier.')
return
start = self.get_coord_or_quit('start')
if start is None: return
end = self.get_coord_or_quit('end')
if end is None: return
maze = mazes[0]
path = self.find_shortest_path(maze.as_graph(), start, end)
self.output_and_wait(maze.as_string_with_path(path))
def get_coord_or_quit(self, kind):
coords = self.get_list_or_quit(
lambda x: x.isdigit() and int(x) in range(6),
range(2, 3),
'Enter x and y coord of {} seperated by a space.'.format(kind))
return None if coords is None else tuple(map(int, coords))
def find_shortest_path(self, graph, start, end, path=[]):
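        # Plain recursive depth-first search over the 6x6 cell graph; adequate for a
        # maze this small, though BFS would find shortest paths more directly.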
path = path + [start]
if start == end:
return path
if not start in graph:
return None
shortest = None
for node in graph[start]:
if node not in path:
new_path = self.find_shortest_path(graph, node, end, path)
if new_path:
if not shortest or len(new_path) < len(shortest):
shortest = new_path
return shortest
class Maze(object):
def __init__(self, matrix_ascii):
self.matrix = matrix_ascii.strip().splitlines()
def as_string_with_path(self, path):
new_maze = Maze('\n'.join([''.join(x) for x in self.matrix]))
new_maze.__set_cell(tuple(i * 2 for i in path[0]), 's')
new_maze.__set_cell(tuple(i * 2 for i in path[-1]), 'e')
for point in path[1:-1]:
new_maze.__set_cell(tuple(i * 2 for i in point), '#')
return new_maze.__str__()
def as_graph(self):
graph = {}
for x in range(6):
for y in range(6):
point = (x, y)
graph[point] = [x for x in self.__neighbors(point)
if self.valid_move(point, x)]
return graph
def valid_move(self, from_point, to_point):
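        # Cells live at even (x, y) coordinates of the ASCII matrix with wall characters
        # in between, so the candidate wall sits at 2*from + (to - from), i.e. halfway
        # between the two doubled cell coordinates.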
wall_x = (to_point[0] - from_point[0]) + 2 * from_point[0]
wall_y = (to_point[1] - from_point[1]) + 2 * from_point[1]
wall_point = (wall_x, wall_y)
return self.__point_in_bounds(wall_point) and not self.__is_wall(self.__get_cell(wall_point))
def is_identifier(self, point):
return self.__get_cell(tuple(i * 2 for i in point)) == 'O'
def __point_in_bounds(self, point):
return point[0] in range(len(self.matrix[0])) and point[1] in range(len(self.matrix))
def __get_cell(self, point):
return self.matrix[point[1]][point[0]]
def __set_cell(self, point, value):
chars = list(self.matrix[point[1]])
chars[point[0]] = value
self.matrix[point[1]] = ''.join(chars)
def __neighbors(self, point):
return [
(point[0] + 1, point[1]),
(point[0] - 1, point[1]),
(point[0], point[1] + 1),
(point[0], point[1] - 1)
]
def __is_wall(self, char):
return not char in 'se#O. '
def __str__(self):
maze_box_chars = [self.__convert_to_box(x) for x in self.__maze_chars(self.matrix)]
return '\n'.join([''.join(x) for x in maze_box_chars])
# ---
# Logic for unicode box drawing
# ---
char_map = {
'.': u'\u00B7',
'-': u'\u2550',
'|': u'\u2551',
'+': u'\u256C',
'/': u'\u2554',
'L': u'\u255A',
'J': u'\u255D',
'E': u'\u2560',
'3': u'\u2563',
'T': u'\u2566',
'F': u'\u2569',
'#': u'\u2588',
'\\': u'\u2557',
' ': ' ',
'O': 'O',
's': 'S',
'e': 'E',
}
def __maze_chars(self, maze):
maze_string = [self.__border_chars('/', '\\', 'T', maze[0])]
maze_string += [self.__row_chars(r) for r in maze]
maze_string += [self.__border_chars('L', 'J', 'F', maze[-1])]
return maze_string
def __border_chars(self, begin, end, connect, row):
border = [begin]
border += chain.from_iterable([('-', connect if x == '|' else '-') for x in row])
border += ['-', end]
return border
def __row_chars(self, row):
row_chars = ['E', '-'] if self.__is_wall(row[0]) else ['|', ' ']
row_chars.append(row[0])
char = lambda r, i: '-' if self.__is_wall(r[i]) and self.__is_wall(r[i + 1]) else ' '
row_chars += chain.from_iterable([(char(row, i), x) for i, x in enumerate(row[1::])])
row_chars += ['-', '3'] if self.__is_wall(row[-1]) else [' ', '|']
return row_chars
def __convert_to_box(self, chars):
return [self.char_map[x] for x in chars]
| 3.171875
| 3
|
airflow/providers/trino/transfers/gcs_to_trino.py
|
npodewitz/airflow
| 8,092
|
12784619
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to Trino operator."""
import csv
import json
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Iterable, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.trino.hooks.trino import TrinoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToTrinoOperator(BaseOperator):
"""
Loads a csv file from Google Cloud Storage into a Trino table.
Assumptions:
1. CSV file should not have headers
2. Trino table with requisite columns is already created
3. Optionally, a separate JSON file with headers can be provided
:param source_bucket: Source GCS bucket that contains the csv
:param source_object: csv file including the path
:param trino_table: trino table to upload the data
:param trino_conn_id: destination trino connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Google Cloud Storage service.
:param schema_fields: The names of the columns to fill in the table. If schema_fields is
provided, any path provided in the schema object will be
:param schema_object: JSON file with schema fields
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
'source_bucket',
'source_object',
'trino_table',
)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
trino_table: str,
trino_conn_id: str = "trino_default",
gcp_conn_id: str = "google_cloud_default",
schema_fields: Optional[Iterable[str]] = None,
schema_object: Optional[str] = None,
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.trino_table = trino_table
self.trino_conn_id = trino_conn_id
self.gcp_conn_id = gcp_conn_id
self.schema_fields = schema_fields
self.schema_object = schema_object
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
trino_hook = TrinoHook(trino_conn_id=self.trino_conn_id)
with NamedTemporaryFile("w+") as temp_file:
self.log.info("Downloading data from %s", self.source_object)
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.source_object,
filename=temp_file.name,
)
data = csv.reader(temp_file)
rows = (tuple(row) for row in data)
self.log.info("Inserting data into %s", self.trino_table)
if self.schema_fields:
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=self.schema_fields)
elif self.schema_object:
blob = gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=schema_fields)
else:
trino_hook.insert_rows(table=self.trino_table, rows=rows)
| 1.914063
| 2
|
tresonator/transmission_line_utils.py
|
jhillairet/tresonator
| 0
|
12784620
|
# -*- coding: utf-8 -*-
"""
Transmission Line helper functions
"""
import numpy as np
def ZL_2_Zin(L,Z0,gamma,ZL):
"""
Returns the input impedance seen through a lossy transmission line of
characteristic impedance Z0 and complex wavenumber gamma=alpha+j*beta
Zin = ZL_2_Zin(L,Z0,gamma,ZL)
Args
----
L : length [m] of the transmission line
Z0: characteristic impedance of the transmission line
gamma: complex wavenumber associated to the transmission line
ZL: Load impedance
Returns
-------
Zin: input impedance
"""
assert L > 0
assert Z0 > 0
Zin = Z0*(ZL + Z0*np.tanh(gamma*L))/(Z0 + ZL*np.tanh(gamma*L))
return Zin
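# Example (sketch): a lossless quarter-wave line acts as an impedance inverter, so
# ZL_2_Zin(L, 50, 1j*np.pi/(2*L), 25) with any L > 0 returns approximately
# Z0**2 / ZL = 100 ohms.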
def transfer_matrix(L,V0,I0,Z0,gamma):
"""
Returns the voltage and the current at a distance L from an
initial voltage V0 and current I0 on a transmission line which
propagation constant is gamma.
VL, IL = transfer_matrix(L,V0,I0,Z0,gamma)
L is positive from the load toward the generator
Args
-----
L : transmission line length [m]
V0: initial voltage [V]
I0: initial current [A]
Z0 : characteristic impedance of the transmission line
gamma: =alpha+j*beta propagation constant of the transmission line
Returns
--------
VL: voltage at length L
IL: current at length L
"""
if Z0 <= 0:
raise ValueError
transfer_matrix = np.array([[np.cosh(gamma*L), Z0*np.sinh(gamma*L)],
[np.sinh(gamma*L)/Z0, np.cosh(gamma*L)]])
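    # This is the standard ABCD (chain) matrix of a uniform line; cascaded sections can
    # be handled by multiplying their matrices in order.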
U = np.array([V0,I0])
A = transfer_matrix @ U
VL = A[0]
IL = A[1]
return VL, IL
def V0f_2_VL(L, V0f, gamma, reflection_coefficient):
"""
Propagation of the voltage at a distance L from the forward
voltage and reflection coefficient
VL = V0f_2_VL(L, V0f, gamma, reflectionCoefficient)
Args
----
L : Transmission Line Length [m]
V0f : forward voltage [V]
gamma : Transmission Line Complex Propagatioon Constant [1]
reflectionCoefficient : complex reflection coefficient [1]
Returns
--------
VL : (total) voltage at length L
"""
    assert L > 0
    # gamma and reflection_coefficient are complex in general, so no ordering checks here.
VL = V0f*(np.exp(-gamma*L) + reflection_coefficient*np.exp(+gamma*L))
return VL
| 3.25
| 3
|
utils/chip-form.py
|
SarahFDAK/FairbanksDistributors
| 1
|
12784621
|
import json
def main():
with open('chips.csv', 'r') as f:
data = f.read()
raw_types = [
t.split('\n') for t in data.split('$$TYPE$$') if t != '\n'
]
items = []
for t in raw_types:
type_name = t[0].strip()
print(type_name)
raw_items = [item.split('\t') for item in t[1:] if item != '\n']
for item in raw_items:
if len(item) != 4:
continue
(name, oz, upc, case) = item
items.append({'type': type_name, 'name': name,
'oz': oz, 'upc': upc, 'case': case})
with open('chips.json', 'w') as f:
json.dump(items, f, indent=2)
if __name__ == "__main__":
main()
| 2.78125
| 3
|
pycompupipe/components/gui/gui_manager.py
|
xaedes/PyCompuPipe
| 1
|
12784622
|
<gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division  # Float division by default - integer division can still be done explicitly with '//'
from __future__ import absolute_import
import numpy as np
from pyecs import *
from . import GuiElement
from funcy import partial
class GuiManager(Component):
"""docstring for GuiManager"""
def __init__(self, *args,**kwargs):
super(GuiManager, self).__init__(*args,**kwargs)
self.exclusive_elements = set()
@callback
def awake(self):
for gui_element in Component.__added_components__[GuiElement]:
gui_element.fire_callbacks("register_manager", self)
self.entity.register_callback("mousemotion",partial(self.mouse_callback, "mousemotion"))
self.entity.register_callback("mousebuttonup",partial(self.mouse_callback, "mousebuttonup"))
self.entity.register_callback("mousebuttondown",partial(self.mouse_callback, "mousebuttondown"))
def query(self,x,y,limit_num=None):
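        # Returns every registered GuiElement whose hit-test contains (x, y); pass
        # limit_num to stop early after that many hits (query1 uses limit_num=1).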
result = []
for gui_element in Component.__added_components__[GuiElement]:
if gui_element.is_in((x,y)):
result.append(gui_element)
if limit_num is not None and len(result) == limit_num:
return result
return result
def query1(self,x,y):
l = self.query(x,y,1)
if len(l) == 0:
return None
else:
return l[0]
def mouse_callback(self, event_type, event):
gui_elements = self.query(*event.pos)
for gui_element in gui_elements:
if len(self.exclusive_elements)==0 or (gui_element in self.exclusive_elements):
if not gui_element.always_fetch_mouse:
gui_element.entity.fire_callbacks(event_type, event)
# gui_element.always_fetch_mouse == True:
# the gui element has mouse callbacks on manager entity
# we don't want to fire events twice for this gui element
# so we do nothing here
| 2.21875
| 2
|
py/grader_test1.py
|
tkuboi/gradzilla
| 0
|
12784623
|
import unittest
from lab1 import get_max
from lab1 import reverse
from lab1 import search
from lab1 import fib
from lab1 import factorial_iter
from lab1 import factorial_rec
class MyTest(unittest.TestCase):
def runTest(self):
with self.subTest(msg="testing get_max"):
self.test_get_max()
with self.subTest(msg="testing reverse"):
self.test_reverse()
with self.subTest(msg="testing search"):
self.test_search()
with self.subTest(msg="testing fib"):
self.test_fib()
with self.subTest(msg="testing factorial"):
self.test_factorial()
def test_get_max(self):
arr = [1,2,3,4,5]
self.assertEqual(get_max(arr), 5)
arr = [1, 1, 1, 0]
self.assertEqual(get_max(arr), 1)
self.assertEqual(get_max([]), None)
def test_reverse(self):
self.assertEqual(reverse("qweEerty"), "ytreEewq")
self.assertEqual(reverse("aa"), "aa")
self.assertEqual(reverse("a"), "a")
self.assertEqual(reverse(""), "")
def test_search(self):
arr = [1,2,3,4,5]
self.assertEqual(search(arr, 5), 4)
arr = [1,2,3,4,5]
self.assertEqual(search(arr, 2), 1)
arr = [1, 1, 1]
self.assertEqual(search(arr, 5), None)
arr = []
self.assertEqual(search(arr, 5), None)
def test_fib(self):
def fib_numbers(n):
sequence = []
for i in range(n+1):
sequence.append(fib(i))
return sequence
#this will test your fib function by calling it multiple times
self.assertEqual(fib_numbers(10),
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55])
def test_factorial(self):
self.assertEqual(factorial_iter(0), 1)
self.assertEqual(factorial_iter(1), 1)
self.assertEqual(factorial_iter(3), 6)
self.assertEqual(factorial_rec(0), 1)
self.assertEqual(factorial_rec(1), 1)
self.assertEqual(factorial_rec(3), 6)
def get_score(max_score, result):
score = max_score
for error in result.errors:
#print("-10 points for ", error[1])
score -= 30
for failure in result.failures:
#print("-5 points for ", failure[1])
score -= 5
return max(0, score)
def main():
runner = unittest.TextTestRunner()
result = runner.run(MyTest())
score = get_score(90, result)
print("SCORE:{%s}\n" % (score))
return score
if __name__ == '__main__':
main()
| 3.375
| 3
|
gtsfm/two_view_estimator.py
|
swershrimpy/gtsfm
| 122
|
12784624
|
<reponame>swershrimpy/gtsfm
"""Estimator which operates on a pair of images to compute relative pose and verified indices.
Authors: <NAME>, <NAME>
"""
import logging
from typing import Dict, Optional, Tuple
import dask
import numpy as np
from dask.delayed import Delayed
from gtsam import Cal3Bundler, Pose3, Rot3, Unit3
import gtsfm.utils.geometry_comparisons as comp_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.metrics as metric_utils
from gtsfm.common.keypoints import Keypoints
from gtsfm.common.two_view_estimation_report import TwoViewEstimationReport
from gtsfm.frontend.inlier_support_processor import InlierSupportProcessor
from gtsfm.frontend.matcher.matcher_base import MatcherBase
from gtsfm.frontend.verifier.verifier_base import VerifierBase
from gtsfm.evaluation.metrics import GtsfmMetric, GtsfmMetricsGroup
logger = logger_utils.get_logger()
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
pil_logger = logging.getLogger("PIL")
pil_logger.setLevel(logging.INFO)
EPSILON = 1e-6
class TwoViewEstimator:
"""Wrapper for running two-view relative pose estimation on image pairs in the dataset."""
def __init__(
self,
matcher: MatcherBase,
verifier: VerifierBase,
inlier_support_processor: InlierSupportProcessor,
eval_threshold_px: float,
) -> None:
"""Initializes the two-view estimator from matcher and verifier.
Args:
matcher: matcher to use.
verifier: verifier to use.
inlier_support_processor: post-processor that uses information about RANSAC support to filter out pairs.
eval_threshold_px: distance threshold for marking a correspondence pair as inlier during evaluation
(not during estimation).
"""
self._matcher = matcher
self._verifier = verifier
self.processor = inlier_support_processor
self._corr_metric_dist_threshold = eval_threshold_px
def get_corr_metric_dist_threshold(self) -> float:
"""Getter for the distance threshold used in the metric for correct correspondences."""
return self._corr_metric_dist_threshold
def create_computation_graph(
self,
keypoints_i1_graph: Delayed,
keypoints_i2_graph: Delayed,
descriptors_i1_graph: Delayed,
descriptors_i2_graph: Delayed,
camera_intrinsics_i1_graph: Delayed,
camera_intrinsics_i2_graph: Delayed,
im_shape_i1_graph: Delayed,
im_shape_i2_graph: Delayed,
i2Ti1_expected_graph: Optional[Delayed] = None,
) -> Tuple[Delayed, Delayed, Delayed, Optional[Delayed], Optional[Delayed], Optional[Delayed]]:
"""Create delayed tasks for matching and verification.
Args:
keypoints_i1_graph: keypoints for image i1.
keypoints_i2_graph: keypoints for image i2.
descriptors_i1_graph: corr. descriptors for image i1.
descriptors_i2_graph: corr. descriptors for image i2.
camera_intrinsics_i1_graph: intrinsics for camera i1.
camera_intrinsics_i2_graph: intrinsics for camera i2.
im_shape_i1_graph: image shape for image i1.
im_shape_i2_graph: image shape for image i2.
i2Ti1_expected_graph (optional): ground truth relative pose, used for evaluation if available. Defaults to
None.
Returns:
Computed relative rotation wrapped as Delayed.
Computed relative translation direction wrapped as Delayed.
Indices of verified correspondences wrapped as Delayed.
Two view report w/ verifier metrics wrapped as Delayed.
Two view report w/ post-processor metrics wrapped as Delayed.
"""
# graph for matching to obtain putative correspondences
corr_idxs_graph = self._matcher.create_computation_graph(
keypoints_i1_graph,
keypoints_i2_graph,
descriptors_i1_graph,
descriptors_i2_graph,
im_shape_i1_graph,
im_shape_i2_graph,
)
# verification on putative correspondences to obtain relative pose
# and verified correspondences
# TODO: name this verified_correspondence_idxs (add note: everything here is delayed)
(i2Ri1_graph, i2Ui1_graph, v_corr_idxs_graph, inlier_ratio_est_model) = self._verifier.create_computation_graph(
keypoints_i1_graph,
keypoints_i2_graph,
corr_idxs_graph,
camera_intrinsics_i1_graph,
camera_intrinsics_i2_graph,
)
# if we have the expected GT data, evaluate the computed relative pose
if i2Ti1_expected_graph is not None:
R_error_deg, U_error_deg = dask.delayed(compute_relative_pose_metrics, nout=2)(
i2Ri1_graph, i2Ui1_graph, i2Ti1_expected_graph
)
num_inliers_gt_model, inlier_ratio_gt_model, v_corr_idxs_inlier_mask_gt = dask.delayed(
compute_correspondence_metrics, nout=3
)(
keypoints_i1_graph,
keypoints_i2_graph,
v_corr_idxs_graph,
camera_intrinsics_i1_graph,
camera_intrinsics_i2_graph,
i2Ti1_expected_graph,
self._corr_metric_dist_threshold,
)
else:
R_error_deg, U_error_deg = None, None
num_inliers_gt_model, inlier_ratio_gt_model = None, None
v_corr_idxs_inlier_mask_gt = None
two_view_report_graph = dask.delayed(generate_two_view_report)(
inlier_ratio_est_model,
R_error_deg,
U_error_deg,
num_inliers_gt_model,
inlier_ratio_gt_model,
v_corr_idxs_inlier_mask_gt,
v_corr_idxs_graph,
)
# Note: We name the output as _pp, as it represents a post-processed quantity.
(
i2Ri1_pp_graph,
i2Ui1_pp_graph,
v_corr_idxs_pp_graph,
two_view_report_pp_graph,
) = self.processor.create_computation_graph(
i2Ri1_graph, i2Ui1_graph, v_corr_idxs_graph, two_view_report_graph
)
# We provide both, as we will create reports for both.
return (i2Ri1_pp_graph, i2Ui1_pp_graph, v_corr_idxs_pp_graph, two_view_report_graph, two_view_report_pp_graph)
def generate_two_view_report(
inlier_ratio_est_model: float,
R_error_deg: float,
U_error_deg: float,
num_inliers_gt_model: int,
inlier_ratio_gt_model: float,
v_corr_idxs_inlier_mask_gt: np.ndarray,
v_corr_idxs: np.ndarray,
) -> TwoViewEstimationReport:
"""Wrapper around class constructor for Dask."""
two_view_report = TwoViewEstimationReport(
inlier_ratio_est_model=inlier_ratio_est_model,
num_inliers_est_model=v_corr_idxs.shape[0],
num_inliers_gt_model=num_inliers_gt_model,
inlier_ratio_gt_model=inlier_ratio_gt_model,
v_corr_idxs_inlier_mask_gt=v_corr_idxs_inlier_mask_gt,
v_corr_idxs=v_corr_idxs,
R_error_deg=R_error_deg,
U_error_deg=U_error_deg,
)
return two_view_report
def compute_correspondence_metrics(
keypoints_i1: Keypoints,
keypoints_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
intrinsics_i1: Cal3Bundler,
intrinsics_i2: Cal3Bundler,
i2Ti1: Pose3,
epipolar_distance_threshold: float,
) -> Tuple[int, float, Optional[np.ndarray]]:
"""Compute the metrics for the generated verified correspondence.
Args:
keypoints_i1: detected keypoints in image i1.
keypoints_i2: detected keypoints in image i2.
corr_idxs_i1i2: indices of correspondences.
intrinsics_i1: intrinsics for i1.
intrinsics_i2: intrinsics for i2.
i2Ti1: relative pose.
epipolar_distance_threshold: max epipolar distance to qualify as a correct match.
Returns:
Number of inlier correspondences to ground truth epipolar geometry, i.e. #correct correspondences.
Inlier Ratio, i.e. ratio of correspondences which are correct w.r.t. given relative pose.
Mask of which verified correspondences are classified as correct under Sampson error
(using GT epipolar geometry).
"""
if corr_idxs_i1i2.size == 0:
return 0, float("Nan"), None
v_corr_idxs_inlier_mask_gt = metric_utils.count_correct_correspondences(
keypoints_i1.extract_indices(corr_idxs_i1i2[:, 0]),
keypoints_i2.extract_indices(corr_idxs_i1i2[:, 1]),
intrinsics_i1,
intrinsics_i2,
i2Ti1,
epipolar_distance_threshold,
)
num_inliers_gt_model = np.count_nonzero(v_corr_idxs_inlier_mask_gt)
inlier_ratio_gt_model = num_inliers_gt_model / corr_idxs_i1i2.shape[0]
return num_inliers_gt_model, inlier_ratio_gt_model, v_corr_idxs_inlier_mask_gt
def compute_relative_pose_metrics(
i2Ri1_computed: Optional[Rot3], i2Ui1_computed: Optional[Unit3], i2Ti1_expected: Pose3
) -> Tuple[Optional[float], Optional[float]]:
"""Compute the metrics on relative camera pose.
Args:
i2Ri1_computed: computed relative rotation.
i2Ui1_computed: computed relative translation direction.
i2Ti1_expected: expected relative pose.
Returns:
Rotation error, in degrees
Unit translation error, in degrees
"""
R_error_deg = comp_utils.compute_relative_rotation_angle(i2Ri1_computed, i2Ti1_expected.rotation())
U_error_deg = comp_utils.compute_relative_unit_translation_angle(
i2Ui1_computed, Unit3(i2Ti1_expected.translation())
)
return (R_error_deg, U_error_deg)
def aggregate_frontend_metrics(
two_view_reports_dict: Dict[Tuple[int, int], Optional[TwoViewEstimationReport]],
angular_err_threshold_deg: float,
metric_group_name: str,
) -> GtsfmMetricsGroup:
"""Aggregate the front-end metrics to log summary statistics.
We define "pose error" as the maximum of the angular errors in rotation and translation, per:
SuperGlue, CVPR 2020: https://arxiv.org/pdf/1911.11763.pdf
Learning to find good correspondences. CVPR 2018:
OA-Net, ICCV 2019:
NG-RANSAC, ICCV 2019:
Args:
        two_view_reports_dict: report containing front-end metrics for each image pair.
angular_err_threshold_deg: threshold for classifying angular error metrics as success.
metric_group_name: name we will assign to the GtsfmMetricGroup returned by this fn.
"""
num_image_pairs = len(two_view_reports_dict.keys())
# all rotational errors in degrees
rot3_angular_errors = []
trans_angular_errors = []
inlier_ratio_gt_model_all_pairs = []
inlier_ratio_est_model_all_pairs = []
num_inliers_gt_model_all_pairs = []
num_inliers_est_model_all_pairs = []
# populate the distributions
for report in two_view_reports_dict.values():
if report is None:
continue
rot3_angular_errors.append(report.R_error_deg)
trans_angular_errors.append(report.U_error_deg)
inlier_ratio_gt_model_all_pairs.append(report.inlier_ratio_gt_model)
inlier_ratio_est_model_all_pairs.append(report.inlier_ratio_est_model)
num_inliers_gt_model_all_pairs.append(report.num_inliers_gt_model)
num_inliers_est_model_all_pairs.append(report.num_inliers_est_model)
rot3_angular_errors = np.array(rot3_angular_errors, dtype=float)
trans_angular_errors = np.array(trans_angular_errors, dtype=float)
# count number of rot3 errors which are not None. Should be same in rot3/unit3
num_valid_image_pairs = np.count_nonzero(~np.isnan(rot3_angular_errors))
# compute pose errors by picking the max error from rot3 and unit3 errors
pose_errors = np.maximum(rot3_angular_errors, trans_angular_errors)
# check errors against the threshold
success_count_rot3 = np.sum(rot3_angular_errors < angular_err_threshold_deg)
success_count_unit3 = np.sum(trans_angular_errors < angular_err_threshold_deg)
success_count_pose = np.sum(pose_errors < angular_err_threshold_deg)
# count image pair entries where inlier ratio w.r.t. GT model == 1.
all_correct = np.count_nonzero(
[report.inlier_ratio_gt_model == 1.0 for report in two_view_reports_dict.values() if report is not None]
)
logger.debug(
"[Two view optimizer] [Summary] Rotation success: %d/%d/%d",
success_count_rot3,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] Translation success: %d/%d/%d",
success_count_unit3,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] Pose success: %d/%d/%d",
success_count_pose,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] # Image pairs with 100%% inlier ratio:: %d/%d", all_correct, num_image_pairs
)
# TODO(akshay-krishnan): Move angular_err_threshold_deg and num_total_image_pairs to metadata.
frontend_metrics = GtsfmMetricsGroup(
metric_group_name,
[
GtsfmMetric("angular_err_threshold_deg", angular_err_threshold_deg),
GtsfmMetric("num_total_image_pairs", int(num_image_pairs)),
GtsfmMetric("num_valid_image_pairs", int(num_valid_image_pairs)),
GtsfmMetric("rotation_success_count", int(success_count_rot3)),
GtsfmMetric("translation_success_count", int(success_count_unit3)),
GtsfmMetric("pose_success_count", int(success_count_pose)),
GtsfmMetric("num_all_inlier_correspondences_wrt_gt_model", int(all_correct)),
GtsfmMetric("rot3_angular_errors_deg", rot3_angular_errors),
GtsfmMetric("trans_angular_errors_deg", trans_angular_errors),
GtsfmMetric("pose_errors_deg", pose_errors),
GtsfmMetric("inlier_ratio_wrt_gt_model", inlier_ratio_gt_model_all_pairs),
GtsfmMetric("inlier_ratio_wrt_est_model", inlier_ratio_est_model_all_pairs),
GtsfmMetric("num_inliers_est_model", num_inliers_est_model_all_pairs),
GtsfmMetric("num_inliers_gt_model", num_inliers_gt_model_all_pairs),
],
)
return frontend_metrics
| 2.15625
| 2
|
flask_kits/utils/util.py
|
by46/flask-kits
| 1
|
12784625
|
<filename>flask_kits/utils/util.py
import time
from decimal import Decimal
__all__ = ['purge_sum_result', 'timestamp', 'get_raw_path']
def purge_sum_result(result, field_name='amount', default=None):
key = '{0}__sum'.format(field_name)
if default is None:
default = Decimal('0')
return result.get(key) or default
def timestamp():
return str(int(time.time()))
def get_raw_path(request):
path = request.path
if 'QUERY_STRING' in request.META and request.META.get('QUERY_STRING'):
path += '?' + request.META.get('QUERY_STRING')
return path
| 2.265625
| 2
|
untitled/VectorialIndex/VectorialIndex.py
|
Miky91/Python
| 0
|
12784626
|
# Insert the header here
import string
# Given a line of text, returns a list of non-empty words,
# converting them to lowercase and stripping punctuation from both ends
# Ejemplo:
# > extrae_palabras("Hi! What is your name? John.")
# ['hi', 'what', 'is', 'your', 'name', 'john']
def extrae_palabras(linea):
    return list(filter(lambda x: len(x) > 0,
                       map(lambda x: x.lower().strip(string.punctuation), linea.split())))
class VectorialIndex(object):
def __init__(self, directorio, stop=[]):
pass
def consulta_vectorial(self, consulta, n=3):
pass
def consulta_conjuncion(self, consulta):
pass
| 3.734375
| 4
|
ecosystem_tests/ecosystem_tests_cli/plugins.py
|
cloudify-incubator/cloudify-ecosystem-test
| 1
|
12784627
|
<gh_stars>1-10
########
# Copyright (c) 2014-2021 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from .exceptions import EcosystemTestCliException
def create_plugins_list(plugins):
"""Returns a list of tuples of plugins to upload.
each tuple look like:(wagon_url,plugin.yaml_url)
:param plugins: tuple of tuples consists of the usr input for plugin
option.
"""
plugins_list = []
for plugin_tuple in plugins:
check_valid_urls(plugin_tuple)
# In case the user insert the plugin.yaml as the first argument.
wagon, yaml = find_wagon_yaml_url(plugin_tuple)
plugins_list.append((wagon, yaml))
return plugins_list
def check_valid_urls(plugin_tuple):
for url in plugin_tuple:
request = requests.head(url)
if request.status_code != requests.codes.found and \
request.status_code != requests.codes.ok:
raise EcosystemTestCliException('plugin url {url}'
' is not valid!'.format(url=url))
def find_wagon_yaml_url(plugin_tuple):
try:
wagon = \
[element for element in plugin_tuple if element.endswith('.wgn')][
0]
pl_yaml = \
[element for element in plugin_tuple if element.endswith('.yaml')][
0]
return wagon, pl_yaml
except IndexError:
raise EcosystemTestCliException(
'Plugin input - Could not find which url is for wagon and which '
'is for plugin.yaml for: {plugins}'.format(
plugins=plugin_tuple))
| 2.3125
| 2
|
exercicios/ex18ERRADO.py
|
gabrielaraujo3/exercicios-python
| 0
|
12784628
|
<filename>exercicios/ex18ERRADO.py
import math
n1 = float(input('Enter an angle: '))
sn = math.sin(n1)
co = math.cos(n1)
ta = math.tan(n1)
print('Sine {}, cosine {} and tangent {} of {}'.format(sn,co,ta,n1))
# ERRADO (wrong): math.sin/cos/tan expect radians, so the angle should first be
# converted with math.radians(n1).
| 3.890625
| 4
|
Chapter03/2data_normalization.py
|
packtprasadr/Machine-Learning-Algorithms
| 78
|
12784629
|
from __future__ import print_function
import numpy as np
from sklearn.preprocessing import Normalizer
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
# Create a dummy dataset
data = np.array([1.0, 2.0])
print(data)
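    # For the vector [1.0, 2.0]: max-normalization divides by 2 -> [0.5, 1.0],
    # L1 divides by the sum 3 -> [0.333..., 0.667...], and L2 divides by sqrt(5)
    # -> roughly [0.447, 0.894].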
# Max normalization
n_max = Normalizer(norm='max')
nm = n_max.fit_transform(data.reshape(1, -1))
print(nm)
# L1 normalization
n_l1 = Normalizer(norm='l1')
nl1 = n_l1.fit_transform(data.reshape(1, -1))
print(nl1)
# L2 normalization
n_l2 = Normalizer(norm='l2')
nl2 = n_l2.fit_transform(data.reshape(1, -1))
print(nl2)
| 3.3125
| 3
|
Partial_correlation.py
|
rmccole/UCEs_genome_organization
| 3
|
12784630
|
"""
Script to take matrices and calculate partial correlations using matlab functions called using the matlab engine for
python.
Distributed under the following license:
Copyright 2017 Harvard University, Wu Lab
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
"""
import pandas as pd
import argparse
import matlab.engine
from collections import OrderedDict
import numpy as np
def get_args():
parser = argparse.ArgumentParser(description="Description")
parser.add_argument("file", type=str,
help='The filename of the matrix you wish to process, columns separated by tabs')
parser.add_argument("-y", "--YIndex", type=int, help = 'The column index (0-based) of the y variable')
parser.add_argument("-x", "--XIndexes", type=int, nargs="+",
help = 'The column indexes of the x variables, separated by spaces')
parser.add_argument("-z", "--ZIndexes", type=int, nargs="+",
help = 'The column indexes (0based) of the z variables, separated by spaces')
return parser.parse_args()
def importMatrix(strFileName):
pdMatrix = pd.read_csv(strFileName, sep='\t')
return pdMatrix
def pandaColumnToList(panda, strColName):
arCol = panda[strColName].tolist()
return arCol
def twoPandaColumns(strcolumn1, strcolumn2, panda):
#Returns a panda with same indexes and column headings, made of just the two columns named.
pdTwoColumns = panda[[strcolumn1, strcolumn2]]
return pdTwoColumns
def pdToNp(panda):
#Turn a panda into a numpy array
npArray = panda.values
return npArray
def npToMatDouble(npArray):
#Turn a numpy array into a matlab double,
### NB does not retain shape
#From http://stackoverflow.com/questions/10997254/converting-numpy-arrays-to-matlab-and-vice-versa
mlDouble = matlab.double(npArray.tolist())
return mlDouble
def orderedDictColumns(panda):
#Get a dictionary of column names whose keys are their indexes, e.g. first column is key 0.
arColNames = list(panda)
arIndexes = range(len(arColNames))
#From http://stackoverflow.com/questions/15372949/preserve-ordering-when-consolidating-two-lists-into-a-dict
odColumns = OrderedDict(zip(arIndexes, arColNames))
return odColumns
def xyzColumnsToMlDoubleXYand_Z(pdMatrix, strXName, strYName, strZName):
"""
From a panda matrix, specify the column names of the three columns you want to be your x, y, and z variables.
Returns a n x 2 matrix of x and y values, and an n x 1 matrix of z values.
If you want more than one column to go into z, this is not the right function.
"""
#First make a panda of just the x and y columns
pdXY = twoPandaColumns(strXName, strYName, pdMatrix)
#Make it into a numpy array
npXY = pdToNp(pdXY)
#Make it a matlab double
mlXY = npToMatDouble(npXY)
#Now similar with z column:
pdZ = pdMatrix[strZName]
npZ = pdToNp(pdZ)
#Reshape to get each value in its own row
npZreshape = npZ.reshape(len(npZ),1)
mlZ = npToMatDouble(npZreshape)
return mlXY, mlZ
def partialCorr(mlXY, mlZ):
"""
Run matlab partialcorr function, looking at correlation between pairs of values in mlXy, controlling for variables in z
"""
eng = matlab.engine.start_matlab()
partial, pval = eng.partialcorr(mlXY, mlZ, 'rows', 'pairwise', 'type', 'Spearman', nargout=2)
#Partial and pval are nested lists, need to access the 1th element of the 0th list (2nd element of 1st list for human)
intPartial = partial[0][1]
intpval = pval[0][1]
return intPartial, intpval
def partialCorrResults(intYColumnIndex, arIntXColumnIndexes, arIntZColumnIndexes, pdMatrix):
"""
Returns two pandas, one with partial correlation coefficients, the other with p values.
X variables are column headings. Z variables are column rows.
"""
odColumns = orderedDictColumns(pdMatrix)
#Get y variable name
strYVariable = str(odColumns[intYColumnIndex])
print 'Y variable is {0}'.format(strYVariable)
#Get list of x variable names, these will be the column names for the results pandas:
arXColumnNames = getColumnNameList(pdMatrix, arIntXColumnIndexes)
print 'X variables are {0}'.format(str(arXColumnNames))
#Get list of y variable names, these will be the row indexes for the results pandas:
arZColumnNames = getColumnNameList(pdMatrix, arIntZColumnIndexes)
print 'Z variables are {0}'.format(str(arZColumnNames))
arArRawPartials = []
arArRawPVals = []
for x in arIntXColumnIndexes:
#For each x variable, make a list of partial coefficients, and a list of pvals, one for each z variable
arPartials = []
arPVals = []
for z in arIntZColumnIndexes:
print('Getting partial correlation between {0} and {1}, controlling for {2}'.format(odColumns[x], odColumns[intYColumnIndex], odColumns[z]))
mlXY, mlZ = xyzColumnsToMlDoubleXYand_Z(pdMatrix, odColumns[x], odColumns[intYColumnIndex], odColumns[z])
intPartial, intPValue = partialCorr(mlXY, mlZ)
print('Partial correlation coefficient between {0} and {1}, controlling for {2} is {3}'.format(odColumns[x], odColumns[intYColumnIndex], odColumns[z], intPartial))
print('P value for partial correlation between {0} and {1}, controlling for {2} is {3}'.format(odColumns[x], odColumns[intYColumnIndex], odColumns[z], intPValue))
arPartials.append(intPartial)
arPVals.append(intPValue)
#When all z variables are finished, append the newly created lists to a final list of lists.
arArRawPartials.append(arPartials)
arArRawPVals.append(arPVals)
#Create the pretty pandas with columns as x variables, row indexes as z variables
pdPrettyPartials = pd.DataFrame(data=arArRawPartials, columns=arZColumnNames, index=arXColumnNames)
pdPrettyPVals = pd.DataFrame(data=arArRawPVals, columns=arZColumnNames, index=arXColumnNames)
#Transpose pandas so that x variables become columns and z variables become rows
pdFinalPartials = pdPrettyPartials.transpose()
pdFinalPVals = pdPrettyPVals.transpose()
#Save those pretty pandas
pdFinalPartials.to_csv(('Partial_coeff_columnsXvariables_rowsZvariables_yvariable{0}.txt'.format(strYVariable)), sep='\t')
pdFinalPVals.to_csv(('Pvalues_columnsXvariables_rowsZvariables_yvariable_{0}.txt'.format(strYVariable)), sep='\t')
return pdFinalPartials, pdFinalPVals
def getColumnNameList(pdMatrix, arIntColumnIndexes):
odColumns = orderedDictColumns(pdMatrix)
arXColumnNames = []
for int in arIntColumnIndexes:
strColumn = str(odColumns[int])
arXColumnNames.append(strColumn)
return arXColumnNames
def main():
print('Getting filename')
args = get_args()
print('Getting matrix')
pdMatrix = importMatrix(args.file)
intYIndex = int(args.YIndex)
arXIndexes = args.XIndexes
arZIndexes = args.ZIndexes
partialCorrResults(intYIndex, arXIndexes, arZIndexes, pdMatrix)
if __name__ == "__main__":
main()
| 3
| 3
|
TFQ/VQE/vqe_multi.py
|
Project-Fare/quantum_computation
| 27
|
12784631
|
<filename>TFQ/VQE/vqe_multi.py
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
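# Objective for the classical optimiser below: f writes the trial value into the single
# trainable weight of the (module-level) `vqe` model and returns the expectation value
# of the Hamiltonian measured by that model.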
def f(x):
vqe.set_weights(np.array([x]))
ret = vqe(tfq.convert_to_tensor([cirq.Circuit()]))
return ret.numpy()[0][0]
def anzatz(circuit, qubits, parameters):
for i in range(5):
pos_up = int(i*2)
pos_down = pos_up + 1
circuit.append([cirq.X(qubits[pos_down])])
circuit.append([cirq.ry(np.pi/2).on(qubits[pos_up])])
circuit.append([cirq.rx(-np.pi/2).on(qubits[pos_down])])
circuit.append([cirq.CNOT(qubits[pos_up], qubits[pos_down])])
circuit.append([cirq.rz(parameters[0]).on(qubits[pos_down])])
circuit.append([cirq.CNOT(qubits[pos_up], qubits[pos_down])])
circuit.append([cirq.ry(-np.pi/2).on(qubits[pos_up])])
circuit.append([cirq.rx(np.pi/2).on(qubits[pos_down])])
circuit.append([cirq.SWAP(qubits[0], qubits[1])])
circuit.append([cirq.CNOT(qubits[5], qubits[4])])
circuit.append([cirq.Z(qubits[6]), cirq.Z(qubits[7])])
circuit.append([cirq.S(qubits[6]), cirq.S(qubits[7])])
circuit.append([cirq.H(qubits[6]), cirq.H(qubits[7])])
circuit.append([cirq.CNOT(qubits[7], qubits[6])])
circuit.append([cirq.H(qubits[8]), cirq.H(qubits[9])])
circuit.append([cirq.CNOT(qubits[9], qubits[8])])
return circuit
def hamiltonian(qubits, a, b, c, d, e, f):
h = [a]
h.append(b * cirq.Z(qubits[1]))
h.append(c * cirq.Z(qubits[2]))
h.append(d * (cirq.Z(qubits[4]) + cirq.Z(qubits[5])))
h.append(e * (cirq.Z(qubits[6]) + cirq.Z(qubits[7])))
h.append(f * (cirq.Z(qubits[8]) + cirq.Z(qubits[9])))
return h
all_coeff = [
[2.8489, 0.5678, -1.4508, 0.6799, 0.0791, 0.0791],
[2.1868, 0.5449, -1.2870, 0.6719, 0.0798, 0.0798],
[1.1182, 0.4754, -0.9145, 0.6438, 0.0825, 0.0825],
[0.7381, 0.4325, -0.7355, 0.6233, 0.0846, 0.0846],
[0.4808, 0.3937, -0.5950, 0.6025, 0.0870, 0.0870],
[0.2976, 0.3593, -0.4826, 0.5818, 0.0896, 0.0896],
[0.2252, 0.3435, -0.4347, 0.5716, 0.0910, 0.0910],
[0.0609, 0.3018, -0.3168, 0.5421, 0.0954, 0.0954],
[-0.1253, 0.2374, -0.1603, 0.4892, 0.1050, 0.1050],
[-0.1927, 0.2048, -0.0929, 0.4588, 0.1116, 0.1116],
[-0.2632, 0.1565, -0.0088, 0.4094, 0.1241, 0.1241],
[-0.2934, 0.1251, 0.0359, 0.3730, 0.1347, 0.1347],
[-0.3018, 0.1142, 0.0495, 0.3586, 0.1392, 0.1392],
[-0.3104, 0.1026, 0.0632, 0.3406, 0.1450, 0.1450],
[-0.3135, 0.0984, 0.0679, 0.3329, 0.1475, 0.1475]
]
dist = [
0.2,
0.25,
0.4,
0.5,
0.6,
0.7,
0.75,
0.9,
1.2,
1.4,
1.8,
2.2,
2.4,
2.7,
2.85
]
qubits = [cirq.GridQubit(0, i) for i in range(10)]
params = [sympy.symbols('vqe')]
vqe_circuit = anzatz(cirq.Circuit(), qubits, params)
hs = []
for i in range(len(all_coeff)):
coeff = all_coeff[i]
readout_operators = sum(hamiltonian(qubits, coeff[0], coeff[1], coeff[2], coeff[3], coeff[4], coeff[5]))
ins = tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string)
outs = tfq.layers.PQC(vqe_circuit, readout_operators)(ins)
vqe = tf.keras.models.Model(inputs=ins, outputs=outs)
opt = minimize(f, np.random.uniform(0, 2*np.pi, 1), method='Nelder-Mead')
hs.append(opt['fun'])
plt.plot(dist, hs, label='NM')
plt.xlabel("Bond Length")
plt.ylabel("Energy")
plt.ylim(-1.2, 0.2)
plt.xlim(0.22, 2.85)
plt.show()
| 2.171875
| 2
|
weather/api/migrations/0001_initial.py
|
Ethan-Genser/Weather-Station
| 0
|
12784632
|
# Generated by Django 2.1.5 on 2019-01-22 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Humidity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Temperature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('temperature', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
('chnage', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
('time_stamp', models.DateTimeField(auto_now=True)),
],
),
]
| 1.898438
| 2
|
apps/classroom/forms.py
|
alfarhanzahedi/edumate
| 1
|
12784633
|
<gh_stars>1-10
from django import forms
from .models import Classroom
from .models import Post
from .models import Comment
class ClassroomCreationForm(forms.ModelForm):
class Meta:
model = Classroom
fields = ('title', 'description', )
help_texts = {
'title': 'A suitable title for the classroom. For example, CSEN 3201.',
'description': 'A short description for the classroom.',
}
class ClassroomJoinForm(forms.Form):
unique_code = forms.CharField(
min_length = 6,
max_length = 6,
required = True,
help_text = 'The unique code shared by your teacher.'
)
class ClassroomPostCreateForm(forms.ModelForm):
class Meta:
model = Post
fields = ('post', )
help_texts = {
'post': 'Keep it simple and brief!'
}
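# Usage sketch (hypothetical view code, not part of this module): ClassroomJoinForm only
# validates the shape of the code; resolving it to a classroom is left to the view, e.g.
#
#   form = ClassroomJoinForm(request.POST)
#   if form.is_valid():
#       classroom = Classroom.objects.filter(unique_code=form.cleaned_data['unique_code']).first()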
| 2.5625
| 3
|
kloppy_spark/sinks/__init__.py
|
deepsports-io/kloppy-spark
| 0
|
12784634
|
from .show_dataframe import ShowDataFrame
| 1.140625
| 1
|
fungsi_korelasi/korelasi_gas.py
|
khairulh/pvt-calculator-fr
| 0
|
12784635
|
<gh_stars>0
from math import e as e_nat
from math import log as ln
from math import log10, ceil, floor
def array_P(P_atm, step, P_res, Pb):
"""
1D array of pressures for standardization (psia)
:param P_atm: Atmospheric pressure, usually 14.7 psia
:param step: Step size, usually 1 psia
:param P_res: Reservoir pressure (psia)
:param Pb: Bubble-point pressure (psia)
:return: 1D array (psia)
"""
P = [P_atm]
for i in range(ceil(P_atm),floor(Pb),step):
P.append(i)
P.append(Pb)
for j in range(ceil(Pb)+step, ceil(P_res)+step, step):
P.append(j)
return P
class compressibility_factor:
def dranchuk_abou_kassem(self):
"""
Calculate the gas compressibility factor (Z) with the Dranchuk-Abou-Kassem correlation
:return: Gas compressibility factor Z (dimensionless)
"""
A1 = 0.3265
A2 = -1.0700
A3 = -0.5339
A4 = 0.01569
A5 = -0.05165
A6 = 0.5475
A7 = -0.7361
A8 = 0.1844
A9 = 0.1056
A10 = 0.6134
A11 = 0.7210
# TODO: left unfinished -- complete this later
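# A possible completion (a sketch, not the original author's code): DAK defines Z
# implicitly through the reduced density rho_r = 0.27 * Ppr / (Z * Tpr), so it is
# usually solved iteratively; Ppr and Tpr are assumed inputs not defined in this class.
#
#     def z_factor(Ppr, Tpr, tol=1e-8, max_iter=100):
#         z = 1.0
#         for _ in range(max_iter):
#             rho = 0.27 * Ppr / (z * Tpr)
#             z_new = (1.0
#                      + (A1 + A2/Tpr + A3/Tpr**3 + A4/Tpr**4 + A5/Tpr**5) * rho
#                      + (A6 + A7/Tpr + A8/Tpr**2) * rho**2
#                      - A9 * (A7/Tpr + A8/Tpr**2) * rho**5
#                      + A10 * (1 + A11*rho**2) * (rho**2 / Tpr**3) * e_nat**(-A11*rho**2))
#             if abs(z_new - z) < tol:
#                 break
#             z = z_new
#         return z_new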
| 2.703125
| 3
|
bin/secret_parser.py
|
fasrc/hubzero-docker
| 0
|
12784636
|
import os
import configparser
| 1.101563
| 1
|
accounts/migrations/0002_auto_20210630_0652.py
|
Srinjay-hack/Buddy
| 0
|
12784637
|
# Generated by Django 3.2.4 on 2021-06-30 06:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='assistant',
name='phone',
field=models.CharField(default=django.utils.timezone.now, max_length=20),
preserve_default=False,
),
migrations.AddField(
model_name='assistant',
name='pickup_location',
field=models.CharField(default=django.utils.timezone.now, max_length=256),
preserve_default=False,
),
migrations.AddField(
model_name='caller',
name='pickup_location',
field=models.CharField(default=django.utils.timezone.now, max_length=256),
preserve_default=False,
),
migrations.AlterField(
model_name='caller',
name='phone',
field=models.CharField(max_length=20),
),
]
| 1.789063
| 2
|
nablapps/poll/admin.py
|
pettaroni/nablaweb
| 0
|
12784638
|
<reponame>pettaroni/nablaweb<gh_stars>0
"""
Admin interface for poll app.
"""
from django.contrib import admin
from .models import Poll, Choice
class ChoiceInline(admin.TabularInline):
"""Define how the choices should be viewed inlined with the poll"""
model = Choice
extra = 5
fields = ('choice', 'votes', )
fk_name = "poll"
@admin.register(Poll)
class PollAdmin(admin.ModelAdmin):
"""Admin interface for poll model"""
fields = ['publication_date', 'question', 'answer', 'is_current', 'users_voted', ]
readonly_fields = ['users_voted', 'created_by']
list_display = ('question', 'publication_date', 'is_current', 'created_by')
list_filter = ['publication_date']
inlines = [ChoiceInline]
actions = ['make_current']
def make_current(self, request, queryset):
"""Admin action to make a (single) poll the current poll"""
if queryset.count() != 1:
self.message_user(request, "Only one can be marked as the current poll!")
return
Poll.objects.filter(is_current=True).update(is_current=False)
queryset.update(is_current=True)
make_current.short_description = "Gjør til forsideavstemning"
| 2.453125
| 2
|
src/python/coref/train/pw/run/train_bin.py
|
nmonath/coref_tools
| 0
|
12784639
|
"""
Copyright (C) 2018 University of Massachusetts Amherst.
This file is part of "coref_tools"
http://github.com/nmonath/coref_tools
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import datetime
import errno
import os
import sys
from shutil import copytree
import torch
from coref.models import new_model
from coref.train import new_trainer
from coref.util.Config import Config
from coref.util.IO import copy_source_to_dir
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train PWE HAC on dataset')
parser.add_argument('config', type=str, help='the config file')
parser.add_argument('--outbase', type=str,
help='prefix of out dir within experiment_out_dir')
parser.add_argument('--dataname', type=str, help='Name of dataset.')
args = parser.parse_args()
config = Config(args.config)
if args.outbase:
ts = args.outbase
dataname = args.dataname
ts = os.path.join(dataname, ts)
else:
now = datetime.datetime.now()
ts = "{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}".format(
now.year, now.month, now.day, now.hour, now.minute, now.second)
debug = config.debug
diagnostics = {}
# Set up output dir
config.experiment_out_dir = os.path.join(
config.experiment_out_dir, ts)
output_dir = config.experiment_out_dir
copy_source_to_dir(output_dir,config)
if config.batcher_filename != 'None':
batcher = torch.load(config.batcher_filename)
else:
batcher = None
model = new_model(config)
config.save_config(config.experiment_out_dir)
trainer = new_trainer(config, model)
trainer.train(batcher, config.experiment_out_dir, None)
| 1.765625
| 2
|
test1.py
|
walkeb6/tf-test1
| 0
|
12784640
|
<reponame>walkeb6/tf-test1
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# input and target output
x1 = tf.placeholder(tf.float32,shape=[None,784])
x2 = tf.placeholder(tf.float32,shape=[None,784])
x = tf.concat([x1, x2], 1)
#print x.get_shape()
y1_ = tf.placeholder(tf.float32,shape=[None,10])
y2_ = tf.placeholder(tf.float32,shape=[None,10])
y_ = tf.concat([y1_, y2_], 1)
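# (y_ is redefined below as the one-hot encoding of the digit sum, so this
#  concatenation of the two label vectors ends up unused.)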
# get the one-hot vectors of the sum
y1m = tf.argmax(y1_,1)
y2m = tf.argmax(y2_,1)
ym = tf.add(y1m,y2m)
y_ = tf.one_hot(ym,19)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
def convpool_layer2d(x,widthx,widthy,features_in,features_out):
W_conv = weight_variable([widthx,widthy,features_in,features_out])
b_conv = bias_variable([features_out])
h_conv = tf.nn.relu(conv2d(x,W_conv)+b_conv)
h_pool = max_pool_2x2(h_conv);
return h_pool
def fc_layer(x,nIn,nOut):
W = weight_variable([nIn,nOut])
b = bias_variable([nOut])
return tf.nn.relu(tf.matmul(x,W)+b)
x_image = tf.reshape(x,[-1,28*2,28,1]);
# two convolutional layers
# first layer
h1 = convpool_layer2d(x_image,5,5,1,32)
# second layer
h2 = convpool_layer2d(h1,5,5,32,64)
h2_flat = tf.reshape(h2,[-1,2*7*7*64])
#h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)
h_fc1 = fc_layer(h2_flat,2*7*7*64,2048)
# apply dropout (right now not doing this)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
# second layer
h_fc2 = fc_layer(h_fc1,2048,100);
# readout layer
n_out = 19
W_out = weight_variable([100,n_out])
b_out = bias_variable([n_out])
y_conv = tf.nn.sigmoid(tf.matmul(h_fc2, W_out)+b_out)
# the classical adder
# add the things up like a probability distribution
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
#correct_prediction=tf.equal(tf.argmax(y_conv,1), tf.argmax(y1_,1))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
accuracy = tf.reduce_mean(tf.square(tf.subtract(y_,y_conv)))
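# NOTE: despite the name, this is the mean squared error between the one-hot target
# and the sigmoid outputs, not a classification accuracy.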
sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# go
for i in range(10000):
batch1 = mnist.train.next_batch(50)
batch2 = mnist.train.next_batch(50)
if i%100==0:
train_accuracy = accuracy.eval(feed_dict={x1:batch1[0],x2:batch2[0],y1_:batch1[1],y2_:batch2[1],keep_prob:1.0})
print("step %d, training accuracy %g"%(i,train_accuracy))
train_step.run(feed_dict={x1:batch1[0],x2:batch2[0],y1_:batch1[1],y2_:batch2[1],keep_prob:0.5})
#print("test accuracy %g"%accuracy.eval(feed_dict={x1_:mnist.test.images, y1_:mnist.test.labels,keep_prob:1.0}))
| 3.046875
| 3
|
server/app.py
|
johnjdailey/JS-Realtime-Dashboard
| 1
|
12784641
|
from flask import Flask
# blueprint import
from blueprints.tweets.tweets import tweetsData
from blueprints.btc.btc import btcData
def create_app(app):
# register blueprint
app.register_blueprint(tweetsData)
app.register_blueprint(btcData)
return app
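# For reference, each imported blueprint module is expected to expose a Blueprint object.
# A minimal sketch of what blueprints/btc/btc.py might look like (hypothetical route and
# payload; only the `btcData` name is taken from the import above):
#
#   from flask import Blueprint, jsonify
#
#   btcData = Blueprint("btcData", __name__)
#
#   @btcData.route("/btc")
#   def latest_price():
#       return jsonify({"price": None})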
if __name__ == "__main__":
app = Flask(__name__)
create_app(app).run(host="0.0.0.0", debug=False, port=9000)
| 2.203125
| 2
|
app/interfaces/i_parser.py
|
heatonk/caldera_pathfinder
| 71
|
12784642
|
import abc
class ParserInterface(abc.ABC):
@abc.abstractmethod
def parse(self, report):
pass
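# Illustrative subclass (a hypothetical sketch -- concrete parsers live in their own
# plugin modules and typically return structured findings rather than raw lines):
#
#   class ExampleParser(ParserInterface):
#       def parse(self, report):
#           return [line for line in report.splitlines() if line.strip()]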
| 2.421875
| 2
|
Algorithm/FizzBuzz.py
|
ChawisB/KuberOpsTest
| 0
|
12784643
|
<reponame>ChawisB/KuberOpsTest
#Initially tried switch case but didn't really work out so tried f strings instead
def fizzbuzz(n):
for i in range(1, n + 1):
print([f'{i}', f'Fizz', f'Buzz', f'FizzBuzz'][(i % 3 == 0) + 2 * (i % 5 == 0)])
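#The index expression relies on booleans being ints: (i % 3 == 0) contributes 1 and
#2 * (i % 5 == 0) contributes 2, so the index is 0 -> i, 1 -> Fizz, 2 -> Buzz, 3 -> FizzBuzz.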
fizzbuzz(100)
| 2.71875
| 3
|
filters/tests/test_mixins.py
|
jof/drf-url-filters
| 176
|
12784644
|
<filename>filters/tests/test_mixins.py
import unittest
from filters.mixins import FiltersMixin
class MyTest(unittest.TestCase):
def test(self):
self.assertEqual(4, 4)
| 2.171875
| 2
|
youtube/views.py
|
BudzynskiMaciej/Django-Project
| 0
|
12784645
|
from django.shortcuts import render
from django.views import View
from django.views.generic import ListView, DetailView
from .models import Video
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class PopularVideosList(ListView):
template_name = 'index.html'
context_object_name = 'most_popular_videos'
def get_queryset(self):
return Video.objects.filter(is_most_viewed=True)
class PopularVideosDetail(DetailView):
model = Video
template_name = 'youtube/popularVideosDetail.html'
class MyVideosList(LoginRequiredMixin, View):
def get(self, request):
user = request.user.username
my_videos_list = Video.objects.filter(
channel_title__exact=user
).order_by('-published_at')[:15]
context = {'my_videos': my_videos_list}
return render(request, 'youtube/myVideosList.html', context)
| 2.109375
| 2
|
django_prices_vatlayer/migrations/0002_ratetypes.py
|
korycins/django-prices-vatlayer
| 12
|
12784646
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 13:40
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('django_prices_vatlayer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RateTypes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('types', jsonfield.fields.JSONField(verbose_name='types')),
],
),
]
| 1.960938
| 2
|
icontact/observer.py
|
rochapps/django-icontact
| 1
|
12784647
|
"""
Observer for iContact instances
"""
import logging
from django.db.models import signals
from icontact.models import IContact
from icontact.client import IContactClient, IContactException
logger = logging.getLogger(__name__)
class IContactObserver(object):
"""
Class that utilizes icontact client to sync model with icontact service
"""
def __init__(self, client=None):
"""
Initialize an instance of the IContactObserver class.
"""
self.adapters = {}
self._client = client
def observe(self, model, adapter):
"""
Establishes a connection between the model and the iContact service, using
adapter to transform data.
"""
self.adapters[model] = adapter
signals.post_save.connect(self.on_update, sender=model)
signals.post_delete.connect(self.on_delete, sender=model)
def on_update(self, **kwargs):
"""
Called by Django's signal mechanism when an observed model is updated.
"""
created = kwargs.get('created', False)
if created:
logging.debug("Created")
self.create(kwargs['sender'], kwargs['instance'])
return
logging.debug("Updating")
self.update(kwargs['sender'], kwargs['instance'])
def on_delete(self, **kwargs):
"""
Called by Django's signal mechanism when an observed model is deleted.
"""
self.delete(kwargs['sender'], kwargs['instance'])
def client(self):
"""
Instantiate the client class to make authenticated calls to icontact.
"""
if self._client is None:
self._client = IContactClient()
return self._client
def get_contact(self, instance):
"""
gets a contact from icontact service
"""
contact_id = IContact.objects.get_contact_id(instance)
logging.debug("contact id: {id}".format(id=contact_id))
try:
contact = self.client().get_contact(contact_id)
except IContactException:
return None
logging.debug('contact retrieved')
logging.debug(contact)
return contact
def create(self, sender, instance):
"""
creates a new contact in iContact's database as well as an
iContact instance
"""
adapter = self.adapters[sender]
logging.debug('Adapter: {adapter}'.format(adapter=adapter))
client = self.client()
contact = adapter.get_contact_data(instance) #IcontactData instance
data = contact.get_data()
logging.debug("contact's data: %s"%data)
try:
icontact = client.create_contact(payload=data)
contact_id = icontact['contacts'][0]['contactId']
subscription = client.subscribe(contact_id)
except IContactException:
return None
IContact.objects.set_contact_id(instance, contact_id)
def update(self, sender, instance):
"""
Update or create an Icontact Contact.
By default the client subscribes to the default list specified in
settings.py
"""
adapter = self.adapters[sender]
client = self.client()
contact = adapter.get_contact_data(instance) #IcontactData instance
data = contact.get_data()
logging.debug(data)
logging.debug(data['contact'])
try:
icontact = self.get_contact(instance)
contact_id = icontact['contact']['contactId']
client.update_contact(contact_id=contact_id, payload=data['contact'])
except IContactException:
return None
IContact.objects.set_contact_id(instance, contact_id)
def delete(self, sender, instance):
"""
Deletes iContact record from their service and from our database
"""
adapter = self.adapters[sender]
client = self.client()
contact = adapter.get_contact_data(instance) #IcontactData instance
icontact = self.get_contact(instance)
if not icontact: return None
contact_id = icontact['contact']['contactId']
try:
client.delete_contact(contact_id) #delete from icontact
except IContactException:
pass
IContact.objects.delete_contact_id(instance) #delete from database
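# Example wiring (hypothetical model and adapter names -- the real ones are supplied by
# the project that uses this observer):
#
#   observer = IContactObserver()
#   observer.observe(Subscriber, SubscriberAdapter())
#
# After observe() is called, saving or deleting a Subscriber keeps the matching iContact
# contact in sync through the post_save/post_delete signals connected above.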
| 2.59375
| 3
|
pytorch/config.py
|
maystroh/modelgenesis
| 0
|
12784648
|
import os
import shutil
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG)
log = logging.getLogger()
class models_genesis_config:
model = "Unet3D"
suffix = "genesis_oct"
exp_name = model + "-" + suffix
# data
data = "/home/harddrive/Projects/GAMMA_data/model_genesis_data/generated/"
# train_fold = [0, 1, 2, 3, 4]
train_fold = [0]
# valid_fold = [5, 6]
valid_fold = [1]
test_fold = [1]
hu_min = -1000.0
hu_max = 1000.0
scale = 32
input_rows = 96
input_cols = 96
input_deps = 96
nb_class = 1
# model pre-training
verbose = 1
weights = None
batch_size = 1
optimizer = "sgd"
workers = 10
max_queue_size = workers * 4
save_samples = "png"
nb_epoch = 10000
patience = 50
lr = 1
# image deformation
nonlinear_rate = 0.9
paint_rate = 0.9
outpaint_rate = 0.8
inpaint_rate = 1.0 - outpaint_rate
local_rate = 0.5
flip_rate = 0.4
# logs
model_path = "../pretrained_weights"
if not os.path.exists(model_path):
os.makedirs(model_path)
logs_path = os.path.join(model_path, "Logs")
if not os.path.exists(logs_path):
os.makedirs(logs_path)
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
log.info("{:30} {}".format(a, getattr(self, a)))
log.info("\n")
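if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file): build the config and log
    # every setting, e.g. to sanity-check paths before launching pre-training.
    conf = models_genesis_config()
    conf.display()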
| 2.09375
| 2
|
app/control/serializers.py
|
aalquaiti/challenge
| 0
|
12784649
|
<gh_stars>0
# Created by: <NAME>
# For QCTRL Backend Challenge
# January 2020
"""
Contains all Serializers used by Models in app
"""
from rest_framework import serializers
from .models import Control
class ControlSerializer(serializers.ModelSerializer):
"""
Serializer for Control Model. All fields are required
"""
class Meta:
model = Control
fields = '__all__'
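# Usage sketch (hypothetical request data -- the concrete fields come from the Control
# model, which this serializer exposes in full via `fields = '__all__'`):
#
#   serializer = ControlSerializer(data=request.data)
#   if serializer.is_valid(raise_exception=True):
#       control = serializer.save()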
| 1.78125
| 2
|
app/api/v1/schema/ussd.py
|
a-wakeel/Device-Registration-Subsystem
| 6
|
12784650
|
<filename>app/api/v1/schema/ussd.py<gh_stars>1-10
"""
DRS Registration schema package.
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from marshmallow import Schema, fields, validates, pre_load, pre_dump, post_dump, post_load, validate
from app.api.v1.helpers.validators import *
from app.api.v1.models.devicequota import DeviceQuota
from app.api.v1.models.status import Status
import ast
import pydash
from app import app, GLOBAL_CONF
from flask_babel import gettext as _
class RegistrationDetailsSchema(Schema):
"""Schema for USSD Registration routes."""
cnic = fields.Int(required=True)
msisdn = fields.Int(required=True)
network = fields.Str(required=True)
imeis = fields.List(fields.List(fields.Str(validate=validate_imei)), required=False)
device_count = fields.Int(required=True, error_messages={'required': 'Device count is required'})
@pre_load()
def valid_imeis(self, data):
if 'imeis' in data:
try:
imeis = data.get('imeis')[0]
for individual_imei in imeis:
if individual_imei.isdigit() and (len(individual_imei) > 13) and (len(individual_imei) < 17):
pass
else:
raise ValidationError("IMEI must be digits only and between 14 and 16 characters long.", field_names=['imeis'])
except Exception as e:
raise ValidationError(str(e), field_names=['imeis'])
@pre_load()
def convert_imei(self, data):
"""Converts imei to supported formats."""
if 'imeis' in data and 'file' not in data:
try:
data['imeis'] = ast.literal_eval(data.get('imeis'))
except Exception as e:
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
imeis = pydash.flatten_deep(data['imeis'])
if len(imeis) == 0:
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
elif not isinstance(data['imeis'][0], list):
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
elif len(imeis) != len(list(set(imeis))):
raise ValidationError(_('Duplicate IMEIs in request'), field_names=['imeis'])
elif 'device_count' in data and data['device_count'].isdigit():
if int(data['device_count']) > 10:
raise ValidationError('Only 10 devices are allowed in case of webpage input',
field_names=['imeis'])
if int(data['device_count']) != len(data['imeis']):
raise ValidationError('Device count should be the same as the number of devices',
field_names=['device_count'])
if 'imei_per_device' in data and data['imei_per_device'].isdigit():
if int(data['imei_per_device']) > 5:
raise ValidationError('Only 5 IMEIs are allowed per device in webpage input',
field_names=['imei_per_device'])
invalid = list(filter(lambda x: len(x) != int(data['imei_per_device']), data['imeis']))
if len(invalid) > 0:
raise ValidationError('Number of IMEIs for each device should be the same as imei_per_device',
field_names=['imei_per_device'])
@pre_dump()
def request_status(self, data):
"""Returns current status of the request."""
data.status_label = Status.get_status_type(data.status)
data.processing_status_label = Status.get_status_type(data.processing_status)
data.report_status_label = Status.get_status_type(data.report_status)
@validates('device_count')
def validate_device_count(self, value):
"""Validates devices count."""
if value <= 0:
raise ValidationError('Device count must be a positive number',
field_names=['device_count'])
if value > 10000000:
raise ValidationError('Device count in single request should be less than 10000000')
@validates('network')
def validate_network(self, value):
"""Validates network"""
if value.isdigit():
raise ValidationError('Network type should be a string',
field_names=['network'])
class UssdTrackingSchema(Schema):
"""Schema for USSD Tracking."""
msisdn = fields.Int(required=True)
network = fields.Str(required=True)
device_id = fields.Int(required=True)
@pre_dump()
def request_status(self, data):
"""Returns current status of the request."""
data.status_label = Status.get_status_type(data.status)
data.processing_status_label = Status.get_status_type(data.processing_status)
data.report_status_label = Status.get_status_type(data.report_status)
@validates('network')
def validate_network(self, value):
"""Validates network"""
if value.isdigit():
raise ValidationError('Network type should be a string',
field_names=['network'])
class UssdDeleteSchema(Schema):
"""Schema for USSD Tracking."""
msisdn = fields.Int(required=True)
network = fields.Str(required=True)
device_id = fields.Int(required=True)
close_request = fields.Str(required=True)
class UssdCountSchema(Schema):
"""Schema for USSD Tracking."""
msisdn = fields.Int(required=True)
network = fields.Str(required=True)
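# Usage sketch (hypothetical values): validating an incoming USSD registration payload.
# Values are passed as strings, matching the .isdigit()/literal_eval checks above.
#
#   errors = RegistrationDetailsSchema().validate({
#       'cnic': '1234567890123',
#       'msisdn': '923001234567',
#       'network': 'telenor',
#       'device_count': '1',
#       'imeis': '[["35201906000001"]]',
#   })
#   # validate() returns a dict of error messages keyed by field name (empty on success).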
| 1.140625
| 1
|