blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b1026d8d33928d11c7056e2bd7b68180380afcb | 91d09c67f88cf5002327c27548dd393e3b12df03 | /users/urls.py | 3a5d4dcf1a2242d5eb6e378494db0dafdf1856cf | [] | no_license | saturnisbig/blogproject | 479b733f6aaa353fb58195fafec259de1e41678d | 8804384ad88e4f6f303541dbcc683ad018324c30 | refs/heads/master | 2022-12-10T23:00:15.085834 | 2019-08-18T08:40:42 | 2019-08-18T08:40:42 | 124,767,941 | 0 | 0 | null | 2022-12-08T05:45:41 | 2018-03-11T15:02:28 | CSS | UTF-8 | Python | false | false | 274 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
from django.conf.urls import url
from users import views
app_name = 'users'
urlpatterns = [
url(r'^profile/$', views.profile, name='profile'),
url(r'^profile/change/$', views.change_profile, name='change_profile'),
]
| [
"i.kenting@gmail.com"
] | i.kenting@gmail.com |
7f120ab7c1b9fd2eddfd7b92d2cae435cca71e83 | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Darlington/phase1/python Basic 1/day 4 solution/qtn2.py | 973e0aa68857d7f3cbfdbeb6ed8c7d1e20dabb88 | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 202 | py | #program to count number of 4
import math
def list_count_4(nums):
count = 0
for num in nums:
if num == 4:
count = count + 1
return count
print(list_count_4([1, 4, 6, 7, 4, 5, 4, 4])) | [
"darlingtonchibuzor64@gmail.com"
] | darlingtonchibuzor64@gmail.com |
55f87d8c96b29560bb04f2e49aad8726bf02a562 | 660e35c822423685aea19d038daa8356722dc744 | /account_invoice_line_standalone/invoice.py | 3aac84a7c0ec0a747bf99428d2e801b777a31f14 | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.transaction import Transaction
from trytond.pool import PoolMeta
class Invoice(metaclass=PoolMeta):
__name__ = 'account.invoice'
@classmethod
def __setup__(cls):
super(Invoice, cls).__setup__()
add_remove = [
('invoice', '=', None),
]
if not cls.lines.add_remove:
cls.lines.add_remove = add_remove
else:
cls.lines.add_remove = [
add_remove,
cls.lines.add_remove,
]
class InvoiceLine(metaclass=PoolMeta):
__name__ = 'account.invoice.line'
@classmethod
def _view_look_dom_arch(cls, tree, type, field_children=None, level=0):
if type == 'form' and Transaction().context.get('standalone'):
tree_root = tree.getroottree().getroot()
if tree_root.get('cursor') == 'product':
tree_root.set('cursor', 'party')
return super(InvoiceLine, cls)._view_look_dom_arch(tree, type,
field_children=field_children, level=level)
| [
"saif.kazi76@gmail.com"
] | saif.kazi76@gmail.com |
3df944b45ac66276c48eadcd0d825f7ea50e5b7c | 90817e9d91df4673ae06eac02add165edcbac2ad | /AbstractUser/migrations/0008_auto_20170602_2306.py | e4c254d7ac2d18112e594af23f43860519df1d68 | [] | no_license | Jyonn/QingningWork | 8f19d34afcbb7d97e73091fc4ccfa0e2bec25521 | 5a48dd150ab11951d5084b9739777ff4c342438c | refs/heads/master | 2021-06-22T10:46:11.458104 | 2017-09-05T04:34:22 | 2017-09-05T04:34:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-02 23:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AbstractUser', '0007_abstractuser_introduce'),
]
operations = [
migrations.AlterField(
model_name='abstractuser',
name='introduce',
field=models.CharField(blank=True, default=None, max_length=20, null=True, verbose_name='一句话介绍'),
),
]
| [
"lqj679ssn@qq.com"
] | lqj679ssn@qq.com |
b4a17c6ce4806ea2c70cf001ae759a492bb0c91c | 51e51297ac2edfd7309d3b9f56d66b6eecad2baa | /tests/worker_test.py | 2a21aa8f7f07d81bf0932e8b565ca9addc1c2fe6 | [
"BSD-3-Clause"
] | permissive | ailesgrises/aiohttp | 19401d434f9bc7962f45ab7aad86bf0e24420fab | 2cf582eb4a00fe7dd88e5a546a36bbbfbec0170c | refs/heads/master | 2021-01-09T08:14:23.330898 | 2014-02-25T12:10:41 | 2014-02-25T12:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,945 | py | """Tests for aiohttp/worker.py"""
import asyncio
import unittest
import unittest.mock
from aiohttp import worker
from aiohttp.wsgi import WSGIServerHttpProtocol
class TestWorker(worker.AsyncGunicornWorker):
def __init__(self):
self.connections = {}
class WorkerTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.worker = TestWorker()
def tearDown(self):
self.loop.close()
@unittest.mock.patch('aiohttp.worker.asyncio')
def test_init_process(self, m_asyncio):
try:
self.worker.init_process()
except AttributeError:
pass
self.assertTrue(m_asyncio.get_event_loop.return_value.close.called)
self.assertTrue(m_asyncio.new_event_loop.called)
self.assertTrue(m_asyncio.set_event_loop.called)
@unittest.mock.patch('aiohttp.worker.asyncio')
def test_run(self, m_asyncio):
self.worker.loop = unittest.mock.Mock()
self.worker.run()
self.assertTrue(m_asyncio.async.called)
self.assertTrue(self.worker.loop.run_until_complete.called)
self.assertTrue(self.worker.loop.close.called)
def test_factory(self):
self.worker.wsgi = unittest.mock.Mock()
self.worker.loop = unittest.mock.Mock()
self.worker.log = unittest.mock.Mock()
self.worker.cfg = unittest.mock.Mock()
f = self.worker.factory(
self.worker.wsgi, 'localhost', 8080)
self.assertIsInstance(f, WSGIServerHttpProtocol)
@unittest.mock.patch('aiohttp.worker.asyncio')
def test__run(self, m_asyncio):
self.worker.ppid = 1
self.worker.alive = True
self.worker.servers = []
sock = unittest.mock.Mock()
sock.cfg_addr = ('localhost', 8080)
self.worker.sockets = [sock]
self.worker.wsgi = unittest.mock.Mock()
self.worker.log = unittest.mock.Mock()
self.worker.loop = unittest.mock.Mock()
self.worker.notify = unittest.mock.Mock()
self.loop.run_until_complete(self.worker._run())
m_asyncio.async.return_value.add_done_callback.call_args[0][0](
self.worker.sockets[0])
self.assertTrue(self.worker.log.info.called)
self.assertTrue(self.worker.notify.called)
def test__run_connections(self):
conn = unittest.mock.Mock()
self.worker.ppid = 1
self.worker.alive = False
self.worker.servers = [unittest.mock.Mock()]
self.worker.connections = {1: conn}
self.worker.sockets = []
self.worker.wsgi = unittest.mock.Mock()
self.worker.log = unittest.mock.Mock()
self.worker.loop = self.loop
self.worker.loop.create_server = unittest.mock.Mock()
self.worker.notify = unittest.mock.Mock()
def _close_conns():
yield from asyncio.sleep(0.1, loop=self.loop)
self.worker.connections = {}
asyncio.async(_close_conns(), loop=self.loop)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.log.info.called)
self.assertTrue(self.worker.notify.called)
self.assertFalse(self.worker.servers)
self.assertTrue(conn.closing.called)
@unittest.mock.patch('aiohttp.worker.os')
@unittest.mock.patch('aiohttp.worker.asyncio.sleep')
def test__run_exc(self, m_sleep, m_os):
m_os.getpid.return_value = 1
m_os.getppid.return_value = 1
self.worker.servers = [unittest.mock.Mock()]
self.worker.ppid = 1
self.worker.alive = True
self.worker.sockets = []
self.worker.log = unittest.mock.Mock()
self.worker.loop = unittest.mock.Mock()
self.worker.notify = unittest.mock.Mock()
slp = asyncio.Future(loop=self.loop)
slp.set_exception(KeyboardInterrupt)
m_sleep.return_value = slp
self.loop.run_until_complete(self.worker._run())
self.assertTrue(m_sleep.called)
self.assertTrue(self.worker.servers[0].close.called)
def test_close_wsgi_app(self):
self.worker.ppid = 1
self.worker.alive = False
self.worker.servers = [unittest.mock.Mock()]
self.worker.connections = {}
self.worker.sockets = []
self.worker.log = unittest.mock.Mock()
self.worker.loop = self.loop
self.worker.loop.create_server = unittest.mock.Mock()
self.worker.notify = unittest.mock.Mock()
self.worker.wsgi = unittest.mock.Mock()
self.worker.wsgi.close.return_value = asyncio.Future(loop=self.loop)
self.worker.wsgi.close.return_value.set_result(1)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.wsgi.close.called)
self.worker.wsgi = unittest.mock.Mock()
self.worker.wsgi.close.return_value = asyncio.Future(loop=self.loop)
self.worker.wsgi.close.return_value.set_exception(ValueError())
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.wsgi.close.called)
def test_portmapper_worker(self):
wsgi = {1: object(), 2: object()}
class Worker(worker.PortMapperWorker):
def __init__(self, wsgi):
self.wsgi = wsgi
def factory(self, wsgi, host, port):
return wsgi
w = Worker(wsgi)
self.assertIs(
wsgi[1], w.get_factory(object(), '', 1)())
self.assertIs(
wsgi[2], w.get_factory(object(), '', 2)())
def test_portmapper_close_wsgi_app(self):
class Worker(worker.PortMapperWorker):
def __init__(self, wsgi):
self.wsgi = wsgi
wsgi = {1: unittest.mock.Mock(), 2: unittest.mock.Mock()}
wsgi[1].close.return_value = asyncio.Future(loop=self.loop)
wsgi[1].close.return_value.set_result(1)
wsgi[2].close.return_value = asyncio.Future(loop=self.loop)
wsgi[2].close.return_value.set_exception(ValueError())
w = Worker(wsgi)
w.ppid = 1
w.alive = False
w.servers = [unittest.mock.Mock()]
w.connections = {}
w.sockets = []
w.log = unittest.mock.Mock()
w.loop = self.loop
w.loop.create_server = unittest.mock.Mock()
w.notify = unittest.mock.Mock()
self.loop.run_until_complete(w._run())
self.assertTrue(wsgi[1].close.called)
self.assertTrue(wsgi[2].close.called)
def test_wrp(self):
conn = object()
tracking = {}
meth = unittest.mock.Mock()
wrp = worker._wrp(conn, meth, tracking)
wrp()
self.assertIn(id(conn), tracking)
self.assertTrue(meth.called)
meth = unittest.mock.Mock()
wrp = worker._wrp(conn, meth, tracking, False)
wrp()
self.assertNotIn(1, tracking)
self.assertTrue(meth.called)
| [
"fafhrd91@gmail.com"
] | fafhrd91@gmail.com |
370b32ada376d024a88c4b40396a2342fc4ef9a4 | 753cd066a9bd26b6c37c8d53a86c7a9c659ec18c | /tutorials/tutorials/pytorch/observing_tensors/tests/test_tuto_observing_tensors.py | b16dbc89be54070ee72bfcda5485b8c656938dc7 | [
"MIT"
] | permissive | graphcore/examples | ac872015808ed2a913d4d7bf0d63202ce15ebbae | e2f834dd60e7939672c1795b4ac62e89ad0bca49 | refs/heads/master | 2023-08-05T02:08:12.341836 | 2023-07-27T11:13:10 | 2023-07-27T11:13:10 | 143,977,106 | 311 | 80 | MIT | 2023-09-11T16:42:56 | 2018-08-08T07:29:17 | Python | UTF-8 | Python | false | false | 382 | py | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from pathlib import Path
import pytest
from tutorials_tests import testing_util
working_path = Path(__file__).parent.parent
@pytest.mark.category2
@pytest.mark.ipus(1)
def test_run_default_ipu():
# Check default params
testing_util.run_command("python anchor_tensor_example.py", working_path, "Saved histogram")
| [
"adams@graphcore.ai"
] | adams@graphcore.ai |
726fb2896fcd06456771960f577867f6936d8cf2 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/networkx/FPythonCode/networkx.generators.mycielski.py | ca5f521cea753b22fd26eed9dd5cca11a3694baf | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,448 | py | # Copyright (C) 2010-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
"""Functions related to the Mycielski Operation and the Mycielskian family
of graphs.
"""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['mycielskian', 'mycielski_graph']
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def mycielskian(G, iterations=1):
r"""Returns the Mycielskian of a simple, undirected graph G
The Mycielskian of graph preserves a graph's triangle free
property while increasing the chromatic number by 1.
The Mycielski Operation on a graph, :math:`G=(V, E)`, constructs a new
graph with :math:`2|V| + 1` nodes and :math:`3|E| + |V|` edges.
The construction is as follows:
Let :math:`V = {0, ..., n-1}`. Construct another vertex set
:math:`U = {n, ..., 2n}` and a vertex, `w`.
Construct a new graph, `M`, with vertices :math:`U \bigcup V \bigcup w`.
For edges, :math:`(u, v) \in E` add edges :math:`(u, v), (u, v + n)`, and
:math:`(u + n, v)` to M. Finally, for all vertices :math:`u \in U`, add
edge :math:`(u, w)` to M.
The Mycielski Operation can be done multiple times by repeating the above
process iteratively.
More information can be found at https://en.wikipedia.org/wiki/Mycielskian
Parameters
----------
G : graph
A simple, undirected NetworkX graph
iterations : int
The number of iterations of the Mycielski operation to
preform on G. Defaults to 1. Must be a non-negative integer.
Returns
-------
M : graph
The Mycielskian of G after the specified number of iterations.
Notes
------
Graph, node, and edge data are not necessarily propagated to the new graph.
"""
n = G.number_of_nodes()
M = nx.convert_node_labels_to_integers(G)
for i in range(iterations):
n = M.number_of_nodes()
M.add_nodes_from(list(range(n, 2 * n)))
old_edges = list(M.edges())
M.add_edges_from((u, v + n) for u, v in old_edges)
M.add_edges_from((u + n, v) for u, v in old_edges)
M.add_node(2 * n)
M.add_edges_from((u + n, 2 * n) for u in range(n))
return M
def mycielski_graph(n):
"""Generator for the n_th Mycielski Graph.
The Mycielski family of graphs is an infinite set of graphs.
:math:`M_1` is the singleton graph, :math:`M_2` is two vertices with an
edge, and, for :math:`i > 2`, :math:`M_i` is the Mycielskian of
:math:`M_{i-1}`.
More information can be found at
http://mathworld.wolfram.com/MycielskiGraph.html
Parameters
----------
n : int
The desired Mycielski Graph.
Returns
-------
M : graph
The n_th Mycielski Graph
Notes
-----
The first graph in the Mycielski sequence is the singleton graph.
The Mycielskian of this graph is not the :math:`P_2` graph, but rather the
:math:`P_2` graph with an extra, isolated vertex. The second Mycielski
graph is the :math:`P_2` graph, so the first two are hard coded.
The remaining graphs are generated using the Mycielski operation.
"""
if n < 1:
raise nx.NetworkXError("must satisfy n >= 0")
if n == 1:
return nx.empty_graph(1)
else:
return mycielskian(nx.path_graph(2), n - 2)
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
08a0364c1114ec8fba63dc0d8b36330213911921 | f3b8d7de17a6ac23791bee66235b868ef1bf6515 | /ps4.py | 8d4d78d64f3a2a613a6a60bf696f2bd5d38a3607 | [
"Giftware"
] | permissive | juraj80/MIT-6.00-OCW-Problem-Sets | 1573babe7a829b8a24300122f21dba1212b4349e | 34129fb51fd13f67bfcfd97e5c2dd8c19fb9fcbf | refs/heads/master | 2021-01-02T08:17:55.270326 | 2017-08-17T14:34:20 | 2017-08-17T14:34:20 | 98,988,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,283 | py | # Problem Set 4
# Name:
# Collaborators:
# Time:
#
# Problem 1
#
def nestEggFixed(salary, save, growthRate, years):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- growthRate: the annual percent increase in your investment account (an
integer between 0 and 100).
- years: the number of years to work.
- return: a list whose values are the size of your retirement account at
the end of each year.
"""
# TODO: Your code here.
result = []
endOfPeriod = salary * save * 0.01
for n in range (0,years):
result = result + [endOfPeriod]
endOfPeriod = endOfPeriod * (1 + 0.01 * growthRate)+ salary * save * 0.01
return result
def testNestEggFixed():
salary = 10000
save = 10
growthRate = 15
years = 5
savingsRecord = nestEggFixed(salary, save, growthRate, years)
print savingsRecord
# Output should have values close to:
# [1000.0, 2150.0, 3472.5, 4993.375, 6742.3812499999995]
# TODO: Add more test cases here.
#
# Problem 2
#
def nestEggVariable(salary, save, growthRates):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- growthRate: a list of the annual percent increases in your investment
account (integers between 0 and 100).
- return: a list of your retirement account value at the end of each year.
"""
# TODO: Your code here.
result = []
endOfPeriod = salary * save * 0.01
result = result + [endOfPeriod]
for n in growthRates[1:]:
endOfPeriod = endOfPeriod * (1 + 0.01 * n)+ salary * save * 0.01
result = result + [endOfPeriod]
return result
def testNestEggVariable():
salary = 10000
save = 10
growthRates = [3, 4, 5, 0, 3]
savingsRecord = nestEggVariable(salary, save, growthRates)
print savingsRecord
# Output should have values close to:
# [1000.0, 2040.0, 3142.0, 4142.0, 5266.2600000000002]
# TODO: Add more test cases here.
#
# Problem 3
#
def postRetirement(savings, growthRates, expenses):
"""
- savings: the initial amount of money in your savings account.
- growthRate: a list of the annual percent increases in your investment
account (an integer between 0 and 100).
- expenses: the amount of money you plan to spend each year during
retirement.
- return: a list of your retirement account value at the end of each year.
"""
# TODO: Your code here.
result = []
endOfPeriod = savings * (1 + 0.01*growthRates[0]) - expenses
result = result + [endOfPeriod]
for n in growthRates[1:]:
endOfPeriod = endOfPeriod*(1 + 0.01 * n) - expenses
result = result + [endOfPeriod]
return result
def testPostRetirement():
savings = 100000
growthRates = [10, 5, 0, 5, 1]
expenses = 30000
savingsRecord = postRetirement(savings, growthRates, expenses)
print savingsRecord
# Output should have values close to:
# [80000.000000000015, 54000.000000000015, 24000.000000000015,
# -4799.9999999999854, -34847.999999999985]
# TODO: Add more test cases here.
#
# Problem 4
#
def findMaxExpenses(salary, save, preRetireGrowthRates, postRetireGrowthRates,
epsilon):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- preRetireGrowthRates: a list of annual growth percentages on investments
while you are still working.
- postRetireGrowthRates: a list of annual growth percentages on investments
while you are retired.
- epsilon: an upper bound on the absolute value of the amount remaining in
the investment fund at the end of retirement.
"""
# TODO: Your code here.
savings = nestEggVariable(salary, save, preRetireGrowthRates)
savings = savings[-1]
print savings
low = 0
high = savings
guess = (low+high)/2.0
ctr = 1
remainingBalance = postRetirement(savings, postRetireGrowthRates, guess)
remainingBalance = remainingBalance[-1]
while abs(remainingBalance) > epsilon and ctr <= 10:
if remainingBalance < 0:
high = guess
else:
low = guess
ctr += 1
guess = (low+high)/2.0
remainingBalance = postRetirement(savings, postRetireGrowthRates, guess)
remainingBalance = remainingBalance[-1]
assert ctr <=10, 'Iteration count exceeded'
return guess
def testFindMaxExpenses():
salary = 10000
save = 10
preRetireGrowthRates = [3, 4, 5, 0, 3]
postRetireGrowthRates = [10, 5, 0, 5, 1]
epsilon = .01
expenses = findMaxExpenses(salary, save, preRetireGrowthRates,
postRetireGrowthRates, epsilon)
print expenses
# Output should have a value close to:
# 1229.95548986
# TODO: Add more test cases here.
| [
"you@example.com"
] | you@example.com |
fcadca27cc3e00dcbbbe0336d8ab72ac9d5b48ec | b26e1704b963881e7681712c923787772ac463ec | /Intro-To-Python-Edu/lesson7/task1/functions.py | 8e6f20412678e411c18e3179170d2bc4a60463da | [] | no_license | lakshmikantdeshpande/Python-Courses | 64f8a397b727042f2662fa7597ea0e73491717f3 | d15364b42c182d3487532853bde37deb48865494 | refs/heads/master | 2021-09-02T06:51:19.490674 | 2017-12-31T06:55:53 | 2017-12-31T06:55:53 | 94,890,325 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | def hello_world(): # function named my_function
print("Hello, World!")
for i in range(5):
hello_world() # call function defined above 5 times
print('I want to be a function')
print('I want to be a function')
print('I want to be a function')
def fun():
for i in range(3):
print('I want to be a function')
for i in range(3):
fun()
| [
"lakshmikantdeshpande@gmail.com"
] | lakshmikantdeshpande@gmail.com |
1ef3b2c006b31f50836361fc3da674b694f32d52 | ec959e13aaaa5ea8f597debaf34bba18e084c915 | /deep_learning_adv_detector/evaluation/shots_evaluation.py | 2723ebf221f38ac2615121ce5e7b3527bfc2ae87 | [] | no_license | machanic/MetaAdvDet | ecf2a116a29864778a00ffb674dddaa87456b2d9 | e90fbdb3520c1b4000aff9e62ace1bdafad72082 | refs/heads/master | 2023-05-09T21:55:56.855095 | 2021-06-07T08:53:09 | 2021-06-07T08:53:09 | 200,833,044 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | import os
import re
from collections import defaultdict
from torch.utils.data import DataLoader
import torch
from config import PY_ROOT, IN_CHANNELS, IMAGE_SIZE
from evaluation_toolkit.evaluation import finetune_eval_task_accuracy
from networks.conv3 import Conv3
from dataset.meta_task_dataset import MetaTaskDataset
from dataset.protocol_enum import SPLIT_DATA_PROTOCOL, LOAD_TASK_MODE
from networks.resnet import resnet10, resnet18
def evaluate_shots(model_path_list, num_update,lr, protocol):
# deep learning训练是在all_in或者sampled all in下训练的,但是测试需要在task版本的dataset上做
extract_pattern_detail = re.compile(".*?DL_DET@(.*?)_(TRAIN_.*?)@model_(.*?)@data_(.*?)@epoch_(\d+)@class_(\d+)@lr_(.*?)@balance_(.*?)\.pth\.tar")
tot_num_tasks = 20000
way = 2
query = 15
result = defaultdict(dict)
assert protocol == SPLIT_DATA_PROTOCOL.TRAIN_I_TEST_II, "protocol {} is not TRAIN_I_TEST_II!".format(protocol)
for model_path in model_path_list:
ma = extract_pattern_detail.match(model_path)
dataset = ma.group(1)
if dataset == "ImageNet":
continue
file_protocol = ma.group(2)
if str(protocol) != file_protocol:
continue
balance = ma.group(8)
if balance == "True":
balance = "balance"
else:
balance = "no_balance"
print("evaluate_accuracy model :{}".format(os.path.basename(model_path)))
arch = ma.group(3)
adv_arch = ma.group(4)
if arch == "conv3":
model = Conv3(IN_CHANNELS[dataset], IMAGE_SIZE[dataset], 2)
elif arch == "resnet10":
model = resnet10(2, in_channels=IN_CHANNELS[dataset], pretrained=False)
elif arch == "resnet18":
model = resnet18(2, in_channels=IN_CHANNELS[dataset], pretrained=False)
model = model.cuda()
checkpoint = torch.load(model_path, map_location=lambda storage, location: storage)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(model_path, checkpoint['epoch']))
old_num_update = num_update
# for shot in range(16):
for shot in [0,1,5]:
if shot == 0:
shot = 1
num_update = 0
else:
num_update = old_num_update
meta_task_dataset = MetaTaskDataset(tot_num_tasks, way, shot, query,
dataset, is_train=False,
load_mode=LOAD_TASK_MODE.NO_LOAD,
protocol=protocol, no_random_way=True,adv_arch=adv_arch)
data_loader = DataLoader(meta_task_dataset, batch_size=100, shuffle=False, pin_memory=True)
evaluate_result = finetune_eval_task_accuracy(model, data_loader, lr, num_update,update_BN=False)
if num_update == 0:
shot = 0
result["{}@{}@{}".format(dataset, balance, adv_arch)][shot] = evaluate_result
return result | [
"sharpstill@163.com"
] | sharpstill@163.com |
723816392ed8356c2e70d9f3d4edd91218d0a165 | 7e8c799037f47345cb12a6fc7911610d7ac63640 | /blog/abstract_models.py | 7f69dfecd99b70a204bd19c1714589ddd861d86a | [] | no_license | SonerArslan2019/Django_Blog_kilicarslan | bcfd953dfc0d530217c02ff9bf1428160a63e68e | 8959708689b830c387c76414545add2474beeddd | refs/heads/master | 2023-06-04T06:19:28.209246 | 2021-06-27T14:50:40 | 2021-06-27T14:50:40 | 346,781,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from django.db import models
class DateAbstractModel(models.Model):
olusturulma_tarihi = models.DateTimeField(auto_now_add=True)
duzenlenme_tarihi = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
| [
"soner@arslanyapi.com.tr"
] | soner@arslanyapi.com.tr |
531b82cce7d91539b4058862d81f35c95a923676 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/134/usersdata/260/47351/submittedfiles/escadarolante.py | ccf9c82c733d4eeb20eacce5e0d0ccbcad99b4e1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
N=int(input("determine o número de pessoas na escada rolante:"))
s=0
B=N//2
m=int(input("determine o instante em que o cliente anterior sobe a escada:"))
n=int(input("determine o instante em que o cliente posterior sobe a escada:"))
if m+10-n >=0:
s=n-m+s
else:
s=s+10
for i in range (1,B-2,1):
n=int(input("determine o instante em que o cliente posterior sobe a escada:"))
if m+10-n >=0:
s=n-m+s
else:
s=s+10
m=n
print(s)
s=s+10
print(s) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
09252f08dae5f109c78667189940c3a139a3277c | d52ea2ddba48e3c9e69f16e2e671002cb8a55c9d | /example/market/req_trade_detail.py | cc67fe98c965ef27eef98344cb200dfe5aa11e9d | [
"Apache-2.0"
] | permissive | GreatWei/huobi_Python | 3bf8583d5ef042099d9f2a2a2cedfaba1b5f03bb | 7ed1cccedd2aadc61716292300bd5ba3d1d45f9f | refs/heads/master | 2023-05-01T00:39:10.497133 | 2021-05-07T16:20:32 | 2021-05-07T16:20:32 | 312,221,461 | 0 | 0 | Apache-2.0 | 2021-03-07T16:30:43 | 2020-11-12T09:03:01 | Python | UTF-8 | Python | false | false | 265 | py | from huobi.client.market import MarketClient
def callback(trade_req: 'TradeDetailReq'):
print("---- trade_event: ----")
trade_req.print_object()
print()
market_client = MarketClient()
market_client.req_trade_detail("btcusdt,eosusdt", callback)
| [
"devin0thinking@gmail.com"
] | devin0thinking@gmail.com |
1cf29dedec32bd7bf7bb668390038595047b2372 | d1797d0e70af8764b79b127fbc2015b5d030d87d | /examples/ph2co_grid_computation.py | 2b5493bdff4fc82e745b29e3cc342bd574d20f12 | [
"BSD-3-Clause"
] | permissive | keflavich/pyradex | 2cd4aa602f1005780402b5810078d76d37f95eff | c36c4652e220113cf3d0d9a41fda3ddf4ea3c11a | refs/heads/master | 2023-04-01T19:57:09.571559 | 2023-03-31T13:59:21 | 2023-03-31T13:59:21 | 11,107,526 | 14 | 13 | BSD-3-Clause | 2020-08-06T17:07:01 | 2013-07-01T21:23:04 | Python | UTF-8 | Python | false | false | 1,786 | py | """
Create some simple grids for the low-frequency para-H2CO lines
"""
import pyradex
import numpy as np
ntemp,ndens = 20,20
temperatures = np.linspace(10,50,ntemp)
densities = np.logspace(2.5,7,ndens)
abundance = 10**-8.5
opr = 0.01 # assume primarily para
fortho = opr/(1+opr)
taugrid_71M = np.empty([ndens,ntemp])
texgrid_71M = np.empty([ndens,ntemp])
fluxgrid_71M = np.empty([ndens,ntemp])
taugrid_145 = np.empty([ndens,ntemp])
texgrid_145 = np.empty([ndens,ntemp])
fluxgrid_145 = np.empty([ndens,ntemp])
taugrid_355M = np.empty([ndens,ntemp])
texgrid_355M = np.empty([ndens,ntemp])
fluxgrid_355M = np.empty([ndens,ntemp])
columngrid = np.empty([ndens,ntemp])
import os
if not os.path.exists('ph2co-h2.dat'):
import urllib
urllib.urlretrieve('http://home.strw.leidenuniv.nl/~moldata/datafiles/ph2co-h2.dat')
R = pyradex.Radex(species='ph2co-h2', abundance=abundance)
R.run_radex()
# get the table so we can look at the frequency grid
table = R.get_table()
# Target frequencies:
table[np.array([6,1,11])].pprint()
for ii,tt in enumerate(temperatures):
R.temperature = tt
for jj,dd in enumerate(densities):
R.density = {'oH2':dd*fortho,'pH2':dd*(1-fortho)}
R.abundance = abundance # reset column to the appropriate value
R.run_radex(reuse_last=False, reload_molfile=True)
TI = R.source_line_surfbrightness
taugrid_71M[jj,ii] = R.tau[6]
texgrid_71M[jj,ii] = R.tex[6].value
fluxgrid_71M[jj,ii] = TI[6].value
taugrid_145[jj,ii] = R.tau[1]
texgrid_145[jj,ii] = R.tex[1].value
fluxgrid_145[jj,ii] = TI[1].value
taugrid_355M[jj,ii] = R.tau[11]
texgrid_355M[jj,ii] = R.tex[11].value
fluxgrid_355M[jj,ii] = TI[11].value
columngrid[jj,ii] = R.column.value
| [
"keflavich@gmail.com"
] | keflavich@gmail.com |
c906c75846627e1bf93c97043c29fc3a43d60e2d | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4051/274004051.py | 623ea6ae45121ae2e1165f90702a331ebbe5128c | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,185 | py | from bots.botsconfig import *
from records004051 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'PW',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BHT', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'HL', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'TRN', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 20},
{ID: 'API', MIN: 0, MAX: 99999},
{ID: 'DEG', MIN: 0, MAX: 9},
{ID: 'IND', MIN: 0, MAX: 1},
{ID: 'LUI', MIN: 0, MAX: 9},
{ID: 'DTP', MIN: 0, MAX: 9},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99},
{ID: 'WS', MIN: 0, MAX: 99},
{ID: 'CRC', MIN: 0, MAX: 9},
{ID: 'HSD', MIN: 0, MAX: 99},
{ID: 'BCI', MIN: 0, MAX: 9},
{ID: 'PDI', MIN: 0, MAX: 1},
{ID: 'HAD', MIN: 0, MAX: 1},
{ID: 'NX1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'LQ', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 2},
{ID: 'TPB', MIN: 0, MAX: 99999},
{ID: 'DTP', MIN: 0, MAX: 9},
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'YNQ', MIN: 0, MAX: 99999},
]},
{ID: 'HPL', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTP', MIN: 0, MAX: 99999},
]},
{ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTP', MIN: 0, MAX: 9},
]},
{ID: 'EMS', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTP', MIN: 0, MAX: 9},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
7d335117801f689272938f9062c3dcf61cb44c86 | f5f771cd8600c2aeb7fc9b192d9084ec5fdf3616 | /lux/extensions/static/ui.py | 01908025b17802dafe3a932032a58b812ad359d0 | [
"BSD-3-Clause"
] | permissive | SirZazu/lux | 75fe9fde4ddaee1c9c17e55c6e6d07a289ea2f5b | d647c34d11d1172d40e16b6afaba4ee67950fb5a | refs/heads/master | 2021-01-21T19:40:46.536485 | 2015-06-02T16:30:18 | 2015-06-02T16:30:18 | 36,931,033 | 0 | 3 | null | 2015-10-09T14:08:26 | 2015-06-05T12:15:21 | Python | UTF-8 | Python | false | false | 1,575 | py | from lux.extensions.ui import *
def add_css(all):
    """Register this extension's CSS rules on *all* (the style container).

    Sets the post-image size variables, styles media-list entries and post
    images, then delegates Sphinx-doc styling to :func:`sphinx`.
    (The parameter name ``all`` shadows the builtin but is kept: callers
    pass it positionally and the module convention uses it.)
    """
    rule = all.css
    theme = all.variables
    theme.list_image_width = 140
    theme.list_image_xs_height = 150
    # Media-list entry: the anchor wraps the post body and heading.
    rule('.media-list > .media',
         rule(' > a',
              rule(' .post-body',
                   margin_left=px(theme.list_image_width + 20)),
              rule(' h3',
                   font_weight='normal',
                   color=theme.colors.gray_dark),
              text_decoration='none',
              color=theme.colors.gray))
    # Full-size post image, floated left of the body text.
    rule('.post-image',
         width=px(theme.list_image_width),
         max_height=px(theme.list_image_width),
         float='left',
         height='auto')
    # Extra-small image variant.
    rule('.post-image-xs',
         max_height=px(theme.list_image_xs_height),
         max_width=pc(90),
         width='auto')
    sphinx(all)
def sphinx(all):
    """Add CSS rules for Sphinx-generated documentation pages.

    Covers the hover-revealed headerlink anchors, viewcode links and
    docutils field-list tables.
    """
    rule = all.css
    theme = all.variables
    theme.headerlink.color = theme.colors.gray_lighter
    theme.headerlink.color_hover = theme.colors.gray_light
    # Reveal the paragraph-anchor link when hovering any heading level.
    for level in range(1, 7):
        rule('h{0}:hover > a.headerlink'.format(level),
             visibility='visible')
    rule('dt:hover > a.headerlink', visibility='visible')
    rule('a.headerlink',
         rule(':hover',
              color=theme.headerlink.color_hover),
         padding=spacing(0, 4),
         color=theme.headerlink.color,
         text_decoration='none',
         visibility='hidden')
    rule('.viewcode-link',
         float='right')
    rule('div.viewcode-block:target',
         margin=spacing(-1, -10),
         padding=spacing(0, 10))
    rule('table.docutils.field-list th',
         padding=spacing(0, 20, 0, 0))
| [
"luca.sbardella@gmail.com"
] | luca.sbardella@gmail.com |
f2464ecccf83e8fc7eb4b22c4f67dc16950a2098 | 5a5447267f9ba97bb00e26569892a8a62ce0d370 | /code/src/impex/auth/models.py | 0796ca80a7fea4ec9cafc3f6a497e043a3f600df | [] | no_license | socek/impex | d031a74a33f42344b760109fbd2dd5bec2213574 | 07bdabef51719850ecc9063cefc3e3607bfdcb94 | refs/heads/master | 2022-12-25T04:46:42.172644 | 2021-03-25T21:58:49 | 2021-03-25T21:58:49 | 37,607,725 | 0 | 0 | null | 2022-09-16T17:45:09 | 2015-06-17T16:44:39 | Python | UTF-8 | Python | false | false | 442 | py | from sqlalchemy import Boolean
from sqlalchemy import Column
from implugin.auth.models import BaseUser as VeryBaseUser
from implugin.auth.models import NotLoggedUser as BaseNotLoggedUser
from impex.application.models import Base
class BaseUser(VeryBaseUser):
    """User mixin adding the admin flag on top of implugin's base user."""

    # Database column: True for users allowed admin access; defaults False.
    is_admin = Column(Boolean(), default=False)
class NotLoggedUser(BaseUser, BaseNotLoggedUser):
    """Anonymous/guest user: never an admin.

    Overrides the ``is_admin`` Column with a plain False class attribute.
    """

    is_admin = False
class User(BaseUser, Base):
    """Concrete, database-mapped user stored in the ``users`` table."""

    __tablename__ = 'users'
| [
"msocek@gmail.com"
] | msocek@gmail.com |
53c00f598a2013b61c191015c3f29c6cb3367939 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pandas/tests/series/methods/test_item.py | 2bdeb4da5f70fafc4295beca62f7dd354ba78ec0 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 1,622 | py | """
Series.item method, mainly testing that we get python scalars as opposed to
numpy scalars.
"""
import pytest
from pandas import (
Series,
Timedelta,
Timestamp,
date_range,
)
class TestItem:
    def test_item(self):
        """Series.item returns plain Python scalars and rejects size != 1."""
        # int-backed Series -> builtin int, not np.int64
        series = Series([1])
        value = series.item()
        assert value == 1
        assert value == series.iloc[0]
        assert isinstance(value, int)

        series = Series([0.5], index=[3])
        value = series.item()
        assert isinstance(value, float)
        assert value == 0.5

        err = "can only convert an array of size 1"
        with pytest.raises(ValueError, match=err):
            Series([1, 2]).item()

        dti = date_range("2016-01-01", periods=2)
        with pytest.raises(ValueError, match=err):
            dti.item()
        with pytest.raises(ValueError, match=err):
            Series(dti).item()

        assert isinstance(dti[:1].item(), Timestamp)
        assert isinstance(Series(dti)[:1].item(), Timestamp)

        tdi = dti - dti
        with pytest.raises(ValueError, match=err):
            tdi.item()
        with pytest.raises(ValueError, match=err):
            Series(tdi).item()

        assert isinstance(tdi[:1].item(), Timedelta)
        assert isinstance(Series(tdi)[:1].item(), Timedelta)

        # Case where ser[0] would not work (non-default integer index).
        assert Series(dti, index=[5, 6])[:1].item() == dti[0]
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
71c00e850b4858890c697ad07312cff8e75b0e7d | 3d73017c944583b77be033c73f91a273b7925df3 | /src/models/networks/NeuralNetwork.py | fd175dfbb5e04973b2c2bf3650f4069913c45dd0 | [
"MIT"
] | permissive | FelixKleineBoesing/machineLearningModels | bd7b399cb3f5353292a3fa2f7a4bc62c25c91300 | 6c0530c32a03cf52fad67c13be108dd8d2283cb9 | refs/heads/master | 2020-05-23T22:18:21.185347 | 2019-06-23T19:46:53 | 2019-06-23T19:46:53 | 186,971,086 | 0 | 0 | null | 2019-06-12T06:48:12 | 2019-05-16T07:03:04 | Python | UTF-8 | Python | false | false | 5,760 | py | import numpy as np
import pandas as pd
from typing import Union, Tuple
from src.models.Model import Model
from src.models.networks.NetworkHelper import return_activation_functions
from src.models.networks.ActivationFunctions import ActivationFunction
from src.cost_functions.Cost import Cost
class NeuralNetwork(Model):
    """
    Simple dense feed-forward network trained with full-batch gradient
    descent. Layer widths come from ``neurons``, per-layer activations from
    ``activation_functions``; the loss is a ``Cost`` object.
    """
    def __init__(self, cost_function: Cost, input_shape: Tuple, neurons: list = [4, 8, 1], params: dict = None,
                 activation_functions: list = ["relu", "relu", "linear"], epochs: int = 10, learning_rate: float = 0.1,
                 verbose: bool = False):
        # NOTE: the list defaults are shared across instances; they are only
        # read here, never mutated, so this is safe.
        assert isinstance(neurons, list)
        assert isinstance(activation_functions, list)
        # Bugfix: isinstance needs the runtime type `tuple`; `typing.Tuple`
        # is not a valid isinstance target on modern Python.
        assert isinstance(input_shape, tuple)
        # Bugfix: `params` defaults to None, so the unconditional
        # `assert isinstance(params, dict)` crashed whenever it was omitted.
        assert params is None or isinstance(params, dict)
        assert isinstance(verbose, bool)
        assert len(neurons) > 0, "Number of layers must be larger than zero"
        assert len(neurons) == len(activation_functions), "Number of activation functions must be equal to length " \
                                                          "neurons list"
        assert all(isinstance(unit, int) for unit in neurons), "all number neurons must be of type integer"
        assert all([func in ["relu", "sigmoid", "tanh", "linear", "softmax"] or isinstance(func, ActivationFunction)
                    for func in activation_functions]), "Other activation functions than ReLu, sigmoid, tanh, linear " \
                                                        "or softmax are currently not supported. Supply the function " \
                                                        "itself, if you want to use another function. This function " \
                                                        "must be applicable vectorized."
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.params = params
        self.input_shape = input_shape
        self.activation_functions = return_activation_functions(activation_functions)
        self.neurons = neurons
        self.cost_function = cost_function
        self.verbose = verbose
        self.network = self._init_variables()
        super().__init__()

    def train(self, train_data: Union[pd.DataFrame, np.ndarray], train_label: Union[pd.DataFrame, np.ndarray],
              val_data: Union[pd.DataFrame, np.ndarray], val_label: Union[pd.DataFrame, np.ndarray]):
        """Run full-batch gradient descent for ``self.epochs`` epochs.

        ``val_data``/``val_label`` must either both be given or both be None.
        """
        # Bugfix: the original assert checked `val_label` twice and never
        # `val_data`, so a mismatched pair slipped through.
        assert (val_label is not None and val_data is not None) or (val_label is None and val_data is None)
        stopped = False
        epoch = 1
        # Bugfix: `epoch < self.epochs` ran only epochs-1 iterations; `<=`
        # runs exactly `self.epochs` (epoch starts at 1).
        while not stopped and epoch <= self.epochs:
            y_hat = self._forward_pass(train_data)
            if val_data is not None:
                y_hat_val = self.predict(val_data)
                val_costs = self.cost_function.compute(y_hat_val, val_label, aggregation=True)
            costs = self.cost_function.compute(y_hat, train_label, aggregation=True)
            self._backward_pass(y_hat, train_label, train_data)
            if self.verbose:
                if val_data is not None:
                    print("Epoch {}, train loss: {}, val_loss: {}".format(epoch, costs, val_costs))
                else:
                    print("Epoch {}, train loss: {}".format(epoch, costs))
            epoch += 1

    def _forward_pass(self, data: np.ndarray):
        """Propagate `data` forward, caching each layer's pre-activation in
        layer["activation"] for the backward pass."""
        arr = data
        for key, layer in self.network.items():
            activation = arr @ layer["weights"] + layer["bias"]
            arr = layer["activ"].compute(activation.copy())
            layer["activation"] = activation
        return arr

    def _backward_pass(self, y_hat: np.ndarray, y: np.ndarray, X: np.ndarray):
        """Backpropagate the cost gradient and apply one descent step."""
        weight_updates = {}
        past_layer = None
        # Walk the layers from output to input.
        for index, key in enumerate(reversed(list(self.network.keys()))):
            layer = self.network[key]
            if index > 0:
                # Final update for the layer handled in the previous
                # iteration, built from this layer's cached pre-activation.
                weight_updates[past_layer_key] = self._clipp_gradients(layer["activation"].T @ activ_gradient)
            if index == 0:
                cost_gradient = self.cost_function.first_order_gradient(y_hat, y).T
            else:
                cost_gradient = (activ_gradient @ past_layer["weights"].T)
            activ_gradient = cost_gradient * layer["activ"].first_order_gradient(layer["activation"])
            activ_gradient = _reshape_activ_gradient(activ_gradient)
            past_layer = layer
            past_layer_key = key
            # Provisional update for the current layer; overwritten on the
            # next iteration for all but the first network layer, whose
            # update correctly uses the raw input X.
            weight_updates[key] = X.T @ activ_gradient
        for key, wu in weight_updates.items():
            self.network[key]["weights"] = self.network[key]["weights"] - self.learning_rate * wu

    def predict(self, test_data: Union[pd.DataFrame, np.ndarray]):
        """Forward pass without caching activations; returns network output."""
        arr = test_data
        for key, layer in self.network.items():
            activation = arr @ layer["weights"] + layer["bias"]
            arr = layer["activ"].compute(activation)
        return arr

    def _init_variables(self):
        """Build {layer index: {weights, bias, activ}} with N(0,1) init."""
        variables = {}
        last_neuron = self.input_shape[0]
        for index, neuron in enumerate(self.neurons):
            variables[index] = {
                "weights": np.random.normal(size=(last_neuron, neuron)),
                "bias": np.random.normal(size=(1, neuron)),
                "activ": self.activation_functions[index]
            }
            last_neuron = neuron
        return variables

    def _clipp_gradients(self, arr):
        """Clip gradient entries to [-100, 100] in place (same effect as the
        original boolean-mask assignments)."""
        return np.clip(arr, -100, 100, out=arr)
def _reshape_activ_gradient(grad: np.ndarray):
if len(np.array(grad).shape) == 0:
grad = np.array([[grad], ])
if len(grad.shape) == 1:
grad = grad.reshape(grad.shape[0], 1)
return grad | [
"felix.boesing@t-online.de"
] | felix.boesing@t-online.de |
5f467abce0548100c93befdc5121dca66f697ca7 | cbb525bb66aed3d3c0364fbd8c142712c515b7b9 | /tests/test_graphics_factory.py | cebb460f308237283babfcd9e86dc30eb921f3ec | [
"MIT"
] | permissive | niki-sp/ezdxf | 5d96631cf653807b70536ce733cea7e65394acac | a6add53d3b283e5f654dcd1574417d47f8501d44 | refs/heads/master | 2021-01-23T03:13:46.079629 | 2017-03-09T04:58:27 | 2017-03-09T04:58:27 | 86,061,096 | 1 | 0 | null | 2017-03-24T11:29:44 | 2017-03-24T11:29:44 | null | UTF-8 | Python | false | false | 542 | py | import pytest
from ezdxf.graphicsfactory import copy_attribs
def test_None():
result = copy_attribs(None)
assert result == {}
def test_empty_dict():
result = copy_attribs({})
assert result == {}
def test_none_empty_dict():
dxfattribs = {'height': 1.0, 'width': 0.8}
result = copy_attribs(dxfattribs)
assert result == {'height': 1.0, 'width': 0.8}
# do not change original attribs
result['height'] = 2.0
assert dxfattribs['height'] == 1.0
# Allow running this test module directly via pytest's CLI.
if __name__ == '__main__':
    pytest.main([__file__])
| [
"mozman@gmx.at"
] | mozman@gmx.at |
afd00898603c955f937bb5eace460362205ace42 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc044/B/1693063.py | 61811238d4496d73136309db90fdc8905dc0f6b6 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def main():
R = 10**9+7
N = int(input())
a = list(map(int, input().split(" ")))
if a[0] != 0:
return 0
amax = max(a)
h = [0] * (amax+1)
for i in a:
h[i] += 1
if h[0] != 1:
return 0
ans = 1
b = 1
for i in h[1:]:
if i == 0:
return 0
ans *= pow(2, i * (i - 1) // 2, R)
ans %= R
ans *= pow(pow(2, b, R) - 1, i, R)
ans %= R
b = i
return ans
if __name__ == '__main__':
print(main()) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
615a822697c7f58c1b75af0495d0b5b28e8a1c76 | 6d1bf00636259c1a65842a8dd49ea2037218cc8d | /Login/cQube_login.py | 01bac2bf4b00079f8257901297531389d51191b5 | [] | no_license | chetandg123/Release_1.2 | efb4b0844b57638d23ac09783e0cd751893058ad | f9ff8add5930c7779ab1954c779f8f0e8cd1e908 | refs/heads/master | 2022-12-05T21:12:14.671813 | 2020-08-20T21:33:50 | 2020-08-20T21:33:50 | 288,701,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,770 | py | import time
import unittest
from Login.Invalid_credentials_check import login_test
from Login.Login_and_logout import test_logout
from Login.check_login_with_empty_inputs import login_without_inputs
from Login.login_test_with_invalid_email import login_test_for_credentials
from Login.login_to_cQube import Login_to_cQube
from Login.login_to_cqube import Login_to_cqube
from Login.login_with_invalid_credentials import login_test_invalidvalues
from Login.login_with_invalid_password import login_test_with_invalid_password
from Login.login_with_invalid_user import login_test_with_invalid_user
from Login.login_with_valid_user_and_empty_password import login_with_no_passwd
from Login.login_wrong_credentials import login_with_wrong_values
from reuse_func import GetData
class cQube_Login_Test(unittest.TestCase):
    """Selenium-driven login/logout scenarios for the cQube web application.

    NOTE(review): unittest executes test methods in alphabetical order and
    every test reuses the single browser session created in setUpClass, so
    method names and session state are coupled -- do not rename or reorder
    methods without re-checking the run order.
    """

    @classmethod
    def setUpClass(self):
        # Conventional name would be `cls`; kept as-is (doc-only change).
        # One shared driver/session for the whole class.
        self.data = GetData()
        self.driver = self.data.get_driver()
        self.data.open_cqube_appln(self.driver)
        self.data.page_loading(self.driver)

    def test_login_without_credentails(self):
        # Empty form submit must show the required-field error.
        b =login_without_inputs(self.driver)
        res =b.test_loginbtn()
        self.assertEqual(res, "This field is required", msg="Failed")
        print("login failed for without_credentails ")

    def test_login_to_cqube(self):
        # Valid credentials: page title confirms a successful login.
        b = Login_to_cqube(self.driver)
        res = b.test_login()
        self.assertEqual("cQube",self.driver.title,msg="login is not working")
        print("login to cQube application ")

    def test_invalid_inputs(self):
        b = login_test(self.driver)
        res = b.test_login()
        self.assertEqual(res,"Enter atleast 4 characters" , msg="Failed")
        print("login failed for invalid inputs ")

    def test_login_and_logout(self):
        # Round trip: login then logout; `res` unused, success == no raise.
        b =test_logout(self.driver)
        res = b.test_logoutbtn()
        print("login and logout is working")

    def test_login_not_valids(self):
        b = login_test(self.driver)
        res = b.test_login()
        self.assertEqual(res,"Enter atleast 4 characters" , msg="Failed")
        print("login failed for not_valids password ")

    def test_credentials(self):
        # Malformed email address must be rejected client-side.
        b =login_test_for_credentials(self.driver)
        res =b.test_credentials()
        self.assertEqual(res,"Invalid email address" , msg="Failed")
        print("login failed for invalid email address")

    def test_cqube_home(self):
        b =Login_to_cQube(self.driver)
        res =b.test_home()
        print("login to home of cQube application ")

    def test_invalids(self):
        b =login_test_invalidvalues(self.driver)
        res =b.test_login()
        self.assertEqual(res,"User not found" , msg="Failed")
        print("login failed for un user credentinals")

    def test_invalidpassword(self):
        b =login_test_with_invalid_password(self.driver)
        res =b.test_invalidpwd()
        self.assertEqual(res,"Enter atleast 4 characters" , msg="Failed")
        print("login failed for invalid password")

    def test_invalidpassuser(self):
        b =login_test_with_invalid_user(self.driver)
        res =b.test_invaliduser()
        self.assertEqual(res,"User not found" , msg="Failed")
        print("login failed for invalid user and passwords")

    def test_nopasswd(self):
        # Valid user but empty password field.
        b = login_with_no_passwd(self.driver)
        res = b.test_nopwd()
        self.assertEqual(res,"This field is required" , msg="Failed")
        print("login with no password ")

    def test_wrong_values(self):
        b = login_with_wrong_values(self.driver)
        res = b.test_wrongvalues()
        self.assertEqual(res,"Enter atleast 4 characters" , msg="Failed")
        print("login failed for wrong values ")

    @classmethod
    def tearDownClass(cls):
        # Close the shared browser session once all tests have run.
        cls.driver.close()
"ubuntu@ip-172-31-31-236.ap-south-1.compute.internal"
] | ubuntu@ip-172-31-31-236.ap-south-1.compute.internal |
d0ad990a6f25fe1b464a017c90b01b96e38dccd9 | 6318babade9f3b3bc21929382f384c000d46e4c7 | /alarm/websocket/ws.py | fcf00d6a5b3aae569059e773f4681fe97c32afa2 | [] | no_license | dmitriyVasilievich1986/python-alarm-api | 30a606bd2a9ecf1f1c2cc819754effc141c482cf | 77e61f591ab2b0cef81227587fcc7dd006203ffb | refs/heads/master | 2022-12-14T23:58:16.376833 | 2020-08-26T06:04:11 | 2020-08-26T06:04:11 | 290,320,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | from ..database.db_class import mysql_database
from ..variables.websocket import PORT, HOST
import websockets
import asyncio
from datetime import datetime
import json
async def index(websocket, path):
    """Check the stored alarms; for each one that is due, push it to the
    connected client as JSON and delete it from the database.

    (Docstring translated from the original Russian.)
    """
    for alarm in mysql_database.get_all_alarms():
        due_at = datetime.strptime(alarm["time"], "%Y-%m-%d %H:%M")
        if datetime.now() >= due_at:
            await websocket.send(json.dumps(alarm))
            mysql_database.delete_alarm(alarm["id"])
# Module-level server handle; the importer runs this on an asyncio loop.
start_server = websockets.serve(index, HOST, PORT)
| [
"dmitriyvasil@gmail.com"
] | dmitriyvasil@gmail.com |
6456b0581b91cd10df62d7134c6f3d032055c73c | ec480bc450df50ecf9fa41caf22b9a60e6722825 | /cowsay_app/admin.py | 9075709bd541dcc89b475e8941c076acbe42dbbb | [] | no_license | pbuzzo/cowsay_application | 4966905c15ddfbd594bff6e8c3c6cceab45040c0 | b06a6f5340f65e5f202eb253a7bd3770bfc95df3 | refs/heads/master | 2022-07-16T14:08:21.444695 | 2020-05-13T01:13:20 | 2020-05-13T01:13:20 | 263,062,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.contrib import admin
from cowsay_app.models import CowsayInput
admin.site.register(CowsayInput)
# admin.site.register(AnotherModel)
| [
"pbuzzo04@gmail.com"
] | pbuzzo04@gmail.com |
ec2a832a14e47bf74be0a2f1c24993dc5bb1331d | 4420a8b4fdc38eb9f260e604ad53955b27805431 | /test/unit/agent/common/config/abstract.py | fcc5ce13e7db19b86dcbddb5fee08995648425bf | [
"BSD-2-Clause"
] | permissive | askagirl/nginx-amplify-agent | c71bf6f67366391ed7a54f3e72a1ca662adfd5ea | 770938727c78e30db46d9d3dd1be31d1c812016e | refs/heads/master | 2021-01-23T19:17:31.442068 | 2017-09-06T01:58:49 | 2017-09-06T01:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | # -*- coding: utf-8 -*-
import os
from hamcrest import *
from test.base import WithConfigTestCase, disabled_test
from test.unit.agent.common.config.app import TestingConfig
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class AbstractConfigTestCase(WithConfigTestCase):
    """Tests for the config reader's save/reload cycle."""

    @disabled_test  # disabled because it keeps altering our etc/agent.conf.testing file
    def test_change_var_and_save(self):
        """
        Test that configreader saves new config if it doesn't exist
        """
        self.mk_test_config()
        assert_that(os.path.exists(TestingConfig.filename), equal_to(True))

        conf = TestingConfig()
        conf.save('credentials', 'uuid', '123')
        # In-memory view reflects the saved value...
        assert_that(conf['credentials']['uuid'], equal_to('123'))
        # ...and the value was persisted to disk.
        # NOTE(review): `file()` is Python 2 only; this module predates py3.
        for line in file(TestingConfig.filename).readlines():
            if 'uuid' in line:
                assert_that(line, contains_string('123'))
| [
"dedm@nginx.com"
] | dedm@nginx.com |
9929ad90ae8d3613954f4de305476d723ccbd40d | e8c9b8d0e1dbf44e9993b7c222b9439542bfaee5 | /3.集体智慧编程PCI_Code/mycode/chapter3/generatefeedvector.py | 407e560023e6b77939edc7373dcf0d6e24f71bbf | [] | no_license | ccgcyber/machine_learning | db8c0a8a3e5403ee04c54143aa6602d501ab3d0a | 6f0a3ab1bc64d7a1918fab11a930cd552ce4e79c | refs/heads/master | 2020-06-21T23:40:07.729640 | 2017-07-19T11:58:25 | 2017-07-19T11:58:25 | 94,207,382 | 0 | 0 | null | 2017-06-13T11:50:22 | 2017-06-13T11:50:22 | null | UTF-8 | Python | false | false | 1,539 | py | import feedparser
import re
# Returns title and dictionary of word counts for an RSS feed
def getwordcounts(url):
    """Parse the RSS feed at *url*; return (feed title, word->count dict)."""
    parsed = feedparser.parse(url)
    counts = {}
    for entry in parsed.entries:
        # Prefer the summary field, fall back to description.
        if 'summary' in entry:
            body = entry.summary
        else:
            body = entry.description
        # Count words from the title plus the entry body.
        for word in getwords(entry.title + ' ' + body):
            counts.setdefault(word, 0)
            counts[word] += 1
    return parsed.feed.title, counts
def getwords(html):
    """Strip HTML tags from *html* and return its lowercased alphabetic words."""
    # Remove markup, then split on runs of non-letter characters.
    text = re.sub(r'<[^>]+>', '', html)
    tokens = re.split(r'[^A-Z^a-z]+', text)
    return [token.lower() for token in tokens if token != '']
# --- script body (Python 2: uses the `print` statement and `file()`) ---

apcount={}      # word -> number of feeds where the word appears with count > 1
wordcounts={}   # blog title -> {word: count}
feedlist=[line for line in file('feedlist.txt')]
for feedurl in feedlist:
    try:
        title,wc=getwordcounts(feedurl)
        wordcounts[title]=wc
        for word,count in wc.items():
            apcount.setdefault(word,0)
            if count>1:
                apcount[word]+=1
    # NOTE(review): bare except hides real parse errors; narrow in a fix.
    except:
        print 'Failed to parse feed %s' % feedurl

# Keep only words appearing in 10%-50% of feeds (drops stopwords/rarities).
wordlist=[]
for w,bc in apcount.items():
    frac=float(bc)/len(feedlist)
    if frac>0.1 and frac<0.5:
        wordlist.append(w)

# Write the blog x word count matrix as tab-separated values.
out=file('blogdata2.txt','w')
out.write('Blog')
for word in wordlist: out.write('\t%s' % word)
out.write('\n')
for blog,wc in wordcounts.items():
    print blog
    out.write(blog)
    for word in wordlist:
        if word in wc: out.write('\t%d' % wc[word])
        else: out.write('\t0')
    out.write('\n')
| [
"543429245@qq.com"
] | 543429245@qq.com |
06ee7b231ae7f32bb1845a83cc5957b08cf2eb19 | 2b7de92334e618990b632842481f6d3089542000 | /handlers/callback.py | c8fb75233c1afca6e8a7195d294f36b92b58e376 | [] | no_license | Solomons2002/chat_helper | 2dc23c87c202e576abcc5ebc2b1a70902edc9614 | 434d4099a7ec84f2fc29952b3d5d4b4eaee3beff | refs/heads/master | 2023-02-17T04:57:31.803057 | 2021-01-17T17:44:06 | 2021-01-17T17:44:06 | 330,449,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,197 | py | from aiogram import types
from mics import dp , bot
from models import Database
import text
import markup
from re import compile
from aiogram.dispatcher.filters.builtin import CommandStart
import datetime
import config
from states import AddKey,UpdateText
db = Database()
@dp.callback_query_handler(lambda call : call.data[:8] == 'add_key_', state = AddKey.state)
async def message(query , state):
    """Start keyword entry: explain the expected format and remember the
    group name taken from the ``add_key_<name>`` callback payload."""
    # Remove the inline-keyboard message that triggered this callback.
    await bot.delete_message(
        chat_id = query.message.chat.id,
        message_id = query.message.message_id
    )
    await query.message.answer(
        text = "ℹ️Добавляйте ключевые слова вот в такое формате :\n*коды запчастей* , *запчасти*\n▪️Главное чтобы до и после запятой стоял пробел.",
        reply_markup = markup.cancel,
        parse_mode = 'Markdown'
    )
    # Everything after the 'add_key_' prefix is the keyword-group name.
    await state.update_data(name = query.data[8:])
    await AddKey.key.set()
@dp.message_handler(state = AddKey.key, text = '🚫 Отмена')
async def message( message, state):
    """Cancel keyword entry and return the user to the main menu."""
    await message.answer(
        text = '*Главное меню*',
        reply_markup = markup.main_menu,
        parse_mode = 'Markdown'
    )
    await state.finish()
@dp.message_handler(state = AddKey.key, content_types = types.ContentType.TEXT)
async def message( message, state):
    """Persist the submitted keywords (split on ' , ') under the group name
    stored earlier in the FSM state, then return to the main menu."""
    a = await state.get_data()
    for key in message.text.split(' , '):
        # Single keyword: store it and stop; multiple: store each in turn.
        if len(message.text.split(' , ')) < 2:
            await db.add_key(a['name'], key )
            break
        else:
            await db.add_key(a['name'] , key
            )
    await state.finish()
    await message.answer(
        text = '*Главное меню*',
        reply_markup = markup.main_menu,
        parse_mode = 'Markdown'
    )
@dp.callback_query_handler(lambda call : call.data[:8] == 'del_key_')
async def message(query , state):
    """Show the group's keys (callback ``del_key_<name>``) so the user can
    pick one to delete."""
    lists = await db.get_keys(query.data[8:])
    # Replace the triggering message with the key-selection keyboard.
    await bot.edit_message_text(
        chat_id = query.message.chat.id,
        message_id = query.message.message_id,
        text = 'Выберите ключ который хотите удалить',
        reply_markup = markup.get_keys_markup(lists)
    )
@dp.callback_query_handler(lambda call : call.data[:9] == 'del2_key_')
async def message(query , state):
    """Delete the selected key (callback ``del2_key_<key>``), drop the
    selection message and show the main menu again."""
    await db.delete_key(query.data[9:])
    await bot.delete_message(
        chat_id = query.message.chat.id,
        message_id = query.message.message_id
    )
    await query.message.answer(
        text = '*Главное меню*',
        reply_markup = markup.main_menu,
        parse_mode = 'Markdown'
    )
@dp.callback_query_handler(lambda call : call.data[:8] == 'up_text_', state = UpdateText.state)
async def message(query , state):
    """Begin updating a key pair's reply text: remember the pair name from
    the ``up_text_<name>`` payload and prompt for the new text."""
    await state.update_data(name = query.data[8:])
    await query.message.answer(
        text = "Введите новый текс для пары ключей",
        reply_markup = markup.cancel
    )
    await UpdateText.text.set()
| [
"myEmail@example.com"
] | myEmail@example.com |
9aaaa6f910f3c1f9ddd8080dfe2085a053fdebdd | 056863ff17af7121b0348db31bf28836ff584e4a | /42.py | 44cd067599ee65f2ca28871a9290116c4476ea79 | [] | no_license | saranya258/python | 1face016cdd52c55df6fd6493f5f2aa5bcae9212 | df2274ad72bd36b7eb8cf4a4d2360e40dc902ee0 | refs/heads/master | 2020-06-07T14:19:32.476586 | 2019-07-10T08:53:32 | 2019-07-10T08:53:32 | 193,040,334 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | name1,name2=input().split()
if(len(name1)>len(name2)):
print(name1)
elif(len(name1)==len(name2)):
print(name2)
else:
print(name2)
| [
"noreply@github.com"
] | saranya258.noreply@github.com |
3f297bb66509481a7d100cee7dfd4c3ab2302750 | 42b2883caa1100c0b8ada96f9bc5b4ca89d275ea | /api/ultimanager/migrations/0001_initial.py | 7464e53203cc6f45f2d77c8b518b807c1e931204 | [
"MIT"
] | permissive | cdriehuys/comp426-api | 302dc5b7a7781bdc0b594b3993cb6f2363737804 | 916df2f858ea684b46fc4333e50e192ab091e40c | refs/heads/master | 2021-08-28T08:11:41.705961 | 2017-12-11T16:45:12 | 2017-12-11T16:45:12 | 112,657,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-06 03:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11 makemigrations -- avoid hand-editing;
    # the operations must stay consistent with the recorded migration state.

    initial = True

    dependencies = [
        # Resolves the (possibly swapped) user model before creating Team.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, help_text="The team's logo.", null=True, upload_to='images/teams/')),
                ('name', models.CharField(help_text="The team's name.", max_length=255)),
                ('user', models.ForeignKey(help_text='The user who manages the team.', on_delete=django.db.models.deletion.CASCADE, related_name='teams', related_query_name='team', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'teams',
                'verbose_name': 'team',
            },
        ),
    ]
| [
"chathan@driehuys.com"
] | chathan@driehuys.com |
8665ebb76bd6534d419114bad03ed528667aa227 | c84a561927ff9c6712e521c3448531f4992f41fb | /BioinformaticsStronghold/SSET/sset.py | 9a24c9ce372460b951d63d2fc6b9888b0e60fa23 | [] | no_license | Meng-Gen/rosalind | 55c174005807d0fc8703e62f7358f4ed205f977d | 3cf5e0ee1536e3e762ddd5354b8da4c8d378a640 | refs/heads/master | 2020-05-13T15:47:13.504360 | 2013-12-29T12:15:27 | 2013-12-29T12:15:27 | 15,453,371 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import sys
def main():
    """Read an integer n from stdin and print 2**n mod 1,000,000."""
    n = int(sys.stdin.read().strip())
    # Three-argument pow does modular exponentiation without materialising
    # the (potentially huge) value 2**n.
    print(pow(2, n, 1000000))


if __name__ == '__main__':
    # main() returns None, so this exits with status 0 as before.
    sys.exit(main())
] | plover@gmail.com |
c991c45938fdc4759dccfd7da11fdcf11d778094 | 6652a293a5ff3e3e9f8f019e376472b971f966a6 | /sw_layer/consumers.py | 366280a49634b7279a90d822cfdd1cbed1ab01ab | [] | no_license | sohjunjie/eforce | 7448a474e619fd868f96cf2e06367ebdc28b1ada | 72c80f10afc3870d7a97ce0b68485ba5a5ddfa40 | refs/heads/master | 2022-12-14T18:11:50.951514 | 2017-11-14T11:21:09 | 2017-11-14T11:21:09 | 102,879,472 | 5 | 2 | null | 2022-12-08T00:39:47 | 2017-09-08T16:01:22 | JavaScript | UTF-8 | Python | false | false | 939 | py | from channels import Group
from channels.sessions import channel_session
from channels.auth import channel_session_user, channel_session_user_from_http
from django.contrib.auth.models import User
from eforce.settings import EF_HQ_ROLENAME
@channel_session_user_from_http
def ws_connect_efhq(message):
    """Accept the websocket and join the 'efhq' group -- HQ users only;
    anyone else is silently ignored (no accept, no group join)."""
    profile = message.user.userprofile
    if profile.is_EF_HQ_user():
        reply = message.reply_channel
        reply.send({"accept": True})
        Group('efhq').add(reply)
@channel_session_user_from_http
def ws_connect_efassets(message):
    """Accept the websocket and join the 'efassets' group -- non-HQ users
    only; HQ users are silently ignored."""
    profile = message.user.userprofile
    if not profile.is_EF_HQ_user():
        reply = message.reply_channel
        reply.send({"accept": True})
        Group('efassets').add(reply)
@channel_session_user_from_http
def ws_disconnect_hq(message):
    """Remove the closing channel from the 'efhq' group."""
    channel = message.reply_channel
    Group('efhq').discard(channel)
@channel_session_user_from_http
def ws_disconnect_efassets(message):
    """Remove the closing channel from the 'efassets' group."""
    channel = message.reply_channel
    Group('efassets').discard(channel)
| [
"junjie.soh93@gmail.com"
] | junjie.soh93@gmail.com |
e629c700cd8802477c7ff2ce63277766bc72fe78 | a960f00c6bc82178453df3aa2bfbce7d0eafae55 | /newinference.py | 4d68d53b799bf53877e51ca982126b8ed62c2c4b | [] | no_license | MathProblems/August | 882a8f4c458487b7275dc12ed4834d020eb4238a | e6c9a4cbae6013167bde81deda65e5153228b26f | refs/heads/master | 2021-01-01T17:05:00.356620 | 2015-10-23T19:09:21 | 2015-10-23T19:09:21 | 40,630,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,516 | py | import signal
import sys
import json
import jsonrpclib
import makesets
import pickle
from random import randint
from train_local import get_k_eqs
from train_local import read_parse
from train_local import read_sets
from train_local import parse_inp
from functools import reduce
sys.path.insert(0, '/Users/rikka/libsvm-3.18/python')
from svmutil import *
class StanfordNLP:
    """Thin JSON-RPC client for a locally running Stanford CoreNLP server."""

    def __init__(self, port_number=8080):
        endpoint = "http://localhost:{}".format(port_number)
        self.server = jsonrpclib.Server(endpoint)

    def parse(self, text):
        """Send *text* to the server and return the decoded JSON result."""
        raw = self.server.parse(text)
        return json.loads(raw)
# Module-level client, created at import time (needs the server running).
nlp = StanfordNLP()
def cleannum(n):
    """Strip a token down to its numeric content.

    Keeps digits, '.', and the unknown-variable marker 'x'; everything else
    (currency signs, commas, units) is dropped. The original also tested
    ``x == 'x*'``, which can never match a single character and was removed
    as dead code -- behavior is unchanged.
    """
    return ''.join(ch for ch in n if ch.isdigit() or ch in '.x')
# Loaded libsvm models, assigned in __main__: `multi` scores the +,-,*,/
# operators; `glob` scores the final '=' combination.
multi = None
glob = None
def make_eq(q,a,equations):
    """Score candidate equations for each word problem; count right/wrong.

    q         -- list of word-problem strings
    a         -- list of answer strings (aliased below but otherwise unused)
    equations -- list of problem ids used to look up parses/sets/candidates
    Returns the (right, wrong) tallies. Scoring goes through compute(),
    which relies on the module-global SVM models `multi` and `glob`.
    """
    wps = q #open(q).readlines()
    answs = a #open(a).readlines()  # kept for parity; not used below
    right = 0
    wrong = 0
    for k in range(len(wps)):
        # Candidate (label, equation, cons, guess) tuples for this problem;
        # de-duplicate by equation string.
        answers = get_k_eqs(equations[k],g=True,a=True)
        seeneq = []
        seen = []
        for x in answers:
            if x[1] not in seeneq:
                seen.append(x)
                seeneq.append(x[1])
        answers = seen
        answers = list(set(answers))
        #First preprocessing, tokenize slightly
        problem = wps[k]#.lower()
        problem = problem.strip().split(" ")
        for i,x in enumerate(problem):
            if len(x)==0:continue
            # Split trailing punctuation into its own token.
            if x[-1] in [',','.','?']:
                problem[i] = x[:-1]+" "+x[-1]
        problem = ' '.join(problem)
        problem = " " + problem + " "
        print(equations[k])
        print(problem)
        if len(answers)==0:print("0 Answers \nINCORRECT"); wrong += 1; continue
        #make story
        story = read_parse(int(equations[k]))
        #sets = makesets.makesets(story['sentences'])
        sets = read_sets(int(equations[k]))
        i = 0
        for x in sets:
            x[1].details()
        # Locate the unknown ('x') among the extracted sets.
        xidx = [i for i,x in enumerate(sets) if x[1].num=='x']
        if not xidx:
            print("NO X WHY");wrong += 1; continue
        xidx = xidx[0]  # NOTE(review): assigned but not used afterwards
        # Map cleaned number strings to their set objects.
        numlist = [(cleannum(v.num),v) for k,v in sets]
        numlist = [x for x in numlist if x[0]!='']
        allnumbs = {str(k):v for k,v in numlist}  # unused downstream
        objs = {k:(0,v) for k,v in numlist}
        print(objs.items())
        # Require the first candidate's constants to match the problem's.
        consts = [x for x in answers[0][1].split(" ") if x not in ['(',')','+','-','/','*','=',]]
        present = [x for x in consts if x in objs]
        if consts!=present: print(present,consts);print("missing thing");wrong += 1; continue
        if len([x for x in objs if x not in consts])>0: print("missing thing");wrong +=1;continue
        scores = []
        for j,eq,cons,guess in answers:
            # Only score candidates whose constants appear in problem order.
            consts = [x for x in eq.split(" ") if x not in ['(',')','+','-','/','*','=',]]
            order = int(consts==[x[0] for x in numlist])
            if order == 0: continue
            #j = randint(0,len(answers)-1)
            #eq = answers[j]
            trips = []
            #print(j,eq)
            l,r = [x.strip().split(' ') for x in eq.split('=')]
            consts = " ".join([x for x in answers[0][1].split(" ") if x not in ['(',')','+','-','/','*',]])
            consts = consts.split(" = ")
            sp = (objs[consts[0].split(" ")[-1]],objs[consts[1].split(" ")[0]])  # unused
            target = 'x'
            target = (target,objs[target])
            #find innermost parens?
            print(eq)
            sides = []
            thisscore = []
            # Reduce each side to one token, scoring every binary step.
            for i,compound in enumerate([l,r]):
                while len(compound)>1:
                    if "(" in compound:
                        # Innermost parenthesised sub-expression first.
                        rpidx = (len(compound) - 1) - compound[::-1].index('(')
                        lpidx = rpidx+compound[rpidx:].index(")")
                        subeq = compound[rpidx+1:lpidx]
                        substr = "("+''.join(subeq)+")"
                        compound = compound[:rpidx]+[substr]+compound[lpidx+1:]
                    else:
                        # No parens: fold the leftmost operand-op-operand.
                        subeq = compound[0:3]
                        substr = "("+''.join(subeq)+")"
                        compound = [substr]+compound[3:]
                    p,op,e = subeq
                    p = objs[p]
                    e = objs[e]
                    op = op.strip()
                    pute = compute(p,op,e,target,problem,story,order)
                    objs[substr]=pute
                    if pute == -1:
                        exit()
                    score,c,vals = pute
                    thisscore.append(score)
                    print(subeq,score)
                sides.append(objs[compound[0]])
            # Candidate score = product of step scores times the '=' score.
            p = sides[0]; e = sides[1]
            score = 1
            for s in thisscore: score *= s
            gscore = compute(p,'=',e,target,problem,story,order,score,cons)[0]
            print("gscore ",gscore)
            score *= gscore
            scores.append((score,j,eq,guess))
        # Label j==1 marks a gold-correct candidate.
        scores = sorted(scores,reverse=True)
        righties = [x for x in scores if x[1]==1]
        print(scores[:3])
        if not righties:
            wrong+=1
            print("TOP SCORING NO CORRECT SOLUTION \nINCORRECT")
            continue
        else:
            corr = righties[0][3]  # NOTE(review): unused
        if len(scores)>0:
            if scores[0][1]==1:
                right += 1
                print("CORRECT")
            else:
                wrong += 1
                print("INCORRECT")
        else:
            wrong += 1
            print("INCORRECT")
    return (right,wrong)
def compute(p, op, e, target, problem, story, order, score=None, cons=None):
    """Score a candidate (p, op, e) combination with the trained SVM models.

    For '=' the global model is used and the feature vector is prefixed with
    (order, score, cons); for arithmetic operators the multi-class model is
    used on the plain feature vector.

    Returns (probability assigned to `op`, combined object from
    makesets.combine, full per-operator probability vector).
    """
    if op == '=':
        features = [order, score, cons]
        features.extend(makesets.vector(p, e, problem, story, target))
        model = glob
    else:
        features = makesets.vector(p, e, problem, story, target)
        model = multi
    _label, _acc, probs = svm_predict([-1], [features], model, '-q -b 1')
    probs = probs[0]
    # Column of the probability vector corresponding to each operator
    # ('=' shares column 0, as in the original if-chain).
    column = {'+': 0, '-': 1, '*': 2, '/': 3, '=': 0}[op]
    combined = makesets.combine(p[1], e[1], op)
    return (probs[column], combined, probs)
if __name__=="__main__":
    # Usage: script.py <input-file> <multi-op-model> <global-'='-model>
    inp, mfile, gfile = sys.argv[1:4]
    # Per-operator classifier and the global equation ('=') classifier.
    multi = svm_load_model(mfile)
    glob = svm_load_model(gfile)
    #q, a = sys.argv[1:3]
    # NOTE(review): redundant -- inp was already unpacked from argv above.
    inp = sys.argv[1]
    # Fold id is taken from the last character of the input path -- TODO confirm.
    makesets.FOLD = sys.argv[1][-1]
    q,a,e = parse_inp(inp)
    right, wrong = make_eq(q,a,e)
    # NOTE(review): on Python 2 this prints a tuple and right/len(q) is
    # integer division -- confirm which interpreter is intended.
    print(right,wrong,right/len(q))
| [
"kedzior@uw.edu"
] | kedzior@uw.edu |
b82135fa8ee2499aa5b5ed2b07a245c15ab2c50f | 077215f1083e17b309eee9c7002f1c965a0ca2c5 | /第11章/创建进程.py | ba121b4c5917a561a354d5dd8a998c18e0041173 | [] | no_license | cooper-1/Pythonofspider | 8c7cd6344ca11c86d3b7a8b177d17933ff77c2d4 | b33f71c0a6c885b473207a3a78401e2fc4d2399b | refs/heads/master | 2023-08-26T07:33:43.001781 | 2021-11-05T12:55:20 | 2021-11-05T12:55:20 | 420,447,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # -*-coding: UTF-8
# @Time : 2021/10/17 19:47
# @Author : Cooper
# @FileName: 创建进程.py
# @Software: PyCharm
from multiprocessing import Process
def test(intercal):
    # Child-process entry point. `intercal` (likely a typo for "interval")
    # is accepted but never used -- TODO confirm intent.
    print('我是子进程')  # "I am the child process"
def main():
    # Spawn one child process running test(1). No join() is performed, so the
    # child's output may appear after (or interleave with) the final print.
    print('主进程开始')  # "main process starts"
    p = Process(target=test, args=(1,))
    p.start()
    print('主进程结束')  # "main process ends"
if __name__ == '__main__':
    main()
| [
"1274814498@qq.com"
] | 1274814498@qq.com |
4c35e4d6f532c3100bc86cdcb8ad37f97db8f646 | 5e1ee90de14350184bb6d4078ece4ec627165d73 | /revelation/__init__.py | 4369ae651f491010965c025fba991de90d6f007b | [
"MIT"
] | permissive | martibosch/revelation | 07866491087821c280bc2365f09b46e9cdd32886 | 7deb809b46cb3c90a98af7a846da7a81299cb3d5 | refs/heads/master | 2020-04-08T07:20:45.097324 | 2018-08-28T14:43:25 | 2018-08-28T14:43:25 | 159,135,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | # -*- coding: utf-8 -*-
"""Revelation root module with package info"""
from .app import PresentationReloader, Revelation
__author__ = "Humberto Rocha"
__email__ = "humrochagf@gmail.com"
__version__ = "0.5.2"
__all__ = ["PresentationReloader", "Revelation"]
| [
"humrochagf@gmail.com"
] | humrochagf@gmail.com |
85c5973f90f9917f7288192c1ede846725503394 | 6a7d8b67aad59c51dafdfb8bcffd53864a3d65b0 | /LeetCode/binaryTreeLevelOrderTraversal2.py | 200498802a749d657d2090e1b3f32c33ff8e0238 | [] | no_license | dicao425/algorithmExercise | 8bba36c1a08a232678e5085d24bac1dbee7e5364 | 36cb33af758b1d01da35982481a8bbfbee5c2810 | refs/heads/master | 2021-10-07T08:56:18.030583 | 2018-12-04T05:59:17 | 2018-12-04T05:59:17 | 103,611,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | #!/usr/bin/python
import sys
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Bottom-up level-order traversal of a binary tree, three ways.

    Each method returns a list of levels, deepest level first, e.g.
    [[15, 7], [9, 20], [3]].  New levels are inserted at the front so the
    result is already bottom-up when the traversal finishes.
    """

    def BFSQlevelOrderBottom(self, root):
        """Iterative BFS using a FIFO queue of (depth, node) pairs."""
        levels = []
        queue = [(0, root)]
        while queue:
            depth, node = queue.pop(0)
            if not node:
                continue
            if len(levels) < depth + 1:
                levels.insert(0, [])
            levels[-(depth + 1)].append(node.val)
            queue.append((depth + 1, node.left))
            queue.append((depth + 1, node.right))
        return levels

    def DFSSlevelOrderBottom(self, root):
        """Iterative DFS with an explicit stack; right child is pushed first
        so the left child is popped (visited) first, keeping in-level order."""
        levels = []
        stack = [(0, root)]
        while stack:
            depth, node = stack.pop()
            if not node:
                continue
            if len(levels) < depth + 1:
                levels.insert(0, [])
            levels[-(depth + 1)].append(node.val)
            stack.append((depth + 1, node.right))
            stack.append((depth + 1, node.left))
        return levels

    def levelOrderBottom(self, root):
        """Recursive DFS; levels are accumulated on self.result."""
        self.result = []
        self.dfs(0, root)
        return self.result

    def dfs(self, l, node):
        # Guard clause: an absent child ends this branch of the recursion.
        if not node:
            return
        if len(self.result) < l + 1:
            self.result.insert(0, [])
        self.result[-(l + 1)].append(node.val)
        self.dfs(l + 1, node.left)
        self.dfs(l + 1, node.right)
def main():
    """Smoke-test entry point: run the recursive traversal on an empty tree.

    Returns 0 so the exit status signals success.
    """
    solver = Solution()
    # BUG FIX: levelOrderBottom requires a root argument; the original call
    # omitted it and raised TypeError.  None represents the empty tree.
    # print(...) with a single argument behaves the same on Python 2 and 3,
    # unlike the original py2-only `print expr` statement.
    print(solver.levelOrderBottom(None))
    return 0
if __name__ == "__main__":
    sys.exit(main())
| [
"di.cao425@gmail.com"
] | di.cao425@gmail.com |
bf70b6312a0a2b58f0b34946117541aed9b0afb7 | 7a84ede765332dcce629412e835c1d074063aa2a | /ntd_duo/polarization_point.py | 8484ced91180affee6d82e9eb4ef9dd30a65881f | [] | no_license | DimitriMisiak/ethem | ec3ecbdb99c8eeeeb00abb1a2f6be1172242c7c7 | 45a56caacb02d9dd4a724273b21ff1f047f13026 | refs/heads/main | 2023-06-19T22:39:26.150774 | 2019-09-03T14:58:27 | 2019-09-03T14:58:27 | 387,753,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,526 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Illustrate the resolution of the steady staet and explore the polarization
plan according to different observables.
@author: misiak
"""
import sympy as sy
# adding ethem module path to the pythonpath
import sys
from os.path import dirname
sys.path.append( dirname(dirname(dirname(__file__))) )
import ethem as eth
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from scipy.integrate import odeint
from scipy.optimize import root
import numpy as np
from config_ntd_duo import evad, ntd, cryo
plt.close('all')
ib = 1.5e-10
tc = 0.016
#evad.update({ntd.current : ib,
# cryo.temperature : tc})
edict = evad.copy()
#edict.pop(ntd.current)
#edict.pop(cryo.temperature)
#
#phi_vect = eth.System.phi_vect
#param = [ntd.current, cryo.temperature] + list(phi_vect)
#
#eteq = eth.System.eteq
#eteq_num = eteq.subs(edict)
#eteq_list = list(eteq_num)
#eteq_fun = sy.lambdify(param, eteq_list, 'numpy')
edict_exp = edict.copy()
#edict_exp.pop(ntd.current)
#edict_exp.pop(cryo.temperature)
def ss_solve(current, temp, x0=[0.016,0.16]):
@np.vectorize
def aux_ss_solve(current, temp):
edict_exp.update({
ntd.current: current,
cryo.temperature: temp,
})
sol = eth.solve_sse(edict_exp, x0)
return tuple(sol)
return aux_ss_solve(current, temp)
i_array = 10**np.linspace(-11, -9, 50)
t_array = np.linspace(0.015, 0.020, 50)
sol_t_R = ss_solve(i_array, tc, x0=[0.01, 0.01])
sol_t_L = ss_solve(i_array, tc, x0=[0.1, 0.1])
label_list =('abso ','ntd ')
LS_list = ('-', '--')
for i in range(2):
lab = label_list[i]
LS = LS_list[i]
plt.figure('ntd temperature')
plt.plot(i_array, sol_t_R[i], color='red', label=lab+'to right', ls=LS)
plt.plot(i_array, sol_t_L[i], color='blue', label=lab+'to left', ls=LS)
plt.xscale('log')
plt.legend()
plt.grid(True, which='both')
sol_t_R = ss_solve(ib, t_array, x0=[0.01, 0.01])
sol_t_L = ss_solve(ib, t_array, x0=[0.1, 0.1])
plt.figure('temp check')
plt.plot(t_array, sol_t_R[i], color='red', label=lab+'to right', ls=LS)
plt.plot(t_array, sol_t_L[i], color='blue', label=lab+'to left', ls=LS)
plt.grid(True, which='both')
plt.legend()
# i_mesh, t_mesh = np.meshgrid(i_array, t_array)
#
# sol_mesh_R = ss_solve(i_mesh, t_mesh, x0=[0.01, 0.01])
# sol_mesh_L = ss_solve(i_mesh, t_mesh, x0=[0.1, 0.1])
#
# fig = plt.figure('ntd temperature meshplot')
# ax = plt.subplot(projection='3d')
# ax.plot_wireframe(np.log10(i_mesh), t_mesh, sol_mesh_R, color='red', alpha=0.3)
# ax.plot_wireframe(np.log10(i_mesh), t_mesh, sol_mesh_L, color='blue', alpha=0.3)
#### ntd RESISTANCE
#res = ntd.resistivity
#res_num = res.subs(evad)
#res_fun = sy.lambdify(param, res_num, 'numpy')
#
#res_R = res_fun(i_array, tc, sol_t_R)
#res_L = res_fun(i_array, tc, sol_t_L)
#
#plt.figure('ntd resistance')
#plt.plot(i_array, res_R, color='red', label='to right')
#plt.plot(i_array, res_L, color='blue', label='to left')
#plt.xscale('log')
#plt.legend()
#plt.grid(True, which='both')
#
#res_mesh_R = res_fun(i_mesh, t_mesh, sol_mesh_R)
#res_mesh_L = res_fun(i_mesh, t_mesh, sol_mesh_L)
#
#fig = plt.figure('ntd resistance meshplot')
#ax = plt.subplot(projection='3d')
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, res_mesh_R, color='red', alpha=0.3)
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, res_mesh_L, color='blue', alpha=0.3)
#
#### ntd VOLTAGE
#volt = ntd.resistivity * ntd.current
#volt_num = volt.subs(edict)
#volt_fun = sy.lambdify(param, volt_num, 'numpy')
#
#volt_R = volt_fun(i_array, tc, sol_t_R)
#volt_L = volt_fun(i_array, tc, sol_t_L)
#
#plt.figure('ntd voltage')
#plt.plot(i_array, volt_R, color='red', label='to right')
#plt.plot(i_array, volt_L, color='blue', label='to left')
#plt.xscale('log')
#plt.legend()
#plt.grid(True, which='both')
#
#volt_mesh_R = volt_fun(i_mesh, t_mesh, sol_mesh_R)
#volt_mesh_L = volt_fun(i_mesh, t_mesh, sol_mesh_L)
#
#fig = plt.figure('ntd voltage meshplot')
#ax = plt.subplot(projection='3d')
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, np.log10(volt_mesh_R), color='red', alpha=0.3)
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, np.log10(volt_mesh_L), color='blue', alpha=0.3)
#
#### TOT CONDUCTANCE
#cond = eteq.diff(ntd.temperature)[0]
#cond_num = cond.subs(edict)
#cond_fun = sy.lambdify(param, cond_num, 'numpy')
#
#cond_R = cond_fun(i_array, tc, sol_t_R)
#cond_L = cond_fun(i_array, tc, sol_t_L)
#
#plt.figure('tot conductance')
#plt.plot(i_array, cond_R, color='red', label='to right')
#plt.plot(i_array, cond_L, color='blue', label='to left')
#plt.xscale('log')
#plt.legend()
#plt.grid(True, which='both')
#
#cond_mesh_R = cond_fun(i_mesh, t_mesh, sol_mesh_R)
#cond_mesh_L = cond_fun(i_mesh, t_mesh, sol_mesh_L)
#
#fig = plt.figure('tot conductance meshplot')
#ax = plt.subplot(projection='3d')
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, cond_mesh_R, color='red', alpha=0.3)
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, cond_mesh_L, color='blue', alpha=0.3)
#
#### TOT ULTRA
#ultra = cond.diff(ntd.temperature)
#ultra_num = ultra.subs(edict)
#ultra_fun = sy.lambdify(param, ultra_num, 'numpy')
#
#ultra_R = ultra_fun(i_array, tc, sol_t_R)
#ultra_L = ultra_fun(i_array, tc, sol_t_L)
#
#plt.figure('tot ultra')
#plt.plot(i_array, ultra_R, color='red', label='to right')
#plt.plot(i_array, ultra_L, color='blue', label='to left')
#plt.xscale('log')
#plt.legend()
#plt.grid(True, which='both')
#
#ultra_mesh_R = ultra_fun(i_mesh, t_mesh, sol_mesh_R)
#ultra_mesh_L = ultra_fun(i_mesh, t_mesh, sol_mesh_L)
#
#fig = plt.figure('tot ultra meshplot')
#ax = plt.subplot(projection='3d')
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, ultra_mesh_R, color='red', alpha=0.3)
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, ultra_mesh_L, color='blue', alpha=0.3)
#### TIME CONSTANT
#import scipy.linalg as LA
#
#coup_mat = eth.System.coupling_matrix
#coup_mat_num = coup_mat.subs(edict)
#coup_mat_fun = sy.lambdify(param, coup_mat_num, 'numpy')
#
#@np.vectorize
#def tau_fun(current, temp1, temp2):
# coup_mat_eval = coup_mat_fun(current, temp1, temp2)
#
# eig, P = LA.eig(coup_mat_eval)
#
# return np.real(1./eig)
#
#tau_R = tau_fun(i_array, tc, sol_t_R)
#tau_L = tau_fun(i_array, tc, sol_t_L)
#
#plt.figure('tau')
#plt.plot(i_array, tau_R, color='red', label='to right')
#plt.plot(i_array, tau_L, color='blue', label='to left')
#plt.xscale('log')
#plt.legend()
#plt.grid(True, which='both')
#
#tau_mesh_R = tau_fun(i_mesh, t_mesh, sol_mesh_R)
#tau_mesh_L = tau_fun(i_mesh, t_mesh, sol_mesh_L)
#
#fig = plt.figure('tau meshplot')
#ax = plt.subplot(projection='3d')
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, np.log10(tau_mesh_R), color='red', alpha=0.3)
#ax.plot_wireframe(np.log10(i_mesh), t_mesh, np.log10(tau_mesh_L), color='blue', alpha=0.3)
#from corner import corner
#
#tau_fix = tau_mesh_R
#tau_fix[tau_fix<0] = 10000.
#
#AAA = np.log10(tau_fix).ravel()
#NNN = np.random.normal(size=(900,))
#
#ravel_array = (
# np.log10(i_mesh).ravel(),
# t_mesh.ravel(),
# sol_mesh_R.ravel(),
# AAA,
# cond_mesh_R.ravel(),
#)
#
#labels = ('ibias', 'tcryo', 'tntd', 'tau', 'cond')
#
#samples = np.vstack(ravel_array)
#
#fig_corner = corner(samples.T, bins=50, smooth=1,
# labels=['{}'.format(l) for l in labels],
# quantiles=[0.16, 0.5, 0.84], show_titles=True,
# title_kwargs={"fontsize": 12})
| [
"dimitrimisiak@gmail.com"
] | dimitrimisiak@gmail.com |
240993dcab9ce9b7ae903563b3ef13e2b8480f18 | 7f57c1bc457f693d1e2b482bd92180b5f619d3bb | /34-find-first-and-last-position-of-element-in-sorted-array.py | ee18b6f1b67eaf713af07db0db4804f8f8b748b1 | [] | no_license | mtianyan/LeetcodePython | 17694eed256886a075208264904ac1533df71d03 | 6880de6f92d8b6bf9223656f356831fb2b475202 | refs/heads/master | 2023-07-11T19:46:54.388781 | 2023-07-06T15:59:34 | 2023-07-06T15:59:34 | 331,367,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | class Solution:
"""
https://leetcode-cn.com/problems/find-first-and-last-position-of-element-in-sorted-array/solution/zai-pai-xu-shu-zu-zhong-cha-zhao-yuan-su-de-di-3-4/
"""
def searchRange(self, nums: List[int], target: int) -> List[int]:
left_index = self.search(nums, target, True)
right_index = self.search(nums, target, False) - 1
if left_index <= right_index and right_index < len(nums) and nums[left_index] == target and nums[
right_index] == target:
return [left_index, right_index]
else:
return [-1, -1]
def search(self, nums, target, lower):
l = 0
r = len(nums) - 1
ans = len(nums)
while l <= r:
mid = (l + r) // 2
if (nums[mid] > target) or (lower and nums[mid] >= target):
r = mid - 1
ans = mid
else:
l = mid + 1
return ans
| [
"1147727180@qq.com"
] | 1147727180@qq.com |
658cbbd17351e8d6081436042932e7e26f113ff9 | 82f1b4c0bccd66933f93d02703a3948f08ebc1a9 | /tests/pytests/unit/modules/test_win_servermanager.py | a7fb7d62a9370e0c2a7f88accd72b61eafb30d50 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | waynew/salt | ddb71301944b64f5429e0dbfeccb0ea873cdb62d | ac9f139f795295de11be3fb1490ab8cec29611e5 | refs/heads/master | 2023-01-24T10:43:53.104284 | 2022-03-29T04:27:22 | 2022-03-29T13:45:09 | 163,890,509 | 1 | 0 | Apache-2.0 | 2019-01-02T21:17:12 | 2019-01-02T21:17:11 | null | UTF-8 | Python | false | false | 2,675 | py | import pytest
import salt.modules.win_servermanager as win_servermanager
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    # Minimal loader dunders for the module under test: report osversion 6.2
    # (Windows Server 2012 kernel) so version-dependent code paths match.
    return {win_servermanager: {"__grains__": {"osversion": "6.2"}}}
def test_install():
    """
    Test win_servermanager.install
    Default path: the feature reports RestartNeeded=False, so no reboot
    is triggered and the result marks Restarted=False.
    """
    # Raw JSON payload as returned by the patched PowerShell helper.
    mock_out = {
        "Success": True,
        "RestartNeeded": 1,
        "FeatureResult": [
            {
                "Id": 338,
                "Name": "XPS-Viewer",
                "DisplayName": "XPS Viewer",
                "Success": True,
                "RestartNeeded": False,
                "Message": "",
                "SkipReason": 0,
            }
        ],
        "ExitCode": 0,
    }
    # Normalized structure install() is expected to build from mock_out.
    expected = {
        "ExitCode": 0,
        "RestartNeeded": False,
        "Restarted": False,
        "Features": {
            "XPS-Viewer": {
                "DisplayName": "XPS Viewer",
                "Message": "",
                "RestartNeeded": False,
                "SkipReason": 0,
                "Success": True,
            }
        },
        "Success": True,
    }
    mock_reboot = MagicMock(return_value=True)
    # Patch the PowerShell call and the salt reboot function so the test
    # never touches a real system.
    with patch.object(
        win_servermanager, "_pshell_json", return_value=mock_out
    ), patch.dict(win_servermanager.__salt__, {"system.reboot": mock_reboot}):
        result = win_servermanager.install("XPS-Viewer")
        assert result == expected
def test_install_restart():
    """
    Test win_servermanager.install when restart=True
    The feature reports RestartNeeded=True, so install() must call
    system.reboot exactly once and mark Restarted=True.
    """
    # Raw JSON payload as returned by the patched PowerShell helper.
    mock_out = {
        "Success": True,
        "RestartNeeded": 1,
        "FeatureResult": [
            {
                "Id": 338,
                "Name": "XPS-Viewer",
                "DisplayName": "XPS Viewer",
                "Success": True,
                "RestartNeeded": True,
                "Message": "",
                "SkipReason": 0,
            }
        ],
        "ExitCode": 0,
    }
    # Normalized structure install() is expected to build from mock_out.
    expected = {
        "ExitCode": 0,
        "RestartNeeded": True,
        "Restarted": True,
        "Features": {
            "XPS-Viewer": {
                "DisplayName": "XPS Viewer",
                "Message": "",
                "RestartNeeded": True,
                "SkipReason": 0,
                "Success": True,
            }
        },
        "Success": True,
    }
    mock_reboot = MagicMock(return_value=True)
    # Patch the PowerShell call and the salt reboot function so the test
    # never touches a real system.
    with patch.object(
        win_servermanager, "_pshell_json", return_value=mock_out
    ), patch.dict(win_servermanager.__salt__, {"system.reboot": mock_reboot}):
        result = win_servermanager.install("XPS-Viewer", restart=True)
        mock_reboot.assert_called_once()
        assert result == expected
| [
"megan.wilhite@gmail.com"
] | megan.wilhite@gmail.com |
2577c465ebd5292f365c57c906af5e59dffbb77a | 6c19e86b105cf79043fa3b0c3c04ff4ecd8f70b0 | /blog/models.py | 7b1c25b95db4f764f027b6bae06a150064307809 | [] | no_license | carmenho9498/my-first-blog | 36a8ed095c40c5e4272a0878eda56c7e0b46ff44 | 636a28b36f5febc7ab3de87a4436d1f190c2365d | refs/heads/master | 2021-01-21T04:41:50.928170 | 2016-06-10T06:44:32 | 2016-06-10T06:44:32 | 55,787,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post; unpublished until publish() sets published_date."""
    # NOTE(review): ForeignKey without on_delete is only valid on
    # Django < 2.0 -- confirm the project's Django version.
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Optional illustration for the post.
    picture_URL = models.URLField(blank=True, null=True)
    created_date = models.DateTimeField(
        default=timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to a Post; hidden until approved."""
    # NOTE(review): ForeignKey without on_delete is only valid on
    # Django < 2.0 -- confirm the project's Django version.
    post = models.ForeignKey('blog.Post', related_name='comments')
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Moderation flag; flipped by approve().
    approved_comment = models.BooleanField(default=False)
    def approve(self):
        """Mark this comment as approved and persist it."""
        self.approved_comment = True
        self.save()
    def __str__(self):
        return self.text
    def approved_comments(self):
        # NOTE(review): `comments` is the related_name from Post to its
        # Comment set, so self.comments does not exist on a Comment
        # instance -- this method looks like it belongs on Post; confirm.
        return self.comments.filter(approved_comment=True)
"you@example.com"
] | you@example.com |
c5bb5d778329befcf3a04a6a6d62dc3792921b85 | 7ae096774994e5842aaf31658d3a05fbda2de1da | /baselines/models_pytorch/roberta/convert_tf_checkpoint_to_pytorch.py | 611f6bc36a30409234931194b6e0418aa4d4697d | [] | no_license | lonePatient/chineseGLUE | a14b38598e76c938a94e8ce88003407dd7380b33 | 6c1f946828cf144388b5f75ae43ab5c1fc0a6f48 | refs/heads/master | 2020-09-02T14:18:05.608956 | 2019-11-03T02:16:44 | 2019-11-03T02:16:44 | 219,239,709 | 3 | 0 | null | 2019-11-03T02:11:38 | 2019-11-03T02:11:38 | null | UTF-8 | Python | false | false | 5,142 | py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import print_function
import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from .pytorch_modeling import BertConfig, BertForPreTraining, ALBertConfig, ALBertForPreTraining
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path, is_albert):
    """Load a TensorFlow BERT/ALBERT checkpoint, copy every trainable weight
    into the matching PyTorch module attribute, and save the state dict to
    pytorch_dump_path.

    The TF variable name (slash-separated path) is walked component by
    component to locate the corresponding attribute on the PyTorch model.
    """
    config_path = os.path.abspath(bert_config_file)
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    # Initialise PyTorch model
    if is_albert:
        config = ALBertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = ALBertForPreTraining(config)
    else:
        config = BertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = BertForPreTraining(config)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # Optimizer bookkeeping variables are not model weights -- skip them.
        if name[0] == 'global_step':
            continue
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Components like "layer_11" are split into ["layer", "11", ...]
            # so the numeric suffix can be used as a list index below.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming -> PyTorch attribute naming.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name[-13:] == '_embeddings_2':
            pointer = getattr(pointer, 'weight')
            array = np.transpose(array)
        elif m_name == 'kernel':
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default='check_points/pretrain_models/albert_large_zh/albert_model.ckpt',
                        type=str,
                        help="Path the TensorFlow checkpoint path.")
    parser.add_argument("--bert_config_file",
                        default='check_points/pretrain_models/albert_large_zh/albert_config_large.json',
                        type=str,
                        help="The config json file corresponding to the pre-trained BERT model. \n"
                             "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default='check_points/pretrain_models/albert_large_zh/pytorch_albert_model.pth',
                        type=str,
                        help="Path to the output PyTorch model.")
    # NOTE(review): argparse with type=bool is a known pitfall -- any
    # non-empty string (including "False") parses as True; confirm whether
    # a store_true flag was intended.
    parser.add_argument("--is_albert",
                        default=True,
                        type=bool,
                        help="whether is albert?")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.bert_config_file,
                                     args.pytorch_dump_path,
                                     args.is_albert)
| [
"ccjdurandal422@163.com"
] | ccjdurandal422@163.com |
f45b3829656127d4fa1216edadf0657db893a4d0 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1/CoderRaja/last.py | fc28b87667a1d2625257d8baabded7d5e60f9a43 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 211 | py | t=int(input())
# Code Jam "Last Word": for each test case, build the lexicographically
# largest word by prepending each character that is >= the current first
# character and appending the rest.  (`t`, the case count, is read above.)
p = 1
while t > 0:
    s = input()
    word = [s[0]]
    for ch in s[1:]:
        if ch >= word[0]:
            word.insert(0, ch)
        else:
            word.append(ch)
    print("Case #%d: %s" % (p, "".join(word)))
    t -= 1
    p += 1
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
b3219873cde18dd57987499aa858fc1aad95176b | cc578cec7c485e2c1060fd075ccc08eb18124345 | /cs15211/TransposeMatrix.py | 1957a2ac56596a88832a126c764902e8879dab28 | [
"Apache-2.0"
] | permissive | JulyKikuAkita/PythonPrac | 18e36bfad934a6112f727b4906a5e4b784182354 | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | refs/heads/master | 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 | Apache-2.0 | 2019-02-07T06:15:30 | 2017-05-20T18:12:53 | Python | UTF-8 | Python | false | false | 2,139 | py | __source__ = 'https://leetcode.com/problems/transpose-matrix/'
# Time: O(R*C), where R and C are the number of rows and columns in the given matrix A.
# Space: O(R*C)
#
# Description: Leetcode # 867. Transpose Matrix
#
# Given a matrix A, return the transpose of A.
#
# The transpose of a matrix is the matrix flipped over it's main diagonal,
# switching the row and column indices of the matrix.
#
#
# Example 1:
#
# Input: [[1,2,3],[4,5,6],[7,8,9]]
# Output: [[1,4,7],[2,5,8],[3,6,9]]
# Example 2:
#
# Input: [[1,2,3],[4,5,6]]
# Output: [[1,4],[2,5],[3,6]]
#
#
# Note:
#
# 1 <= A.length <= 1000
# 1 <= A[0].length <= 1000
#
import unittest
#60ms 43.14%
class Solution(object):
    """Transpose a matrix: result[c][r] = A[r][c].

    Time O(R*C), space O(R*C) for the output.
    """
    def transpose(self, A):
        """
        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        R, C = len(A), len(A[0])
        # BUG FIX: `xrange` is Python-2-only and made this file fail on
        # Python 3; `range` iterates identically here (and still works on 2).
        ans = [[None] * R for _ in range(C)]
        for r, row in enumerate(A):
            for c, val in enumerate(row):
                ans[c][r] = val
        return ans
class Solution2(object):
    def transpose(self, A):
        #Alternative Solution:
        # zip(*A) pairs the i-th element of every row, i.e. yields the columns.
        # NOTE(review): returns a list of tuples on Python 2 but a lazy zip
        # iterator on Python 3 -- confirm which interpreter is intended.
        return zip(*A)
#68ms 26.16%
class TestMethods(unittest.TestCase):
    # Placeholder suite: asserts a tautology so the file always "passes";
    # no real coverage of Solution/Solution2.
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/transpose-matrix/solution/
# 2ms 100%
class Solution {
public int[][] transpose(int[][] A) {
if (A.length == 0) return A;
int r = A.length;
int c = A[0].length;
int[][] result = new int[c][r];
for (int i = 0; i < c; i++) {
for (int j = 0; j < r; j++) {
result[i][j] = A[j][i];
}
}
return result;
}
}
# 2ms 100%
class Solution {
public int[][] transpose(int[][] A) {
int row = A.length;
int col = A[0].length;
int[][] ans = new int[col][row];
int i = 0, j = 0;
for (int[] rows: A) {
i = 0;
for (int num: rows) {
ans[i][j] = num;
i++;
}
j++;
}
return ans;
}
}
''' | [
"b92701105@gmail.com"
] | b92701105@gmail.com |
a6611601faf0005aad20ab3b4013be5268db35a9 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/android_deps/libs/com_google_android_gms_play_services_tasks/3pp/fetch.py | f03904c8215eb8baaf4d81ce21b893a6c49f4a7e | [
"MIT",
"NPL-1.1",
"MPL-1.1",
"BSD-3-Clause",
"EPL-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-android-sdk-license",
"LicenseRef-scancode-unknown-license-reference",
... | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 2,485 | py | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'com/google/android/gms'
_MODULE_NAME = 'play-services-tasks'
_FILE_EXT = 'aar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
    # 3pp hook: print the newest upstream version plus our patch suffix.
    # (File header says "generated, do not edit" -- only comments added.)
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    # 3pp hook: print a JSON partial manifest (url/name/ext) for `version`.
    # Remove the patch version when getting the download url
    version_no_patch, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = version_no_patch
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
                                                    _MODULE_NAME, version,
                                                    _FILE_EXT)
    file_name = file_url.rsplit('/', 1)[-1]
    partial_manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
    # Dispatch the 3pp subcommands: `latest` prints the version to build,
    # `get_url` prints the download manifest for $_3PP_VERSION.
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers()
    latest = sub.add_parser('latest')
    latest.set_defaults(func=lambda _opts: do_latest())
    download = sub.add_parser('get_url')
    download.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
    opts = ap.parse_args()
    opts.func(opts)
if __name__ == '__main__':
    main()
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
f333123d895a8fc638627c9937c510abb0e6a958 | 277f4e1d9c7574f7233ca716d874ea3d4f7c679a | /classification.py | 73f997d1e7608bc9de8c4047e2c7364d21cd302a | [] | no_license | MSBradshaw/Blood-Classification | 25eea7166d79075e68ec28a0e90de410c7751bfd | 03696383f713536d5b538c3b2e9a10fe91c68703 | refs/heads/master | 2020-03-31T10:15:04.220952 | 2018-10-09T21:26:52 | 2018-10-09T21:26:52 | 152,128,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | import Classification_Utils as cu
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import time
import re
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Load samples (rows become features after the transpose below).
df = pd.read_csv('blood-samples.tsv', sep='\t', index_col='id')
#imputed missing values with 0
df = df.fillna(0)
print(df.shape)
#transform data and then remove extra information not needed for training
# NOTE(review): iloc[2:-8] drops the first 2 and last 8 transposed rows --
# presumably metadata rows; confirm against the TSV layout.
clean_df = df.T.iloc[2:-8,:]
#create labels
# Label is the sample-name prefix before the first underscore (e.g. "M", "B").
extract_label = lambda x: re.sub('_.*', '', x)
labels = list(map(extract_label,clean_df.index.tolist()))
train_df, test_df, train_labels, test_labels = train_test_split(
    clean_df, labels,
    test_size=0.30, # 30% of the data held out in test set
    random_state=0, # Setting random_state ensures the same train/test split occurs each time this is run
    stratify=labels) # Maintain ratio of tissues represented in each set
train_features = train_df.columns.values.tolist()
NUM_SPLITS = 100 # number of train/test splits in cross validation
# Train each classifier with cross-validation, timing each one.
start = time.time()
knn = cu.knn_model_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("KNN Runtime:", (end - start)/60, "minutes")
start = time.time()
mlp = cu.mlp_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("MLP Runtime:", (end - start)/60, "minutes")
start = time.time()
lr = cu.logistic_regression_model_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("LR Runtime:", (end - start)/60, "minutes")
start = time.time()
gnb = cu.bayes_gaussian_model_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("Gaussian NB Runtime:", (end - start)/60, "minutes")
start = time.time()
svc = cu.SVC_model_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("SVC Runtime:", (end - start)/60, "minutes")
start = time.time()
rf = cu.randomforest_model_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("RF Runtime:", (end - start)/60, "minutes")
start = time.time()
gbc = cu.gradient_boosting_crossval(train_df, train_labels, NUM_SPLITS)
end = time.time()
print("Gradient Boosting Runtime:", (end - start)/60, "minutes")
# Score every model on the held-out test set.
# NOTE(review): the *_pred variables are computed but never used.
lr_pred = lr.predict(test_df)
lr_result = lr.score(test_df, test_labels)
rf_pred = rf.predict(test_df)
rf_result = rf.score(test_df, test_labels)
svc_pred = svc.predict(test_df)
svc_result = svc.score(test_df, test_labels)
gbc_pred = gbc.predict(test_df)
gbc_result = gbc.score(test_df, test_labels)
gnb_pred = gnb.predict(test_df)
gnb_result = gnb.score(test_df, test_labels)
knn_pred = knn.predict(test_df)
knn_result = knn.score(test_df, test_labels)
mlp_pred = mlp.predict(test_df)
mlp_result = mlp.score(test_df, test_labels)
# Accuracy of each model, in the order LR, RF, SVC, GBC, GNB, KNN, MLP.
print(lr_result)
print(rf_result)
print(svc_result)
print(gbc_result)
print(gnb_result)
print(knn_result)
print(mlp_result)
#PCA graph generation
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(clean_df)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
principalDf['label'] = labels
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1 ' + str(pca.explained_variance_ratio_[0]), fontsize = 15)
ax.set_ylabel('Principal Component 2 ' + str(pca.explained_variance_ratio_[1]), fontsize = 15)
ax.set_title('2 Component PCA', fontsize = 20)
targets = ['M', 'B']
colors = ['r', 'g']
for target, color in zip(targets,colors):
indicesToKeep = principalDf['label'] == target
ax.scatter(principalDf.loc[indicesToKeep, 'principal component 1']
, principalDf.loc[indicesToKeep, 'principal component 2']
, c = color
, s = 50)
ax.legend(targets)
ax.grid()
fig.savefig('blood-PCA-plot.png')
pca.explained_variance_ratio_[]
| [
"michaelscottbradshaw@gmail.com"
] | michaelscottbradshaw@gmail.com |
61651885517d13e74aed18e327cc33d71aafd5fc | a703f0010c56be928bc5b68a9fabe38e74429894 | /linter.py | 85a2abd2bde17b20abf9f4f0bb5036962b1cdf65 | [
"MIT"
] | permissive | michaelblyons/SublimeLinter-contrib-sublime-syntax | a92d688b769dd095c831b1aebaa8b277b2fff697 | f30b5f5f27f86436690ea37b2f5de4ff8058a37e | refs/heads/master | 2020-04-29T20:03:31.185291 | 2019-03-01T01:07:00 | 2019-03-01T01:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by FichteFoll
# Copyright (c) 2016 FichteFoll
#
# License: MIT
#
"""This module exports the SublimeSyntax plugin class."""
import contextlib
import logging
import re
import os
import sublime
import sublime_api
from SublimeLinter.lint import Linter
logger = logging.getLogger("SublimeLinter.plugin.sublime-syntax")
class SublimeSyntax(Linter):
"""Provides an interface to sublime-syntax.
This linter uses Sublime Text's internal sublime_api module
to access the syntax test running code.
Because that has rather harsh requirements,
we rely on creating temporary files in the Packages folder
in order to provide the full functionality.
"""
cmd = None # We implement a custom `run` method
regex = (
r'^[^:]+:(?P<line>\d+):((?P<col>\d+):)? '
r'(?P<message>.+)'
)
# An empty selector matches all views
defaults = {
'selector': ''
}
word_re = r'.' # only highlight a single character
@classmethod
def can_lint_view(cls, view, settings):
"""Check if file is 'lintable'."""
# Check `super` first bc it has the cheap, fast checks, e.g.
# if this linter has been disabled.
if not super().can_lint_view(view, settings):
return False
filename = view.file_name() or ''
basename = os.path.basename(filename)
# Fast path
if basename and basename.startswith("syntax_test"):
return True
# But, essentially all files can be syntax tests, if they contain
# a magic first line
first_line = view.substr(view.line(0))
match = re.match(r'^(\S*) SYNTAX TEST "([^"]*)"', first_line)
if match:
return True
return False
def run(self, cmd, code):
"""Perform linting."""
if not code:
return
# The syntax test runner only operates on resource files that the resource loader can load,
# which must reside in a "Packages" folder
# and has the restriction of only working on saved files.
with _temporary_resource_file(code, prefix="syntax_test_") as resource_path:
# Some change in ST caused the newly created file not to get picked up in time,
# so we add an artificial delay.
# This is a sucky solution,
# but I can't think of anything else.
# TOFIX Remove this hack
import time
time.sleep(0.2)
assertions, test_output_lines = sublime_api.run_syntax_test(resource_path)
logger.debug('assertions: {}'.format(assertions))
output = "\n".join(test_output_lines)
if "unable to read file" in output:
logger.error(output)
return output
##################################################
# Utility for temporary resource files
_temp_dir_name = ".temp-subsyn-lint"
_temp_path = None
def plugin_loaded():
"""Build and remove temporary path.
Required for sublime.packages_path()
because ST only "loads resources" from the Packages dir.
"""
global _temp_path
packages_path = sublime.packages_path()
_temp_path = os.path.join(packages_path, _temp_dir_name)
_remove_temp_path()
def plugin_unloaded():
"""Remove temporary path."""
# Don't block plugin unloading by not catching an exception.
# Has been fixed in 3189.
try:
_remove_temp_path()
except Exception:
import traceback
traceback.print_exc()
def _remove_temp_path():
"""Try to clean our temp dir if it exists."""
if os.path.exists(_temp_path):
if os.path.isdir(_temp_path):
def onerror(function, path, exc_info):
logger.exception("Unable to delete '%s' while cleaning up temporary directory",
path, exc_info=exc_info)
import shutil
shutil.rmtree(_temp_path, onerror=onerror)
else:
logger.warning("For some reason, '%s' is a file. Removing...", _temp_path)
os.remove(_temp_path)
@contextlib.contextmanager
def _temporary_resource_file(text, prefix='', suffix=''):
"""Create a temporary file in ST's "resource" folder, using tempfile.mkstemp.
Yields the relative resource path as a context manager
and removes it when the scope is exited.
Files are stored in a Temp folder relative to the Data folder,
which is removed afterwards if it does not contain any other files.
"""
import tempfile
# Ensure the folder exists
if not os.path.exists(_temp_path):
os.mkdir(_temp_path)
try:
fd, temp_file_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=_temp_path)
logger.debug("created temporary file at '%s'", temp_file_path)
try:
with open(fd, 'w', encoding='utf-8') as f:
f.write(text)
temp_file_resource_path = "/".join(["Packages", _temp_dir_name,
os.path.basename(temp_file_path)])
yield temp_file_resource_path
finally:
os.remove(temp_file_path)
except FileNotFoundError:
_remove_temp_path()
finally:
# And remove the folder, if it's empty.
# Otherwise wait for a "restart".
try:
os.rmdir(_temp_path)
except OSError as e:
logger.debug("unable to delete temporary folder; %s", e)
| [
"fichtefoll2@googlemail.com"
] | fichtefoll2@googlemail.com |
24508a5d8234cf25b2283156e7c91e6d7dda78d0 | 494c0de8e2655827e3ebdc7f13428f049e04807f | /meetings/migrations/0003_auto_20170418_1505.py | 246667f4325b4b432f509380c57d9476ad5cb242 | [] | no_license | ghing/publicmeetings | 793483743bb5856cff6972e34213c0d2ae293eaf | 4f4a0f52d1b07e205dce3b507ab58663f1b2ac44 | refs/heads/master | 2021-01-19T09:41:30.275851 | 2017-08-14T03:08:38 | 2017-08-14T03:08:38 | 82,145,374 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-18 15:05
from __future__ import unicode_literals
from django.db import migrations
def set_meeting_type_for_phone_facebook_meetings(apps, schema_editor):
Meeting = apps.get_model("meetings", "Meeting")
Meeting.objects.filter(location__icontains="phone")\
.update(meeting_type='telephone')
Meeting.objects.filter(location__icontains="facebook")\
.update(meeting_type='facebook')
def unset_meeting_type_for_phone_facebook_meetings(apps, schema_editor):
Meeting = apps.get_model("meetings", "Meeting")
Meeting.objects.filter(location__icontains="phone", meeting_type='telephone')\
.update(meeting_type=None)
Meeting.objects.filter(location__icontains="facebook", meeting_type='facebook')\
.update(meeting_type=None)
class Migration(migrations.Migration):
dependencies = [
('meetings', '0002_auto_20170418_1451'),
]
operations = [
migrations.RunPython(set_meeting_type_for_phone_facebook_meetings,
reverse_code=unset_meeting_type_for_phone_facebook_meetings),
]
| [
"geoffhing@gmail.com"
] | geoffhing@gmail.com |
68c1e13a597b6146ff4629da76de0be89ae0948c | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/client/notification/listcontroller.py | 0c5427eea1045238ec87a1e7252dc94af9708f14 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from gui.Scaleform.framework.entities.EventSystemEntity import EventSystemEntity
from gui.shared.events import ShowWindowEvent
from notification.BaseMessagesController import BaseMessagesController
from notification import NotificationsModel
class ListController(BaseMessagesController, EventSystemEntity):
def __init__(self, model):
BaseMessagesController.__init__(self, model)
self._model.onDisplayStateChanged += self.__displayStateChangeHandler
def __displayStateChangeHandler(self, oldState, newState, data):
if newState == NotificationsModel.LIST_STATE:
self._model.resetNotifiedMessagesCount()
self.fireEvent(ShowWindowEvent(ShowWindowEvent.SHOW_NOTIFICATIONS_LIST, {'model': self._model,
'closeCallBack': self.__listCloseHandler}))
def __listCloseHandler(self):
if self._model.getDisplayState() == NotificationsModel.LIST_STATE:
self._model.setPopupsDisplayState()
| [
"james.sweet88@googlemail.com"
] | james.sweet88@googlemail.com |
203a7c12f779e628b118f62d4db6a9c37de0ce16 | f9ff85c981942d15c65d37de107e0c5fa5e6a2ba | /pychron/processing/tasks/browser/analysis_table.py | 9bddf5ab33c9bac6effe1d2b09ec5be7daf841d6 | [
"Apache-2.0"
] | permissive | kenlchen/pychron | 0c729f1b1973b9883734007b7a318fe21669e6c1 | ffd988e27ae09fb3e8a8790d87ff611557911d07 | refs/heads/master | 2021-01-24T21:53:42.293554 | 2016-04-04T07:18:39 | 2016-04-04T07:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.action.menu_manager import MenuManager
from traits.api import HasTraits, List, Any, Str, Enum, Bool, Button, \
Event, Property, cached_property, Instance, DelegatesTo, CStr, Int
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.menu import Action
from pychron.envisage.browser.adapters import BrowserAdapter, AnalysisAdapter
from pychron.envisage.browser.base_browser_model import filter_func
from pychron.core.ui.table_configurer import AnalysisTableConfigurer
class AnalysisTable(HasTraits):
analyses = List
oanalyses = List
selected = Any
dclicked = Any
context_menu_event = Event
analysis_filter = CStr
analysis_filter_values = List
analysis_filter_comparator = Enum('=', '<', '>', '>=', '<=', 'not =', 'startswith')
analysis_filter_parameter = Str
analysis_filter_parameters = Property(List, depends_on='tabular_adapter.columns')
# omit_invalid = Bool(True)
table_configurer = Instance(AnalysisTableConfigurer)
limit = DelegatesTo('table_configurer')
omit_invalid = DelegatesTo('table_configurer')
no_update = False
scroll_to_row = Event
refresh_needed = Event
tabular_adapter = Instance(AnalysisAdapter)
append_replace_enabled = Bool(True)
def set_analyses(self, ans, tc=None, page=None, reset_page=False):
self.analyses = ans
self.oanalyses = ans
self._analysis_filter_parameter_changed(True)
def configure_analysis_table(self):
self.table_configurer.edit_traits()
# handlers
def _analysis_filter_changed(self, new):
if new:
name = self.analysis_filter_parameter
self.analyses = filter(filter_func(new, name), self.oanalyses)
else:
self.analyses = self.oanalyses
def _analysis_filter_comparator_changed(self):
self._analysis_filter_changed(self.analysis_filter)
def _analysis_filter_parameter_changed(self, new):
if new:
vs = []
p = self._get_analysis_filter_parameter()
for si in self.oanalyses:
v = getattr(si, p)
if not v in vs:
vs.append(v)
self.analysis_filter_values = vs
def _get_analysis_filter_parameter(self):
p = self.analysis_filter_parameter
return p.lower()
@cached_property
def _get_analysis_filter_parameters(self):
return dict([(ci[1], ci[0]) for ci in self.tabular_adapter.columns])
# defaults
def _table_configurer_default(self):
return AnalysisTableConfigurer(id='analysis.table',
title='Configure Analysis Table')
def _analysis_filter_parameter_default(self):
return 'record_id'
def _tabular_adapter_default(self):
adapter = AnalysisAdapter()
self.table_configurer.adapter = adapter
self.table_configurer.load()
return adapter
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
03efa495884c07f252a9f6958464a3761072cf92 | 99ca760c7d8fcbb6cbd959d38b3df7994d5fc769 | /bronze4/13866.py | f2d59a26b55550ef02a8a660dca7de454608ee27 | [] | no_license | zozni/Algo | 5ad6a37120f24ab17e25e83b63f1eb7e42adcbe5 | 94c71f1d89e3ceb403a6731b6ae58a3fd26ee12c | refs/heads/master | 2023-05-31T19:24:44.081594 | 2021-06-06T14:51:13 | 2021-06-06T14:51:13 | 330,422,271 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | # 4명의 친구가 탁구를 하고 있다. 각각은 정수형으로 표현된 스킬 레벨을 가지고 있고, 숫자가 높을수록 더 좋은 플레이어다.
# 4명의 친구는 각각 두 명씩 구성된 두 팀을 구성하려고 한다. 게임이 더 흥미롭게 하기 위해서 팀의 스킬 레벨을 최대한 가깝게 유지하기를 원한다. 팀의 스킬 레벨은 팀원의 스킬 레벨의 합계이다.
# 그들은 탁구 선수로는 탁월하지만, 수학이나 컴퓨터와 같은 다른 것들에 능숙하지 않다. 팀의 스킬 레벨이 가능한 작은 차이를 갖도록 도와주자.
a, b, c, d = map(int, input().split())
print(abs(a+b+c+d-(max(a, b, c, d))*2-(min(a, b, c, d))*2))
| [
"thekey1027@naver.com"
] | thekey1027@naver.com |
7a3dc411964cfb0a23a700e0ba408b87e6bddbbf | 35fe9e62ab96038705c3bd09147f17ca1225a84e | /a10_ansible/library/a10_rba_user.py | 8115fff111db1542ac0a1b03c5c48ce7ffdb9ad4 | [] | no_license | bmeidell/a10-ansible | 6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668 | 25fdde8d83946dadf1d5b9cebd28bc49b75be94d | refs/heads/master | 2020-03-19T08:40:57.863038 | 2018-03-27T18:25:40 | 2018-03-27T18:25:40 | 136,226,910 | 0 | 0 | null | 2018-06-05T19:45:36 | 2018-06-05T19:45:36 | null | UTF-8 | Python | false | false | 5,659 | py | #!/usr/bin/python
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_user
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
name:
description:
- Name of a user account
uuid:
description:
- uuid of the object
user-tag:
description:
- Customized tag
partition-list:
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"name","partition_list","user_tag","uuid",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
rv.update(dict(
name=dict(
type='str' , required=True
),
partition_list=dict(
type='str'
),
user_tag=dict(
type='str'
),
uuid=dict(
type='str'
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/rba/user/{name}"
f_dict = {}
f_dict["name"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/rba/user/{name}"
f_dict = {}
f_dict["name"] = module.params["name"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
if not len(requires_one_of):
return REQUIRED_VALID
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("user", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("user", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
map(run_errors.append, validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
module = AnsibleModule(argument_spec=get_argspec())
result = run_command(module)
module.exit_json(**result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | [
"mdurrant@a10networks.com"
] | mdurrant@a10networks.com |
0b8904e187a8d7815da3f0f08cb9e6d3babaa1d3 | 45d53fb565bd8c9b94cf942fd1196392b059f5ee | /config.py | 4198a8349e0b70189dd289ffd2b8768aae172b30 | [
"MIT"
] | permissive | HassanEmam/tweets_odi | e817cfa6f07912849f19714f848abe3d581e472b | f14c4c70671dade870eac4d47fe76cfa231b9ef0 | refs/heads/master | 2022-10-05T03:05:40.400796 | 2021-02-09T05:45:26 | 2021-02-09T05:45:26 | 193,499,910 | 0 | 0 | MIT | 2022-09-16T18:03:55 | 2019-06-24T12:21:13 | HTML | UTF-8 | Python | false | false | 684 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = 'do-or-do-not-there-is-no-try'
# SECRET_KEY = os.environ.get('SECRET_KEY') or 'do-or-do-not-there-is-no-try'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(Config):
"""
Development configurations
"""
DEBUG = True
SQLALCHEMY_ECHO = True
class ProductionConfig(Config):
"""
Production configurations
"""
DEBUG = False
app_config = {
'development': DevelopmentConfig,
'production': ProductionConfig
} | [
"hassan.emam@hotmail.com"
] | hassan.emam@hotmail.com |
6ccacb3c2a960d0183e2042aa8c3538b6c288ec9 | ef31f25f4de920eb3ec29236b55a675624fe1d12 | /selfhacked/iterator/stream/__init__.py | 81f49f28df9134fefdb0f72a2f9eaeb422cd460e | [] | no_license | MichaelKim0407/selfhacked-util | a7cd48d988108982a69ec1dea09fcfc45dbd029a | af30d48aa674deff89a52385efab4156007792e9 | refs/heads/master | 2023-03-15T16:23:12.931103 | 2019-03-28T19:17:27 | 2019-03-28T19:17:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | from typing import Iterator, Iterable, T_co
class Stream(Iterable[T_co]):
def _open(self):
pass
def _close(self):
pass
def _iter(self) -> Iterator[T_co]:
raise NotImplementedError # pragma: no cover
def __iter__(self):
self._open()
try:
yield from self._iter()
finally:
self._close()
def __or__(self, other) -> 'Stream':
"""
Override the `|` operator.
:param other: An iterator function, see `functional` package.
"""
return IterStream(other(self))
def __gt__(self, other):
"""
Override the `>` operator.
Call `other(self)`.
"""
return other(self)
def __call__(self) -> None:
"""
Go through the stream with a for loop without returning anything.
"""
for item in self:
pass
class IterStream(Stream[T_co]):
def __init__(self, iterable: Iterable):
self.__iter = iterable
def _iter(self):
yield from self.__iter
| [
"mkim0407@gmail.com"
] | mkim0407@gmail.com |
adbb9453e9d68cd875c93cc5c4462c5c69e7e133 | c26ee38f974ff36646a6225ea20c63d027d97622 | /easy/198.House_Robber.py | 732ccc4f8acb880d53ad6416b876c68d208b5f66 | [] | no_license | khlee12/python-leetcode | 78416d1512831eb762b3700461b8a2b492d7d5b5 | c0801d9d40d454213e29a42286d6e5fdbc3f6c3c | refs/heads/master | 2021-07-19T13:25:29.094937 | 2020-05-11T11:02:07 | 2020-05-11T11:02:07 | 158,390,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | # 198. House Robber
# https://leetcode.com/problems/house-robber/
# 动态规划
# https://www.youtube.com/watch?v=Jakbj4vaIbE
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# 不相邻数字的最大和
# max(opt(n-2)+new_num, opt(n-1))
# 动态规划解法
if nums is None or not nums:
return 0
if len(nums) == 1:
return nums[0]
opt_arr = [0]*len(nums)
opt_arr[0] = nums[0]
opt_arr[1] = max(nums[0], nums[1])
for i in range(2, len(nums)):
A = opt_arr[i-2]+nums[i]
B = opt_arr[i-1]
opt_arr[i] = max(A, B)
return opt_arr[-1]
# 递归解法
#return self.opt(nums, len(nums)-1)
# def opt(self, nums, i):
# if i==0:
# return nums[0]
# if i==1:
# return max(nums[0], nums[1])
# else:
# return max(self.opt(nums, i-2)+nums[i], self.opt(nums, i-1))
| [
"noreply@github.com"
] | khlee12.noreply@github.com |
642a28e1cea3a62ff37406233681d963c4d9cf56 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/flask-jsonrpc-master/tests/contrib/__init__.py | 2e548de766db42a28c693185bdb09fa6051ab05a | [] | no_license | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020-2020, Cenobit Technologies, Inc. http://cenobit.es/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Cenobit Technologies nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
fe12dabf32a97f0df5179017597ce2eb1db112cf | 1894481eecddb75574415c02c112699124cdfd0a | /library/books/management/commands/import_authors.py | 19b4058e2b5bd95897beb6296e4ba7b6e1bc2646 | [
"MIT"
] | permissive | diogo-alves/library-manager | 87fbe18c34b9924c651bf04a5777bbea04050c30 | 79252709b40155cc510e5443787c28a6fbe484e7 | refs/heads/main | 2023-08-23T11:40:23.066001 | 2021-11-01T00:36:24 | 2021-11-01T00:36:24 | 415,591,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | import csv
import time
from itertools import islice
from pathlib import Path
from django.core.management.base import BaseCommand, CommandError
from ...models import Author
DEFAULT_BATCH_SIZE = 100_000
class Command(BaseCommand):
help = 'Imports authors from a csv file. Expects names of authors.'
missing_args_message = (
'No csv file specified. Please provide the path to the csv file.'
)
def add_arguments(self, parser):
parser.add_argument('file', type=Path, metavar='FILE', help='Path to csv file.')
parser.add_argument(
'-b',
'--batch-size',
type=int,
default=DEFAULT_BATCH_SIZE,
help=(
'Controls how many authors can be created per query. '
f'Default value is {DEFAULT_BATCH_SIZE}.'
),
)
def handle(self, *args, **options):
start_time = time.time()
file = options.get('file')
authors = self.read_csv(file)
batch_size = options.get('batch_size')
self.save_authors(authors, batch_size)
elapsed_time = time.time() - start_time
self.stdout.write(
self.style.SUCCESS(
f'Authors were successfully imported! Finished in {elapsed_time:.0f}s.'
)
)
def read_csv(self, file):
if not file.is_file() or file.suffix.lower() != '.csv':
raise CommandError(f'The file "{file}" is not a valid csv file!')
with file.open() as csv_file:
csv_reader = csv.DictReader(csv_file)
if 'name' not in csv_reader.fieldnames:
raise CommandError(
'Invalid file format! No "name" column found in csv header.'
)
yield from self.generate_authors(csv_reader)
def generate_authors(self, reader):
for row in reader:
name = row.get('name')
yield Author(name=name)
def save_authors(self, authors, batch_size):
while True:
batch = list(islice(authors, batch_size))
if not batch:
break
Author.objects.bulk_create(batch, batch_size, ignore_conflicts=True)
| [
"diogo.alves.ti@gmail.com"
] | diogo.alves.ti@gmail.com |
913c64deecd12a2898506d4aa32ea620a7db6f91 | bc441bb06b8948288f110af63feda4e798f30225 | /patch_manager_sdk/model/tool/execution_snapshot_pb2.pyi | 4e97a20ea030f2936934180149aab915b69fa31e | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,759 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from patch_manager_sdk.model.tool.callback_pb2 import (
Callback as patch_manager_sdk___model___tool___callback_pb2___Callback,
)
from patch_manager_sdk.model.tool.extra_info_pb2 import (
ExtraInfo as patch_manager_sdk___model___tool___extra_info_pb2___ExtraInfo,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class ExecutionSnapshot(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Targets(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
targetId = ... # type: typing___Text
def __init__(self,
*,
targetId : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> ExecutionSnapshot.Targets: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ExecutionSnapshot.Targets: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"targetId",b"targetId"]) -> None: ...
class Actions(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Param(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
cmd = ... # type: typing___Text
scriptType = ... # type: typing___Text
parser = ... # type: typing___Text
param = ... # type: typing___Text
execUser = ... # type: typing___Text
timeout = ... # type: builtin___int
def __init__(self,
*,
cmd : typing___Optional[typing___Text] = None,
scriptType : typing___Optional[typing___Text] = None,
parser : typing___Optional[typing___Text] = None,
param : typing___Optional[typing___Text] = None,
execUser : typing___Optional[typing___Text] = None,
timeout : typing___Optional[builtin___int] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> ExecutionSnapshot.Actions.Param: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ExecutionSnapshot.Actions.Param: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"cmd",b"cmd",u"execUser",b"execUser",u"param",b"param",u"parser",b"parser",u"scriptType",b"scriptType",u"timeout",b"timeout"]) -> None: ...
name = ... # type: typing___Text
type = ... # type: typing___Text
action = ... # type: typing___Text
@property
def param(self) -> ExecutionSnapshot.Actions.Param: ...
def __init__(self,
*,
name : typing___Optional[typing___Text] = None,
type : typing___Optional[typing___Text] = None,
action : typing___Optional[typing___Text] = None,
param : typing___Optional[ExecutionSnapshot.Actions.Param] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> ExecutionSnapshot.Actions: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ExecutionSnapshot.Actions: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"param",b"param"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"action",b"action",u"name",b"name",u"param",b"param",u"type",b"type"]) -> None: ...
name = ... # type: typing___Text
type = ... # type: typing___Text
operation = ... # type: typing___Text
packageId = ... # type: typing___Text
versionId = ... # type: typing___Text
needNotify = ... # type: typing___Text
@property
def targets(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ExecutionSnapshot.Targets]: ...
@property
def actions(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ExecutionSnapshot.Actions]: ...
@property
def callback(self) -> patch_manager_sdk___model___tool___callback_pb2___Callback: ...
@property
def extraInfo(self) -> patch_manager_sdk___model___tool___extra_info_pb2___ExtraInfo: ...
def __init__(self,
*,
name : typing___Optional[typing___Text] = None,
type : typing___Optional[typing___Text] = None,
operation : typing___Optional[typing___Text] = None,
packageId : typing___Optional[typing___Text] = None,
versionId : typing___Optional[typing___Text] = None,
targets : typing___Optional[typing___Iterable[ExecutionSnapshot.Targets]] = None,
actions : typing___Optional[typing___Iterable[ExecutionSnapshot.Actions]] = None,
callback : typing___Optional[patch_manager_sdk___model___tool___callback_pb2___Callback] = None,
extraInfo : typing___Optional[patch_manager_sdk___model___tool___extra_info_pb2___ExtraInfo] = None,
needNotify : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> ExecutionSnapshot: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ExecutionSnapshot: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"callback",b"callback",u"extraInfo",b"extraInfo"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"actions",b"actions",u"callback",b"callback",u"extraInfo",b"extraInfo",u"name",b"name",u"needNotify",b"needNotify",u"operation",b"operation",u"packageId",b"packageId",u"targets",b"targets",u"type",b"type",u"versionId",b"versionId"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
e56ed17a7f3c541b2980e0b1932fd4ad8f4f6e24 | 805551c596dfeb7aff0efd8a5fd5f6e4dcb20ff1 | /pycoinnet/peer/tests/test_BitcoinPeer.py | 9b56f4a0bb93959f37638118c77f2a4750393a16 | [
"MIT"
] | permissive | pycoin/pycoinnet | 8d3dd5a65dd62f0fd47c097f0d5be798cf344a38 | d5f3e2c77cfd00f7ac0eb264db460fa34b57f7e4 | refs/heads/master | 2021-01-17T09:24:13.440637 | 2014-10-17T22:22:53 | 2014-10-17T22:22:53 | 25,469,574 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,700 | py | import asyncio
from pycoinnet.peer.BitcoinPeerProtocol import BitcoinPeerProtocol
from pycoinnet.peer.tests.helper import PeerTransport, MAGIC_HEADER, VERSION_MSG_BIN, VERSION_MSG, VERSION_MSG, VERSION_MSG_2, VERACK_MSG_BIN
def test_BitcoinPeerProtocol_send():
    """Outgoing messages must be serialized onto the transport, in order."""
    sent = []

    def capture(data):
        sent.append(data)

    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    peer.connection_made(PeerTransport(capture))
    # nothing is written until a message is explicitly sent
    assert sent == []

    peer.send_msg("version", **VERSION_MSG)
    assert sent[-1] == VERSION_MSG_BIN

    peer.send_msg("verack")
    assert len(sent) == 2
    assert sent[-1] == VERACK_MSG_BIN

    # a payload-less message is header + empty-payload checksum
    peer.send_msg("mempool")
    assert len(sent) == 3
    assert sent[-1] == b'foodmempool\x00\x00\x00\x00\x00\x00\x00\x00\x00]\xf6\xe0\xe2'
def test_BitcoinPeerProtocol_read():
    """Bytes fed via data_received are parsed into (name, data) messages."""
    DATA = []
    def write_f(data):
        DATA.append(data)
    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    pt = PeerTransport(write_f)
    peer.connection_made(pt)
    next_message = peer.new_get_next_message_f()
    @asyncio.coroutine
    def async_test():
        # pull two messages off the peer's incoming queue
        t = []
        name, data = yield from next_message()
        t.append((name, data))
        name, data = yield from next_message()
        t.append((name, data))
        return t
    # feed raw wire bytes before running the consumer coroutine
    peer.data_received(VERSION_MSG_BIN)
    peer.data_received(VERACK_MSG_BIN)
    t = asyncio.get_event_loop().run_until_complete(async_test())
    assert len(t) == 2
    assert t[0] == ('version', VERSION_MSG)
    assert t[1] == ('verack', {})
def test_BitcoinPeerProtocol_multiplex():
    """Every registered listener independently receives each incoming message."""
    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    pt = PeerTransport(None)
    peer.connection_made(pt)
    # 50 independent message streams over the same connection
    next_message_list = [peer.new_get_next_message_f() for i in range(50)]
    COUNT = 0
    @asyncio.coroutine
    def async_test(next_message):
        # each listener must see both messages, in order
        name, data = yield from next_message()
        assert name == 'version'
        assert data == VERSION_MSG
        name, data = yield from next_message()
        assert name == 'verack'
        assert data == {}
        nonlocal COUNT
        COUNT += 1
    peer.data_received(VERSION_MSG_BIN)
    peer.data_received(VERACK_MSG_BIN)
    asyncio.get_event_loop().run_until_complete(asyncio.wait([asyncio.Task(async_test(nm)) for nm in next_message_list]))
    # all 50 listeners completed their assertions
    assert COUNT == 50
def test_BitcoinPeerProtocol():
    """Two peers wired back-to-back complete a version/verack/getaddr exchange."""
    @asyncio.coroutine
    def do_test(peer, vm1, vm2):
        # send our version, expect the other side's version back
        next_message = peer.new_get_next_message_f()
        peer.send_msg("version", **vm1)
        message_name, data = yield from next_message()
        assert message_name == 'version'
        assert data == vm2
        peer.send_msg("verack")
        message_name, data = yield from next_message()
        assert message_name == 'verack'
        assert data == {}
        peer.send_msg("getaddr")
        message_name, data = yield from next_message()
        assert message_name == 'getaddr'
        assert data == {}
        return True
    peer1 = BitcoinPeerProtocol(MAGIC_HEADER)
    peer2 = BitcoinPeerProtocol(MAGIC_HEADER)
    # cross-wire the transports: what one peer writes, the other receives
    pt1 = PeerTransport(peer2.data_received)
    pt2 = PeerTransport(peer1.data_received)
    # connect them
    peer1.connection_made(pt1)
    peer2.connection_made(pt2)
    f1 = asyncio.Task(do_test(peer1, VERSION_MSG, VERSION_MSG_2))
    f2 = asyncio.Task(do_test(peer2, VERSION_MSG_2, VERSION_MSG))
    done, pending = asyncio.get_event_loop().run_until_complete(asyncio.wait([f1, f2]))
    for f in done:
        assert f.result() == True
def test_queue_gc():
    """Message queues of abandoned listeners are garbage collected."""
    # create a peer
    # add 50 listeners
    # receive 100 messages
    # first 10 listeners will stop listening after two messages
    # check GC
    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    pt = PeerTransport(None)
    peer.connection_made(pt)
    next_message_list = [peer.new_get_next_message_f() for i in range(50)]
    assert len(peer.message_queues) == 50
    # dropping the only references should let the queues be collected
    next_message_list = None
    assert len(peer.message_queues) == 0
    @asyncio.coroutine
    def async_listen(next_message, delay=0):
        for i in range(101):
            name, data = yield from next_message()
            yield from asyncio.sleep(delay)
    # 3 very slow listeners registered as peer tasks, plus 50 plain tasks
    for i in range(3):
        peer.add_task(async_listen(peer.new_get_next_message_f(), delay=60))
    tasks = [asyncio.Task(async_listen(peer.new_get_next_message_f(), delay=1)) for i in range(50)]
    peer.data_received(VERSION_MSG_BIN)
    for i in range(100):
        peer.data_received(VERACK_MSG_BIN)
    # give everyone a chance to run (but no one finishes)
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks, timeout=0.1))
    assert len(peer.message_queues) == 53
    # now let all 50 finish. They should be collected.
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))
    ## you would think this number would be 3, but it's not.
    ## oh well. This is close enough.
    assert len(peer.message_queues) <= 4
def test_eof():
    """connection_lost signals every listener with EOFError after the queued messages."""
    peer = BitcoinPeerProtocol(MAGIC_HEADER)
    pt = PeerTransport(None)
    peer.connection_made(pt)
    COUNT = 0
    @asyncio.coroutine
    def async_listen(next_message):
        # drain messages until the stream ends; expect exactly two before EOF
        count = 0
        try:
            while True:
                name, data = yield from next_message()
                count += 1
        except EOFError:
            pass
        assert count == 2
        nonlocal COUNT
        COUNT += 1
    tasks = [asyncio.Task(async_listen(peer.new_get_next_message_f())) for i in range(50)]
    peer.data_received(VERSION_MSG_BIN)
    peer.data_received(VERACK_MSG_BIN)
    # end of stream
    peer.connection_lost(None)
    # give everyone a chance to run (but no one finishes)
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))
    assert COUNT == 50
| [
"him@richardkiss.com"
] | him@richardkiss.com |
a8f0d9ea9b49f4eb1e351a0da175a96c3a6bdf11 | c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc | /Python 3 PS Code/Side_Project/Union Images Program_with GUI Programming, Python 3/6_checkbox.py | 49a9c0d1a628d2bb7d6d15a496703e910332fbb9 | [] | no_license | JeongHanJun/BOJ | ae6b1c64c5b3226deef2708ae447aa1225333a92 | a865624fb0a9291b68f99af8535f708554fa0b41 | refs/heads/master | 2023-03-31T02:22:58.974437 | 2021-04-02T02:43:57 | 2021-04-02T02:43:57 | 258,809,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | from tkinter import *
root = Tk()
root.title("HJ GUI")
root.geometry("640x640")

checkvar = IntVar()  # holds the checkbox state as an int
checkbox = Checkbutton(root, text="오늘 하루 보지 않기", variable=checkvar)
#checkbox.select()    # would show the checkbox pre-checked
#checkbox.deselect()  # the opposite of the above; deselected is the default
checkbox.pack()  # place the checkbox on screen

checkvar2 = IntVar()
checkbox2 = Checkbutton(root, text = "1주일동안 보지 않기",variable=checkvar2)
checkbox2.pack()

def btncmd():
    # Print the current state of both checkboxes.
    print(checkvar.get())  # 0 = unchecked, 1 = checked
    print(checkvar2.get())

btn = Button(root, text = "Click", command = btncmd)
btn.pack()

root.mainloop()
"noreply@github.com"
] | JeongHanJun.noreply@github.com |
bfe62c0e2fc135358592f635a3108871982b4dec | 1a1e6eee0b4b8eb507896d06ba70505a57053a11 | /0x00-python_variable_annotations/4-main.py | 3d1e303ce9a4d474c66abb477ded7c94b86625e5 | [] | no_license | Zaccheaus90/holbertonschool-web_back_end-1 | 4a23458e8a7632438b3f0864a73ef640753a76ed | fa0b08c37ece2510d450a8ad01d43ce48d18357b | refs/heads/main | 2023-03-15T22:31:08.566276 | 2020-12-26T00:09:43 | 2020-12-26T00:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | #!/usr/bin/env python3
# Pull the module-level variables out of 4-define_variables.
# __import__ is used because the module name starts with a digit,
# which makes a normal `import` statement a syntax error.
a = __import__('4-define_variables').a
pi = __import__('4-define_variables').pi
i_understand_annotations = __import__(
    '4-define_variables').i_understand_annotations
school = __import__('4-define_variables').school

# Show each variable's runtime type alongside its value.
print("a is a {} with a value of {}".format(type(a), a))
print("pi is a {} with a value of {}".format(type(pi), pi))
print("i_understand_annotations is a {} with a value of {}".format(
    type(i_understand_annotations), i_understand_annotations))
print("school is a {} with a value of {}".format(type(school), school))
| [
"santitoya-2001@outlook.com"
] | santitoya-2001@outlook.com |
d6f8df8012d6fdb18f21d6016dc0ebe2e8441e2a | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Facebook/Reading/Friends.py | 222c896e5b77f4014ed32c3e3b0aaf62772f98df | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,502 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# Friends
# Retrieves a list of names and profile IDs for Facebook friends associated with a specified user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Friends(Choreography):
    """Choreo wrapper for /Library/Facebook/Reading/Friends (generated SDK code)."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the Friends Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Friends, self).__init__(temboo_session, '/Library/Facebook/Reading/Friends')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return FriendsInputSet()

    def _make_result_set(self, result, path):
        # Factory for the typed result wrapper.
        return FriendsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for the execution handle of an in-flight Choreo run.
        return FriendsChoreographyExecution(session, exec_id, path)
class FriendsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Friends
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated setters: each one maps directly to a Facebook Graph API
    # query parameter of the same name.

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final OAuth step.)
        """
        super(FriendsInputSet, self)._set_input('AccessToken', value)

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
        """
        super(FriendsInputSet, self)._set_input('Fields', value)

    def set_Limit(self, value):
        """
        Set the value of the Limit input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
        """
        super(FriendsInputSet, self)._set_input('Limit', value)

    def set_Offset(self, value):
        """
        Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
        """
        super(FriendsInputSet, self)._set_input('Offset', value)

    def set_ProfileID(self, value):
        """
        Set the value of the ProfileID input for this Choreo. ((optional, string) The id of the profile to retrieve friends for. Defaults to "me" indicating the authenticated user.)
        """
        super(FriendsInputSet, self)._set_input('ProfileID', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
        """
        super(FriendsInputSet, self)._set_input('ResponseFormat', value)

    def set_Since(self, value):
        """
        Set the value of the Since input for this Choreo. ((optional, date) Used for time-based pagination. Values can be a unix timestamp or any date accepted by strtotime.)
        """
        super(FriendsInputSet, self)._set_input('Since', value)

    def set_Until(self, value):
        """
        Set the value of the Until input for this Choreo. ((optional, date) Used for time-based pagination. Values can be a unix timestamp or any date accepted by strtotime.)
        """
        super(FriendsInputSet, self)._set_input('Until', value)
class FriendsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Friends Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a raw JSON response string into Python objects.
        return json.loads(str)

    def get_HasNext(self):
        """
        Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
        """
        return self._output.get('HasNext', None)

    def get_HasPrevious(self):
        """
        Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
        """
        return self._output.get('HasPrevious', None)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
        """
        return self._output.get('Response', None)
class FriendsChoreographyExecution(ChoreographyExecution):
    """Execution handle for an in-flight Friends Choreo run (generated SDK code)."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Friends-specific result type.
        return FriendsResultSet(response, path)
| [
"shriswissfed@gmail.com"
] | shriswissfed@gmail.com |
b94a195ce2efe9376e5ee1fd5549e7ea86e7e059 | 493637ee75ab8d5e74d822f3d73407e7c17d5d97 | /framework/config.py | a9e5bc4492a808c353bae9a84b2acdaf69e5ba39 | [] | no_license | buptatx/fitch-sample | 2637bf2616e2acb06625568b72ce8f2ef6047dd3 | 7a899cb08971ba73726ff86f67519f8a36085a48 | refs/heads/master | 2020-04-28T22:33:01.581841 | 2019-03-06T06:13:01 | 2019-03-06T06:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | import os
# Target device.
# Of course, this is only an example; in a real application you can use a
# more flexible design to handle more complex situations.
TARGET_DEVICE_ID = '123456F'

PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
CASE_DIR = os.path.join(PROJECT_PATH, 'cases')

# Defaults to the `pics` folder in the same directory, but it is recommended
# to manage pictures separately from the code and keep them out of git
# (tracking images with git makes the repository very bloated).
PIC_DIR = os.path.join(PROJECT_PATH, 'pics')

# Where screenshots taken while test cases run are saved.
RUNTIME_PIC_DIR = os.path.join(PROJECT_PATH, 'runtime_pic')

# If your test cases hit unexplained problems, you can turn on these
# log switches here for debugging:
# from loguru import logger
# logger.enable('pyminitouch')
# logger.enable('fastcap')
| [
"178894043@qq.com"
] | 178894043@qq.com |
2331374d23d78ac3d771b062ca74756194e6335d | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /1.pyBoard v1.1(STM32)/3.拓展实验/4.以太网模块/3.MQTT通信/2.订阅者(subscribe)/main.py | 6d5b20c3db507bbe9742cf9939cd366236f0748f | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 1,496 | py | '''
实验名称:W5500以太网模块MQTT通信
版本:v1.0
日期:2019.11
作者:01Studio
说明:通过Socket编程实现pyBoard+W5500以太网模MQTT通信 订阅者(subscribe)。
'''
import network,pyb,time
from machine import I2C,Pin
from ssd1306 import SSD1306_I2C
from simple import MQTTClient
#初始化OLED
i2c = I2C(sda=Pin('Y10'), scl=Pin('Y9'))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#初始化以太网模块
nic = network.WIZNET5K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4)
nic.active(True)
nic.ifconfig('dhcp')
#设置MQTT回调函数,有信息时候执行
def MQTT_callback(topic, msg):
print('topic: {}'.format(topic))
print('msg: {}'.format(msg))
#判断网络是否连接成功
if nic.isconnected():
print(nic.ifconfig()) #打印IP信息
#OLED数据显示
oled.fill(0) #清屏背景黑色
oled.text('IP/Subnet/GW:',0,0)
oled.text(nic.ifconfig()[0], 0, 20)
oled.text(nic.ifconfig()[1],0,38)
oled.text(nic.ifconfig()[2],0,56)
oled.show()
SERVER = 'mqtt.p2hp.com'
PORT = 1883
CLIENT_ID = '01Studio-pyBoard' # 客户端ID
TOPIC = '/public/01Studio/1' # TOPIC名称
client = MQTTClient(CLIENT_ID, SERVER, PORT)
client.set_callback(MQTT_callback) #配置回调函数
client.connect()
client.subscribe(TOPIC) #订阅主题
while (True):
client.check_msg() #检测是否收到信息,收到则执行回调函数打印。
time.sleep_ms(300) #接收间隔
| [
"237827161@qq.com"
] | 237827161@qq.com |
0a06c9d10907805d9afa0daa753e4cf88a0cc183 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_gremlin_graph_migrate_to_autoscale.py | fb7e3c1b535bd8689d573bded60de3e7e3e56d8b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,689 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_gremlin_graph_migrate_to_autoscale.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Authenticate using ambient Azure credentials (environment variables,
    # managed identity, CLI login, ...) via DefaultAzureCredential.
    client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # Start the long-running migration of the Gremlin graph's throughput to
    # autoscale and block until the operation completes.
    response = client.gremlin_resources.begin_migrate_gremlin_graph_to_autoscale(
        resource_group_name="rg1",
        account_name="ddb1",
        database_name="databaseName",
        graph_name="graphName",
    ).result()
    print(response)


# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2023-03-15-preview/examples/CosmosDBGremlinGraphMigrateToAutoscale.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f87b8a8de758020635f5c1519d78d435c6d91247 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/com_adobe_cq_wcm_jobs_async_impl_async_page_move_config_provider_service_info.py | 2044f69011b92470db658b5634e45c30a23d7454 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 7,672 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Deferred import, injected into module globals, to break the circular
    # dependency between generated model modules.
    from swaggeraemosgi.model.com_adobe_cq_wcm_jobs_async_impl_async_page_move_config_provider_service_properties import ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceProperties
    globals()['ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceProperties'] = ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceProperties
class ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceInfo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted or validated attributes for this model.
    allowed_values = {
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'pid': (str,),  # noqa: E501
            'title': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
            'properties': (ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceProperties,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model has no polymorphic discriminator.
        return None

    # Maps Python attribute names to their JSON keys (identical here).
    attribute_map = {
        'pid': 'pid',  # noqa: E501
        'title': 'title',  # noqa: E501
        'description': 'description',  # noqa: E501
        'properties': 'properties',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that must never be treated as model fields.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceInfo - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            pid (str): [optional]  # noqa: E501
            title (str): [optional]  # noqa: E501
            description (str): [optional]  # noqa: E501
            properties (ComAdobeCqWcmJobsAsyncImplAsyncPageMoveConfigProviderServiceProperties): [optional]  # noqa: E501
        """

        # Pop framework-control kwargs before treating the rest as model fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Model fields may only be passed by keyword.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
684c06392f33a274b09daec76ddf411a3bf1ad9a | 249c7081a766318360da21c66e7a30f917c90738 | /aula 16.py | d4e74de6a5f29a80e1dc638de60ce6aa0f4a2f3a | [] | no_license | Danlei27/PycharmProjects | b4d93a966b45c84f206498faa60c36f8b356c5a9 | abedd9911d7a28f64366f4ea69de86ed16d39534 | refs/heads/master | 2020-05-30T10:32:55.793721 | 2019-06-01T00:33:27 | 2019-06-01T00:33:27 | 189,675,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | lanche = ('Hambúrguer')
lanche = ('Suco')  # NOTE: no comma, so this is just the string 'Suco', not a tuple
print(lanche)
lanche2 = ('Hambúrguer','Suco','Pizza','Pudim')
print(lanche2[3])
print(lanche2[-4])
print(lanche2[1:3])  # the end index of a slice is exclusive (ignored)!
print(lanche2[2:])
print(lanche2[:2])  # the end index of a slice is exclusive (ignored)!
print(lanche2[-2:])
print(lanche2[-3:])
print(lanche2)
# tuples are immutable — the next line would raise a TypeError
#lanche2[1] = 'Refrigerante'
#print(lanche[1])
print(len(lanche2))
for comida in lanche2:
    print(f'Eu vou comer {comida}')
# for over the tuple's items directly
print('Comi de mais!')
for cont in range(0,len(lanche2)):
    print(lanche2[cont])
print('comi muito!')
# for with range + enumerate variants
for pos, comida in enumerate(lanche2):
    print(f'Vou comer {comida} na posisão {pos}')
for cont in range(0, len(lanche2)):
    print(f'Vou comer {lanche2[cont]}na posição {cont}')
print(sorted(lanche2))
print(lanche2)
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b  # tuple concatenation produces a new tuple
print(c)
print(len(c))
print(c.count(5))
print(c.count(9))
print(c.index(8))
print(c.index(2, 1))  # second argument is the start offset for the search
pessoa = ('Danlei', 24, 'M', 75.0)
print(pessoa)
del(pessoa)
# tuples are immutable, but the name binding can be deleted
print(pessoa)  # NOTE: raises NameError — 'pessoa' no longer exists after del (classroom demo)
| [
"danleisantos@hotmail.com"
] | danleisantos@hotmail.com |
8fd6ed362d5bf73497bcd9062ee6aa1fc22fa0a7 | c130a094e04eb448201ca2ab8ed4fe56cd1d80bc | /samples/openapi3/client/petstore/python-experimental/petstore_api/model/enum_arrays.py | a3fb56284bdb0fca9cfff6477099bbc9a59c49ff | [
"Apache-2.0"
] | permissive | janweinschenker/openapi-generator | 83fb57f9a5a94e548e9353cbf289f4b4172a724e | 2d927a738b1758c2213464e10985ee5124a091c6 | refs/heads/master | 2022-02-01T17:22:05.604745 | 2022-01-19T10:43:39 | 2022-01-19T10:43:39 | 221,860,152 | 1 | 0 | Apache-2.0 | 2019-11-15T06:36:25 | 2019-11-15T06:36:24 | null | UTF-8 | Python | false | false | 3,064 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class EnumArrays(
    DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Enum-valued string property: only ">=" and "$" are accepted.
    class just_symbol(
        _SchemaEnumMaker(
            enum_value_to_name={
                ">=": "GREATER_THAN_EQUALS",
                "$": "DOLLAR",
            }
        ),
        StrSchema
    ):

        @classmethod
        @property
        def GREATER_THAN_EQUALS(cls):
            return cls._enum_by_value[">="](">=")

        @classmethod
        @property
        def DOLLAR(cls):
            return cls._enum_by_value["$"]("$")

    # List property whose items are restricted to the "fish"/"crab" enum.
    class array_enum(
        ListSchema
    ):

        class _items(
            _SchemaEnumMaker(
                enum_value_to_name={
                    "fish": "FISH",
                    "crab": "CRAB",
                }
            ),
            StrSchema
        ):

            @classmethod
            @property
            def FISH(cls):
                return cls._enum_by_value["fish"]("fish")

            @classmethod
            @property
            def CRAB(cls):
                return cls._enum_by_value["crab"]("crab")

    def __new__(
        cls,
        *args: typing.Union[dict, frozendict, ],
        just_symbol: typing.Union[just_symbol, Unset] = unset,
        array_enum: typing.Union[array_enum, Unset] = unset,
        _instantiation_metadata: typing.Optional[InstantiationMetadata] = None,
        **kwargs: typing.Type[Schema],
    ) -> 'EnumArrays':
        # Delegate validation/construction to the DictSchema machinery.
        return super().__new__(
            cls,
            *args,
            just_symbol=just_symbol,
            array_enum=array_enum,
            _instantiation_metadata=_instantiation_metadata,
            **kwargs,
        )
| [
"noreply@github.com"
] | janweinschenker.noreply@github.com |
71f68d07002122dc75183d4ab1fe221324ffc9c5 | 452b8b849e080cda5a26f4018cafa5a674ff7c20 | /froide/frontpage/admin.py | 2c41dbd38fd38ef4552adec08ca22579de6830ad | [
"MIT"
] | permissive | okffi/tietopyynto | 1262dcaf748c41b49be4a774be552fc75fc9b336 | 66b7e7dbf3c3395d79af3da85b3b58f01fad62dc | refs/heads/tietopyynto | 2021-01-17T21:07:04.829856 | 2016-10-30T19:26:53 | 2016-10-30T19:26:53 | 14,255,294 | 3 | 2 | MIT | 2021-01-05T11:51:18 | 2013-11-09T10:19:16 | Python | UTF-8 | Python | false | false | 408 | py | from django.contrib import admin
from froide.frontpage.models import FeaturedRequest
# Django admin configuration for FeaturedRequest entries.
class FeaturedRequestAdmin(admin.ModelAdmin):
    # Columns shown in the changelist view.
    list_display = ('request', 'title', 'user', 'timestamp',)
    # Search own title plus the title of the related request.
    search_fields = ['title', 'request__title']
    # Newest entries first.
    ordering = ('-timestamp',)
    date_hierarchy = 'timestamp'
    # Raw-id widgets avoid loading every request/user into a dropdown.
    raw_id_fields = ('request', 'user',)
admin.site.register(FeaturedRequest, FeaturedRequestAdmin)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
3483a074af1e08f1db72083cf409003993eed0fa | 3681c00144bbcad8b91b452c96627a9c179a98d7 | /my/books/kobo.py | 7b70a795621d1fd255fd12462f228c1ee4c9d42d | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | jhermann/HPI | 159485bf2971665e1128294330e4fad300ec053a | a60c30868b285f233caf65a59d3496082fb9d5c2 | refs/heads/master | 2021-04-11T12:11:50.363338 | 2020-03-17T23:09:06 | 2020-03-17T23:09:06 | 249,017,683 | 0 | 0 | MIT | 2020-03-21T16:39:46 | 2020-03-21T16:39:45 | null | UTF-8 | Python | false | false | 1,114 | py | """
Kobo e-ink reader: annotations and reading stats
"""
from typing import Callable, Union, List
from mycfg import paths
from mycfg.repos.kobuddy.src.kobuddy import *
# hmm, explicit imports make pylint a bit happier..
from mycfg.repos.kobuddy.src.kobuddy import Highlight, set_databases, get_highlights
set_databases(paths.kobuddy.export_dir)
# TODO maybe type over T?
_Predicate = Callable[[str], bool]
Predicatish = Union[str, _Predicate]
def from_predicatish(p: Predicatish) -> _Predicate:
    """Coerce *p* into a predicate.

    A plain string becomes an equality test against that string; a callable
    is returned unchanged.
    """
    if not isinstance(p, str):
        return p
    return lambda candidate: candidate == p
def by_annotation(predicatish: Predicatish, **kwargs) -> List[Highlight]:
    """Return the highlights whose annotation satisfies *predicatish*.

    *predicatish* may be a predicate callable or a plain string (matched by
    equality). Extra keyword arguments are forwarded to get_highlights().
    """
    matches = from_predicatish(predicatish)
    return [hl for hl in get_highlights(**kwargs) if matches(hl.annotation)]
def get_todos():
    """Return highlights whose annotation contains the word 'todo' (any case)."""
    def with_todo(ann):
        text = '' if ann is None else ann
        return 'todo' in text.lower().split()
    return by_annotation(with_todo)
def test_todos():
    """Sanity check: the export is expected to hold more than three TODOs."""
    assert len(get_todos()) > 3
| [
"karlicoss@gmail.com"
] | karlicoss@gmail.com |
76a621f44efb5853a3c365a63265c9260e0b0689 | 3fbcff56de2da0827d14983d3b6d58d4270846f9 | /kobe/data/download.py | 1f44fef3eebc9e7336c981b6739cf015b29dd544 | [
"MIT"
] | permissive | THUDM/KOBE | 3f15aa910debe4e3a221ef8dae88c65897b83a23 | b424ab9bd868b2c13f5d78b96c8431163dea7400 | refs/heads/master | 2023-01-13T01:56:40.592019 | 2023-01-04T19:15:14 | 2023-01-04T19:15:14 | 178,392,931 | 228 | 65 | MIT | 2019-07-11T08:26:07 | 2019-03-29T11:26:17 | Python | UTF-8 | Python | false | false | 4,763 | py | import hashlib
import os
import shutil
import time
from urllib.request import urlopen
import gdown
import requests
import tqdm
def download(url, path, fname, redownload=False):
    """
    Downloads file using `requests`. If ``redownload`` is set to false, then
    will not download tar file again if it is present (default ``True``).

    Downloads to ``path/fname`` via a ``.part`` temp file so that an
    interrupted transfer can be resumed with an HTTP Range request.
    Retries up to 5 times on connection errors with exponential backoff.
    Raises RuntimeWarning when retries are exhausted or fewer bytes than
    Content-Length were received.
    """
    outfile = os.path.join(path, fname)
    download = not os.path.isfile(outfile) or redownload
    print("[ downloading: " + url + " to " + outfile + " ]")
    retry = 5
    # Backoff schedule indexed by remaining retries: [16, 8, 4, 2, 1] seconds.
    exp_backoff = [2 ** r for r in reversed(range(retry))]
    pbar = tqdm.tqdm(unit="B", unit_scale=True, desc="Downloading {}".format(fname))
    while download and retry >= 0:
        # Resume from the partial file if a previous attempt left one behind.
        resume_file = outfile + ".part"
        resume = os.path.isfile(resume_file)
        if resume:
            resume_pos = os.path.getsize(resume_file)
            mode = "ab"
        else:
            resume_pos = 0
            mode = "wb"
        response = None
        with requests.Session() as session:
            try:
                header = (
                    {"Range": "bytes=%d-" % resume_pos, "Accept-Encoding": "identity"}
                    if resume
                    else {}
                )
                response = session.get(url, stream=True, timeout=5, headers=header)
                # negative reply could be 'none' or just missing
                # (server refuses ranges -> restart from scratch)
                if resume and response.headers.get("Accept-Ranges", "none") == "none":
                    resume_pos = 0
                    mode = "wb"
                CHUNK_SIZE = 32768
                total_size = int(response.headers.get("Content-Length", -1))
                # server returns remaining size if resuming, so adjust total
                total_size += resume_pos
                pbar.total = total_size
                done = resume_pos
                with open(resume_file, mode) as f:
                    for chunk in response.iter_content(CHUNK_SIZE):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                            if total_size > 0:
                                done += len(chunk)
                                if total_size < done:
                                    # don't freak out if content-length was too small
                                    total_size = done
                                    pbar.total = total_size
                                pbar.update(len(chunk))
                break
            except requests.exceptions.ConnectionError:
                retry -= 1
                pbar.clear()
                if retry >= 0:
                    print("Connection error, retrying. (%d retries left)" % retry)
                    time.sleep(exp_backoff[retry])
                else:
                    print("Retried too many times, stopped retrying.")
            finally:
                if response:
                    response.close()
    if retry < 0:
        raise RuntimeWarning("Connection broken too many times. Stopped retrying.")
    if download and retry > 0:
        # NOTE(review): `done`/`total_size` are bound inside the try block; if
        # the very first request failed before assignment but a later retry
        # succeeded they are fine, yet the coupling is fragile — verify.
        pbar.update(done - pbar.n)
        if done < total_size:
            raise RuntimeWarning(
                "Received less data than specified in "
                + "Content-Length header for "
                + url
                + "."
                + " There may be a download problem."
            )
        # Atomically promote the completed .part file to the final name.
        move(resume_file, outfile)
    pbar.close()
def move(path1, path2):
    """Move (rename) ``path1`` to ``path2``.

    Returns the final destination path as reported by ``shutil.move`` so
    callers can learn where the file actually landed (e.g. when ``path2``
    is a directory). Backward compatible: previous version returned None
    and existing callers ignore the return value.
    """
    return shutil.move(path1, path2)
def untar(path, fname, deleteTar=True):
    """
    Unpack the archive at ``path/fname`` into ``path``; unless ``deleteTar``
    is False, remove the archive file afterwards. Any format supported by
    ``shutil.unpack_archive`` (zip, tar, tar.gz, ...) works, despite the name.
    """
    print("unpacking " + fname)
    archive_path = os.path.join(path, fname)
    shutil.unpack_archive(archive_path, path)
    if deleteTar:
        os.remove(archive_path)
def test_google():
    """Return True when https://www.google.com is reachable within ~1 second.

    Used to decide between the Google Drive mirror and the fallback mirror.
    """
    try:
        urlopen("https://www.google.com/", timeout=1)
    except Exception:
        return False
    return True
# Archive name, its expected checksum, and the two download mirrors.
FNAME = "saved.zip"
MD5 = "9924fb8ac6d32fc797499f226e0e9908"
CN_URL = "https://cloud.tsinghua.edu.cn/f/06f64ae627ec404db300/?dl=1"
URL = "https://drive.google.com/uc?id=1NOhv8pvC8IGwt8oRoIZ-A0EojJBZcolr"
if __name__ == "__main__":
    if test_google():
        # gdown verifies the MD5 and extracts the archive in one step.
        gdown.cached_download(URL, FNAME, md5=MD5, postprocess=gdown.extractall)
        os.remove(FNAME)
    else:
        # If Google is blocked, download from Tsinghua Cloud
        download(CN_URL, ".", FNAME)
        # Verify integrity manually since this path bypasses gdown.
        md5 = hashlib.md5(open(FNAME, "rb").read()).hexdigest()
        print(f"Downloaded MD5 = {md5}; Required MD5 = {MD5}")
        if md5 != MD5:
            raise Exception(
                "MD5 doesn't match; please remove saved.zip and rerun the script."
            )
        untar(".", FNAME)
| [
"chenqibin422@gmail.com"
] | chenqibin422@gmail.com |
20b24b9c0b67641688c4fd4d598fbf2a2ea7d663 | 7ee884fd83ab4b907e9b4d042964fc504e3009d2 | /neo/Core/Size.py | 22e3a427b7bca15a40a4f11057c7dcc8902537c7 | [
"MIT"
] | permissive | SharedMocha/neo-python | 4d3e589d6d053d261a07ccf42c29d320dd91d288 | 81759fb77d41768b38dbabcc4828321d98ac7a8c | refs/heads/master | 2020-03-23T19:02:52.421551 | 2018-07-31T14:22:21 | 2018-07-31T14:22:21 | 141,950,599 | 0 | 0 | null | 2018-07-23T02:22:57 | 2018-07-23T02:22:57 | null | UTF-8 | Python | false | false | 2,455 | py | from enum import IntEnum, Enum
from collections import Iterable
from neocore.IO.Mixins import SerializableMixin
from neocore.UIntBase import UIntBase
"""
This helper class is intended to help resolve the correct calculation of network serializable objects.
The result of `ctypes.sizeof` is not equivalent to C# or what we expect. See https://github.com/CityOfZion/neo-python/pull/418#issuecomment-389803377
for more discussion on the topic.
"""
class Size(IntEnum):
    """
    Explicit bytes of memory consumed
    """
    uint8 = 1
    uint16 = 2
    uint32 = 4
    uint64 = 8
    uint160 = 20
    uint256 = 32
def GetVarSize(value):
    """Return the network-serialized size in bytes of ``value``.

    Mirrors the C# GetVarSize helpers: ints use the variable-length prefix
    encoding (1, 3, or 5 bytes depending on magnitude); strings count the
    prefix plus their UTF-8 byte length; iterables count the prefix plus the
    sum of their elements' sizes.
    """
    if isinstance(value, str):
        # string -> length prefix + UTF-8 payload
        utf8_len = len(value.encode('utf-8'))
        return GetVarSize(utf8_len) + utf8_len
    if isinstance(value, int):
        # variable-length integer prefix
        if value < 0xFD:
            return Size.uint8
        if value <= 0xFFFF:
            return Size.uint8 + Size.uint16
        return Size.uint8 + Size.uint32
    if not isinstance(value, Iterable):
        raise Exception("[NOT SUPPORTED] Unexpected value type {} for GetVarSize()".format(type(value)))
    element_count = len(value)
    payload_size = 0
    if element_count > 0:
        head = value[0]
        if isinstance(head, SerializableMixin):
            if isinstance(head, UIntBase):
                # UIntBase exposes Size as a property, not a method
                payload_size = sum(map(lambda t: t.Size, value))
            else:
                payload_size = sum(map(lambda t: t.Size(), value))
        elif isinstance(head, Enum):
            # All core Enums serialize as a single byte each
            payload_size = element_count * Size.uint8
        elif isinstance(value, (bytes, bytearray)):
            payload_size = element_count * Size.uint8
        else:
            raise Exception("Can not accurately determine size of objects that do not inherit from 'SerializableMixin' or 'Enum'")
    return GetVarSize(element_count) + payload_size
| [
"tasaunders@gmail.com"
] | tasaunders@gmail.com |
3c051238fc51178bc2add4d9b82eca10212bb861 | 83e8a9746545652147efcbf56e3997c308fa4a79 | /zvt/domain/trader_info.py | d9a2b183f94b9465ab4cc3cc194019070475080c | [
"MIT"
] | permissive | baihuashu225/zvt | 5038c35239f8c458d48c28850f93755c68086f86 | b84d30447d48fbe09981c1fb388b3e118103980f | refs/heads/master | 2023-02-24T11:11:23.456569 | 2021-01-18T11:22:44 | 2021-01-18T11:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | # -*- coding: utf-8 -*-
from sqlalchemy import Column, String, DateTime, Boolean, Float, Integer, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from zvt.contract import Mixin
from zvt.contract.register import register_schema
from zvt.utils import to_string
TraderBase = declarative_base()
# Trader (bot) configuration record: how a trading bot instance was set up.
class TraderInfo(TraderBase, Mixin):
    __tablename__ = 'trader_info'
    # trader (bot) name
    trader_name = Column(String(length=128))
    # traded entities, stored as delimited strings
    entity_ids = Column(String(length=1024))
    entity_type = Column(String(length=128))
    exchanges = Column(String(length=128))
    codes = Column(String(length=128))
    # backtest/trading window
    start_timestamp = Column(DateTime)
    end_timestamp = Column(DateTime)
    provider = Column(String(length=32))
    # kdata (candlestick) level, e.g. 1d/1h
    level = Column(String(length=32))
    real_time = Column(Boolean)
    kdata_use_begin_time = Column(Boolean)
    kdata_adjust_type = Column(String(length=32))
# account stats of every day
@to_string
class AccountStats(TraderBase, Mixin):
    __tablename__ = 'account_stats'
    # capital initially put in
    input_money = Column(Float)
    # trader (bot) name
    trader_name = Column(String(length=128))
    # available cash
    cash = Column(Float)
    # detailed positions held on this day
    positions = relationship("Position", back_populates="account_stats")
    # market value of positions
    value = Column(Float)
    # market value + cash
    all_value = Column(Float)
    # profit/loss
    profit = Column(Float)
    # profit/loss ratio
    profit_rate = Column(Float)
    # True when computed at market close
    closing = Column(Boolean)
# the position for specific entity of every day
class Position(TraderBase, Mixin):
    __tablename__ = 'position'
    # trader (bot) name
    trader_name = Column(String(length=128))
    # owning account-stats record
    account_stats_id = Column(Integer, ForeignKey('account_stats.id'))
    account_stats = relationship("AccountStats", back_populates="positions")
    # long amount held
    long_amount = Column(Float)
    # long amount available to close
    available_long = Column(Float)
    # average long entry price
    average_long_price = Column(Float)
    # short amount held
    short_amount = Column(Float)
    # short amount available to close
    available_short = Column(Float)
    # average short entry price
    average_short_price = Column(Float)
    # profit/loss
    profit = Column(Float)
    # profit/loss ratio
    profit_rate = Column(Float)
    # market value, or margin occupied (for simplicity, always 100%)
    value = Column(Float)
    # settlement type (0 = T+0, 1 = T+1)
    trading_t = Column(Integer)
# a placed order
class Order(TraderBase, Mixin):
    __tablename__ = 'order'
    # trader (bot) name
    trader_name = Column(String(length=128))
    # order price
    order_price = Column(Float)
    # order amount
    order_amount = Column(Float)
    # order type
    order_type = Column(String(length=64))
    # order status
    status = Column(String(length=64))
    # level of the selector/factor that generated this order
    level = Column(String(length=32))
# Register all models above under the 'trader_info' database for provider 'zvt'.
register_schema(providers=['zvt'], db_name='trader_info', schema_base=TraderBase)
# the __all__ is generated
__all__ = ['TraderInfo', 'AccountStats', 'Position', 'Order']
| [
"5533061@qq.com"
] | 5533061@qq.com |
64cb7a2efbec8e6f32550eb26f4fc11286196254 | 963b0c14043ca94e4c3be1562c496fd868dd813a | /compounds/migrations/0039_auto_20171026_0347.py | 3b32e7bbefa2776292e8618d1d273b90a0afebfc | [] | no_license | jianping-grp/yatcm | 7a0111a4c98de8f649cfe754b178add6b6d224d8 | 7d8e4cd18ccc27f2ae6b86018553d1a4608b8e1b | refs/heads/master | 2020-05-25T07:24:09.833547 | 2017-12-23T07:55:29 | 2017-12-23T07:55:29 | 115,178,789 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-26 03:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('compounds', '0038_target'),
]
operations = [
migrations.RemoveField(
model_name='target',
name='compounds',
),
migrations.RemoveField(
model_name='target',
name='related_diseases',
),
migrations.RemoveField(
model_name='target',
name='related_drugs',
),
migrations.DeleteModel(
name='Target',
),
]
| [
"libaiqing11@163.com"
] | libaiqing11@163.com |
2c9eca2460033dacd0d540b82dbc376a17de3b5b | c582efcb41f04c77f4fa7d204c109c22e27e93e2 | /vaccine 2021/day3/concurrency/three.py | 5d8e12f8a9721773d0e8a99e45659d8a4a49d2c5 | [] | no_license | shobhit-nigam/infineon | dddeaa4b212eb67ce0f130ff1d8d58dbd0dacbdf | b2830abc7485428b1aeba7abd13bfb18528ce61b | refs/heads/master | 2021-06-22T13:17:52.886845 | 2021-02-26T12:04:29 | 2021-02-26T12:04:29 | 195,349,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import time
import threading
def taska():
    """Count down 9 seconds, announcing each remaining second."""
    remaining = 9
    while remaining > 0:
        print("task A left with", remaining, "seconds")
        time.sleep(1)
        remaining -= 1
def taskb():
    """Count down 6 seconds, announcing each remaining second."""
    remaining = 6
    while remaining > 0:
        print("task B left with", remaining, "seconds")
        time.sleep(1)
        remaining -= 1
def taskc():
    """Count down 9 seconds, announcing each remaining second."""
    remaining = 9
    while remaining > 0:
        print("task C left with", remaining, "seconds")
        time.sleep(1)
        remaining -= 1
# Demo: run two countdowns concurrently with the main thread's own countdown.
ta = threading.Thread(target=taska)
tb = threading.Thread(target=taskb)
tc = threading.Thread(target=taskc)
#tc.start()
tb.start()
ta.start()
# Main thread does its own 3-second countdown while A and B run.
for i in range(3, 0, -1):
    print("main left with", i, "seconds")
    time.sleep(1)
#ta.join()
# Wait for task B only; task A may still be printing after this point.
tb.join()
# NOTE(review): message says "two.py" but this file appears to be three.py —
# likely a copy-paste leftover; confirm before changing the output text.
print("last line of two.py")
| [
"noreply@github.com"
] | shobhit-nigam.noreply@github.com |
e4ebd7e8e58696737152e2cccdb4d84e3a71ea6e | 4c9580b2e09e2b000e27a1c9021b12cf2747f56a | /chapter05/chapter05_example02/chapter05_example02/apps/goods/migrations/0001_initial.py | f7e277748f6319dd920746b4d2276211085146be | [] | no_license | jzplyy/xiaoyue_mall | 69072c0657a6878a4cf799b8c8218cc7d88c8d12 | 4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc | refs/heads/master | 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # Generated by Django 2.2 on 2020-11-04 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名字')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='价格')),
('stock', models.IntegerField(default=0, verbose_name='库存')),
('sales', models.IntegerField(default=0, verbose_name='销量')),
],
options={
'verbose_name': '商品',
'verbose_name_plural': '商品',
'db_table': 'tb_goods',
},
),
]
| [
"jzplyy@126.com"
] | jzplyy@126.com |
6b647a37651c219bf089e678cc19e35ca3ca36ab | 63a1671145dc6dc6e1a9d10ec21c520b83036fea | /Image_segmentation/DeepLabV3Plus/utils/label.py | a16777728916e85dc1af45d5ecad346b56dd4eab | [
"MIT"
] | permissive | chenpaopao/- | 4eca1405a3aab86fe649817048852b620a962c1a | 320f7d9a0b9e49528561024217ba07645eb68805 | refs/heads/master | 2023-02-04T17:44:48.639136 | 2022-06-06T05:04:57 | 2022-06-06T05:04:57 | 323,789,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,248 | py | import numpy as np
import utils.color as color_module
import utils.draw as draw_module
def label_colormap(n_label=256, value=None):
    """Label colormap (PASCAL-VOC-style bit-interleaved palette).
    Parameters
    ----------
    n_label: int
        Number of labels (default: 256).
    value: float or int
        Value scale or value of label color in HSV space.
    Returns
    -------
    cmap: numpy.ndarray, (N, 3), numpy.uint8
        Label id to colormap.
    """
    def _bit(word, index):
        # 1 when the given bit of ``word`` is set, else 0.
        return (word >> index) & 1
    cmap = np.zeros((n_label, 3), dtype=np.uint8)
    for label_id in range(n_label):
        remaining = label_id
        red = green = blue = 0
        # Spread the label's bits across the high bits of R, G, B.
        for shift in range(8):
            red |= _bit(remaining, 0) << (7 - shift)
            green |= _bit(remaining, 1) << (7 - shift)
            blue |= _bit(remaining, 2) << (7 - shift)
            remaining >>= 3
        cmap[label_id] = (red, green, blue)
    if value is not None:
        # Rescale (float) or fix (int) the V channel of all non-background colors.
        hsv = color_module.rgb2hsv(cmap.reshape(1, -1, 3))
        if isinstance(value, float):
            hsv[:, 1:, 2] = hsv[:, 1:, 2].astype(float) * value
        else:
            assert isinstance(value, int)
            hsv[:, 1:, 2] = value
        cmap = color_module.hsv2rgb(hsv).reshape(-1, 3)
    return cmap
def label2rgb(
    label,
    img=None,
    alpha=0.5,
    label_names=None,
    font_size=30,
    thresh_suppress=0,
    colormap=None,
    loc="centroid",
    font_path=None,
):
    """Convert label to rgb.
    Parameters
    ----------
    label: numpy.ndarray, (H, W), int
        Label image.
    img: numpy.ndarray, (H, W, 3), numpy.uint8
        RGB image.
    alpha: float
        Alpha of RGB (default: 0.5).
    label_names: list or dict of string
        Label id to label name.
    font_size: int
        Font size (default: 30).
    thresh_suppress: float
        Threshold of label ratio in the label image.
    colormap: numpy.ndarray, (M, 3), numpy.uint8
        Label id to color.
        By default, :func:`~imgviz.label_colormap` is used.
    loc: string
        Location of legend (default: 'centroid').
        'lt' and 'rb' are supported.
    font_path: str
        Font path.
    Returns
    -------
    res: numpy.ndarray, (H, W, 3), numpy.uint8
        Visualized image.
    """
    if colormap is None:
        colormap = label_colormap()
    res = colormap[label]
    # Fixed seed so negative (unlabeled) pixels get reproducible random colors.
    random_state = np.random.RandomState(seed=1234)
    mask_unlabeled = label < 0
    res[mask_unlabeled] = random_state.rand(*(mask_unlabeled.sum(), 3)) * 255
    if img is not None:
        # Alpha-blend the colorized labels over the (grayscale-promoted) image.
        if img.ndim == 2:
            img = color_module.gray2rgb(img)
        res = (1 - alpha) * img.astype(float) + alpha * res.astype(float)
        res = np.clip(res.round(), 0, 255).astype(np.uint8)
    if label_names is None:
        return res
    unique_labels = np.unique(label)
    unique_labels = unique_labels[unique_labels != -1]
    # Keep only labels that actually have a (non-empty) name.
    if isinstance(label_names, dict):
        unique_labels = [l for l in unique_labels if label_names.get(l)]
    else:
        unique_labels = [l for l in unique_labels if label_names[l]]
    if len(unique_labels) == 0:
        return res
    if loc == "centroid":
        for label_i in unique_labels:
            mask = label == label_i
            # Skip labels covering less than thresh_suppress of the image.
            if 1.0 * mask.sum() / mask.size < thresh_suppress:
                continue
            y, x = np.array(_center_of_mass(mask), dtype=int)
            # Centroid may fall outside a concave region; fall back to a
            # random pixel inside the mask.
            if label[y, x] != label_i:
                Y, X = np.where(mask)
                point_index = np.random.randint(0, len(Y))
                y, x = Y[point_index], X[point_index]
            text = label_names[label_i]
            height, width = draw_module.text_size(
                text, size=font_size, font_path=font_path
            )
            color = color_module.get_fg_color(res[y, x])
            res = draw_module.text(
                res,
                yx=(y - height // 2, x - width // 2),
                text=text,
                color=color,
                size=font_size,
                font_path=font_path,
            )
    elif loc in ["rb", "lt"]:
        # Render a legend box at right-bottom or left-top.
        text_sizes = np.array(
            [
                draw_module.text_size(
                    label_names[l], font_size, font_path=font_path
                )
                for l in unique_labels
            ]
        )
        text_height, text_width = text_sizes.max(axis=0)
        legend_height = text_height * len(unique_labels) + 5
        legend_width = text_width + 20 + (text_height - 10)
        height, width = label.shape[:2]
        legend = np.zeros((height, width, 3), dtype=np.uint8)
        if loc == "rb":
            aabb2 = np.array([height - 5, width - 5], dtype=float)
            aabb1 = aabb2 - (legend_height, legend_width)
        elif loc == "lt":
            aabb1 = np.array([5, 5], dtype=float)
            aabb2 = aabb1 + (legend_height, legend_width)
        else:
            raise ValueError("unexpected loc: {}".format(loc))
        legend = draw_module.rectangle(
            legend, aabb1, aabb2, fill=(255, 255, 255)
        )
        # NOTE(review): this shadows the `alpha` parameter and blends with
        # alpha*res + alpha*legend (not 1-alpha) — looks intentional for a
        # 50/50 fade but confirm against upstream imgviz behavior.
        alpha = 0.5
        y1, x1 = aabb1.round().astype(int)
        y2, x2 = aabb2.round().astype(int)
        res[y1:y2, x1:x2] = (
            alpha * res[y1:y2, x1:x2] + alpha * legend[y1:y2, x1:x2]
        )
        for i, l in enumerate(unique_labels):
            # Color swatch followed by the label's name, one row per label.
            box_aabb1 = aabb1 + (i * text_height + 5, 5)
            box_aabb2 = box_aabb1 + (text_height - 10, text_height - 10)
            res = draw_module.rectangle(
                res, aabb1=box_aabb1, aabb2=box_aabb2, fill=colormap[l]
            )
            res = draw_module.text(
                res,
                yx=aabb1 + (i * text_height, 10 + (text_height - 10)),
                text=label_names[l],
                size=font_size,
                font_path=font_path,
            )
    else:
        raise ValueError("unsupported loc: {}".format(loc))
    return res
def _center_of_mass(mask):
assert mask.ndim == 2 and mask.dtype == bool
mask = 1.0 * mask / mask.sum()
dx = np.sum(mask, 0)
dy = np.sum(mask, 1)
cx = np.sum(dx * np.arange(mask.shape[1]))
cy = np.sum(dy * np.arange(mask.shape[0]))
return cy, cx
| [
"739677819@qq.com"
] | 739677819@qq.com |
7c3c3087dc74958a0f6315fac4107e9163cf577c | ed11f664cbc459c7a4456dd58f2b231edcb22f33 | /ctm_saas_client/models/system_setting_ldap.py | 075ca22376d0bf909f9de200076ddf447cb43c0e | [
"BSD-3-Clause"
] | permissive | jpmc216/ctm_python_client | c8b8ba60580bf869b3d1e6af9b99737e0a7ea527 | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | refs/heads/main | 2023-08-26T22:06:34.022576 | 2021-10-25T13:41:31 | 2021-10-25T13:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,158 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_saas_client.configuration import Configuration
# Swagger-generated model for Control-M LDAP system settings.
# Regenerate from the OpenAPI spec rather than hand-editing.
class SystemSettingLdap(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'ldap_enabled': 'bool',
        'default_domain': 'str',
        'domains': 'list[LdapDomainSettings]'
    }
    attribute_map = {
        'ldap_enabled': 'ldapEnabled',
        'default_domain': 'defaultDomain',
        'domains': 'domains'
    }
    def __init__(self, ldap_enabled=None, default_domain=None, domains=None, _configuration=None):  # noqa: E501
        """SystemSettingLdap - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._ldap_enabled = None
        self._default_domain = None
        self._domains = None
        self.discriminator = None
        # ldap_enabled is required: its setter rejects None.
        self.ldap_enabled = ldap_enabled
        if default_domain is not None:
            self.default_domain = default_domain
        if domains is not None:
            self.domains = domains
    @property
    def ldap_enabled(self):
        """Gets the ldap_enabled of this SystemSettingLdap.  # noqa: E501
        :return: The ldap_enabled of this SystemSettingLdap.  # noqa: E501
        :rtype: bool
        """
        return self._ldap_enabled
    @ldap_enabled.setter
    def ldap_enabled(self, ldap_enabled):
        """Sets the ldap_enabled of this SystemSettingLdap.
        :param ldap_enabled: The ldap_enabled of this SystemSettingLdap.  # noqa: E501
        :type: bool
        """
        # Required field: reject None when client-side validation is on.
        if self._configuration.client_side_validation and ldap_enabled is None:
            raise ValueError("Invalid value for `ldap_enabled`, must not be `None`")  # noqa: E501
        self._ldap_enabled = ldap_enabled
    @property
    def default_domain(self):
        """Gets the default_domain of this SystemSettingLdap.  # noqa: E501
        :return: The default_domain of this SystemSettingLdap.  # noqa: E501
        :rtype: str
        """
        return self._default_domain
    @default_domain.setter
    def default_domain(self, default_domain):
        """Sets the default_domain of this SystemSettingLdap.
        :param default_domain: The default_domain of this SystemSettingLdap.  # noqa: E501
        :type: str
        """
        self._default_domain = default_domain
    @property
    def domains(self):
        """Gets the domains of this SystemSettingLdap.  # noqa: E501
        :return: The domains of this SystemSettingLdap.  # noqa: E501
        :rtype: list[LdapDomainSettings]
        """
        return self._domains
    @domains.setter
    def domains(self, domains):
        """Sets the domains of this SystemSettingLdap.
        :param domains: The domains of this SystemSettingLdap.  # noqa: E501
        :type: list[LdapDomainSettings]
        """
        self._domains = domains
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists, and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(SystemSettingLdap, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is by serialized content, not identity.
        if not isinstance(other, SystemSettingLdap):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SystemSettingLdap):
            return True
        return self.to_dict() != other.to_dict()
| [
"cmoraes@bmc.com"
] | cmoraes@bmc.com |
537ad653a81336f9eac99a3641fc6d2376828868 | 87a067e31fbc4ec2ea79dec0e4456452da28cfe1 | /tests/plugin/manymap/test_agent.py | ad04b7b95f255b8d94b8eae005f2915fb4c745da | [
"MIT"
] | permissive | bodik/sner4 | 94c4e486cae3b085885702981849e3ef6c48a6ed | d5d8e9cdd6dd058dd91eb119965a3f9f737e5c34 | refs/heads/master | 2023-08-19T18:06:51.383715 | 2023-08-07T16:22:40 | 2023-08-07T16:22:40 | 152,397,863 | 13 | 6 | MIT | 2023-08-15T11:45:12 | 2018-10-10T09:29:56 | Python | UTF-8 | Python | false | false | 898 | py | # This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
manymap plugin agent tests
"""
import json
from uuid import uuid4
from sner.agent.core import main as agent_main
from sner.lib import file_from_zip
def test_basic(tmpworkdir):  # pylint: disable=unused-argument
    """manymap module execution test"""
    # Integration test: drives the real agent, which shells out to nmap, so
    # nmap must be installed. tmpworkdir fixture chdirs into a temp dir.
    test_a = {
        'id': str(uuid4()),
        'config': {
            'module': 'manymap',
            'args': '-sV',
            'delay': 1
        },
        # One invalid target (skipped) plus one TCP and one UDP target;
        # each valid target yields its own numbered output file.
        'targets': ['invalid', 'tcp://127.0.0.1:1', 'udp://[::1]:2']
    }
    result = agent_main(['--assignment', json.dumps(test_a), '--debug'])
    assert result == 0
    # The agent packs per-target greppable nmap outputs into <id>.zip.
    assert 'Host: 127.0.0.1 (localhost)' in file_from_zip('%s.zip' % test_a['id'], 'output-1.gnmap').decode('utf-8')
    assert '# Nmap done at' in file_from_zip('%s.zip' % test_a['id'], 'output-2.gnmap').decode('utf-8')
| [
"bodik@cesnet.cz"
] | bodik@cesnet.cz |
49ba6f67f8c5168096739959d0b1292a7ca65727 | c5a90053c7240d9fc95e31a4a74f279ed2f12929 | /azure-iot-device/tests/provisioning/internal/test_registration_query_status_result.py | 2924a0f0ab9b5a2b8aae78e88d119c3e06d8031c | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-iot-sdk-python-preview | a45bdfd0c2f198f6d99c2fdf9d29433c5f43bc17 | 50060e8c36d5751f8d207fa277db958ba89e9088 | refs/heads/master | 2023-03-18T01:03:13.686811 | 2019-09-07T00:04:31 | 2019-09-07T00:04:31 | 148,676,866 | 36 | 18 | MIT | 2019-09-07T00:04:32 | 2018-09-13T17:57:54 | Python | UTF-8 | Python | false | false | 1,892 | py | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import logging
import datetime
from azure.iot.device.provisioning.internal.registration_query_status_result import (
RegistrationQueryStatusResult,
)
logging.basicConfig(level=logging.INFO)
# Fixture constants shared by the tests below.
fake_request_id = "Request1234"
fake_retry_after = 6
fake_operation_id = "Operation4567"
fake_status = "Flying"
fake_device_id = "MyNimbus2000"
fake_assigned_hub = "Dumbledore'sArmy"
fake_sub_status = "FlyingOnHippogriff"
fake_created_dttm = datetime.datetime(2020, 5, 17)
fake_last_update_dttm = datetime.datetime(2020, 10, 17)
fake_etag = "HighQualityFlyingBroom"
@pytest.mark.describe("RegistrationQueryStatusResult")
class TestRegistrationQueryStatusResult(object):
    @pytest.mark.it("Instantiates correctly")
    def test_registration_status_query_result_instantiated_correctly(self):
        # Positional args: request_id, retry_after, operation_id, status.
        intermediate_result = RegistrationQueryStatusResult(
            fake_request_id, fake_retry_after, fake_operation_id, fake_status
        )
        assert intermediate_result.request_id == fake_request_id
        assert intermediate_result.retry_after == fake_retry_after
        assert intermediate_result.operation_id == fake_operation_id
        assert intermediate_result.status == fake_status
    @pytest.mark.it("Has request id that does not have setter")
    def test_rid_is_not_settable(self):
        registration_result = RegistrationQueryStatusResult(
            "RequestId123", "Operation456", "emitted", None
        )
        # NOTE(review): the "can't set attribute" message is CPython-specific
        # and changed wording in 3.11 — confirm against supported versions.
        with pytest.raises(AttributeError, match="can't set attribute"):
            registration_result.request_id = "MyNimbus2000"
| [
"noreply@github.com"
] | Azure.noreply@github.com |
40ddd24842d9cef89390997e590efdb7f0f34329 | 95a27dd31260802da3768064c9614ce4f6ca4797 | /Scripts/GenCode_Explore_105.py | 53bc706c4f1aa80c5d04640bab2c285c9c7060d3 | [
"MIT"
] | permissive | ShepherdCode/Soars2021 | 7ee34227076ea424ad42f4727205141b69c78bb9 | ab4f304eaa09e52d260152397a6c53d7a05457da | refs/heads/main | 2023-07-09T05:28:56.993951 | 2021-08-18T14:16:57 | 2021-08-18T14:16:57 | 364,885,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,282 | py | #!/usr/bin/env python
# coding: utf-8
# # GenCode Explore
#
# Explore the human RNA sequences from GenCode.
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# In 104, we showed that we can do away with the protein-include file based on annotation.gff and just rely on the presence of UTR in the FASTA deflines. Here, stop importing the protein-include file.
# In[1]:
import time
def show_time():
    """Print the current local time as 'YYYY-MM-DD HH:MM:SS TZ' and return it.

    Returning the formatted string is backward compatible (callers previously
    received None and ignored it) and makes the helper testable and reusable
    for logging.
    """
    stamp = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(time.time()))
    print(stamp)
    return stamp
show_time()
# In[2]:
import numpy as np
import pandas as pd
import gzip
import sys
import re
# Environment bootstrap: on Google CoLab mount Drive and fetch RNA_describe
# from GitHub; otherwise fall back to local relative paths.
# NOTE(review): the bare `except:` treats ANY failure as "not CoLab" — it
# also hides genuine import/download errors; confirm before narrowing it.
try:
    from google.colab import drive
    IN_COLAB = True
    print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
    PATH='/content/drive/'
    #drive.mount(PATH,force_remount=True) # hardly ever need this
    drive.mount(PATH) # Google will require login credentials
    DATAPATH=PATH+'My Drive/data/' # must end in "/"
    import requests
    s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
    with open('RNA_describe.py', 'w') as f:
        f.write(s.text) # writes to cloud local, delete the file later?
    from RNA_describe import ORF_counter
    from RNA_describe import assert_imported_RNA_describe
except:
    print("CoLab not working. On my PC, use relative paths.")
    IN_COLAB = False
    DATAPATH='../data/' # must end in "/"
    sys.path.append("..") # append parent dir in order to use sibling dirs
    from SimTools.RNA_describe import ORF_counter
    from SimTools.RNA_describe import assert_imported_RNA_describe
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_describe():
    print("ERROR: Cannot use RNA_describe.")
# In[3]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
# In[4]:
class GenCodeLoader():
    """Parse a gzipped GenCode transcript FASTA file into a pandas DataFrame.

    Configure with set_label()/set_check_list()/set_check_utr(), then call
    load_file(). Resulting columns: tid (ENST id with version stripped),
    class (the configured label), sequence, seqlen.
    """
    def __init__(self):
        # Protein-coding deflines annotate UTRs, e.g. "...|UTR5:1-60|...|UTR3:900-1020|".
        self.pattern5=re.compile('.*UTR5:')
        self.pattern3=re.compile('.*UTR3:')
        self.check_list = None   # optional collection of transcript ids to keep
        self.check_utr = False   # require both UTR5: and UTR3: in the defline
    def set_label(self,label):
        """Class label (e.g. 1=protein-coding, 0=non-coding) stored with every record."""
        self.label=label
    def set_check_list(self,check_list):
        """Optional collection of transcript ids to keep; None disables filtering."""
        self.check_list=check_list
    def set_check_utr(self,check_utr):
        """If True, keep only transcripts whose defline mentions both UTR5: and UTR3:."""
        self.check_utr=check_utr
    def __save_previous(self,one_def,seq_parts):
        """Apply the configured filters to the accumulated record and store it."""
        if one_def is None:
            return  # nothing accumulated yet (start of file)
        if self.check_utr:
            if self.pattern5.match(one_def) is None:
                return
            if self.pattern3.match(one_def) is None:
                return
        VERSION = '.'
        # ">ENST00000456328.2|..." -> "ENST00000456328"
        one_id = one_def[1:].split(VERSION)[0]
        if self.check_list is not None:
            if one_id not in self.check_list:
                return
        # Join once instead of string += per line: avoids quadratic behavior
        # on transcripts spanning many FASTA lines.
        one_seq = ''.join(seq_parts)
        self.labels.append(self.label)
        self.seqs.append(one_seq)
        self.lens.append(len(one_seq))
        self.ids.append(one_id)
    def load_file(self,filename):
        """Parse one gzip FASTA file; return a DataFrame (tid, class, sequence, seqlen)."""
        self.labels=[]  # usually 1 for protein-coding or 0 for non-coding
        self.seqs=[]    # strings of ACGT
        self.lens=[]    # sequence lengths
        self.ids=[]     # GenCode transcript IDs, ENST..., version excluded
        DEFLINE='>'     # start of a defline in a FASTA file
        one_def = None
        seq_parts = []
        with gzip.open (filename,'rt') as infile:
            for line in infile:
                if line[0]==DEFLINE:
                    self.__save_previous(one_def,seq_parts)
                    one_def=line
                    seq_parts = []
                else:
                    # Continue collecting sequence lines until the next defline.
                    seq_parts.append(line.rstrip())
        # Don't forget to save the last sequence after end-of-file.
        self.__save_previous(one_def,seq_parts)
        df1=pd.DataFrame(self.ids,columns=['tid'])
        df2=pd.DataFrame(self.labels,columns=['class'])
        df3=pd.DataFrame(self.seqs,columns=['sequence'])
        df4=pd.DataFrame(self.lens,columns=['seqlen'])
        df=pd.concat((df1,df2,df3,df4),axis=1)
        return df
# ## Load the GenCode data.
# Warning: GenCode has
# over 100K protein-coding RNA (mRNA)
# and almost 50K non-coding RNA (lncRNA).
# In[5]:
# Full GenCode ver 38 human is 106143 pc + 48752 nc and loads in 7 sec.
# Expect fewer transcripts if special filtering is used.
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
loader=GenCodeLoader()
show_time()  # assumes show_time() is defined earlier in the file — TODO confirm
# Protein-coding set: label 1, keep only transcripts with both UTRs annotated.
loader.set_label(1)
loader.set_check_list(None)
loader.set_check_utr(True)
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
show_time()
# Non-coding set: label 0, no UTR filtering.
loader.set_label(0)
loader.set_check_list(None)
loader.set_check_utr(False)
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
show_time()
# In[6]:
# Sort both DataFrames by sequence length (in place), shortest first.
print("Sorting PC...")
pcdf.sort_values('seqlen', ascending=True, inplace=True)
print("Sorting NC...")
ncdf.sort_values('seqlen', ascending=True, inplace=True)
# In[7]:
# Bare expression: displays the DataFrame when run as a notebook cell.
ncdf
# ## Look for short ORFs
# In[8]:
def show_short(df, too_short):
    """Count sequences longer than 200 nt whose longest ORF is <= too_short.

    df must have 'sequence' and 'seqlen' columns (as built by GenCodeLoader).
    Prints a running tally every 10000 rows and a final summary.
    """
    oc = ORF_counter()
    count = len(df)
    shorties = 0
    for pos in range(0, count):
        sequence = df.iloc[pos]['sequence']
        seqlen = df.iloc[pos]['seqlen']
        oc.set_sequence(sequence)
        orflen = oc.get_max_orf_len()
        # Bug fix: the original compared against the global TOO_SHORT, so the
        # too_short parameter was silently ignored. Compare the parameter.
        if seqlen > 200 and orflen <= too_short:
            shorties += 1
        if pos % 10000 == 0:
            print("Up to position %d, we have %d shorter than %d" % (pos, shorties, too_short))
    print("After all %d, we have %d shorter than %d" % (count, shorties, too_short))
# Threshold: flag sequences (>200 nt) whose longest ORF is at most 60 nt.
TOO_SHORT=60
show_short(pcdf,TOO_SHORT)
# In[9]:
show_short(ncdf,TOO_SHORT)
# ## Conclusion
# With TOO_SHORT=30
# NON-CODING
# We have 589 shorter than 30, with most of them (504) shorter than 10000
#
# CODING
# Using check_utr and check_list on pcdf, we have 0 shorter than 30.
# Using check_utr only, we have 0 shorter than 30.
#
| [
"jmill02@shepherd.edu"
] | jmill02@shepherd.edu |
2337447bc605e07db31f6a121bdc3bce8f729d6c | 9249f87109471de1fc3f3c3c1b121f51c09df683 | /lesson_8/test_4.py | 4aada1db89d91b62ae7e6e958aceb1e1b47ad3f4 | [] | no_license | anton1k/mfti-homework | 400a8213a57e44478d65437f5afef0432e8e84ea | 93683de329e6cb0001e713214aeb3069f6e213b0 | refs/heads/master | 2020-07-18T23:41:11.473608 | 2020-01-12T10:58:16 | 2020-01-12T10:58:16 | 206,335,501 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # Три копии кривой Коха, построенные (остриями наружу) на сторонах правильного треугольника, образуют замкнутую кривую бесконечной длины, называемую снежинкой Коха. Нарисуйте ee.
import time
import turtle
# Configure the turtle: start pen-up, move to the left edge so the
# 400-unit-wide snowflake fits on screen, then draw at maximum speed.
turtle.shape('turtle')
turtle.penup()
turtle.goto(-200, 0)
turtle.pendown()
turtle.speed('fastest')
def draw(l, n):
    """Draw one Koch-curve segment of length l with n recursion levels.

    Level 0 is a straight line; each deeper level replaces the segment
    with four third-length segments joined by turns of +60, -120, +60
    degrees (the classic Koch bump).
    """
    if n == 0:
        turtle.forward(l)
        return
    l //= 3
    for angle in (60, -120, 60, 0):
        draw(l, n - 1)
        if angle:
            # left(-120) is equivalent to right(120)
            turtle.left(angle)
# Three Koch curves on the sides of an equilateral triangle form the snowflake.
for _ in range(3):
    draw(400, 3)
    turtle.right(120)
# Keep the window visible briefly before the script exits.
time.sleep(2)
"40913464+anton1k@users.noreply.github.com"
] | 40913464+anton1k@users.noreply.github.com |
1867b4c6a2dcd1b2bd60fc1503f94939b4d082cf | b096066c3f7ecc0df19a34fce7c9e035994e0216 | /inspire/main_database.py | 8d415296b44961dc850494ddfb2660ebf4fb3bd7 | [] | no_license | platipy/Inspire | 80a057d77c2b304f5d5d2b22b3404e6a7a181dfc | 363e6e3d043564aa1c734776f403364c6d9d9913 | refs/heads/master | 2020-05-16T21:46:20.247072 | 2012-12-07T07:46:30 | 2012-12-07T07:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | from inspire import db
import sqlalchemy
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import asc
import passlib.hash
from inspire.lib.history_meta import Versioned
class Teaching(db.Model):
    """Association row linking one student to one teacher for a named class."""
    __tablename__ = 'teaching'
    # Composite primary key: one row per (student, teacher) pair.
    student_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
    teacher_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
    class_name = db.Column(db.String)
    time = db.Column(db.DateTime)
    # Both FKs target user.id, so each relationship needs an explicit
    # primaryjoin to disambiguate which column it follows.
    student = db.relationship("User",
                              backref='student_teaching',
                              primaryjoin="User.id == Teaching.student_id")
    teacher = db.relationship("User",
                              backref='teacher_teaching',
                              primaryjoin="User.id == Teaching.teacher_id")
class Reset_Requests(db.Model):
    """A student's reset request awaiting a teacher's approval.

    NOTE(review): presumably a password-reset request — confirm against callers.
    """
    __tablename__ = 'reset_requests'
    student_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
    teacher_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
    approved = db.Column(db.Boolean)
    # Explicit primaryjoins: both FKs target user.id.
    students = db.relationship("User",
                               backref='student_requesting',
                               primaryjoin="User.id == Reset_Requests.student_id")
    teachers = db.relationship("User",
                               backref='teacher_requesting',
                               primaryjoin="User.id == Reset_Requests.teacher_id")
    def __repr__(self):
        return '<Request from %s to %s>' % (self.student_id, self.teacher_id)
class User(db.Model):
    """Account record; user_type is a numeric index into user_types."""
    user_types = ["guest", "student", "teacher", "developer", "admin"]
    # Numeric aliases for the entries of user_types, in the same order.
    # Fix: xrange() does not exist on Python 3 (NameError at class creation);
    # range(5) unpacks identically on both Python 2 and Python 3.
    GUEST, STUDENT, TEACHER, DEVELOPER, ADMIN = range(5)
    __tablename__ = 'user'

    @property
    def password(self):
        """Write-only attribute: the plaintext password is never stored or readable."""
        raise Exception("Plaintext passwords are not stored!")

    @password.setter
    def password(self, value):
        # TODO: Verify password strength somewhere
        self.pwhash = passlib.hash.sha512_crypt.encrypt(value)

    def verify_password(self, password):
        """Return True if password matches the stored sha512-crypt hash."""
        return passlib.hash.sha512_crypt.verify(password, self.pwhash)

    # Required information
    id = db.Column(db.Integer, primary_key = True)
    email = db.Column(db.String(40), unique = True, nullable = False)
    # 119 chars holds a full sha512-crypt digest string.
    pwhash = db.Column(db.String(119), unique = False, nullable = False)
    name = db.Column(db.String(256), unique = False, nullable = False)
    user_type = db.Column(db.Integer)

    def __repr__(self):
        return '<User %s (%s)>' % (self.email, self.name)
def populate():
    """Seed the database with demo users and reset requests, then commit."""
    u1 = User(email='acbart', password='pass', name='Austin Cory Bart', user_type=User.ADMIN)
    db.session.add(u1)
    u2 = User(email='lelouch', password='pass', name='Lelouch Lamperouge', user_type=User.STUDENT)
    db.session.add(u2)
    u3 = User(email='cookies', password='pass', name='Mr. Monster', user_type=User.TEACHER)
    db.session.add(u3)
    u4 = User(email='trex', password='pass', name='Rebecca Trexler', user_type=User.DEVELOPER)
    db.session.add(u4)
    # Fix: the original rebound u4 to this second user, silently shadowing the
    # developer record above; give it a distinct name.
    u5 = User(email='dog_lover', password='pass', name='Some Dog Lover', user_type=User.STUDENT)
    db.session.add(u5)
    # flush() assigns primary keys so they can be referenced below.
    db.session.flush()
    r = Reset_Requests(student_id=u2.id, teacher_id=u1.id, approved=False)
    db.session.add(r)
    r = Reset_Requests(student_id=u3.id, teacher_id=u1.id, approved=True)
    db.session.add(r)
    db.session.commit()
| [
"acbart@vt.edu"
] | acbart@vt.edu |
3a406dbc6b719171e97090341b63a20757510bb6 | b7dc7d08ff9af2f54a9736284db7bc44392b35c4 | /solution-bank/pattern/solution_5.py | 9ba76f5951fdbac4f544f44c4d996b8144711f23 | [
"MIT"
] | permissive | anishLearnsToCode/python-training-1 | e6278f9d9dc25c9fd5a27e2121c0af77f8ba4155 | ef5d6b64f888e167faecd1410563173dcc27f319 | refs/heads/main | 2023-02-24T12:10:58.919765 | 2021-01-17T18:25:25 | 2021-01-17T18:25:25 | 327,060,912 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | rows = int(input())
# Print a right-aligned pyramid: row i gets (rows-1-i) leading spaces
# followed by i+1 stars (each star trailed by a space).
for i in range(rows):
    print(' ' * (rows - 1 - i) + '* ' * (i + 1))
| [
"anish_@outlook.com"
] | anish_@outlook.com |
4307c6822b8485fb25320aede82768f76c93e33c | 10f397d1fe8c68dc5af033e0b88cb99be56bc4f2 | /Tran/forms.py | d239edadd401fbf3fcb35ac7e06b1a1deba69718 | [] | no_license | wsqy/AccountRandmomAmount | db3905bd425c074935c256aab62f437fe7cb0ffc | b69bc1269a666c48fe954ac423a25d111e01176b | refs/heads/master | 2023-06-29T12:49:35.947729 | 2020-06-30T03:27:05 | 2020-06-30T03:27:05 | 271,683,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django import forms
from django.core.exceptions import ValidationError
from .models import Task
class TaskForm(forms.ModelForm):
    """ModelForm for Task that validates the amount_total range."""
    class Meta:
        model = Task
        fields = "__all__"

    def clean(self):
        """Cross-field check: amount_total_min may not exceed amount_total_max.

        The original read both values and did nothing (with a commented-out
        raise); this implements the evidently intended validation.
        """
        cleaned_data = super(TaskForm, self).clean()
        v1 = cleaned_data.get('amount_total_min')
        v2 = cleaned_data.get('amount_total_max')
        # Only compare when both fields passed their own validation.
        if v1 is not None and v2 is not None and v1 > v2:
            raise ValidationError('amount_total_min must not exceed amount_total_max')
        return cleaned_data
| [
"1323800521@qq.com"
] | 1323800521@qq.com |
3884d7e99aa08566fd76a2d5983f442cb484b3ae | 620e586959d887f114bf186c72b4b4d14c46644d | /development_history/backup/model_accuracy77.py | 752e0172b85c770672a990540fa1dc951d2bc3d4 | [
"MIT"
] | permissive | leimao/Text_Language_Identifier | 0f919caec191bbe8110029cc630f635fcfa99ba7 | 0dc1b0950feec31caeef9ce9f7c0008af8ab8f29 | refs/heads/master | 2021-09-04T00:04:57.208236 | 2018-01-13T04:38:44 | 2018-01-13T04:38:44 | 116,631,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,001 | py | import numpy as np
import json
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.naive_bayes import GaussianNB
import time
# JSON-lines inputs: one object per line with a 'text' / 'classification' field.
TRAIN_DATA_RAW = "train_X_languages_homework.json.txt"
TRAIN_LABEL_RAW = "train_y_languages_homework.json.txt"
def read_data(path):
    """Return the 'text' field of every non-blank JSON line in the file."""
    with open(path, 'r') as f:
        return [json.loads(line)['text'] for line in map(str.strip, f) if line]
def read_label(path):
    """Return the 'classification' field of every non-blank JSON line in the file."""
    with open(path, 'r') as f:
        return [json.loads(line)['classification']
                for line in map(str.strip, f) if line]
def count_language(labelset):
    """Return {label: occurrence count} for the given label sequence."""
    label_count = dict()
    for label in labelset:
        # dict.get collapses the original membership-test-then-increment
        # into a single idiomatic update.
        label_count[label] = label_count.get(label, 0) + 1
    return label_count
def count_char_utf(dataset):
    """Return {character: occurrence count} across every string in dataset."""
    char_count = dict()
    for text in dataset:
        for ch in text:
            char_count[ch] = char_count.get(ch, 0) + 1
    return char_count
def extract_n_gram(dataset, labelset, language, n = 2, k = 200):
    """Return the top-k most frequent n-grams (and their counts) for one language.

    Only sentences whose label equals `language` contribute. Ties keep
    first-seen order because sorted() is stable.
    Returns (n_gram_top_k, n_gram_top_k_occurrence).
    """
    n_gram_count = dict()
    for data, label in zip(dataset, labelset):
        if label == language:
            for i in range(len(data) - n + 1):
                n_gram = data[i:i + n]
                n_gram_count[n_gram] = n_gram_count.get(n_gram, 0) + 1
    # sorted() over items replaces the original's manually-built tuple list;
    # slicing handles the min(k, len) bound.
    ranked = sorted(n_gram_count.items(), key=lambda kv: kv[1], reverse=True)[:k]
    n_gram_top_k = [gram for gram, _ in ranked]
    n_gram_top_k_occurrence = [count for _, count in ranked]
    return n_gram_top_k, n_gram_top_k_occurrence
def extract_n_gram_all(dataset, labelset, languages, n = 2, k = 200):
    """Union of each language's top-k n-grams, deduplicated, as one list.

    Fix: the original deduplicated with list(set(...)), whose ordering varies
    across interpreter runs (string hash randomization), so downstream feature
    vectors were not reproducible. dict.fromkeys dedupes while keeping
    deterministic first-seen order. The unused occurrence accumulator was
    also dropped.
    """
    n_gram_list = list()
    for language in languages:
        n_gram_top_k, _ = extract_n_gram(
            dataset=dataset, labelset=labelset, language=language, n=n, k=k)
        n_gram_list += n_gram_top_k
    return list(dict.fromkeys(n_gram_list))
def n_gram_representation(sentence, ns, n_gram_list):
    """One-hot feature row for a sentence over a fixed n-gram vocabulary.

    ns lists the n-gram sizes to extract. The result has one entry per item
    of n_gram_list: 1.0 when that n-gram occurs in the sentence, else 0.0.
    """
    grams = {sentence[i:i + n]
             for n in ns
             for i in range(len(sentence) - n + 1)}
    return np.array([float(gram in grams) for gram in n_gram_list])
def prepare_n_gram_dataset(dataset, ns, n_gram_list):
    """Stack the n-gram representation of every sentence into a 2-D array.

    Shape: (len(dataset), len(n_gram_list)).
    """
    n_gram_dataset = np.zeros((len(dataset), len(n_gram_list)))
    for row, sentence in enumerate(dataset):
        n_gram_dataset[row] = n_gram_representation(sentence, ns, n_gram_list)
    return n_gram_dataset
if __name__ == '__main__':
    localtime = time.asctime( time.localtime(time.time()) )
    print ("Local current time :" + localtime)
    # Load sentences and labels, then carve out 10% test and 10% validation.
    dataset = read_data(path = TRAIN_DATA_RAW)
    labelset = read_label(path = TRAIN_LABEL_RAW)
    data_train, data_test, label_train, label_test = train_test_split(dataset, labelset, test_size = 0.1, random_state = 0)
    data_train, data_val, label_train, label_val = train_test_split(data_train, label_train, test_size = 0.1, random_state = 0)
    label_count_train = count_language(labelset = label_train)
    print(len(label_count_train))
    char_count_train = count_char_utf(dataset = data_train)
    print(len(char_count_train))
    # Per-language top n-grams (1-, 2-, 3-grams) form the feature vocabulary.
    one_gram_list = extract_n_gram_all(dataset = data_train, labelset = label_train, languages = list(label_count_train.keys()), n = 1, k = 200)
    print("Number of 1-grams: %d" %len(one_gram_list))
    two_gram_list = extract_n_gram_all(dataset = data_train, labelset = label_train, languages = list(label_count_train.keys()), n = 2, k = 200)
    print("Number of 2-grams: %d" %len(two_gram_list))
    three_gram_list = extract_n_gram_all(dataset = data_train, labelset = label_train, languages = list(label_count_train.keys()), n = 3, k = 50)
    print("Number of 3-grams: %d" %len(three_gram_list))
    #four_gram_list = extract_n_gram_all(dataset = data_train, labelset = label_train, languages = list(label_count_train.keys()), n = 4, k = 100)
    #print("Number of 4-grams: %d" %len(four_gram_list))
    #one_gram_list = extract_n_gram_all(dataset = data_train, labelset = label_train, languages = list(label_count_train.keys()), n = 1, k = 50)
    # One-hot n-gram presence matrices for each split.
    data_n_gram_train = prepare_n_gram_dataset(dataset = data_train, ns = [1,2,3], n_gram_list = one_gram_list + two_gram_list + three_gram_list)
    data_n_gram_val = prepare_n_gram_dataset(dataset = data_val, ns = [1,2,3], n_gram_list = one_gram_list + two_gram_list + three_gram_list)
    data_n_gram_test = prepare_n_gram_dataset(dataset = data_test, ns = [1,2,3], n_gram_list = one_gram_list + two_gram_list + three_gram_list)
    #data_n_gram_train = prepare_n_gram_dataset(dataset = data_train, ns = [2], n_gram_list = two_gram_list)
    #data_n_gram_val = prepare_n_gram_dataset(dataset = data_val, ns = [2], n_gram_list = two_gram_list)
    #data_n_gram_test = prepare_n_gram_dataset(dataset = data_test, ns = [2], n_gram_list = two_gram_list)
    # One-hot encoding labels
    lb = preprocessing.LabelBinarizer()
    lb.fit(label_train)
    #print(lb.classes_)
    label_onehot_train = lb.transform(label_train)
    label_onehot_val = lb.transform(label_val)
    label_onehot_test = lb.transform(label_test)
    # Numeric encoding labels
    le = preprocessing.LabelEncoder()
    le.fit(label_train)
    #print(le.classes_)
    label_numeric_train = le.transform(label_train)
    label_numeric_val = le.transform(label_val)
    label_numeric_test = le.transform(label_test)
    print("Start Regression")
    localtime = time.asctime( time.localtime(time.time()) )
    print ("Local current time :" + localtime)
    # Multinomial logistic regression on the numeric labels.
    clf = linear_model.LogisticRegression(solver = 'lbfgs', n_jobs = 14, max_iter = 30, verbose = True)
    #clf = GaussianNB()
    for i in range(1):
        print("Training Round: %d" % i)
        localtime = time.asctime( time.localtime(time.time()) )
        print ("Local current time :" + localtime)
        model = clf.fit(data_n_gram_train, label_numeric_train)
        score_train = model.score(data_n_gram_train, label_numeric_train)
        score_val = model.score(data_n_gram_val, label_numeric_val)
        score_test = model.score(data_n_gram_test, label_numeric_test)
        print("------------------------")
        print("Train Score: %f" %score_train)
        print("Validation Score: %f" %score_val)
        print("Test Score: %f" %score_test)
        print("------------------------")
    localtime = time.asctime( time.localtime(time.time()) )
    print ("Local current time :" + localtime)
| [
"dukeleimao@gmail.com"
] | dukeleimao@gmail.com |
8bd5a650c7886080d76679b5688ac6d2ee5a6b1a | 162eed4191aef4431f94a0db1ad4185b6daa6f67 | /supervised_learning/0x08-deep_cnns/5-dense_block.py | ba776159d43cb3771eb76e225dd3aa5d49112b87 | [] | no_license | giovannyortegon/holbertonschool-machine_learning | d6897bfb492f9d266302930927416637be3c172d | 8cd5e0f837a5c0facbf73647dcc9c6a3b1b1b9e0 | refs/heads/master | 2022-12-31T15:34:20.489690 | 2020-10-24T03:37:01 | 2020-10-24T03:37:01 | 279,656,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | #!/usr/bin/env python3
""" dense block """
import tensorflow.keras as K
def dense_block(X, nb_filters, growth_rate, layers):
    """dense_block - builds a dense block

    X is the output from the previous layer.
    nb_filters is an integer representing the number of filters in X.
    growth_rate is the growth rate for the dense block.
    layers is the number of layers in the dense block.
    Returns:
        The concatenated output of each layer within the Dense
        Block and the number of filters within the concatenated
        outputs, respectively.
    """
    for _ in range(layers):
        # Bottleneck: BN -> ReLU -> 1x1 conv producing 4*growth_rate maps.
        bottleneck = K.layers.Conv2D(
            filters=4 * growth_rate,
            kernel_size=1,
            padding='same',
            kernel_initializer='he_normal'
        )(K.layers.Activation('relu')(K.layers.BatchNormalization()(X)))
        # Composite layer: BN -> ReLU -> 3x3 conv adding growth_rate maps.
        new_features = K.layers.Conv2D(
            filters=growth_rate,
            kernel_size=(3, 3),
            padding='same',
            kernel_initializer='he_normal'
        )(K.layers.Activation('relu')(K.layers.BatchNormalization()(bottleneck)))
        # Dense connectivity: concatenate the new maps onto everything so far.
        X = K.layers.concatenate([X, new_features])
        nb_filters += growth_rate
    return X, nb_filters
| [
"ortegon.giovanny@hotmail.com"
] | ortegon.giovanny@hotmail.com |
dba64cc0db94bb04477aea6ecd4b9aaeb1b26022 | 6c18f36db8d6e4c6738f620e114f6fd1cebdf1c8 | /ideas/migrations/0008_auto_20190512_0052.py | 805bc67634c27511cf9c2f0e20a5594f2a090875 | [
"Apache-2.0"
] | permissive | neosergio/hackatrix-api | f9facd088292ef9d86adea77fcbc438114b75683 | 27f0180415efa97bd7345d100b314d8807486b67 | refs/heads/master | 2022-12-15T11:05:53.689886 | 2021-03-20T03:38:55 | 2021-03-20T03:38:55 | 240,255,211 | 1 | 0 | Apache-2.0 | 2022-12-08T07:48:07 | 2020-02-13T12:28:49 | Python | UTF-8 | Python | false | false | 409 | py | # Generated by Django 2.1.7 on 2019-05-12 00:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set max_number_of_participants default to 7 on Idea."""
    dependencies = [
        ('ideas', '0007_auto_20190512_0019'),
    ]
    operations = [
        migrations.AlterField(
            model_name='idea',
            name='max_number_of_participants',
            field=models.PositiveIntegerField(default=7),
        ),
    ]
| [
"raulsergio9@gmail.com"
] | raulsergio9@gmail.com |
dd6a2af1067e9a3153b766e5699f35e5cab54027 | 0f96f24c8682ece3b501904baaa0eef411969bb1 | /0x04-python-more_data_structures/102-complex_delete.py | e1797d8bfc4c0fb5e21ddd71f6d34e1a30c8a9fe | [] | no_license | dkokonkwo/holbertonschool-higher_level_programming | 95c5103001e807bd46767f66d97568e23d893e68 | 5fa97f754afaf7326550113416e80fd942226254 | refs/heads/master | 2023-03-18T03:38:33.386497 | 2020-09-28T02:56:13 | 2020-09-28T02:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | #!/usr/bin/python3
def complex_delete(a_dictionary, value):
    """Delete every key whose value equals value; return the (mutated) dict.

    Fixes two defects of the recursive original: a falsy key (0, '', False,
    None) was indistinguishable from the "no match found" sentinel and so was
    never deleted, and each deletion triggered a full rescan of the dict.
    """
    if a_dictionary:
        # Snapshot matching keys first: deleting while iterating is an error.
        matches = [k for k, v in a_dictionary.items() if v == value]
        for k in matches:
            del a_dictionary[k]
    return a_dictionary
| [
"eislek02@gmail.com"
] | eislek02@gmail.com |
6c27ce1579d0d7de31fbc1690a00cab9c720788e | d4a569dcf616b7f05e53a44803e38196b436b8b9 | /Thesis@3.9.1/Lib/site-packages/mypy/gclogger.py | 171bd98c9e148557fa3cc4338e4b784431159d11 | [
"MIT"
] | permissive | nverbois/TFE21-232 | ac3178d24939c872c02a671c0f1d8cc471af516b | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | refs/heads/main | 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | import gc
import time
from typing import Mapping, Optional
class GcLogger:
    """Context manager to log GC stats and overall time."""

    def __enter__(self) -> "GcLogger":
        # Running totals accumulated by gc_callback between enter and exit.
        self.gc_start_time = None  # type: Optional[float]
        self.gc_time = 0.0
        self.gc_calls = 0
        self.gc_collected = 0
        self.gc_uncollectable = 0
        gc.callbacks.append(self.gc_callback)
        self.start_time = time.time()
        return self

    def gc_callback(self, phase: str, info: Mapping[str, int]) -> None:
        """Registered with gc.callbacks; fires at the start and stop of each collection."""
        if phase == "start":
            assert self.gc_start_time is None, "Start phase out of sequence"
            self.gc_start_time = time.time()
            return
        if phase == "stop":
            assert self.gc_start_time is not None, "Stop phase out of sequence"
            self.gc_calls += 1
            self.gc_time += time.time() - self.gc_start_time
            self.gc_start_time = None
            self.gc_collected += info["collected"]
            self.gc_uncollectable += info["uncollectable"]
            return
        assert False, "Unrecognized gc phase (%r)" % (phase,)

    def __exit__(self, *args: object) -> None:
        # Remove every registration of our callback (defensive loop).
        while self.gc_callback in gc.callbacks:
            gc.callbacks.remove(self.gc_callback)

    def get_stats(self) -> Mapping[str, float]:
        """Return accumulated GC counters plus total wall-clock build time."""
        return {
            "gc_time": self.gc_time,
            "gc_calls": self.gc_calls,
            "gc_collected": self.gc_collected,
            "gc_uncollectable": self.gc_uncollectable,
            "build_time": time.time() - self.start_time,
        }
| [
"38432529+nverbois@users.noreply.github.com"
] | 38432529+nverbois@users.noreply.github.com |
2ee47880dc1ecb60c7377cb376571e5fab304412 | 17f6b6d265da0e4c582bcace7ea5b8d637b9ae57 | /mysite/settings.py | 55f9d311e26c7a7cfbd57a7a4dac6ecee99b780e | [] | no_license | ankiwoong/django_tutorial | 547c1efbdd2c85cd8f4b8872221d717370c682cb | eb3e54a37201e708e1d4814bdc2616bc9f37ed30 | refs/heads/master | 2022-04-26T22:25:38.153543 | 2020-04-29T05:45:41 | 2020-04-29T05:45:41 | 259,482,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any non-local deployment.
SECRET_KEY = 'hg(33(x0_@bw6hajcaof@*1jbt=4-7lyex14=leu_$m^cxbmq6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"ankiwoong@gmail.com"
] | ankiwoong@gmail.com |
e34c0721d6b76766638a30f651b6c5d2ebab5f7f | de998ec16613345af3d2cac4f65bb13b5ef7ab4e | /heppy/Module.py | 7b9f341509da1841ce2ff0b3874330da62174af3 | [
"BSD-3-Clause"
] | permissive | spektre1/heppy | 67a23cd7cbd5e920fae9d8c0c6fcad2572973d18 | b597916ff80890ca057b17cdd156e90bbbd9a87a | refs/heads/master | 2020-12-08T06:00:00.790597 | 2019-12-24T16:15:59 | 2019-12-24T16:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | class Module:
    # Operation map shared by subclasses; empty in the base class.
    opmap = {}
    def __init__(self, xmlns):
        # The tag prefix for this module is its class name.
        self.name = self.__class__.__name__
        self.xmlns = xmlns
    ### RESPONSE parsing
    def parse_nothing(self, response, tag):
        """Ignore this tag entirely."""
        pass
    def parse_set(self, response, tag):
        """Store the tag's text on the response under its local (un-namespaced) name."""
        response.set(tag.tag.split('}')[1], tag.text)
    def parse_add_list(self, response, tag):
        """Append the tag's text to the response list named '<localname>s'."""
        response.put_to_list(tag.tag.split('}')[1] + 's', tag.text)
    def parse_descend(self, response, tag):
        """Recurse: have the response parse each child of this tag."""
        for child in tag:
            response.parse(child)
    def parse_status(self, response, tag):
        """Record one status entry keyed by the tag's 's' attribute."""
        response.put_to_dict('statuses', {tag.attrib['s']: tag.text})
    def parse_cd_tag(self, response, tag):
        """Parse a check-data tag: availability flag plus an optional reason child."""
        name = tag[0]
        response.put_to_dict('avails', {name.text.lower(): name.attrib['avail']})
        if len(tag) > 1:
            response.put_to_dict('reasons', {name.text.lower(): tag[1].text})
    ### REQUEST rendering
    ## Command
    def render_header(self, request, parent, command, attrs={}, text=None):
        """Add '<Name>:<command>' under parent, declaring this module's xmlns."""
        header_attrs = {'xmlns:' + self.name: self.xmlns}
        if attrs:
            header_attrs.update(attrs)
        return request.add_subtag(parent, self.name + ':' + command, header_attrs, text)
    def render_root_command(self, request, command, attrs={}):
        """Ensure the shared <command> element exists, then add the command tag to it."""
        if request.command is None:
            epp = self.render_epp(request)
            request.command = request.add_subtag(epp, 'command')
        return request.add_subtag(request.command, command, attrs)
    def render_command(self, request, command, attrs={}):
        """Add the command element plus this module's namespaced header inside it."""
        command_tag = self.render_root_command(request, command, attrs)
        return self.render_header(request, command_tag, command)
    def render_command_with_fields(self, request, command, fields, attrs={}):
        """render_command, then append the given field subtags; returns the command."""
        command = self.render_command(request, command, attrs)
        request.add_subtags(command, fields)
        return command
    ## Extension
    def render_root_extension(self, request):
        """Ensure the shared <extension> element exists under <command>."""
        if request.extension is None:
            request.extension = request.add_subtag(request.command, 'extension')
        return request.extension
    def render_extension(self, request, command, attrs={}, text=None):
        """Add this module's namespaced header under the shared <extension>."""
        root_extension = self.render_root_extension(request)
        return self.render_header(request, root_extension, command, attrs, text)
    def render_extension_with_fields(self, request, command, fields, attrs={}):
        """render_extension, then append the given field subtags."""
        extension = self.render_extension(request, command, attrs)
        request.add_subtags(extension, fields)
        return extension
    ## Common methods of modules
    def render_epp(self, request):
        """Ensure the root <epp> element exists (xmlns taken from the 'epp' module)."""
        if request.epp is None:
            request.epp = request.add_tag('epp', {'xmlns': request.get_module('epp').xmlns})
        return request.epp
    def render_clTRID(self, request, data):
        """Append the client transaction ID ('AA-00' default; 'NONE' suppresses it)."""
        clTRID = data.get('clTRID', 'AA-00')
        if clTRID != 'NONE' and request.command is not None:
            request.add_subtag(request.command, 'clTRID', text=clTRID)
    def render_check_command(self, request, data, field):
        """Render a <check> command listing every value of data['<field>s']."""
        command = self.render_command(request, 'check')
        for name in data.get(field + 's'):
            request.add_subtag(command, self.name + ':' + field, text=name)
        return command
    def render_auth_info(self, request, parent, pw='', attrs={}):
        """Add an <authInfo><pw> pair under parent."""
        auth_info = request.add_subtag(parent, self.name + ':authInfo')
        request.add_subtag(auth_info, self.name + ':pw', attrs, pw)
def render_statuses(self, request, parent, status_data):
for status, description in status_data.iteritems():
request.add_subtag(parent, self.name + ':status', {'s': status}, description)
| [
"sol@hiqdev.com"
] | sol@hiqdev.com |
4bc59fa209e9644b0e6528d1769477df0988b777 | 2101b51e43b29825ea10a8e05928b1c678ee4771 | /triplestore.py | 3c957b2cbc5f5d933e2bd4a5f324b75b4cc84eb1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | alex-ip/vocview | 34073022aa2d26e3f20f16f2a5c0c475937cadc7 | b52da9547f7b4d3d425b9f1f99fa471ba2fae483 | refs/heads/master | 2020-06-30T23:29:24.357864 | 2019-09-10T00:24:17 | 2019-09-10T00:24:17 | 200,982,974 | 0 | 0 | MIT | 2019-08-07T06:02:52 | 2019-08-07T06:02:51 | null | UTF-8 | Python | false | false | 6,097 | py | from rdflib import ConjunctiveGraph, Graph, URIRef, Literal
from rdflib.namespace import DCTERMS, XSD
from rdflib.store import NO_STORE, VALID_STORE
import yaml
from owlrl import DeductiveClosure, OWLRL_Semantics
import requests
from config import Config
import os
import pickle
from datetime import datetime, timedelta
class InvalidTriplestoreType(Exception):
    """Raised when an unsupported triplestore backend name is requested."""
    pass
class Triplestore:
    """Builds and caches the application's RDF graph in one of several backends."""
    # Marker subject used to stamp the graph with its own creation time.
    THIS_GRAPH = URIRef('http://www.this-graph.com/123456789/')
    @staticmethod
    def _create_pickle_disk():
        """Rebuild the graph from all sources, stamp creation time, pickle it to disk."""
        print('creating new pickle disk')
        g = Triplestore._create_db()
        # Add time of creation of new Graph
        g.add((Triplestore.THIS_GRAPH, DCTERMS.created, Literal(datetime.now(), datatype=XSD.dateTime)))
        with open(Config.triplestore_path_pickle, 'wb') as f:
            pickle.dump(g, f)
        return g
    @staticmethod
    def get_db(triplestore_type):
        """Return a Graph for the chosen backend: 'memory', 'pickle', or 'sleepycat'.

        Raises InvalidTriplestoreType for any other backend name.
        """
        if triplestore_type == 'memory':
            g = Triplestore._create_db()
        elif triplestore_type == 'pickle':
            # Load pickled Graph object from disk. Check the time. If time has passed specified duration, then
            # re-harvest data.
            if os.path.isfile(Config.triplestore_path_pickle):
                with open(Config.triplestore_path_pickle, 'rb') as f:
                    g = pickle.load(f)
                for date in g.objects(Triplestore.THIS_GRAPH, DCTERMS.created):
                    now = datetime.now()
                    now -= timedelta(hours=Config.store_hours, minutes=Config.store_minutes)
                    if now > date.toPython():
                        # Cached graph is older than the configured TTL.
                        g = Triplestore._create_pickle_disk()
            else:
                g = Triplestore._create_pickle_disk()
        elif triplestore_type == 'sleepycat':
            # TODO: Re-harvest like 'pickle'.
            if hasattr(Config, 'g'):
                # Config has a Graph object, reuse it and open the persistent store.
                g = Config.g
                rt = g.open(Config.triplestore_path_sleepy_cat, create=False)
            else:
                # If this is the initial load and Config does not have a Graph object in memory, then create it.
                g = ConjunctiveGraph('Sleepycat')
                rt = g.open(Config.triplestore_path_sleepy_cat, create=False)
                if rt == NO_STORE:
                    g.open(Config.triplestore_path_sleepy_cat, create=True)
                    Triplestore._add_triples(g)
                else:
                    assert rt == VALID_STORE, 'The underlying store is corrupt'
        # elif triplestore_type == 'sparql':
        #     if os.path.isfile(Config.triplestore_path_pickle):
        #         with open(Config.triplestore_path_pickle, 'rb') as f:
        #             g = pickle.load(f)
        #     else:
        #         sparql = SPARQLWrapper(Config.sparql_endpoint)
        #         sparql.setQuery("""DESCRIBE * WHERE {
        #             ?s ?p ?o .
        #         }""")
        #         sparql.setReturnFormat(N3)
        #         results = sparql.query().convert()
        #         g = Graph().parse(data=results, format='n3')
        #         with open(Config.triplestore_path_pickle, 'wb') as f:
        #             pickle.dump(g, f)
        else:
            raise InvalidTriplestoreType(
                'Expected one of: [memory, pickle, sleepycat]. Instead got {}'.format(triplestore_type))
        return g
    @staticmethod
    def _create_db():
        """Build a fresh in-memory graph populated from all configured sources."""
        g = ConjunctiveGraph()
        Triplestore._add_triples(g)
        return g
    @staticmethod
    def _add_triples(g):
        """Harvest RDF into g from download/local/RVA sources; optionally apply OWL-RL."""
        # Read in RDF from online sources to the Graph.
        with open(os.path.join(Config.APP_DIR, Config.VOCAB_SOURCES)) as f:
            vocabs = yaml.safe_load(f)
        # Online resources
        if vocabs.get('download'):
            for vocab in vocabs['download'].values():
                g.parse(vocab['source'], format=vocab['format'])
        # Local resources
        if vocabs.get('local'):
            for vocab in vocabs['local'].values():
                g.parse(os.path.join(Config.APP_DIR, 'local_vocabs', vocab['source']), format=vocab['format'])
        # SPARQL resources
        pass
        # RVA
        resource_endpoint = vocabs['rva']['resource_endpoint']
        download_endpoint = vocabs['rva']['download_endpoint']
        extension = vocabs['rva']['extension']
        format = vocabs['rva']['format']  # NOTE(review): shadows the builtin format()
        for id in vocabs['rva']['ids']:
            r = requests.get(resource_endpoint.format(id), headers={'accept': 'application/json'})
            try:
                response = r.json()
                versions = response['version']
                download_id = None
                for version in versions:
                    # Only the 'current' version's access points are considered.
                    if version['status'] == 'current':
                        access_points = version['access-point']
                        for access_point in access_points:
                            if access_point.get('ap-sesame-download'):
                                download_id = access_point['id']
                        if download_id is None:
                            # Sesame endpoing not found, go for the Turtle file
                            for access_point in access_points:
                                if access_point.get('ap-file'):
                                    g.parse(access_point['ap-file']['url'], format='turtle')
                if download_id:
                    r = requests.get(download_endpoint.format(download_id), params={'format': extension})
                    g.parse(data=r.content.decode('utf-8'), format=format)
            except Exception as e:
                raise Exception('Something wrong with the response of RVA ID {}. Error: {}'.format(id, e))
        # Expand graph using a rule-based inferencer.
        if Config.reasoner:
            DeductiveClosure(OWLRL_Semantics).expand(g)
| [
"edmond.chuc@outlook.com"
] | edmond.chuc@outlook.com |
d70d1abd8f984ed388c78932ba614bf8ebc781ff | fefb1e9b0b736da4e49d7754f8d1dbaf37f2fa6a | /.history/9_2_20210214200657.py | bb72624108ad4b81678c942e8559c2a5912115de | [] | no_license | wh-debug/python | 5a78a2227874ebc400d075197de0adab9f55d187 | 1467eeda670f170e6e2d7c0a0550f713f1ee9d75 | refs/heads/master | 2023-03-12T22:08:12.608882 | 2021-02-17T09:49:52 | 2021-02-17T09:49:52 | 334,032,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | '''
Author: Daylight
Date: 2021-02-14 20:03:42
LastEditTime: 2021-02-14 20:06:57
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \python\9_2.py
'''
'''使用类和实例'''
class car:
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
def get_descriptive_name():
long_name = f"{self.year} {self.make} {self.model}"
return long_name.title()
| [
"1813763848@qq.com"
] | 1813763848@qq.com |
513532a3efa50f08aa7e4002cf42754d84a1b2e3 | 63d290a2d314a1fb88da8ed2d7729e44ef9a2e25 | /ivi/tektronix/tektronixAWG2040.py | e96e89a2ff0d9111b061590750a44ba025b4b035 | [
"MIT"
] | permissive | tomscottGitHub/python-ivi | 09d92a80b1b73ff042e53283763dba5a06921f0d | 550abcf60289b979f938b5cdba0f760c98d399ac | refs/heads/master | 2021-01-19T19:17:11.244121 | 2013-10-20T23:46:28 | 2013-10-20T23:46:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixAWG2000 import *
class tektronixAWG2040(tektronixAWG2000):
"Tektronix AWG2040 arbitrary waveform generator driver"
def __init__(self, *args, **kwargs):
super(tektronixAWG2040, self).__init__(*args, **kwargs)
self._instrument_id = 'AWG2040'
self._output_count = 1
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
4c9237e132108a0883365844426267deb77bc4af | d36daf8818e2bb939d4e53325abf537538c63098 | /NSLib/Logger/__init__.py | ec3cfba399a1d1df35001a90994d541ccf16e5eb | [] | no_license | mbhushan/news-aggregator | 44d875c0a6c1d2d9ff08c366948b0df47a6984e8 | 539bb30cfb138504dff1f26827bbad00d590da4e | refs/heads/master | 2020-07-07T23:04:08.717262 | 2016-08-22T03:08:43 | 2016-08-22T03:08:43 | 66,235,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | '''
Created on Nov 4, 2014
@author: rsingh
'''
'''
Created on 22-Nov-2012
@author: rsingh
'''
import logging.config
import os
class Logger:
'''
Use Logger.getLogger(GazeLogger.LOGGER_WEBSITE) to get a pre-configured logger
'''
LOGGER_WEBSITE = 'website'
LOGGER_ADMIN = 'admin'
LOGGER_COMMON = 'common'
LOGGER_CONSOLE = 'console'
__LOGGER_CONFIG = 'logger.conf'
__LOG_DIRECTORY = '/var/log/nationstory/'
def __init__(self, name):
#create the needed directory if not present
if not os.path.exists(self.__LOG_DIRECTORY):
os.makedirs(self.__LOG_DIRECTORY, 0755)
#load the config file
logging.config.fileConfig(os.path.dirname(os.path.abspath(__file__)) + '/' + self.__LOGGER_CONFIG)
# create logger
self.logger = logging.getLogger(name)
@staticmethod
def getLogger(name):
gazeLogger = Logger(name)
return gazeLogger.logger
| [
"manibhushan.cs@gmail.com"
] | manibhushan.cs@gmail.com |
901a3377dd38f31145225294f44935c672ae3342 | b59fe015c45178a49f4407c22d16b6a6aab2c769 | /functions/screenrecord.py | 83714709e294f10469963a67bf6079a1fd905955 | [] | no_license | Hanlen520/AndroidPyScripts | c46585189337f19416fb6e1f37cfaca08502f499 | 4add0701aabee89babd0d3ff21d0763d58a71f15 | refs/heads/master | 2020-04-02T13:18:47.102438 | 2018-04-11T15:28:06 | 2018-04-11T15:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import string
import sys
import time
from utils import androiddebug
# 需要Android4.4及4.4以上版本,运行脚本后可录制设备上的操作,默认使用手机分辨率,时间3min。手动按Enter结束录制。
# 录制结果存放于当前目录下的video目录下
PATH = lambda p: os.path.abspath(p)
def record():
androiddebug.shell("screenrecord /data/local/tmp/video.mp4")
input_key = raw_input("Please press the Enter key to stop recording:\n")
if input_key == "":
androiddebug.adb("kill-server")
print "Get Video file..."
androiddebug.adb("start-server")
time.sleep(1.5)
path = PATH("%s/video" %os.getcwd())
if not os.path.isdir(path):
os.makedirs(path)
androiddebug.adb("pull /data/local/tmp/video.mp4 %s" % PATH("%s/%s.mp4" % (path, androiddebug.timestamp()))).wait()
if __name__ == "__main__":
sdk = string.atoi(androiddebug.shell("getprop ro.build.version.sdk").stdout.read())
if sdk < 19:
print ">>> sdk version is %s, less than 19!"
sys.exit(0)
else:
record()
print "Completed"
| [
"jayzhen_testing@163.com"
] | jayzhen_testing@163.com |
4358f1a7e741564f603e4786b91548678be81433 | 1e042794bd9ad14da7514a3b82cf699999b872c2 | /docs/conf.py | 70adc36d67a5d2e1d990225cf079f20080b8468f | [] | no_license | a18wheeler/cpp_template | 87f3f8a72912a976c2624c636a7e9f4c4641e6c6 | 83f5ca5939c6a7110419ffb5a25b4810975431d7 | refs/heads/master | 2021-01-09T05:43:04.180801 | 2017-02-03T00:54:12 | 2017-02-03T00:54:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,135 | py | # -*- coding: utf-8 -*-
#
# cppdemo documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 12 22:01:14 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cppdemo'
copyright = u'2014, Roie Black'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cppdemodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'cppdemo.tex', u'cppdemo Documentation',
u'Roie Black', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cppdemo', u'cppdemo Documentation',
[u'Roie Black'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cppdemo', u'cppdemo Documentation',
u'Roie Black', 'cppdemo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"rblack@austincc.edu"
] | rblack@austincc.edu |
8cc461af19f09bef2ec545d5ca46b5506d0f5f99 | 8ca19f1a31070738b376c0370c4bebf6b7efcb43 | /office365/teams/info.py | d805a9b8cf4added3d5c4928fb37900db1d7d72a | [
"MIT"
] | permissive | vgrem/Office365-REST-Python-Client | 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 | cbd245d1af8d69e013c469cfc2a9851f51c91417 | refs/heads/master | 2023-09-02T14:20:40.109462 | 2023-08-31T19:14:05 | 2023-08-31T19:14:05 | 51,305,798 | 1,006 | 326 | MIT | 2023-08-28T05:38:02 | 2016-02-08T15:24:51 | Python | UTF-8 | Python | false | false | 579 | py | from office365.entity import Entity
from office365.runtime.paths.resource_path import ResourcePath
class TeamInfo(Entity):
"""Represents a team with basic information."""
@property
def display_name(self):
"""
The name of the team.
:rtype: str or None
"""
return self.properties.get('displayName', None)
@property
def team(self):
from office365.teams.team import Team
return self.properties.get('team',
Team(self.context, ResourcePath("team", self.resource_path)))
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
9a2101c63cf26e5e27275a156d4338a4cf0d4ed9 | 1fbe4dcd2316499420ffd66903eb0fb5b675c316 | /responses/8G8FTA-CNP/2_reverse_digits.py | e7d8d4f8101edf4c3be9c15551f1edfb7f960261 | [] | no_license | danrasband/coding-experiment-reviews | 0f137b57bfbb76a47790f0ab08d876cb6a548091 | 3b0cb59012c284d2e7040d5e96e0d6a91265d655 | refs/heads/master | 2020-05-01T03:41:43.304696 | 2019-06-08T14:58:37 | 2019-06-08T14:58:37 | 177,251,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(X):
rem=0
rev=0
i=0
while(X):
rem = X%10
rev= (rev*10) + rem
X=X//10
i+=1
print (rev)
solution(-50) | [
"danrasband@gmail.com"
] | danrasband@gmail.com |
cfa5b7820cdf5964235047172c54a2e4d804928f | 2d5a588e8c9ee41b980d1f0e15914e33a40cff08 | /FullConvNet/train_full_cnn_simple_dataset.py | 600356f990e2799c8e6e1f194b65d1e49df67a5d | [] | no_license | bendavidsteel/final-year-project | 14ee835bf7a9fa2aabd033e21101908dbed0a407 | a7293152e14ea40dcaff2334b79969d5fe72a378 | refs/heads/master | 2020-04-01T19:01:04.447186 | 2019-04-27T16:24:03 | 2019-04-27T16:24:03 | 153,528,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | '''
Description: Script to train the network and measure its performance on the test set.
Author: Alejandro Escontrela
Version: V.1.
Date: June 12th, 2018
Altered by: Ben Steel
Date: 15/02/19
'''
from CNN.full_network_batch_simple_dataset import *
from CNN.utils import *
from tqdm import tqdm
import argparse
import matplotlib.pyplot as plt
import pickle
if __name__ == '__main__':
save_path = 'adamGD_SoftmaxCross_2overpiGamma_FullNet128_SimpleDigits_NLquartiledata'
gamma = 2/(np.pi)
cost = train(gamma = gamma, save_path = save_path)
params, cost, layer_q5, layer_q25, layer_q50, layer_q75, layer_q95, final_layer = pickle.load(open(save_path, 'rb'))
[f1, f2, w3, w4, w5] = params
# Plot cost
plt.plot(cost, 'r')
plt.xlabel('# Iterations')
plt.ylabel('Cost')
plt.legend('Loss', loc='upper right')
plt.show()
# Get test data
X, y_dash = generateDataset()
# Normalize the data
X -= np.mean(X) # subtract mean
X /= np.std(X) # divide by standard deviation
test_data = np.hstack((X,y_dash))
X = test_data[:,0:-1]
X = X.reshape(len(test_data), 1, 8, 8)
y = test_data[:,-1]
corr = 0
digit_count = [0 for i in range(10)]
digit_correct = [0 for i in range(10)]
print()
print("Computing accuracy over test set:")
t = tqdm(range(len(X)), leave=True)
num_filt1 = num_filt2 = 5
conv_s = 1
params = [f1, f2, w3, w4, w5]
config = [num_filt1, num_filt2, conv_s, gamma]
for i in t:
x = X[i]
pred, prob = predict(x, y, params, config)
digit_count[int(y[i])]+=1
if pred==y[i]:
corr+=1
digit_correct[pred]+=1
t.set_description("Acc:%0.2f%%" % (float(corr/(i+1))*100))
print("Overall Accuracy: %.2f" % (float(corr/len(test_data)*100)))
x = np.arange(10)
digit_recall = [x/y for x,y in zip(digit_correct, digit_count)]
plt.xlabel('Digits')
plt.ylabel('Recall')
plt.title("Recall on Test Set")
plt.bar(x,digit_recall)
plt.show() | [
"bendavidsteel@gmail.com"
] | bendavidsteel@gmail.com |
4bc68a5a602a87a0b6b7b1526a819dee31589313 | f040315ff9479feb343a6b8d907330d6764b5ae6 | /make_graph.py | 5619b4deac5b4ea31f3e05899c016244e8ba2139 | [
"MIT"
] | permissive | loganrane/quant-predictions-crypto | 45560cab1b46c0e35fd8f4768d9802179ad2f2b7 | 11b9a575cdd6b8eb105a826b71db5cca6d3a0a18 | refs/heads/main | 2023-07-02T02:42:00.247996 | 2021-07-30T03:18:46 | 2021-07-30T03:18:46 | 389,632,937 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,979 | py | import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from feature_extraction import extractAll
def candleStickGraph(ohlc_data):
"""Generate the candle stick graph for one month data
Args:
ohlc_data (pandas dataframe object): open-high-low-close data
Returns:
plotly figure object: candlestick data
"""
fig = go.Figure(go.Ohlc(x=ohlc_data['time'],
open=ohlc_data['open'],
high=ohlc_data['high'],
low=ohlc_data['low'],
close=ohlc_data['close'],
name='Price'))
fig.update(layout_xaxis_rangeslider_visible=False)
fig.update_layout(margin=dict(l=10, r=10, t=50, b=10),
width=900, height=300,
title='Candlestick chart for last one month')
return fig
def indicatorsGraph(prices_data):
"""Generate the indicators graph for one month
Args:
prices_data (pandas dataframe object): prices data
Returns:
plotly figure object: technical indicators"""
times = prices_data['time']
indicators = extractAll(prices_data)
indicators['time'] = times
indicators = indicators.iloc[-(24 * 30):, :]
fig = go.Figure()
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['EMA_9'], name='EMA 9'))
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['SMA_5'], name='SMA 5'))
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['SMA_10'], name='SMA 10'))
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['SMA_15'], name='SMA 15'))
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['SMA_30'], name='SMA 30'))
fig.add_trace(go.Scatter(
x=indicators['time'], y=indicators['prices'], name='prices', opacity=0.2))
fig.update(layout_xaxis_rangeslider_visible=False)
fig.update_layout(margin=dict(l=10, r=10, t=50, b=10),
width=1010, height=300,
title='Price data with technical indicators for 30 days')
return fig
def predictionGraph(prices_data, predicted_data):
"""Plot graph for predicted data
Args:
prices_data (pandas dataframe object): prices data
predicted_data (pandas dataframe object): predicted prices
Returns:
plotly figure object: graph"""
prices_data = prices_data.iloc[-(24*30):, :]
fig = go.Figure()
fig.add_trace(go.Scatter(
x=prices_data['time'], y=prices_data['prices'], name='Real', marker_color='LightSkyBlue'))
fig.add_trace(go.Scatter(
x=predicted_data['time'], y=predicted_data['prices'], name='Predicted', marker_color='MediumPurple'))
fig.update_layout(margin=dict(l=10, r=10, t=50, b=10),
width=1010, height=300,
title='Prediction of future prices')
return fig
| [
"arpitfalcon1@gmail.com"
] | arpitfalcon1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.