blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0405d240f84c528173e565e91cefbc577b86971f | f166c56b51fbf494df0eb46c23a3f5be0f94c555 | /libpyclingo/clingo/theory_atoms.py | 7d4f0bc4fcb30319b3470a312c620d24e2e9dbf8 | [
"MIT"
] | permissive | vishalbelsare/clingo | 86d91a16e41e580612067ef2f7b0f28f94a90c30 | e0c91d8f95cc28de1c480a871f9c97c30de83d40 | refs/heads/master | 2023-08-25T06:35:25.661440 | 2022-01-29T10:38:08 | 2022-01-29T10:38:08 | 165,138,167 | 0 | 0 | MIT | 2022-03-14T01:13:23 | 2019-01-10T22:09:12 | C++ | UTF-8 | Python | false | false | 7,053 | py | '''
Functions and classes to work with theory atoms.
Examples
--------
>>> from clingo.control import Control
>>>
>>> ctl = Control()
>>> ctl.add('base', [], """\\
... #theory example {
... t { };
... &a/0 : t, head
... }.
... {c}.
... &a { t: c }.
... """)
>>> ctl.ground([('base', [])])
>>> atm = next(ctl.theory_atoms)
>>> print(atm)
&a{t: c}
>>> elm = atm.elements[0]
>>> print(elm)
t: c
'''
from typing import List, Optional, Tuple
from enum import Enum
from functools import total_ordering
from ._internal import _c_call, _c_call2, _lib, _str, _to_str
__all__ = [ 'TheoryAtom', 'TheoryElement', 'TheoryTerm', 'TheoryTermType' ]
class TheoryTermType(Enum):
    '''
    Enumeration of theory term types.
    '''
    Function = _lib.clingo_theory_term_type_function
    '''
    For function theory terms.
    '''
    List = _lib.clingo_theory_term_type_list
    '''
    For list theory terms.
    '''
    Number = _lib.clingo_theory_term_type_number
    '''
    For numeric theory terms.
    '''
    Set = _lib.clingo_theory_term_type_set
    '''
    For set theory terms.
    '''
    Symbol = _lib.clingo_theory_term_type_symbol
    '''
    For symbolic theory terms (symbol here means the term is a string).
    '''
    Tuple = _lib.clingo_theory_term_type_tuple
    '''
    For tuple theory terms.
    '''
@total_ordering
class TheoryTerm:
    '''
    `TheoryTerm` objects represent theory terms.

    Theory terms have a readable string representation, implement Python's rich
    comparison operators, and can be used as dictionary keys.

    Hashing and ordering are based on the term's internal id, so they are only
    meaningful for terms stemming from the same underlying representation.
    '''
    def __init__(self, rep, idx):
        # rep is the low-level theory-atoms handle, idx the term's id in it.
        self._rep = rep
        self._idx = idx

    def __hash__(self):
        return self._idx

    def __eq__(self, other):
        # Return NotImplemented for unrelated types (instead of raising
        # AttributeError) so comparisons fall back to Python's defaults.
        if not isinstance(other, TheoryTerm):
            return NotImplemented
        return self._idx == other._idx

    def __lt__(self, other):
        if not isinstance(other, TheoryTerm):
            return NotImplemented
        return self._idx < other._idx

    def __str__(self):
        return _str(_lib.clingo_theory_atoms_term_to_string_size,
                    _lib.clingo_theory_atoms_term_to_string,
                    self._rep, self._idx)

    def __repr__(self):
        return f'TheoryTerm({self._rep!r})'

    @property
    def arguments(self) -> List['TheoryTerm']:
        '''
        The arguments of the term (for functions, tuples, list, and sets).
        '''
        args, size = _c_call2('clingo_id_t*', 'size_t', _lib.clingo_theory_atoms_term_arguments, self._rep, self._idx)
        return [TheoryTerm(self._rep, args[i]) for i in range(size)]

    @property
    def name(self) -> str:
        '''
        The name of the term (for symbols and functions).
        '''
        return _to_str(_c_call('char*', _lib.clingo_theory_atoms_term_name, self._rep, self._idx))

    @property
    def number(self) -> int:
        '''
        The numeric representation of the term (for numbers).
        '''
        return _c_call('int', _lib.clingo_theory_atoms_term_number, self._rep, self._idx)

    @property
    def type(self) -> 'TheoryTermType':
        # Quoted forward reference, consistent with `arguments` above.
        '''
        The type of the theory term.
        '''
        type_ = _c_call('clingo_theory_term_type_t', _lib.clingo_theory_atoms_term_type, self._rep, self._idx)
        return TheoryTermType(type_)
@total_ordering
class TheoryElement:
    '''
    Class to represent theory elements.

    Theory elements have a readable string representation, implement Python's rich
    comparison operators, and can be used as dictionary keys.

    Hashing and ordering are based on the element's internal id, so they are
    only meaningful for elements stemming from the same underlying
    representation.
    '''
    def __init__(self, rep, idx):
        # rep is the low-level theory-atoms handle, idx the element's id.
        self._rep = rep
        self._idx = idx

    def __hash__(self):
        return self._idx

    def __eq__(self, other):
        # Return NotImplemented for unrelated types (instead of raising
        # AttributeError) so comparisons fall back to Python's defaults.
        if not isinstance(other, TheoryElement):
            return NotImplemented
        return self._idx == other._idx

    def __lt__(self, other):
        if not isinstance(other, TheoryElement):
            return NotImplemented
        return self._idx < other._idx

    def __str__(self):
        return _str(_lib.clingo_theory_atoms_element_to_string_size,
                    _lib.clingo_theory_atoms_element_to_string,
                    self._rep, self._idx)

    def __repr__(self):
        return f'TheoryElement({self._rep!r})'

    @property
    def condition(self) -> List[int]:
        '''
        The condition of the element in form of a list of program literals.
        '''
        cond, size = _c_call2('clingo_literal_t*', 'size_t', _lib.clingo_theory_atoms_element_condition,
                              self._rep, self._idx)
        return [cond[i] for i in range(size)]

    @property
    def condition_id(self) -> int:
        '''
        Each condition has an id, which is a temporary program literal. This id
        can be passed to `clingo.propagator.PropagateInit.solver_literal` to
        obtain a corresponding solver literal.
        '''
        return _c_call('clingo_literal_t', _lib.clingo_theory_atoms_element_condition_id, self._rep, self._idx)

    @property
    def terms(self) -> List['TheoryTerm']:
        # Quoted forward reference so the annotation does not require
        # `TheoryTerm` to be defined at evaluation time (consistent with
        # `TheoryTerm.arguments`).
        '''
        The tuple of the element.
        '''
        terms, size = _c_call2('clingo_id_t*', 'size_t', _lib.clingo_theory_atoms_element_tuple, self._rep, self._idx)
        return [TheoryTerm(self._rep, terms[i]) for i in range(size)]
@total_ordering
class TheoryAtom:
    '''
    Class to represent theory atoms.

    Theory atoms have a readable string representation, implement Python's rich
    comparison operators, and can be used as dictionary keys.

    Hashing and ordering are based on the atom's internal id, so they are only
    meaningful for atoms stemming from the same underlying representation.
    '''
    def __init__(self, rep, idx):
        # rep is the low-level theory-atoms handle, idx the atom's id.
        self._rep = rep
        self._idx = idx

    def __hash__(self):
        return self._idx

    def __eq__(self, other):
        # Return NotImplemented for unrelated types (instead of raising
        # AttributeError) so comparisons fall back to Python's defaults.
        if not isinstance(other, TheoryAtom):
            return NotImplemented
        return self._idx == other._idx

    def __lt__(self, other):
        if not isinstance(other, TheoryAtom):
            return NotImplemented
        return self._idx < other._idx

    def __str__(self):
        return _str(_lib.clingo_theory_atoms_atom_to_string_size,
                    _lib.clingo_theory_atoms_atom_to_string,
                    self._rep, self._idx)

    def __repr__(self):
        return f'TheoryAtom({self._rep!r})'

    @property
    def elements(self) -> List['TheoryElement']:
        # Quoted forward references throughout, consistent with
        # `TheoryTerm.arguments`.
        '''
        The elements of the atom.
        '''
        elems, size = _c_call2('clingo_id_t*', 'size_t', _lib.clingo_theory_atoms_atom_elements, self._rep, self._idx)
        return [TheoryElement(self._rep, elems[i]) for i in range(size)]

    @property
    def guard(self) -> Optional[Tuple[str, 'TheoryTerm']]:
        '''
        The guard of the atom or None if the atom has no guard.
        '''
        if not _c_call('bool', _lib.clingo_theory_atoms_atom_has_guard, self._rep, self._idx):
            return None

        conn, term = _c_call2('char*', 'clingo_id_t', _lib.clingo_theory_atoms_atom_guard, self._rep, self._idx)
        return (_to_str(conn), TheoryTerm(self._rep, term))

    @property
    def literal(self) -> int:
        '''
        The program literal associated with the atom.
        '''
        return _c_call('clingo_literal_t', _lib.clingo_theory_atoms_atom_literal, self._rep, self._idx)

    @property
    def term(self) -> 'TheoryTerm':
        '''
        The term of the atom.
        '''
        term = _c_call('clingo_id_t', _lib.clingo_theory_atoms_atom_term, self._rep, self._idx)
        return TheoryTerm(self._rep, term)
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
a22bdda7a5301f2d8302d95c2778cd23dd7c3afe | ff9dcc6a63480378f43a5ce2121865373bc88f23 | /2017/Comptes-Rendus/TP1/SAVADOGO_DIACK/TP1 SAVADOGO- DIACK/tp1.py | dce307ef619e1d3cff6b1a25ca540924db2823ed | [] | no_license | jpcp13/L2 | d1cd5bb6cc5679f4971ca51ef3e8df797aa4ca1a | e5b2aaac3b4772222a1f3fd5c01582d4079ec6cc | refs/heads/master | 2018-12-08T01:18:31.656209 | 2018-10-25T05:01:26 | 2018-10-25T05:01:26 | 105,018,512 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | def f(x):
return x**2 - x - 1
def df(x):
    """Derivative of f: f'(x) = 2*x - 1 (used by Newton's method)."""
    return 2*x - 1
def g(x):
    """Fixed-point form of the equation: g(x) = 1 + 1/x (undefined at x = 0)."""
    return 1 + 1/x
def point_fixe(g, x0, epsi):
    """Fixed-point iteration: repeat x <- g(x) starting from x0 until two
    successive iterates differ by at most epsi.

    Prints every iterate; returns (last iterate, number of iterations).
    """
    nbiter, x = 0, x0
    delta = epsi * 2
    while delta > epsi:
        nbiter += 1
        suivant = g(x)
        delta = abs(suivant - x)
        x = suivant
        print(x)
    return x, nbiter
def newton(f, df, x0, epsi):
    """Newton-Raphson iteration: x <- x - f(x)/df(x) starting from x0,
    stopping when two successive iterates differ by at most epsi.

    This is point_fixe with g(x) = x - f(x)/df(x).  Prints every iterate;
    returns (approximate root, number of iterations).
    """
    approx = x0
    nbiter = 0
    delta = epsi + epsi
    while delta > epsi:
        nbiter += 1
        mieux = approx - f(approx) / df(approx)
        delta = abs(mieux - approx)
        approx = mieux
        print(approx)
    return approx, nbiter
###### Main program
# 1-b) Plot of f(x) over [-1, 2] sampled every 0.1
x, dx = -1.0, 0.1
X, Y, Points = [], [], []
while x <= 2:
    y = f(x)
    point = (x, y)
    X.append(x)
    Y.append(y)
    Points.append(point)
    x += dx
import matplotlib.pyplot as plt
plt.plot(X, Y, 'y') ## We save the figure
plt.grid()
plt.show()
#plt.savefig('figure_1.png')
# The first 25 terms of the sequence, plus a cobweb-style plot of g
import numpy as np
t = np.arange(1.5, 2.0, 0.01)
plt.plot(t, g(t), 'r-', t, t, 'b')
plt.grid('on')
plt.axis('equal')
plt.show()
print("Les 25 premiers termes de la suite")
res=1.0
stock=[]
i=0
print(res)
for i in range (25):
    stock.append(res)
    res=(1+1/res)
    print(res)
print("\n")
# Fixed-point method
print("1er test point fixe:")
# test 1: fixed point, starting from x0 = 1.0
x0 = 1.0
epsi = 1e-15
r, nbiter = point_fixe(g,x0,epsi)
print ('r = {0} et nbiter = {1}'.format(r, nbiter))
print ('g(r)={0}'.format(g(r)))
print("\n")
print("2eme test point fixe:")
# test 2: fixed point, starting from x0 = -0.6
x0 = -0.6
epsi = 1e-15
r2, nbiter = point_fixe(g, x0, epsi)
print ('r2 = {0} et nbiter = {1}'.format(r2, nbiter))
print("\n")
# Newton's method
# test 1: Newton's method, starting from x0 = 1.0
print('test1 methode de newton:')
x0 = 1.0
epsi = 1e-12
t, nbiter = newton(f, df, x0, epsi)
print ('t = {0} et nbiter = {1}'.format(t, nbiter))
print ('g(t)={0}'.format(g(t)))
print("\n")
# test 2: Newton's method, starting from x0 = -1.0
print('test2 methode de newton:')
x0 = -1.0
epsi = 1e-12
t2, nbiter = newton(f, df, x0, epsi)
print ('t2 = {0} et nbiter = {1}'.format(t2, nbiter))
print ('g(t2)={0}'.format(g(t2)))
#Methode de la dichotomie
def dichotomie(f, a, b, epsi):
    """Bisection: locate a root of f in [a, b] by repeated halving.

    Assumes f changes sign on [a, b].  Returns (midpoint, number of
    iterations) once the bracket is narrower than epsi.
    """
    nbiter = 0
    # Initialise m so a value is returned even when b - a <= epsi already
    # holds on entry (the original raised UnboundLocalError in that case).
    m = (a + b) / 2
    while b - a > epsi:
        nbiter += 1
        m = (a + b) / 2
        # Keep the half-interval on which f changes sign.
        if f(a) * f(m) > 0:
            a = m
        else:
            b = m
    return m, nbiter
print("\n")
# test 1: bisection on [1.5, 2.0] (bracket of the positive root)
print("dichotomie test 1:")
a = 1.5
b = 2.0
epsi = 1e-12
t, nbiter = dichotomie(f, a, b, epsi)
print ('t1 = {0} et nbiter = {1}'.format(t, nbiter))
print("\n")
# test 2: bisection on [-1.0, 0.0] (bracket of the negative root)
print("dichotomie test 2:")
a = -1.0
b = 0.0
epsi = 1e-12
t, nbiter = dichotomie(f, a, b, epsi)
print ('t2 = {0} nbiter = {1}'.format(t, nbiter))
## SAVADOGO Hamed
## DIACK Aliou
| [
"jpcp13@gmail.com"
] | jpcp13@gmail.com |
1ed8d4703442d5c5bc5030797efcd92cf55f2648 | e7f4c2cfa0ebcb3c3fac2e269739bbf703ea9a06 | /tests/unit-tests/test_confluence_metadata.py | 6f1a5f27bb353c5d842dff7d9035d237fd53d196 | [
"BSD-2-Clause"
] | permissive | Embodimentgeniuslm3/confluencebuilder | e81441375e1b1d519dfdb38b7cc23c456124791e | a9390240dca1cb7e3ff011a3417fd170477703b1 | refs/heads/master | 2023-08-25T06:39:29.703644 | 2021-10-24T02:50:35 | 2021-10-24T02:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2020-2021 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib import prepare_conf
from tests.lib import prepare_sphinx
from tests.lib import prepare_sphinx_filenames
import os
import unittest
class TestConfluenceMetadata(unittest.TestCase):
    """Tests for the confluence_metadata directive."""

    @classmethod
    def setUpClass(cls):
        # ``cls`` (not ``self``): setUpClass receives the class object, and
        # the fixtures below are class-level state shared by all tests.
        cls.config = prepare_conf()

        test_dir = os.path.dirname(os.path.realpath(__file__))
        cls.dataset = os.path.join(test_dir, 'datasets', 'common')
        cls.filenames = prepare_sphinx_filenames(cls.dataset,
            [
                'metadata',
            ],
            configs=[cls.config])

    def test_confluence_metadata_directive_expected(self):
        # the confluence builder should collect the labels declared in the
        # 'metadata' document
        with prepare_sphinx(self.dataset, config=self.config) as app:
            app.build(filenames=self.filenames)

            builder_metadata = app.builder.metadata
            self.assertTrue(builder_metadata)
            self.assertTrue('metadata' in builder_metadata)

            doc_labels = builder_metadata['metadata']
            self.assertTrue(doc_labels)
            self.assertTrue('labels' in doc_labels)

            labels = doc_labels['labels']
            self.assertEqual(len(labels), 2)
            self.assertTrue('tag-a' in labels)
            self.assertTrue('tag-c' in labels)

    def test_confluence_metadata_directive_ignore(self):
        # other builders (here: html) should ignore the directive
        opts = {
            'builder': 'html',
            'config': self.config,
            'relax': True,
        }

        with prepare_sphinx(self.dataset, **opts) as app:
            # build attempt should not throw an exception/error
            app.build(filenames=self.filenames)
"james.d.knight@live.com"
] | james.d.knight@live.com |
a4900cf97e06644f9b1079c0d8d34267b54ad5ef | e57d7785276053332c633b57f6925c90ad660580 | /sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_configuration.py | b682db120b9a301c2c0411fb1a0542fd5f1ff1a8 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 2,960 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline import policies
from .._version import VERSION
class QuestionAnsweringClientConfiguration(Configuration):
    """Configuration for QuestionAnsweringClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.AzureKeyCredential
    :param endpoint: Supported Cognitive Services endpoint (e.g., https://:code:`<resource-name>`.api.cognitiveservices.azure.com).
    :type endpoint: str
    """

    def __init__(self, credential: AzureKeyCredential, endpoint: str, **kwargs: Any) -> None:
        # Fail fast on missing required arguments before touching the base
        # configuration.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        super(QuestionAnsweringClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.endpoint = endpoint
        self.api_version = "2021-05-01-preview"
        kwargs.setdefault("sdk_moniker", "ai-language-questionanswering/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        """Build the pipeline policies, honoring caller-supplied overrides in *kwargs*."""
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Default to key-based auth when a credential was supplied but no
        # explicit authentication policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AzureKeyCredentialPolicy(
                self.credential, "Ocp-Apim-Subscription-Key", **kwargs
            )
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
42978d300d56543c398e6469c90adbfa4d47ff5a | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /test/test_concern_note_item.py | 3ae83d0718e0ff47eb10f831512cec5acdaef178 | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.concern_note_item import ConcernNoteItem # noqa: E501
from starrez_client.rest import ApiException
class TestConcernNoteItem(unittest.TestCase):
    """ConcernNoteItem unit test stubs"""

    def setUp(self):
        # no fixtures are required for these generated stubs
        pass

    def tearDown(self):
        pass

    def testConcernNoteItem(self):
        """Test ConcernNoteItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = starrez_client.models.concern_note_item.ConcernNoteItem() # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"fedorareis@gmail.com"
] | fedorareis@gmail.com |
da347efc1930fc80f8c88dd1a13954deab7eb7d5 | 24a1da610a57d9558a7261ed9ca92b20d8689634 | /June/47Day/Minimize_Maximum_Pair_Sum_in_Array_1184ms_28.1mb.py | c6b4f7b84036880c38b3df894c4b1ef2e971f389 | [] | no_license | JadeKim042386/LeetCode | b5b70a8100a19d705150f276ee8e0dc11c5038b2 | 77234a14dc97bd0e023842cd57698b37d1460511 | refs/heads/master | 2023-06-09T20:16:36.352246 | 2021-07-06T09:25:15 | 2021-07-06T09:25:15 | 349,680,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Solution:
def minPairSum(self, nums: List[int]) -> int:
arr = sorted(nums)
left, right = 0, len(nums) - 1
answer = 0
while left < right:
answer = max(answer, arr[left] + arr[right])
left += 1
right -= 1
return answer | [
"jk042386@gmail.com"
] | jk042386@gmail.com |
81573f4ec7c06d15ffa891286bf87762b14b1195 | 3af363719a8084b855077acd0bf6a0efc9e6762b | /backend/config/urls.py | 3ded0e7452ab67729732e3f886522a3f4633f919 | [] | no_license | salmanAndroidDev/social-network | 09dc4372985bb8b4219a239e39339a2aaccd4320 | 74ca8f1f9668c91c15e7acaa19dad3ed29e139ab | refs/heads/main | 2023-08-15T19:14:25.675409 | 2021-10-22T06:20:17 | 2021-10-22T06:20:17 | 419,827,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Root URL configuration: admin site plus the two project apps.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('account/', include('accounts.urls')),
    path('social-hub/', include('social_hub.urls')),
]
# Serve user-uploaded media through Django itself only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| [
"salmanAndB@outlook.com"
] | salmanAndB@outlook.com |
13571023a12659a5938638520f75131cd1938acb | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS_SHOffer/YCHF_KCBYCHF_OMS_SHOffer_022.py | 51389f6b199d9455f00d1d325ba682212d48fed0 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,541 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_SHOffer_022(xtp_test_case):
    # Scenario: restart the Shanghai offer gateway first, then restart OMS
    # (SH A-share buy order, best-5-levels immediate-or-cancel, fully
    # cancelled).  All dictionary keys below are Chinese runtime strings
    # used by the test framework and must stay as-is.
    def setUp(self):
        # Environment preparation is currently disabled; when enabled, the
        # commented calls below transfer funds and restart all services.
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_SHOffer_022')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass

    #
    def test_YCHF_KCBYCHF_OMS_SHOffer_022(self):
        title = '先重启上海报盘再重启OMS(沪A最优五档即成转撤单全撤买入)'
        # Define the expected values for this test case.
        # Expected status ('期望状态') is one of: initial, unfilled,
        # partially filled, fully filled, partial-cancel reported, partially
        # cancelled, reported-pending-cancel, cancelled, rejected,
        # cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '已撤',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Define the order (entrust) parameters ------------------------------------------
        # Arguments: ticker, market, security type, security status,
        # trading status, side (B = buy, S = sell), expected status, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore the available funds (currently disabled).
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
        #oms_restart()
        self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
10d13a48f0545019c2703815d3a0d486f92e71e1 | d7e0b198c216fc877ec94c4279d837bfbc6bccfc | /tree/Yu/501.py | 65ba4a06cf23c9e665c8fc8565b553cb9c9cde2e | [
"MIT"
] | permissive | choiking/LeetCode | dcdb467e25ad6455156a9e2620dd98fabdf9c28b | 08a7ad6af2449e4268fce86823cbf667bbed2ae8 | refs/heads/master | 2021-07-11T15:46:01.841530 | 2017-10-12T23:34:45 | 2017-10-12T23:34:45 | 107,908,853 | 1 | 0 | null | 2017-10-22T22:48:30 | 2017-10-22T22:48:30 | null | UTF-8 | Python | false | false | 1,059 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Yu Zhou
# 501. Find Mode in Binary Search Tree
# ****************
# Descrption:
# Given a binary search tree (BST) with duplicates, find all the mode(s)
# (the most frequently occurred element) in the given BST.
# ****************
class Solution(object):
    def findMode(self, root):
        """Return the most frequent value(s) in a BST with duplicates.

        :type root: TreeNode
        :rtype: List[int]

        Counts every value with a pre-order traversal, then returns all
        values whose count equals the maximum.  Result order is
        first-visit (dict insertion) order, as in the original.
        """
        # Empty tree: nothing to count.
        if not root:
            return []

        # value -> occurrence count.  Named ``counts`` rather than the
        # original ``hash``, which shadowed the ``hash`` builtin.
        counts = {}

        def _tally(node):
            # Pre-order walk accumulating occurrence counts.
            if not node:
                return
            counts[node.val] = counts.get(node.val, 0) + 1
            _tally(node.left)
            _tally(node.right)

        _tally(root)
        max_count = max(counts.values())
        return [value for value, count in counts.items() if count == max_count]
| [
"junior147147@yahoo.com"
] | junior147147@yahoo.com |
b9b586b21db02c671897a5893716ba96fcfe3712 | 5c280ec1f6f4160c53435d35fec4b46cff1c1485 | /web/forms.py | 2ef70d408d133ed056c297b3482e40568fe7d688 | [
"Apache-2.0"
] | permissive | cheekybastard/namebot | f255f34a10fc5d0ac7e504a432f669190fcbf846 | ea17b31e28461b5e54409f549cb9e1315ab8072a | refs/heads/master | 2020-07-10T08:16:57.038301 | 2014-01-04T00:14:54 | 2014-01-04T00:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from wtforms import Form, BooleanField, TextField, PasswordField, validators
class NameGeneratorForm(Form):
    """WTForms form with five free-text inputs, each 4-25 characters long."""
    field1 = TextField('Field 1', [validators.Length(min=4, max=25)])
    field2 = TextField('Field 2', [validators.Length(min=4, max=25)])
    field3 = TextField('Field 3', [validators.Length(min=4, max=25)])
    field4 = TextField('Field 4', [validators.Length(min=4, max=25)])
    field5 = TextField('Field 5', [validators.Length(min=4, max=25)])
| [
"dxdstudio@gmail.com"
] | dxdstudio@gmail.com |
a3a539ffb8cc47ba73a9ebaafc40f8dafc87b37c | 9e1ca5fe3f86b28d945ea3c06ea7715fc0946f1f | /add_derived_columns.py | 52aa033f3d2241e85302d7c13e92e4d8a141220c | [] | no_license | colinmorris/crawl-coroner | e2962447c697cffebcaa0bc4b8530b1ba30b16b0 | 026f9941ed5ae6df108cd1223c1e65e2b0b18f08 | refs/heads/master | 2021-01-11T18:42:00.951101 | 2017-02-11T18:07:33 | 2017-02-11T18:07:33 | 79,603,714 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import pandas as pd
# HDF5 store holding the scraped game data (expects a 'games' table).
STORE_FNAME = 'morgue.h5'
# This is probably expensive. But then it only really needs to be run once.
if __name__ == '__main__':
    store = pd.HDFStore(STORE_FNAME)
    # other derived columns of interest:
    # - player experience/skill
    games = store['games']
    # Original species: doing any kind of naive analysis across species tends to
    # result in weird outcomes for draconians, because the population of baby
    # draconians is very different from the population of coloured draconians
    # (who must have made it to at least level 7). This column is the same
    # as species, except that all draconian species are merged into one.
    species = games['species'].cat.categories
    drac_species = set(sp for sp in species if 'draconian' in sp)
    def get_original_species(sp):
        """Collapse every coloured draconian species back to plain 'draconian'."""
        return 'draconian' if sp in drac_species else sp
    games['orig_species'] = games['species'].map(get_original_species)
    # "legit-ness". There are some 'junk' games that will tend to just pollute
    # the results of a lot of typical analyses, namely:
    # - games played by bots
    # - games quit at level 1
    # There's an argument to be made for making the latter condition more extreme
    # and just excluding all quit games.
    # Though actually, maybe that's a bad argument. Because I think some
    # players will quit when they get into a clearly hopeless situation
    # as some kind of face-saving gesture.
    # Excluding level 1 quits was based on the empirical observation that
    # there are a shit-ton of quits at level 1 (which probably don't
    # meaningfully reflect how the game would have gone if the player
    # had actually tried to win). Would be good to sniff around the
    # data some time to see if there are any patterns in these lvl
    # 1 quitters.
    games['legit'] = (~games['bot'] &
        ~( (games['level'] == 1) & (games['howdied'] == 'quit'))
    )
    # data_columns=['legit'] lets later HDF5 queries filter on it directly.
    store.put('games', games, format='table', data_columns=['legit'])
| [
"colin.morris2@gmail.com"
] | colin.morris2@gmail.com |
6efa223635fb295398bf192b057721cde7c8084f | 364ebf23f0a6fba0c9e9fd5dfce6f657dc9bad32 | /estelcon_web/urls.py | 5aade7198cc85801026bde553332e1d4470e5fad | [] | no_license | hirunatan/estelcon_web | bed55f8b82ca268d314c761923b50a4bf9857876 | 13e69c90fd6fa2f4b1efab259d94788f3b6ce00e | refs/heads/master | 2021-01-20T11:30:33.367564 | 2018-11-06T20:07:49 | 2018-11-06T20:07:49 | 23,476,370 | 1 | 2 | null | 2015-10-02T20:50:35 | 2014-08-29T20:08:42 | HTML | UTF-8 | Python | false | false | 1,113 | py | from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
# Populate the admin site with the models registered across installed apps.
admin.autodiscover()

from user_profiles import urls as user_profiles_urls
from activities import urls as activities_urls

# Non-localized URLs: only the sitemap.
urlpatterns = [
    url(r'^sitemap\.xml$', sitemap,
        {'sitemaps': {'cmspages': CMSSitemap}}),
]

# Language-prefixed URLs; django CMS is included last so its patterns are
# only reached after the app URLs.
urlpatterns += i18n_patterns(
    url(r'^admin/', include(admin.site.urls)), # NOQA
    url(r'^', include(user_profiles_urls)),
    url(r'^', include(activities_urls)),
    url(r'^', include('cms.urls')),
)

# This is only needed when using runserver.
if settings.DEBUG:
    urlpatterns = [
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    ] + staticfiles_urlpatterns() + urlpatterns
| [
"andres.moya@kaleidos.net"
] | andres.moya@kaleidos.net |
169ad0e7002fd35871d0ea40d975ce59b2119dab | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/examples/NerveNet/environments/asset_generator.py | 2f0bdc000944dee0fe98136097ccebe41b778caf | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 1,939 | py | # -----------------------------------------------------------------------------
# @brief:
# generate the xml files for each different sub-tasks of one master task
# @author:
# Tingwu Wang, Aug. 30th, 2017
# -----------------------------------------------------------------------------
import argparse
import environments.init_path as init_path
import os
import num2words
import environments.centipede_generator as centipede_generator
import environments.snake_generator as snake_generator
import environments.reacher_generator as reacher_generator
# Size variants (number of legs/segments) to generate for each environment
# family.
TASK_DICT = {
    'Centipede': [3, 5, 7] + [4, 6, 8, 10, 12, 14] + [20, 30, 40, 50],
    # 'CpCentipede': [3, 5, 7] + [4, 6, 8, 10, 12, 14], # this doesn't exist anymore
    'Reacher': [0, 1, 2, 3, 4, 5, 6, 7],
    'Snake': [3, 4, 5, 6, 7, 8, 9] + [10, 20, 40],
}
# Directory the generated XML asset files are written into.
OUTPUT_BASE_DIR = os.path.join(init_path.get_abs_base_dir(),
                               'environments', 'assets')
def save_xml_files(model_names, xml_number, xml_contents):
    '''Write the generated XML for one task size into OUTPUT_BASE_DIR.

    The file name is the model name followed by the capitalized English
    word for ``xml_number`` (e.g. 'Centipede' and 3 -> 'CentipedeThree.xml').
    '''
    # get the xml path ready
    number_str = num2words.num2words(xml_number)
    xml_names = model_names + number_str[0].upper() + number_str[1:] + '.xml'
    xml_file_path = os.path.join(OUTPUT_BASE_DIR, xml_names)

    # save the xml file; the context manager guarantees the handle is
    # closed even if the write raises (the original leaked it on error)
    with open(xml_file_path, 'w') as xml_file:
        xml_file.write(xml_contents)
# Map each environment family to the function that builds its XML contents.
GENERATOR_DICT = {
    'Centipede': centipede_generator.generate_centipede,
    'Snake': snake_generator.generate_snake,
    'Reacher': reacher_generator.generate_reacher
}
if __name__ == '__main__':
    # parse the parameters
    parser = argparse.ArgumentParser(description='xml_asset_generator.')
    parser.add_argument("--env_name", type=str, default='Centipede')
    args = parser.parse_args()
    # generate one XML file per size variant of the chosen environment
    for i_leg_num in TASK_DICT[args.env_name]:
        xml_contents = GENERATOR_DICT[args.env_name](i_leg_num)
        save_xml_files(args.env_name, i_leg_num, xml_contents)
| [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
e13a436074fd79fa40edf5d9d28341b1d5a8ff27 | 7d4964998493d333900c4be9d78ecbf8fd334127 | /devportfolio/wsgi.py | a9a8c5f341c35b93937e664b961decb5be4d2122 | [
"MIT"
] | permissive | nickmwangemi/devport | 56fd7556895469127d2b86cdf8a9b1e7d4346ed0 | 25b6c0ac97ada74cadf99ebcbae922d1aa0174fe | refs/heads/main | 2023-04-17T07:57:25.644870 | 2021-04-30T15:55:52 | 2021-04-30T15:55:52 | 362,522,911 | 0 | 1 | null | 2021-04-30T07:59:27 | 2021-04-28T15:41:19 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for devportfolio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the dev settings unless DJANGO_SETTINGS_MODULE is already set.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devportfolio.settings.dev")

# Module-level WSGI callable that servers look up by convention.
application = get_wsgi_application()
| [
"nickmwangemi@gmail.com"
] | nickmwangemi@gmail.com |
9a1ec3173aa9c9f1d1931ee671f3d6db686a96b7 | 5c9d96d8184f50afddc0704ab4d4eb8ebe8d132b | /REGAN_kitchen_companion.py | aa881f09b877d8df5b625fc1d4359c657e0bbb4b | [] | no_license | Tanyaregan/practices | 2248da1007577341a6b8c29d17b3369b04115cdd | d981941ce877753b87575542b3adf55879fb66f2 | refs/heads/master | 2021-05-23T05:30:40.405654 | 2018-05-08T20:23:31 | 2018-05-08T20:23:31 | 95,040,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | def kitchen_companion():
"""Displays and edits a list of ingredients."""
import random
print 'Welcome to Kitchen Companion!'
ingredient_list = []
ingredients = open('ingredients.txt', 'r+')
for i in ingredients:
ingredient_list.append(i.rstrip())
while True:
print ''
print '*** INGREDIENTS LIST MAIN MENU ***'
print ''
print 'Would you like to:'
print ''
print '(L) List your ingredients in alphabetical order'
print '(A) Add an ingredient'
print '(R) Remove an ingredient'
print '(S) Search for a specific ingredient'
print '(X) Pick a random ingredient'
print '(Q) to quit.'
choice = raw_input('>>> ')
if choice == 'L':
print ''
print 'Your ingredients:'
for item in sorted(ingredient_list):
print item
continue
elif choice == 'A':
print ''
print 'What ingredient would you like to add?: '
addition = raw_input('>>> ')
if addition not in ingredient_list:
ingredient_list.append(addition)
print addition, 'has been added to the list.'
continue
else:
print addition, 'is already on the list.'
continue
elif choice == 'R':
print ''
print 'What ingredient would you like to delete?: '
deletion = raw_input('>>> ')
if deletion not in ingredient_list:
print deletion, 'is not on the list'
continue
else:
ingredient_list.remove(deletion)
print deletion, 'removed from list'
continue
elif choice == 'S':
print ''
print 'What item would you like to search for?: '
search = raw_input('>>> ')
for item in ingredient_list:
if item.startswith(search):
print item
continue
elif choice == 'X':
print ''
print 'Here is a random item, you silly Balloonicorn:'
rand = random.choice(ingredient_list)
print rand
elif choice == 'Q':
print ''
print 'Thanks for using Kitchen Companion!'
break
else:
print 'That entry is not in the list of choices, please try again.'
continue
ingredients.close()
kitchen_companion()
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
be174d5d2aac5c1d6e5238f958f929ec9c085eab | f5a2f74783fc6c48a75e415f08493f33a5937e3b | /cloudbio/package/conda.py | 9caa900e5ce4c1e3a7f098bbd869001cd31519f6 | [
"MIT"
] | permissive | espritfollet/cloudbiolinux | 8feccd8ca20a1e7cbb70aedbe373e2e522400be8 | c7c41c2634d044c60abae2c2264ff7e9b6885485 | refs/heads/master | 2021-05-05T20:11:52.955336 | 2017-12-23T10:58:27 | 2017-12-23T10:58:27 | 115,296,610 | 0 | 0 | MIT | 2019-03-23T05:30:44 | 2017-12-25T01:34:43 | Python | UTF-8 | Python | false | false | 7,262 | py | """Install packages via the Conda package manager: http://conda.pydata.org/
"""
import collections
import json
import os
import yaml
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
from cloudbio.flavor.config import get_config_file
from cloudbio.package.shared import _yaml_to_packages
def install_packages(env, to_install=None, packages=None):
    """Install conda packages into an Anaconda-based target environment.

    No-op unless the target is an Anaconda install.  The package list comes
    either from the ``packages`` argument or from ``packages-conda.yaml``
    (overridable via ``env.conda_yaml``).  After installation, binaries are
    symlinked into ``env.system_install``'s bin directory via _link_bin.

    Args:
        env: fabric-style environment object providing safe_run,
            safe_run_output, system_install and related helpers.
        to_install: package groups to select out of the YAML config.
        packages: explicit package spec list (``name[;env=<conda_env>]``),
            bypassing YAML group resolution when ``to_install`` is unset.
    """
    if shared._is_anaconda(env):
        conda_bin = shared._conda_cmd(env)
        # An explicit conda YAML attached to the env overrides flavor config.
        if hasattr(env, "conda_yaml"):
            Config = collections.namedtuple("Config", "base dist")
            config_file = Config(base=env.conda_yaml, dist=None)
        else:
            config_file = get_config_file(env, "packages-conda.yaml")
        if config_file.base is None and packages is None:
            packages = []
        else:
            # NOTE(review): if packages were passed explicitly while
            # config_file.base is None, the open() below would fail --
            # confirm callers always ship the YAML alongside explicit lists.
            if to_install:
                (packages, _) = _yaml_to_packages(config_file.base, to_install, config_file.dist)
            # Extra conda channels declared in the YAML ("-c <channel>" flags).
            with open(config_file.base) as in_handle:
                channels = " ".join(["-c %s" % x for x in yaml.safe_load(in_handle).get("channels", [])])
        conda_envs = _create_environments(env, conda_bin)
        conda_info = json.loads(env.safe_run_output("{conda_bin} info --json".format(**locals())))
        # Uninstall old R packages that clash with updated versions.
        # Temporary fix to allow upgrades from older versions that migrated:
        # - r-tximport is now bioconductor-tximport
        # - py2cairo is incompatible with R 3.4.1
        # - libedit pins to curses 6.0 but bioconda requires 5.9
        for problem in ["r-tximport", "py2cairo", "libedit"]:
            cur_packages = [x["name"] for x in
                            json.loads(env.safe_run_output("{conda_bin} list --json {problem}".format(**locals())))]
            if problem in cur_packages:
                env.safe_run("{conda_bin} remove --force -y {problem}".format(**locals()))
        # Install our customized packages, grouped by target conda env.
        if len(packages) > 0:
            for env_name, env_packages in _split_by_condaenv(packages):
                if env_name:
                    assert env_name in conda_envs, (env_name, conda_envs)
                    env_str = "-n %s" % env_name
                else:
                    env_str = ""
                pkgs_str = " ".join(env_packages)
                env.safe_run("{conda_bin} install --quiet -y {env_str} {channels} {pkgs_str}".format(**locals()))
                conda_pkg_list = json.loads(env.safe_run_output(
                    "{conda_bin} list --json {env_str}".format(**locals())))
                for package in env_packages:
                    # Expose each installed tool in the system bin directory.
                    _link_bin(package, env, conda_info, conda_bin, conda_pkg_list,
                              conda_envdir=conda_envs.get(env_name))
        conda_pkg_list = json.loads(env.safe_run_output("{conda_bin} list --json".format(**locals())))
        # Prefixed links (bcbio_python, bcbio_conda, bcbio_pip) for the
        # core interpreter tooling of the base environment.
        for pkg in ["python", "conda", "pip"]:
            _link_bin(pkg, env, conda_info, conda_bin, conda_pkg_list, files=[pkg], prefix="bcbio_")
def _link_bin(package, env, conda_info, conda_bin, conda_pkg_list, files=None, prefix="", conda_env=None,
              conda_envdir=None):
    """Link files installed in the bin directory into the install directory.

    This is imperfect but we're trying not to require injecting everything in
    the anaconda directory into a user's path.

    Args:
        package: conda package name; any "=version" suffix is stripped.
        env: fabric-style environment object.
        conda_info: parsed output of ``conda info --json`` (pkgs_dirs used).
        conda_bin: path to the conda executable.
        conda_pkg_list: parsed ``conda list --json`` for the target env.
        files: explicit file names to link; defaults to everything in the
            package's bin directory.
        prefix: optional prefix for the link name (e.g. "bcbio_").
        conda_env: unused here -- presumably kept for API compatibility.
        conda_envdir: prefix directory of a named conda env, when linking
            from a non-base environment.
    """
    package = package.split("=")[0]
    final_bindir = os.path.join(env.system_install, "bin")
    if conda_envdir:
        base_bindir = os.path.join(conda_envdir, "bin")
    else:
        base_bindir = os.path.dirname(conda_bin)
    # Resolve any symlinks in the final and base hierarchies.
    with quiet():
        final_bindir = env.safe_run_output("cd %s && pwd -P" % final_bindir)
        base_bindir = env.safe_run_output("cd %s && pwd -P" % base_bindir)
    for pkg_subdir in [x for x in conda_pkg_list if x["name"] == package]:
        # dist_name may be "channel::name-version-build"; keep the tail.
        pkg_subdir = pkg_subdir["dist_name"].split("::")[-1]
        for pkg_dir in conda_info["pkgs_dirs"]:
            pkg_bindir = os.path.join(pkg_dir, pkg_subdir, "bin")
            if env.safe_exists(pkg_bindir):
                if not files:
                    with quiet():
                        files = env.safe_run_output("ls -1 {pkg_bindir}".format(**locals())).split()
                for fname in files:
                    # Symlink to the original file in the /anaconda/bin
                    # directory; this could be a hard or soft link.
                    base_fname = os.path.join(base_bindir, fname)
                    if os.path.exists(base_fname) and os.path.lexists(base_fname):
                        _do_link(base_fname,
                                 os.path.join(final_bindir, "%s%s" % (prefix, fname)))
def _do_link(orig_file, final_file):
    """Soft-link *orig_file* into place at *final_file*.

    The symlink must point at the /anaconda/bin directory, not the real
    location in the pkgs directory, so conda can resolve LD_LIBRARY_PATH
    and the interpreters.  A pre-existing correct link is left untouched;
    anything else at *final_file* (stale file or broken symlink) is removed
    and replaced with a relative symlink.
    """
    if os.path.exists(final_file):
        # Already correct when it resolves to the same real file AND the
        # stored (relative) link target points back at orig_file.
        points_at_same = os.path.realpath(final_file) == os.path.realpath(orig_file)
        if points_at_same and orig_file == os.path.normpath(
                os.path.join(os.path.dirname(final_file), os.readlink(final_file))):
            return
        os.remove(final_file)
    elif os.path.lexists(final_file):
        # Broken symlink: lexists but not exists.
        os.unlink(final_file)
    os.symlink(os.path.relpath(orig_file, os.path.dirname(final_file)), final_file)
def _split_by_condaenv(packages):
    """Group package specs by their target conda environment.

    Each spec looks like ``name[;key=value;...]``; a ``env=<name>`` entry
    routes the package to that named environment, otherwise it is grouped
    under ``None`` (the base environment).  Returns dict items of
    environment name -> list of package names.
    """
    grouped = collections.defaultdict(list)
    for spec in packages:
        pieces = spec.split(";")
        pkg_name = pieces[0]
        target_env = None
        for entry in pieces[1:]:
            key, value = entry.split("=")
            if key == "env":
                target_env = value
        grouped[target_env].append(pkg_name)
    return dict(grouped).items()
def _create_environments(env, conda_bin):
    """Creates custom local environments that conflict with global dependencies.

    Available environments:
    - python3 -- support tools that require python 3. This is an initial step
      towards transitioning to more python3 tool support.
    - samtools0 -- For tools that require older samtools 0.1.19

    Returns:
        dict: environment name -> environment prefix directory.
    """
    out = {}
    conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    # Create each environment only if it is missing, then re-query the list
    # so the new prefix path can be picked up below.
    if not any(x.endswith("/python3") for x in conda_envs):
        env.safe_run("{conda_bin} create -y --name python3 python=3".format(**locals()))
        conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    if not any(x.endswith("/samtools0") for x in conda_envs):
        env.safe_run("{conda_bin} create -y --name samtools0 python=2".format(**locals()))
        conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    out["python3"] = [x for x in conda_envs if x.endswith("/python3")][0]
    out["samtools0"] = [x for x in conda_envs if x.endswith("/samtools0")][0]
    return out
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
class AnonymousSurvey():
    """Collect anonymous answers to a single survey question."""

    def __init__(self, question):
        """Remember the question text and start with no responses."""
        self.question = question
        self.responses = list()

    def show_question(self):
        """Print the survey question."""
        print(self.question)

    def store_response(self, new_response):
        """Record one response to the survey."""
        self.responses += [new_response]

    def show_results(self):
        """Print every response collected so far."""
        print("Survey results:")
        for answer in self.responses:
            print(f"- {answer}")
| [
"j.olson.digital@gmail.com"
] | j.olson.digital@gmail.com |
9cb761ebc2d0cb43448126d9150ef9c8f0545248 | 478aa8e979226404fcba2a9aa1cc9c05b7b9b33b | /cars/views.py | feb184e0a125ee455f5b919ce6e6a79d0eb278d8 | [] | no_license | avramenkomy/module_E5 | 5054728c08df028e0f32642c92f6393c31a3d900 | 4bfaacfd4fb94cd2ba59eff5c0dfca1b846bc56a | refs/heads/master | 2023-03-18T15:50:09.901002 | 2021-03-09T11:03:42 | 2021-03-09T11:03:42 | 345,962,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | from django.http import HttpResponse
from django.shortcuts import render
from cars.models import Car
from django.template import loader
from django.views.generic.detail import DetailView
from cars.forms import CarFilterForm, CarFullFilter
from django.db.models import Q
# Create your views here.
def index(request):
    """List cars, optionally filtered by two GET-bound forms.

    ``CarFilterForm`` narrows by year range, model/manufacturer text and
    gear type; ``CarFullFilter`` is a single free-text search box.  Both
    filters are applied cumulatively to the same queryset.
    """
    template = loader.get_template('base.html')
    cars = Car.objects.select_related("manufacturer").all()
    form = CarFilterForm(request.GET)
    full_form = CarFullFilter(request.GET)
    if form.is_valid():
        if form.cleaned_data["min_year"]:
            cars = cars.filter(year__gte=form.cleaned_data["min_year"])
        if form.cleaned_data["max_year"]:
            cars = cars.filter(year__lte=form.cleaned_data["max_year"])
        if form.cleaned_data["model"]:
            # Match model substring, or manufacturer title exactly/substring.
            cars = cars.filter(
                Q(model__icontains=form.cleaned_data["model"])|
                Q(manufacturer__title=form.cleaned_data["model"])|
                Q(manufacturer__title__icontains=form.cleaned_data["model"]))
        if form.cleaned_data["gear"] != "":
            cars = cars.filter(Q(gear=form.cleaned_data["gear"]))
    if full_form.is_valid():
        if full_form.cleaned_data["search"]:
            # Free-text search across model, manufacturer and gear.
            cars = cars.filter(Q(model=full_form.cleaned_data["search"])|
                Q(manufacturer__title=full_form.cleaned_data["search"])|
                Q(manufacturer__title__icontains=full_form.cleaned_data["search"])|
                # Q(year=int(full_form.cleaned_data["search"]))|
                Q(gear=full_form.cleaned_data["search"])|
                Q(gear__icontains=full_form.cleaned_data["search"]))
    return HttpResponse(template.render({ "cars": cars, "form": form, "full_form": full_form }))
class CarDetailView(DetailView):
    # Generic detail page for a single Car; Django's DetailView defaults
    # (template name, pk/slug lookup, context object name) apply.
    model = Car
| [
"avramenkomy@yandex.ru"
] | avramenkomy@yandex.ru |
2f2e1030c13ce06b0ed7d9df9dc967b10e484184 | 42878cf22469c5adc3d92c9f5eb670b00001956d | /src/synamic/core/services/pre_processor/pre_processor_service.py | 119feec16e16f160b43344bfdb8f18d94db0b763 | [
"MIT"
] | permissive | SabujXi/Synamic | 4fb34f1b2c05df22e98f9b001b948c2a52248693 | c9c06ecf874be82dbb2cba890cb483300809de98 | refs/heads/master | 2021-05-16T00:27:50.197163 | 2020-08-10T18:20:59 | 2020-08-10T18:20:59 | 107,001,678 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | """
author: "Md. Sabuj Sarker"
copyright: "Copyright 2017-2018, The Synamic Project"
credits: ["Md. Sabuj Sarker"]
license: "MIT"
maintainer: "Md. Sabuj Sarker"
email: "md.sabuj.sarker@gmail.com"
status: "Development"
"""
from synamic.core.standalones.functions.decorators import not_loaded, loaded
from .builtin_processors import _builtin_processor_classes
from synamic.exceptions import SynamicPreProcessorNotFound
class PreProcessorService:
    """Registry of a site's pre-processors, keyed by processor name.

    Builtin processors (from ``builtin_processors``) are registered during
    :meth:`load`, whether or not they have a directory on disk; registered
    processors are also reachable as attributes via ``__getattr__``.
    """

    def __init__(self, site):
        self.__site = site
        self.__name_to_processor = {}
        self.__is_loaded = False

    @property
    def is_loaded(self):
        """True once :meth:`load` has completed."""
        return self.__is_loaded

    @not_loaded
    def load(self):
        """Register all builtin processors and load each one."""
        preprocess_cdir = self.__site.cpaths.pre_process_cdir
        # First pass: builtin processors that have a directory on disk.
        if preprocess_cdir.exists():
            for cdir in preprocess_cdir.list_dirs(depth=1):
                name = cdir.basename
                builtin_class = _builtin_processor_classes.get(name)
                if builtin_class is not None:
                    self.add_processor(name, cdir, builtin_class)
        # Second pass: register any remaining builtin even when its
        # directory does not exist.
        for name, builtin_class in _builtin_processor_classes.items():
            if self.get_processor(name, default=None, error_out=False) is None:
                missing_cdir = preprocess_cdir.join(name, is_file=False)
                self.add_processor(name, missing_cdir, builtin_class)
        for processor in self.__name_to_processor.values():
            processor.load()
        self.__is_loaded = True

    def add_processor(self, processor_name, processor_cpath, processor_class):
        """Instantiate *processor_class* for this site and register it."""
        assert type(processor_class) is type
        assert processor_name not in self.__name_to_processor
        instance = processor_class(self.__site, processor_cpath)
        self.__name_to_processor[processor_name] = instance
        return instance

    def get_processor(self, processor_name, default=None, error_out=True):
        """Return the processor for *processor_name*.

        Missing names raise SynamicPreProcessorNotFound when *error_out*
        is true, otherwise *default* is returned.
        """
        processor = self.__name_to_processor.get(processor_name, None)
        if processor is not None:
            return processor
        if error_out:
            raise SynamicPreProcessorNotFound(f'Processor {processor_name} could not be found')
        return default

    @property
    def pre_processors(self):
        """All registered processors, as a tuple."""
        return tuple(self.__name_to_processor.values())

    def __getattr__(self, key):
        # Unknown attributes fall back to processor lookup: service.foo
        return self.get_processor(key, error_out=True)
| [
"md.sabuj.sarker@gmail.com"
] | md.sabuj.sarker@gmail.com |
7547ee2982aa5a0f157c0e903540e4774050ac79 | 48df8f9a545b86caf0e52fbaa9f74d71c34285de | /oneshot/getstd.py | 1031bc3b11cb673aaf832f53853292edb9826731 | [] | no_license | Python3pkg/OneShot | 7954408376163279713396a6bfe06669b7c69eea | 26d8ad99a5bb8f925d358e9601005062f651ad4e | refs/heads/master | 2021-01-21T17:38:33.119792 | 2017-05-21T17:23:55 | 2017-05-21T17:23:55 | 91,974,963 | 0 | 0 | null | 2017-05-21T17:23:53 | 2017-05-21T17:23:53 | null | UTF-8 | Python | false | false | 1,450 | py | import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import numpy as _np
# Get std dev (spot size) {{{
def getstd(res, h, xval):
    """Return the per-column weighted variance ("spot size" squared) of *h*.

    .. deprecated:: 0.0.0
        Not referenced anywhere else; kept for backward compatibility.

    Each column of *h* is treated as a histogram of counts over the
    coordinates *xval*; the weighted mean and variance of *xval* are
    computed with those counts as weights.

    Parameters
    ----------
    res : int
        Number of columns in *h* (length of the returned array).
    h : array_like, shape (len(xval), res)
        Histogram counts -- assumed to match res columns; TODO confirm.
    xval : numpy.ndarray, shape (len(h),)
        Coordinate value associated with each row of *h*.

    Returns
    -------
    numpy.ndarray
        Weighted variance of *xval* for each column of *h*.

    Notes
    -----
    The original carried a dead ``indivbool`` debugging branch that
    referenced an undefined ``plt`` (a latent NameError if enabled), an
    unused nested Gaussian helper, and a block of commented-out curve-fit
    experiments; all of that unreachable code has been removed.  The live
    computation is unchanged.
    """
    stddevsq = _np.zeros(res)
    for i, row in enumerate(_np.transpose(h)):
        # Weighted mean and variance of xval, with column counts as weights.
        mean = _np.sum(xval * row) / row.sum()
        stddevsq[i] = _np.sum(_np.power(xval - mean, 2) * row) / row.sum()
    return stddevsq
# }}}
| [
"joelfred@slac.stanford.edu"
] | joelfred@slac.stanford.edu |
463090bb5b1d514884c43abfa3250022348fc00d | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/sun_data/mgroup/annotation/annotation00101m/annotation00101m4_xsd/__init__.py | c8a16b879136466c5d8c529de13e7558dae74f94 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 179 | py | from output.models.sun_data.mgroup.annotation.annotation00101m.annotation00101m4_xsd.annotation00101m4 import (
Root,
TheType,
)
__all__ = [
"Root",
"TheType",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
5f62ae2b41203e02f3bd6369cd5bf77f010b43ab | 5ba34cad2a933adfed6b5df5b1229e48038596d4 | /common/utils.py | e1e50b301d87bce558520bd4db37ed53dcafe48a | [
"MIT"
] | permissive | Firmicety/fomalhaut-panel | bececa59cd42edd8793440a652d206b250591cb9 | 3e662db65a7ca654f75a19e38cb0931be21f92e9 | refs/heads/master | 2020-06-06T07:52:27.211654 | 2019-06-20T11:38:39 | 2019-06-20T11:38:39 | 192,683,216 | 0 | 0 | MIT | 2019-06-19T07:39:07 | 2019-06-19T07:39:07 | null | UTF-8 | Python | false | false | 4,540 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals
import logging
import json
import sys
from django.http import HttpResponse
import six
from six import binary_type, text_type
import time
import itertools
logger = logging.getLogger(__name__)
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# PyPy exposes sys.pypy_version_info; it is absent on CPython.
PYPY = True if getattr(sys, 'pypy_version_info', None) else False
if PY3:
    # NOTE: these rebindings deliberately shadow the ``binary_type`` and
    # ``text_type`` names imported from six above.
    from io import BytesIO
    text_type = str
    binary_type = bytes
else:
    # cStringIO is the fast byte-buffer implementation on Python 2.
    from cStringIO import StringIO as BytesIO
    text_type = unicode
    binary_type = str
def utf8(value):
    """Get the UTF-8-encoded version of a value.

    bytes pass through untouched; text is encoded as UTF-8; any other
    object is converted to its textual form first and then encoded.
    """
    if not isinstance(value, binary_type) and not isinstance(value, text_type):
        # text_type() yields the value's textual form.  The previous
        # binary_type(value) call produced a zero-filled buffer for ints on
        # Python 3 (bytes(5) == b'\x00\x00\x00\x00\x00') instead of b'5'.
        value = text_type(value)
    if isinstance(value, text_type):
        return value.encode('utf-8')
    else:
        return value
def text_type_dict(dict_data):
    """Return a copy of *dict_data* with byte keys/values decoded as UTF-8.

    Non-bytes keys and values are kept as-is.  Raises TypeError when the
    argument is not a dict.
    """
    if not isinstance(dict_data, dict):
        raise TypeError

    def _as_text(value):
        # Only byte strings are decoded; everything else passes through.
        return value.decode('utf-8') if isinstance(value, binary_type) else value

    return {_as_text(key): _as_text(val) for key, val in dict_data.items()}
def datetime_to_str(dt, format_str='%Y-%m-%d %H:%M:%S'):
    """Format a datetime as a string.

    :param dt: datetime to format; falsy values yield an empty string.
    :param format_str: strftime format, defaulting to ``YYYY-MM-DD HH:MM:SS``.
    :return: formatted string, or '' when *dt* is falsy.
    """
    if not dt:
        return ''
    return dt.strftime(format_str)
def datetime_to_timestamp(dt):
    """Convert a datetime to a millisecond-resolution POSIX timestamp.

    Uses ``time.mktime``, so naive datetimes are interpreted in the local
    timezone.  Returns None when *dt* is None.
    """
    if dt is None:
        return None
    whole_seconds = time.mktime(dt.timetuple())
    return whole_seconds * 1e3 + dt.microsecond / 1e3
def error_404(request):
    """Plain-text fallback handler for unknown pages.

    NOTE(review): this returns HTTP 200 with the body "page not found";
    an actual 404 status would need HttpResponseNotFound -- confirm intent.
    """
    return HttpResponse("page not found")
    # return render_to_response('404.html')
def http_response_json(dict_data, encoding='utf-8'):
    """Serialize *dict_data* into a JSON HttpResponse (UTF-8 charset).

    :param encoding: only used on Python 2, where json.dumps accepts it.
    :param dict_data: mapping to serialize.
    :return: HttpResponse with content type application/json.
    """
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
    try:
        if six.PY3:
            # On Python 3 byte keys/values are decoded first so json.dumps
            # does not choke on bytes.
            # if isinstance(dict_data, binary_type):
            #     dict_data =
            dict_data = text_type_dict(dict_data)
            return HttpResponse(json.dumps(dict_data, ensure_ascii=False),
                                content_type="application/json; charset=utf-8")
        else:
            return HttpResponse(json.dumps(dict_data, encoding=encoding, ensure_ascii=False),
                                content_type="application/json; charset=utf-8")
    except Exception as e:
        logger.error(e)
        # Retry without ensure_ascii (non-ASCII gets escaped instead).
        return HttpResponse(json.dumps(dict_data),
                            content_type="application/json; charset=utf-8")
def json_loads(content, encoding=None):
    """Deserialize JSON from a ``str`` or ``bytes`` payload.

    On Python 3, bytes are decoded as UTF-8 up front and the deprecated
    ``encoding`` keyword is not forwarded: ``json.loads`` stopped accepting
    it (ignored since 3.1, removed in 3.9 where it raises TypeError).  The
    original also crashed on ``str`` input by always calling ``.decode()``.
    The historical call is preserved on Python 2.
    """
    if six.PY3:
        if isinstance(content, bytes):
            content = content.decode('utf-8')
        return json.loads(content)
    else:
        return json.loads(s=content, encoding=encoding)
def json_dumps(dict_data, encoding='utf-8', indent=None, sort_keys=False):
    """Serialize *dict_data* to a JSON string.

    ``ensure_ascii=False`` keeps non-ASCII (e.g. Chinese) text readable.
    :param encoding: only meaningful on Python 2; Python 3's json.dumps
        has no such parameter.
    :param indent: pretty-print indentation, forwarded to json.dumps.
    :param sort_keys: forwarded to json.dumps.
    :return: JSON string.
    """
    try:
        if six.PY3:
            # The original always passed ``encoding=``, which raises
            # TypeError on every Python 3 call and silently dropped
            # ensure_ascii=False via the fallback below.
            return json.dumps(dict_data, ensure_ascii=False, indent=indent, sort_keys=sort_keys)
        return json.dumps(dict_data, encoding=encoding, ensure_ascii=False, indent=indent, sort_keys=sort_keys)
    except Exception as e:
        logger.error(e)
        # Last resort: dump with default settings (non-ASCII gets escaped).
        return json.dumps(dict_data, indent=indent, sort_keys=sort_keys)
def check_text_content_type(content_type):
    """Return True when *content_type* denotes textual content.

    Accepts str or bytes.  The original lower-cased the value into text and
    then called ``startswith`` with *bytes* prefixes, which raises
    TypeError on Python 3; it also stringified bytes via text_type(),
    producing ``"b'text/html'"`` on Python 3.  Both are fixed here while
    keeping the Python 2 behavior intact.
    """
    if isinstance(content_type, binary_type):
        # Decode bytes explicitly; text_type(b'...') would embed the repr.
        content_type = content_type.decode('utf-8', 'replace')
    content_type = text_type(content_type).lower()
    text_prefixes = (
        'text',
        'application/json',
        'application/x-javascript',
        'application/xml',
        'application/x-www-form-urlencoded',
    )
    # str.startswith accepts a tuple of prefixes, replacing any(map(...)).
    return content_type.startswith(text_prefixes)
def grouper(iterable, size):
    """Yield successive *size*-length tuples drawn from *iterable*.

    The final chunk may be shorter.  When ``size <= 0`` the iterator
    itself is yielded once, unconsumed.
    """
    # http://stackoverflow.com/a/8991553
    source = iter(iterable)
    if size <= 0:
        yield source
        return
    # iter(callable, sentinel) stops as soon as a chunk comes back empty.
    for chunk in iter(lambda: tuple(itertools.islice(source, size)), ()):
        yield chunk
__all__ = ['grouper', 'check_text_content_type',
'json_dumps', 'json_loads', 'http_response_json', 'error_404',
'datetime_to_timestamp', 'datetime_to_str', 'text_type_dict',
'utf8', 'BytesIO', 'text_type', 'binary_type', 'PY2', 'PY3', 'PYPY']
| [
"grestran@gmail.com"
] | grestran@gmail.com |
70854803177cbf6f526b08576fc9c7321e535049 | b10ee2d670cab2141cdee16dd99c9425cfeb24e1 | /oci_image.py | c25866a0a69c5b38e894eae5f67c13d823b48f5a | [] | no_license | VariableDeclared/resource-oci-image | c90ece9157c74cafc9ab2eaae74d327baa761432 | e583429139d874bbcab8330c13f3fde30912abdf | refs/heads/master | 2021-05-24T10:37:16.744676 | 2020-02-05T16:18:56 | 2020-02-05T16:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | from pathlib import Path
import yaml
from ops.framework import Object
from ops.model import BlockedStatus, ModelError
class OCIImageResource(Object):
    """Wraps a charm's OCI image resource for fetching and parsing."""

    def __init__(self, charm, resource_name):
        super().__init__(charm, resource_name)
        self.resource_name = resource_name

    def fetch(self):
        """Fetch and parse the image resource file.

        Returns:
            ImageInfo: registry path plus credentials, in pod-spec form.

        Raises:
            MissingResourceError: the resource file is absent or empty.
            InvalidResourceError: the file contents are not valid YAML.
        """
        resource_path = self.model.resources.fetch(self.resource_name)
        if not resource_path.exists():
            raise MissingResourceError(self.resource_name)
        resource_text = Path(resource_path).read_text()
        if not resource_text:
            # An empty file is treated the same as a missing resource.
            raise MissingResourceError(self.resource_name)
        try:
            resource_data = yaml.safe_load(resource_text)
        except yaml.YAMLError as e:
            raise InvalidResourceError(self.resource_name) from e
        else:
            return ImageInfo(resource_data)
class ImageInfo(dict):
    """Dict of OCI image info in the Juju K8s pod-spec key format."""

    def __init__(self, data):
        # Translate the data from the format used by the charm store to the
        # format used by the Juju K8s pod spec, since that is how this is
        # typically used.
        translated = {
            'imagePath': data['registrypath'],
            'username': data['username'],
            'password': data['password'],
        }
        super().__init__(translated)

    @property
    def image_path(self):
        """Registry path of the image."""
        return self['imagePath']

    @property
    def username(self):
        """Registry username."""
        return self['username']

    @property
    def password(self):
        """Registry password."""
        return self['password']
class ResourceError(ModelError):
    """Base for resource failures; attaches a BlockedStatus for the charm."""

    status_type = BlockedStatus
    status_message = 'Resource error'

    def __init__(self, resource_name):
        super().__init__(resource_name)
        self.status = self.status_type(f'{self.status_message}: {resource_name}')


class MissingResourceError(ResourceError):
    """Raised when the OCI image resource is absent or empty.

    Subclasses ResourceError (not ModelError directly, as before) so the
    ``status`` attribute and the overridden ``status_message`` actually
    take effect when the error is raised.
    """

    status_message = 'Missing resource'


class InvalidResourceError(ResourceError):
    """Raised when the resource contents are not valid YAML."""

    status_message = 'Invalid resource'
| [
"johnsca@gmail.com"
] | johnsca@gmail.com |
d483a1ed0b3c259251024bcf7ffd220b7ec4e08f | c81745ea7c36fa03cd818b3986f605a6da036658 | /u_app/models.py | 1d2d6231ad49b573d94313e42613910c8668c6d9 | [] | no_license | mpresto/user_mgmt_site | 3f77fbbe4fa3fec4724e7ba8916eed9fd1d18aae | 063ed174f0af46fff3a7c5f7631f1587dc152511 | refs/heads/master | 2022-04-11T18:15:29.603694 | 2020-03-29T04:10:32 | 2020-03-29T04:10:32 | 237,847,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import connection
import sqlite3
# Create your models here.
class MyUser(AbstractBaseUser):
    """Custom user model keyed on email instead of a username."""
    email = models.EmailField(max_length=254, unique=True)
    # NOTE(review): AbstractBaseUser already defines ``password`` with
    # max_length=128 for hashed values; overriding at 50 may truncate
    # password hashes -- confirm this is intentional.
    password = models.CharField(max_length=50)
    full_name = models.CharField(max_length=50)
    birth_date = models.DateField(blank=True, null=True)
    registration_date = models.DateTimeField(auto_now_add=True)
    # auto_now_add only stamps creation time; subsequent logins are
    # presumably updated elsewhere -- verify.
    last_login = models.DateTimeField(auto_now_add=True)
    objects = BaseUserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['full_name', 'birth_date',]
class Doggo(models.Model):
    """A class for our doggos"""
    name = models.CharField(max_length=200)
    # External image URL (a plain string, not an uploaded file).
    image_url = models.CharField(max_length=500)
    # NOTE(review): blank=True without null=True means an empty form value
    # will fail to save to the database -- confirm intended.
    age = models.IntegerField(blank=True)
    description = models.CharField(max_length=500)
    entry_date = models.DateTimeField(auto_now_add=True)
    # The submitting user; doggos are deleted together with their submitter.
    submitter = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    # Denormalized aggregate of Rating.vote_value -- presumably recomputed
    # when votes change; verify against the view code.
    average_rating = models.IntegerField(default=0)
class Rating(models.Model):
    """A model for our rating records"""
    # Both FKs cascade: deleting a user or doggo removes their ratings.
    user_who_voted = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    rated_doggo = models.ForeignKey(Doggo, on_delete=models.CASCADE)
    vote_value = models.IntegerField(default=0)
| [
"monty.preston5@gmail.com"
] | monty.preston5@gmail.com |
c3a6193d0572e0c981b8de79b098cda9d7eb1a93 | 09c595368ed7617381edb8ea87f56d5596ab4bdb | /Medium/103. Binary Tree Zigzag Level Order Traversal.py | bfdd1a6f73b158b43b2c1359cb6fcec8d8390eba | [] | no_license | h74zhou/leetcode | 38d989135b968b6947c36df7be288cb11ead3f0c | a5efedb34271160a7a776a7ce9bfff29f47b2389 | refs/heads/master | 2023-03-10T19:24:46.786706 | 2021-02-23T02:25:27 | 2021-02-23T02:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def zigzagLevelOrder(self, root):
        """Breadth-first traversal whose levels alternate direction.

        :type root: TreeNode
        :rtype: List[List[int]] -- level 0 left-to-right, level 1
            right-to-left, and so on.
        """
        if not root:
            return []
        levels = []
        current = [root]
        left_to_right = True
        while current:
            values = [node.val for node in current]
            # Reverse every other level instead of inserting at the front.
            levels.append(values if left_to_right else values[::-1])
            upcoming = []
            for node in current:
                if node.left:
                    upcoming.append(node.left)
                if node.right:
                    upcoming.append(node.right)
            current = upcoming
            left_to_right = not left_to_right
        return levels
"iamherunzhou@gmail.com"
] | iamherunzhou@gmail.com |
e25c1f9014a8ca9635d1989cfee9bc1ea967069c | 78eb766321c7ed3236fb87bb6ac8547c99d0d1a4 | /oneYou2/home/migrations/0007_sitesettings_header.py | 770b350040d29f6fff38be8d6bdb226d71ae63f1 | [] | no_license | danmorley/nhs-example | 9d7be76116ed962248e1f7e287355a6870534f5d | ae4b5f395d3518ee17ef89348ed756c817e0c08c | refs/heads/master | 2022-12-13T02:13:18.484448 | 2019-02-28T11:05:31 | 2019-02-28T11:05:31 | 203,353,840 | 1 | 0 | null | 2022-12-07T04:29:46 | 2019-08-20T10:30:15 | Python | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-13 11:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the optional ``header`` FK from home.SiteSettings to pages.Header."""

    dependencies = [
        ('pages', '0011_header'),
        ('home', '0006_sitesettings_footer'),
    ]

    operations = [
        migrations.AddField(
            model_name='sitesettings',
            name='header',
            # SET_NULL keeps the site settings row alive if its header is
            # deleted; related_name='+' disables the reverse accessor.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pages.Header'),
        ),
    ]
| [
"andrewkenyon123@gmail.com"
] | andrewkenyon123@gmail.com |
c008da7634db24b7e928afe0d27575e8bf211931 | ad3339db839a9353ae445b7069c7f2d2c805fadc | /tribune/urls.py | 3dbd1848986508a951390659bd2309263a06e032 | [] | no_license | felkiriinya/Moringa-Tribune | 1a89ad0ab01618284eb8e4fb55587f60cb197fa1 | 5c936fac04b265e9a1ea98e3c2c296a7a1ef75e8 | refs/heads/master | 2023-01-13T19:45:02.966996 | 2020-11-24T17:24:55 | 2020-11-24T17:24:55 | 312,059,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | """tribune URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # The news app owns the site root.
    url('',include('news.urls')),
    # django-registration's one-step signup flow.
    url(r'^accounts/', include('registration.backends.simple.urls')),
    url(r'^logout/$',views.LogoutView.as_view(), {'next_page': '/'}),
    url(r'^tinymce/', include('tinymce.urls')),
    # DRF token auth endpoint (POST username/password -> auth token).
    url(r'^api-token-auth/', obtain_auth_token)
]
| [
"felkiriinya@gmail.com"
] | felkiriinya@gmail.com |
f8460aaf86927b5c5035f75d913a717aa778cda9 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/counting_20200622225609.py | 00a400697539fd255487c113aa925cea6350e1ca | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | def counting(str):
str = str.split('-')
hour1 = int(convertTo24(str[0]).split(':')[0])
print('hour1',hour1)
hour2 = int(convertTo24(str[1]).split(':'))
def convertTo24(hour):
newHour = ''
if 'am' in hour and hour[:2] == '12':
newHour = '24'
newHour += hour[2:5]
print(hour)
print(newHour)
return newHour
counting("12:00am-12:00am") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
82f26b32b81eeb6214dd9fb7788ba27f86cba0d3 | b2ba78fb1e53f92efdc3b6e0be50c81e5dd036ed | /algos/mbl_trpo/defaults.py | ef455095211017613a659a721b6b9e08ca3ca5f2 | [
"MIT"
] | permissive | ShuoZ9379/Integration_SIL_and_MBL | 2dcfae10cb5929c4121a3a8bfceebae8c0b6ba08 | d7df6501a665d65eb791f7fd9b8e85fd660e6320 | refs/heads/master | 2020-07-23T20:04:17.304302 | 2019-09-23T18:58:57 | 2019-09-23T18:58:57 | 207,690,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | from baselines.common.models import mlp, cnn_small
def atari():
return dict(
network = cnn_small(),
timesteps_per_batch=512,
max_kl=0.001,
cg_iters=10,
cg_damping=1e-3,
gamma=0.98,
lam=1.0,
vf_iters=3,
vf_stepsize=1e-4,
entcoeff=0.00,
)
def sparse_mujoco():
return dict(
network = mlp(num_hidden=32, num_layers=2),
timesteps_per_batch=1024,
max_kl=0.01,
cg_iters=10,
cg_damping=0.1,
gamma=0.99,
lam=0.98,
vf_iters=5,
vf_stepsize=1e-3,
entcoef=1.0,
normalize_observations=True,
)
def mujoco():
return dict(
network = mlp(num_hidden=32, num_layers=2),
timesteps_per_batch=2048,
max_kl=0.01,
cg_iters=10,
cg_damping=0.1,
gamma=0.99,
lam=0.98,
vf_iters=5,
vf_stepsize=1e-3,
normalize_observations=True,
normalize_obs=True,
ent_coef=0.0
)
def gym_locomotion_envs():
return dict(
network = mlp(num_hidden=32, num_layers=2),
timesteps_per_batch=1024,
max_kl=0.01,
cg_iters=10,
cg_damping=0.1,
gamma=0.99,
lam=0.98,
vf_iters=5,
vf_stepsize=1e-3,
normalize_observations=True,
entcoef=0.0
)
def robotics():
return dict(
network = mlp(num_hidden=32, num_layers=2),
timesteps_per_batch=1024,
max_kl=0.01,
cg_iters=10,
cg_damping=0.1,
gamma=0.99,
lam=0.98,
vf_iters=5,
vf_stepsize=1e-3,
normalize_observations=True,
)
def classic_control():
return dict(
network = mlp(num_hidden=32, num_layers=2),
#timesteps_per_batch=1024,
#max_kl=0.01,
#cg_iters=10,
#cg_damping=0.1,
#gamma=0.99,
#lam=0.98,
#vf_iters=5,
#vf_stepsize=1e-3,
normalize_observations=True,
normalize_obs=True
)
| [
"zhangshuo19930709@gmail.com"
] | zhangshuo19930709@gmail.com |
383e6330f03c52d4c051dbaae812f110ffd8383b | 7aac370bbd217d716ba53f0efd70047879bb1444 | /homeassistant/components/somfy_mylink/config_flow.py | 79fbf028b16a525c660d1986847854e8614a00c8 | [
"Apache-2.0"
] | permissive | OverloadUT/home-assistant | c6bd63edb7c087eaf81ff507fdeb3e1420062d3a | 7ccfaed7361604aa83cc55f059015327b544b5a7 | refs/heads/dev | 2023-02-23T01:04:13.441125 | 2021-10-25T16:26:03 | 2021-10-25T16:26:03 | 78,159,098 | 5 | 1 | Apache-2.0 | 2023-02-22T06:18:15 | 2017-01-06T00:21:06 | Python | UTF-8 | Python | false | false | 6,841 | py | """Config flow for Somfy MyLink integration."""
import asyncio
from copy import deepcopy
import logging
from somfy_mylink_synergy import SomfyMyLinkSynergy
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_REVERSE,
CONF_REVERSED_TARGET_IDS,
CONF_SYSTEM_ID,
CONF_TARGET_ID,
CONF_TARGET_NAME,
DEFAULT_PORT,
DOMAIN,
MYLINK_STATUS,
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate that the user-supplied input can reach a MyLink hub.

    *data* carries the schema keys (system id, host, port).  Returns a dict
    holding the config-entry title on success.  Raises CannotConnect when
    the status request times out and InvalidAuth when the hub returns no
    status or an error payload.
    """
    bridge = SomfyMyLinkSynergy(data[CONF_SYSTEM_ID], data[CONF_HOST], data[CONF_PORT])
    try:
        status = await bridge.status_info()
    except asyncio.TimeoutError as ex:
        raise CannotConnect from ex
    if not status or "error" in status:
        _LOGGER.debug("Auth error: %s", status)
        raise InvalidAuth
    return {"title": f"MyLink {data[CONF_HOST]}"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Somfy MyLink.

    Entries can be created from DHCP discovery, manual user input or a YAML
    import; all paths funnel through ``async_step_user``.
    """

    VERSION = 1

    def __init__(self):
        """Initialize the somfy_mylink flow."""
        # Pre-filled from DHCP discovery when available.
        self.host = None
        self.mac = None
        self.ip_address = None

    async def async_step_dhcp(self, discovery_info):
        """Handle dhcp discovery."""
        self._async_abort_entries_match({CONF_HOST: discovery_info[IP_ADDRESS]})
        formatted_mac = format_mac(discovery_info[MAC_ADDRESS])
        # The MAC is already normalized above; do not format it a second time.
        await self.async_set_unique_id(formatted_mac)
        self._abort_if_unique_id_configured(
            updates={CONF_HOST: discovery_info[IP_ADDRESS]}
        )
        self.host = discovery_info[HOSTNAME]
        self.mac = formatted_mac
        self.ip_address = discovery_info[IP_ADDRESS]
        self.context["title_placeholders"] = {"ip": self.ip_address, "mac": self.mac}
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle the initial step (manual entry or post-discovery confirm)."""
        errors = {}
        if user_input is not None:
            self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
            try:
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self.async_create_entry(title=info["title"], data=user_input)
        # Show the form (again) with any validation errors; the host field
        # defaults to the DHCP-discovered IP when we have one.
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_HOST, default=self.ip_address): str,
                    vol.Required(CONF_SYSTEM_ID): str,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
                }
            ),
            errors=errors,
        )

    async def async_step_import(self, user_input):
        """Handle import from YAML configuration."""
        self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
        return await self.async_step_user(user_input)

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an options flow for somfy_mylink (per-cover reversal settings)."""
    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry
        # Work on a deep copy so nothing is persisted until the user saves.
        self.options = deepcopy(dict(config_entry.options))
        # Target currently being edited in async_step_target_config.
        self._target_id = None
    @callback
    def _async_callback_targets(self):
        """Return the list of targets reported by the MyLink status payload."""
        return self.hass.data[DOMAIN][self.config_entry.entry_id][MYLINK_STATUS][
            "result"
        ]
    @callback
    def _async_get_target_name(self, target_id) -> str:
        """Find the name of a target in the api data; raises KeyError if absent."""
        mylink_targets = self._async_callback_targets()
        for cover in mylink_targets:
            if cover["targetID"] == target_id:
                return cover["name"]
        raise KeyError
    async def async_step_init(self, user_input=None):
        """Handle options flow.

        Aborts unless the entry is loaded (target data comes from the live
        hub).  Selecting a target jumps to its per-target form; submitting
        with no target saves the accumulated options.
        """
        if self.config_entry.state is not config_entries.ConfigEntryState.LOADED:
            _LOGGER.error("MyLink must be connected to manage device options")
            return self.async_abort(reason="cannot_connect")
        if user_input is not None:
            if target_id := user_input.get(CONF_TARGET_ID):
                return await self.async_step_target_config(None, target_id)
            return self.async_create_entry(title="", data=self.options)
        # None is offered first so the user can save without picking a target.
        cover_dict = {None: None}
        mylink_targets = self._async_callback_targets()
        if mylink_targets:
            for cover in mylink_targets:
                cover_dict[cover["targetID"]] = cover["name"]
        data_schema = vol.Schema({vol.Optional(CONF_TARGET_ID): vol.In(cover_dict)})
        return self.async_show_form(step_id="init", data_schema=data_schema, errors={})
    async def async_step_target_config(self, user_input=None, target_id=None):
        """Handle options flow for a single target (its reverse flag)."""
        # setdefault mutates self.options in place; the mapping is shared
        # with the init step's save path.
        reversed_target_ids = self.options.setdefault(CONF_REVERSED_TARGET_IDS, {})
        if user_input is not None:
            if user_input[CONF_REVERSE] != reversed_target_ids.get(self._target_id):
                reversed_target_ids[self._target_id] = user_input[CONF_REVERSE]
            return await self.async_step_init()
        self._target_id = target_id
        return self.async_show_form(
            step_id="target_config",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_REVERSE,
                        default=reversed_target_ids.get(target_id, False),
                    ): bool
                }
            ),
            description_placeholders={
                CONF_TARGET_NAME: self._async_get_target_name(target_id),
            },
            errors={},
        )
# Raised by validate_input when the MyLink status request times out.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
# Raised by validate_input when the hub returns no status or an error payload.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
| [
"noreply@github.com"
] | OverloadUT.noreply@github.com |
9e4ef2699511b1cda5dab70de5f4507eac7db2cb | 1c29948305793ced5835a5345903c6110a078bd2 | /examples/app/app/middleware.py | 6415e78ef30c1971be3b14c88310612d74a34735 | [
"MIT"
] | permissive | cmanallen/flask-compose | 84b7dbd5c68dce04de335afe03a522835847b82e | 560760e39ac32e888f0a4d84154a1bd26a2a3033 | refs/heads/master | 2020-03-30T15:09:03.387185 | 2018-10-27T20:27:42 | 2018-10-27T20:27:42 | 151,350,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from flask import jsonify, make_response
def render_response(fn):
    """Response renderer middleware.

    Wraps a view returning ``(payload, status_code)`` and converts the
    payload into a JSON Flask response with that status code.
    """
    import functools

    # functools.wraps preserves the wrapped view's __name__/__doc__, which
    # Flask uses for endpoint names and which aids debugging.
    @functools.wraps(fn)
    def decorator(*args, **kwargs):
        response, code = fn(*args, **kwargs)
        return make_response(jsonify(response), code)
    return decorator
| [
"cmanallen90@gmail.com"
] | cmanallen90@gmail.com |
6958c46d143159426cf3e7865f78b0c9a69c09d1 | fc2fb2118ea02867d559bf8027e54e3c6b652cfd | /devItems/spring-2020/source-all/574_tfidf_traintest_p2_code_regression.py | a4d4d0c335155d8002b2ada302bdfe3e138c306f | [] | no_license | pdhung3012/SoftwareStoryPointsPrediction | 2431ad599e0fba37617cfd467de1f4f1afed56cc | 520990663cb42adcac315b75cd4eb1150c3fc86c | refs/heads/master | 2023-08-29T15:16:30.413766 | 2021-09-18T17:12:20 | 2021-09-18T17:12:20 | 254,596,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 18:19:24 2019
@author: hungphd
"""
# import modules
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import cross_val_score, cross_val_predict, StratifiedKFold, train_test_split
import os
from sklearn.metrics import precision_score,accuracy_score
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.metrics import mean_squared_error,mean_absolute_error
def createDirIfNotExist(fopOutput):
    """Create directory *fopOutput*, including missing parents, if absent.

    Uses os.makedirs instead of os.mkdir so nested output paths such as
    'result/574_code_reg/details/' can be created even when the parent
    directory does not exist yet; an existing directory is reported, not
    treated as an error.
    """
    try:
        # Create target Directory (and any intermediate directories)
        os.makedirs(fopOutput)
        print("Directory ", fopOutput, " Created ")
    except FileExistsError:
        print("Directory ", fopOutput, " already exists")
# set file directory
fopVectorAllSystems = 'data/vector574_Tfidf1_code/'
fopOverallResultReg= 'result/574_code_reg/'
createDirIfNotExist(fopOverallResultReg)
from os import listdir
from os.path import isfile, join
# NOTE(review): arrFiles is computed but never used afterwards.
arrFiles = [f for f in listdir(fopVectorAllSystems) if isfile(join(fopVectorAllSystems, f))]
# Summary files for max/min/average MAE; truncated now, appended to later.
fpMAEMax = fopOverallResultReg + 'MAE_max.txt'
fpMAEMin = fopOverallResultReg + 'MAE_min.txt'
fpMAEAvg = fopOverallResultReg + 'MAE_avg.txt'
o3=open(fpMAEMax,'w')
o3.write('')
o3.close()
o3 = open(fpMAEMin, 'w')
o3.write('')
o3.close()
o3 = open(fpMAEAvg, 'w')
o3.write('')
o3.close()
# fileCsv = fopVectorAllSystems + file+
fpVectorItemTrainReg = fopVectorAllSystems + 'code_train_regression.csv'
fpVectorItemTestReg = fopVectorAllSystems + 'code_test_regression.csv'
fopOutputItemDetail = fopOverallResultReg + "/details/"
# fopOutputItemEachReg = fopOutputItemDetail + file + "/"
fopOutputItemResult = fopOverallResultReg + "/result/"
fopOutputItemChart = fopOverallResultReg + "/chart/"
fpResultAll=fopOutputItemResult+'overall.txt'
fpAllMAEInfo = fopOutputItemChart + 'MAE.txt'
createDirIfNotExist(fopOutputItemDetail)
# createDirIfNotExist(fopOutputItemEachReg)
createDirIfNotExist(fopOutputItemResult)
createDirIfNotExist(fopOutputItemChart)
# fnAll='_10cv.csv'
# load data for 10-fold cv
# 'star' is the regression target column; 'no' is a row id dropped from features.
df_train = pd.read_csv(fpVectorItemTrainReg)
print(list(df_train.columns.values))
y_train = df_train['star']
X_train = df_train.drop(['no','star'],axis=1)
df_test = pd.read_csv(fpVectorItemTestReg)
print(list(df_test.columns.values))
y_test = df_test['star']
X_test = df_test.drop(['no','star'],axis=1)
# create a list of classifiers
random_seed = 2
# classifiers = [GaussianNB(), LogisticRegression(random_state=random_seed),DecisionTreeClassifier(),
#                RandomForestClassifier(random_state=random_seed, n_estimators=50), AdaBoostClassifier(), LinearDiscriminantAnalysis(),QuadraticDiscriminantAnalysis(),
#                LinearSVC(random_state=random_seed), MLPClassifier(alpha=1), GradientBoostingClassifier(random_state=random_seed, max_depth=5)]
# NOTE(review): RandomForestRegressor uses the literal 2 instead of
# random_seed, and XGBRegressor's objective 'reg:linear' is a legacy alias
# in newer xgboost releases -- confirm both are intended.
classifiers = [DecisionTreeRegressor(),
               RandomForestRegressor(random_state=2, n_estimators=1000),AdaBoostRegressor(), xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,
                max_depth = 5, alpha = 10, n_estimators = 10),
               LinearSVR(random_state=random_seed), MLPRegressor(alpha=1),
               GradientBoostingRegressor(random_state=random_seed, max_depth=5)]
# fit and evaluate for 10-cv
index = 0
# group = df_all['label']
# Short names aligned positionally with the classifiers list above.
arrClassifierName = ['DTR', 'RFR', 'ABR', 'XGBR', 'LSVR', 'MLPR', 'GBR']
arrXBar = []
arrMAE = []
arrStrMAEAvg = []
arrIndex=[]
o2=open(fpResultAll,'w')
o2.close()
# NOTE(review): k_fold is created but the loop below uses a plain
# train/test split, not cross-validation -- confirm intent.
k_fold = StratifiedKFold(10,shuffle=True)
# Fit each regressor on the train split, record MAE/MSE on the test split,
# and dump per-row predictions. The broad except keeps one failing model
# from aborting the whole sweep (its results are simply omitted).
for classifier in classifiers:
    index=index+1
    try:
        filePredict = ''.join([fopOutputItemDetail,arrClassifierName[index-1], '.txt'])
        print("********", "\n", "10 fold CV Results Regression with: ", str(classifier))
        # X_train, X_test, y_train, y_test = train_test_split(all_data, all_label, test_size = 0.2,shuffle = False, stratify = None)
        classifier.fit(X_train, y_train)
        predicted = classifier.predict(X_test)
        # cross_val = cross_val_score(classifier, all_data, all_label, cv=k_fold, n_jobs=1)
        # predicted = cross_val_predict(classifier, all_data, all_label, cv=k_fold)
        # weightAvg = precision_score(all_label, predicted, average='weighted') * 100
        # maeAccuracy = mean_absolute_error(all_label, predicted)
        # mqeAccuracy = mean_squared_error(all_label, predicted)
        maeAccuracy = mean_absolute_error(y_test, predicted)
        mqeAccuracy = mean_squared_error(y_test, predicted)
        # maeAccuracy = mean_absolute_error(all_label, predicted)
        print('{:.2f}'.format(maeAccuracy))
        # Persist the raw predictions for this classifier.
        np.savetxt(filePredict, predicted, fmt='%s', delimiter=',')
        o2 = open(fpResultAll, 'a')
        o2.write('Result for ' + str(classifier) + '\n')
        o2.write('MAE {}\nMQE {}\n'.format(maeAccuracy,mqeAccuracy))
        # o2.write(str(sum(cross_val) / float(len(cross_val))) + '\n')
        # o2.write(str(confusion_matrix(all_label, predicted)) + '\n')
        # o2.write(str(classification_report(all_label, predicted)) + '\n')
        o2.close()
        strClassX = str(arrClassifierName[index - 1])
        arrIndex.append(index)
        arrXBar.append(strClassX)
        arrMAE.append(maeAccuracy)
        arrStrMAEAvg.append('{:.2f}'.format(maeAccuracy))
        # break
    except Exception as inst:
        print("Error ", index)
        print(type(inst)) # the exception instance
        print(inst.args) # arguments stored in .args
        print(inst)
# Aggregate MAE across classifiers: extremes, average and the full table.
arrAlgm = np.array(arrMAE)
# NOTE(review): np.amax of an error metric is the WORST MAE, yet it is
# written to MAE_max.txt under the name bestMAE (and amin as worstMAE) --
# confirm the intended best/worst naming.
bestMAE=np.amax(arrAlgm)
worstMAE=np.amin(arrAlgm)
avgMAE=np.average(arrAlgm)
maxIndexMAE= np.argmax(arrAlgm)
minIndexMAE = np.argmin(arrAlgm)
print(maxIndexMAE)
o3=open(fpMAEMax,'a')
o3.write('{}\t{}\n'.format(arrClassifierName[maxIndexMAE], bestMAE))
o3.close()
o3 = open(fpMAEMin, 'a')
o3.write('{}\t{}\n'.format(arrClassifierName[minIndexMAE], worstMAE))
o3.close()
o3 = open(fpMAEAvg, 'a')
o3.write('{}\n'.format(avgMAE))
o3.close()
# Per-classifier MAE table (tab separated) used for charting.
o3 = open(fpAllMAEInfo, 'w')
for i in range(0,len(arrMAE)):
    o3.write('{}\t{}\n'.format(arrXBar[i],arrMAE[i]))
o3.close()
# (matplotlib bar-chart rendering left disabled below)
# y_pos = np.arange(len(arrXBar))
# plt.bar(y_pos, arrMAE, align='center', alpha=0.5)
# plt.xticks(y_pos, arrIndex, rotation=90)
# plt.rcParams["figure.figsize"] = (40, 40)
# plt.ylabel('MAE Accuracy')
# plt.ylim(0, 50)
# for i in range(len(arrMAE)):
#     plt.text(x=i - 0.5, y=arrMAE[i] + 1, s=arrStrMAEAvg[i])
#     plt.text(x=i, y=arrMAE[i] - 1, s=arrXBar[i], rotation=90)
#
# plt.title(fpResultAll)
# plt.savefig(fpAllMAEInfo)
# plt.clf() | [
"pdhung3012@gmail.com"
] | pdhung3012@gmail.com |
695b5a756c2e0b025a91c5b0a72fc68c9b6b7b6a | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-antiddos/huaweicloudsdkantiddos/v1/model/show_d_dos_status_request.py | 62c9d00ea8af66a317f15806c1652a4f6f9548d0 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,667 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDDosStatusRequest:
    """
    Request model for querying the Anti-DDoS status of a single EIP.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'floating_ip_id': 'str',
        'ip': 'str'
    }
    # Maps python attribute names to their JSON keys (identical here).
    attribute_map = {
        'floating_ip_id': 'floating_ip_id',
        'ip': 'ip'
    }
    def __init__(self, floating_ip_id=None, ip=None):
        """ShowDDosStatusRequest - a model defined in huaweicloud sdk

        :param floating_ip_id: the ID of the user's EIP
        :param ip: the user's EIP address (only stored when provided)
        """
        self._floating_ip_id = None
        self._ip = None
        self.discriminator = None
        # floating_ip_id is always assigned (even when None); ip only when given.
        self.floating_ip_id = floating_ip_id
        if ip is not None:
            self.ip = ip
    @property
    def floating_ip_id(self):
        """Gets the floating_ip_id of this ShowDDosStatusRequest.

        The ID of the user's EIP.

        :return: The floating_ip_id of this ShowDDosStatusRequest.
        :rtype: str
        """
        return self._floating_ip_id
    @floating_ip_id.setter
    def floating_ip_id(self, floating_ip_id):
        """Sets the floating_ip_id of this ShowDDosStatusRequest.

        The ID of the user's EIP.

        :param floating_ip_id: The floating_ip_id of this ShowDDosStatusRequest.
        :type: str
        """
        self._floating_ip_id = floating_ip_id
    @property
    def ip(self):
        """Gets the ip of this ShowDDosStatusRequest.

        The user's EIP address.

        :return: The ip of this ShowDDosStatusRequest.
        :rtype: str
        """
        return self._ip
    @ip.setter
    def ip(self, ip):
        """Sets the ip of this ShowDDosStatusRequest.

        The user's EIP address.

        :param ip: The ip of this ShowDDosStatusRequest.
        :type: str
        """
        self._ip = ip
    def to_dict(self):
        """Returns the model properties as a dict (nested models serialized recursively)"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowDDosStatusRequest):
            return False
        # Compare the full attribute dicts, not just the declared fields.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
83d7efe8ec51a03b89fc7457db7b1e9a0126db06 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow_estimator/python/estimator/head/multi_head.py | 2826514b2b5d57d3feeb26964d7a3ac7fc016c82 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0a250ae00433eead90a7bb128c31387543770c56715cc722483690c2c4b13f76
size 21311
| [
"github@cuba12345"
] | github@cuba12345 |
2a314614273fd1f98a7a750f330a866ac7116ab5 | 4751a9daca11558dd0780f2e8b9477a484ebc7f4 | /src/qibo/tests/cirq_utils.py | c6ec7923450f40c20ffd28e6c9d73826c5a7d561 | [
"Apache-2.0"
] | permissive | drheli/qibo | f6875ed39883fe7bfa0b8939abb042fe636c5de7 | b99568aee9f978a5a82e92860c8d17e3358af7b9 | refs/heads/master | 2023-04-17T20:40:44.324689 | 2021-04-29T16:29:40 | 2021-04-29T16:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import re
import cirq
import numpy as np
# Mapping from qibo gate class names to their cirq string names.
_QIBO_TO_CIRQ = {"CNOT": "CNOT", "RY": "Ry", "TOFFOLI": "TOFFOLI"}


def assert_gates_equivalent(qibo_gate, cirq_gate):
    """Assert that a qibo gate is equivalent to the given cirq gate.

    The cirq gate is characterized purely through its string representation,
    from which the gate name, the optional rotation parameter and the qubit
    ids are parsed.  Checks that gate type, target/control qubits and (when
    applicable) the rotation angle all agree.
    """
    parts = [token for token in re.split("[()]", str(cirq_gate)) if token]
    if len(parts) == 2:
        gatename, targets = parts
        theta = None
    elif len(parts) == 3:
        gatename, theta, targets = parts
    else:  # pragma: no cover
        # case not tested because it fails
        raise RuntimeError("Cirq gate parsing failed with {}.".format(parts))
    qubit_ids = [int(token) for token in targets.replace(" ", "").split(",")]
    # The last listed qubit is the target; any preceding ones are controls.
    *control_ids, target_id = qubit_ids
    assert _QIBO_TO_CIRQ[type(qibo_gate).__name__] == gatename
    assert qibo_gate.target_qubits == (target_id,)
    assert set(qibo_gate.control_qubits) == set(control_ids)
    if theta is not None:
        if "π" in theta:
            theta = float(theta.replace("π", "")) * np.pi
        else:  # pragma: no cover
            # case doesn't happen in tests (could remove)
            theta = float(theta)
        np.testing.assert_allclose(theta, qibo_gate.parameters)
| [
"35475381+stavros11@users.noreply.github.com"
] | 35475381+stavros11@users.noreply.github.com |
74f1ce2400caeb1017978bc9cd7e592929ffa333 | 464850ba426263b17084fc71363ca14b8278b15e | /queue.py | 09c1d0d38a0d5d2c0a155264f7415f957a7588b0 | [] | no_license | eng-arvind/python | 8442c30ec10f979f913b354458b4f910539d8728 | 249f5f35f245a3f1742b10310de37ca6c6023af2 | refs/heads/master | 2020-12-23T06:40:16.911269 | 2020-02-02T18:42:01 | 2020-02-02T18:42:01 | 237,069,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from queue import Queue
def reversequeue(queue):
    """Reverse the contents of *queue* in place using a list as a LIFO stack."""
    items = []
    # Drain the queue front-to-back into the stack...
    while not queue.empty():
        items.append(queue.get())
    # ...then re-enqueue in reverse (last-drained first).
    while items:
        queue.put(items.pop())
# Demo: fill a FIFO queue with the strings '5'..'12', reverse it in place,
# then drain it, printing "12 11 10 9 8 7 6 5 ".
q=Queue()
q.put('5')
q.put('6')
q.put('7')
q.put('8')
q.put('9')
q.put('10')
q.put('11')
q.put('12')
reversequeue(q)
while(not q.empty()):
    print(q.get(),end=" ")
| [
"noreply@github.com"
] | eng-arvind.noreply@github.com |
ec79280c4e8b367009e17e371a77a8f3c6e29ec7 | 73bb9d0d50b96b3d7ee48e2d97b1d8128a5f2b1e | /backjoon/15/11054.py | ef1c7f92b461227fc42eda080384ef49f02a5cde | [] | no_license | Hoon94/Algorithm | a0ef211d72a2b78e08249501d197875065392084 | 6f6969214bbb6bacd165313b6d8c0feb1caa8963 | refs/heads/master | 2023-05-11T13:12:11.585285 | 2023-05-08T14:38:47 | 2023-05-08T14:38:47 | 244,936,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | n = int(input())
# BOJ 11054: longest bitonic subsequence (strictly increasing, then
# strictly decreasing).  `n`, the sequence length, is read from stdin on
# the line just above this block.
a = list(map(int, input().split()))
dpp = [0 for _ in range(n)]  # dpp[i]: longest increasing subsequence ending at i
dpm = [0 for _ in range(n)]  # dpm[i]: longest decreasing subsequence starting at i
dpb = [0 for _ in range(n)]  # dpb[i]: bitonic length with its peak at i
for i in range(n):
    for j in range(i):
        if a[i] > a[j] and dpp[i] < dpp[j]:
            dpp[i] = dpp[j]
    dpp[i] += 1
# Same DP scanned right-to-left gives the decreasing half.
for i in range(n - 1, -1, -1):
    for j in range(n - 1, i, -1):
        if a[i] > a[j] and dpm[i] < dpm[j]:
            dpm[i] = dpm[j]
    dpm[i] += 1
for i in range(n):
    dpb[i] = dpp[i] + dpm[i] - 1  # the peak element is counted in both halves
print(max(dpb))
| [
"dleognsdl1@naver.com"
] | dleognsdl1@naver.com |
a7dac03b28d59fbdba3a9c969905a7c20ffc2d7c | 23d8c1b24ce4eb9fe7ee7f790c58d411c5b6f185 | /.lint.py.tcf.py | fb97800befcfff47a7b69b560c9e7425329ea899 | [
"Apache-2.0"
] | permissive | d-scott-phillips/tcf | aca30a6faad3778a8eef28409dcc6cf88c702a05 | 4d5f06b25799f18c103d4e4e8b222652956ebc49 | refs/heads/master | 2020-05-14T09:56:20.700897 | 2019-04-16T15:47:32 | 2019-04-16T15:47:32 | 181,752,941 | 0 | 0 | Apache-2.0 | 2019-04-16T19:24:08 | 2019-04-16T19:24:08 | null | UTF-8 | Python | false | false | 2,783 | py | #! /usr/bin/python3
"""
Implement TCF specificy lints
"""
import re
lint_py_check_per_line_name = "misc Python checks"


def lint_py_check_per_line_filter(_repo, cf):
    """Decide whether *cf* should receive the per-line Python checks.

    Skips missing, binary or deleted files; keeps ``.py`` files and any
    other file whose first line mentions python (e.g. a shebang).
    """
    if not cf or cf.binary or cf.deleted:
        return False
    with open(cf.name, 'r') as stream:
        first_line = stream.readline()
    if cf.name.endswith(".py") or 'python' in first_line:
        return True
    _repo.log.info("%s: skipping, not a Python file", cf.name)
    return False
def lint_py_check_per_line(_repo, cf):
    """
    Run multiple line-by-line checks

    Scans *cf* line by line and emits a repo warning for every style
    violation found ('from X import Y' imports, missing spaces around
    operators).
    """
    with open(cf.name, "r") as f:
        line_cnt = 0
        regex_import = re.compile(r"\s*from\s+.*\s+import\s+.*")
        warnings = 0
        for line in f:
            line_cnt += 1
            line = line.strip()
            # NOTE(review): with `or _repo.wide` every line is skipped as
            # soon as wide mode is on -- looks inverted (expected
            # `and not _repo.wide` to extend checks to all lines); confirm.
            if not line_cnt in cf.lines or _repo.wide:
                continue # Not a line modified, skip it
            # Check that imports are not done with *from HERE import
            # THAT* because it makes code very confusing when we can't
            # see where functions are coming from
            m = regex_import.match(line)
            if m:
                _repo.warning("""\
%s:%d: python style error: use 'import MODULE' vs 'from MODULE import SYMBOLs'
see https://securewiki.ith.intel.com/display/timo/Coding+Style+and+procedures#CodingStyleandprocedures-Importingcode"""
% (cf.name, line_cnt))
                warnings += 1
            # We like spacing around equal signs and operators in
            # general, the C way. The python way sucks. ARG OPERATOR
            # ARG beats ARGOPERATORARG. Ewks.
            # Likewise, [X] is an index, [ X ] is a list. Heaven's
            # sake. For consistency, dictionaries are { K: V }; it's
            # really had to check on those and a patch to pylint would
            # be needed for that.
            regex_bad_eqop = re.compile(r"\S(=|==|!=|\+=|-=|\*=|/=|\|=|&=|^=)\S")
            regex_config = re.compile("CONFIG_[^=]+=")
            # Catches things like blabla('--someswitch=', whatever) or
            # blabla("--something=that")
            regex_string = re.compile(r"=dd[^\s'\"]*['\"]")
            # Got a probable bad usage?
            m = regex_bad_eqop.search(line)
            if m:
                # Maybe a config assignment (this is actually shell code)
                if regex_config.search(line) or regex_string.search(line):
                    continue
                # Maybe rst code, ignore it
                if '===' in line:
                    continue
                _repo.warning("""\
%s:%d: python style error: always leave spaces around operators
('a = b' vs 'a=b')\
""" % (cf.name, line_cnt))
| [
"inaky.perez-gonzalez@intel.com"
] | inaky.perez-gonzalez@intel.com |
2badb5dc4737dd2ff29fa4c88e0c38d4282f9abe | f1766b34f25348b49303ca55a99aaf87f42d78ab | /backend/home/migrations/0002_load_initial_data.py | 723269172a48c32a19c73fb69e921379e67a086f | [] | no_license | crowdbotics-apps/test-25996 | 907a79dfad8a679b84df78679777a6037257ef51 | 2e05c8580a9aefc5ccec4d45c3fdf7406e92da3f | refs/heads/master | 2023-04-18T04:31:43.233315 | 2021-04-28T12:20:13 | 2021-04-28T12:20:13 | 362,441,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the default CustomText row (title 'test')."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="test")
def create_homepage(apps, schema_editor):
    """Seed the default HomePage row with the sample landing-page markup."""
    homepage_model = apps.get_model("home", "HomePage")
    body = """
    <h1 class="display-4 text-center">test</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    homepage_model.objects.create(body=body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at this deployment."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "test-25996.botics.co"
    site_params = {"name": "test"}
    # Only set the domain when a custom one was generated for the app.
    if custom_domain:
        site_params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Data migration: seeds the initial CustomText, HomePage and Site rows.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # NOTE(review): the RunPython operations have no reverse_code, so this
    # migration cannot be unapplied cleanly; confirm that is acceptable.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b46eb3393d73726f9d7c429be423e933ee3bd82c | 866e5f850aed11ef49daab845fbafc2e4b2a585f | /vk.py | 2a8d101b46e93eae73160e0374aa59710325e357 | [] | no_license | SeregaFreeman/vk-robot | 4031bde2b8ee10a8dec5bdcdadf58ceb100547e4 | 3d27ea5e8d84c25e84a9e7ee07afe7f03f7dd801 | refs/heads/master | 2021-06-01T06:21:07.453525 | 2016-07-29T22:29:30 | 2016-07-29T22:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | import vk_api
from random import randint, choice
def main():
    """Example: send 'Jsony Stathomy' a random message and print the profile's friends' ids."""
    # Credentials are read interactively (prompts are in Russian:
    # 'enter login' / 'enter password').
    login = input('Введи логин: ')
    password = input('Введи пароль: ')
    vk_session = vk_api.VkApi(login, password, api_version='5.53')
    try:
        vk_session.authorization()
    except vk_api.AuthorizationError as error_msg:
        # Authorization failed: report and bail out.
        print(error_msg)
        return
    """
    Примеры можно посмотреть здесь: https://github.com/python273/vk_api/tree/master/examples
    VK API - здесь: https://new.vk.com/dev.php?method=methods
    """
    vk = vk_session.get_api()
    user = vk.users.get()[-1]
    print(user)
    '''Просто способ работы с api
    '''
    # Send a fixed greeting to the hard-coded user id, then print the
    # ids of the current profile's friends.
    vk.messages.send(user_id='376365095', message='HI LOLKA')
    print(vk.friends.get(user_id=user['id'])['items'])
    print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    '''Далее рабочий код, но он работает после подправки файла vk_tools.py (259 строка):
    for x in range(len(response)):
        if self.one_param:
            if response[x] is False:
                self.one_param['return'][cur_pool[x]] = {'_error': True}
            else:
                self.one_param['return'][cur_pool[x]] = response[x]
        else:
            if response[x] is False:
                self.pool[i + x][2].update({'_error': True})
            elif type(response[x]) is list:
                self.pool[i + x][2].update({'list': response[x]})
            else:
                if type(response[0]) is int:
                    self.pool[i + x][2].update()
                else:
                    self.pool[i + x][2].update(response[x])
    '''
    # Build a message of 1..10 random words and send it via a request pool.
    with vk_api.VkRequestsPool(vk_session) as pool:
        message = ''
        for i in range(randint(1, 10)):
            message += choice(['Syka', 'bliat', 'соси кирпичь', 'улыбашка', 'ti', 'жОпа', 'mamky ipal'])\
                       + choice([' ', ', '])
        pool.method('messages.send', {'oauth': '1', 'user_id': '376365095', 'message': message})
    # Fetch the friend list through a second pool and print it (or a
    # "you're lonely" message when empty).
    with vk_api.VkRequestsPool(vk_session) as pool:
        friends = pool.method('friends.get', {'user_id': user['id']})
    if friends['items']:
        print(friends['items'])
    else:
        print('Ti odinokaia sychka')
if __name__ == '__main__':
main() | [
"19941510metalhead@gmail.com"
] | 19941510metalhead@gmail.com |
ce58a280272597d54408ad8dd15c1be2287bd149 | 240c95e46d5cdb547f4500f00960dd96705cac34 | /functionalTools.py | 16c95a123d1533a81323304dd25b7767d16e0f2d | [] | no_license | CaMeLCa5e/dailysummer2015 | a23fda58c27fe42a7512b4150420827eb292305c | 378d386702db0814bd79e83de37de8ba442f9c7b | refs/heads/master | 2021-01-10T18:12:08.298279 | 2015-08-19T21:48:41 | 2015-08-19T21:48:41 | 36,266,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def f(x): return x % 3 == 0 or x % 5 == 0
# NOTE: Python 2 syntax (print statements; py2 map/filter return lists).
print filter(f, range(2, 25))  # numbers in [2, 25) divisible by 3 or 5 (f defined above)
def cube(x): return x*x*x
print map(cube, range(1, 11))  # cubes of 1..10
seq = range(8)
def add(x, y): return x+y
print map(add, seq, seq)  # element-wise sums of seq with itself: 0, 2, ..., 14
"JM273606@gmail.com"
] | JM273606@gmail.com |
4f03c3470bbba0f7c758d20b10865ae84540daca | 5a9d8c64c6478f3816b63f59f1cdaca73c0848eb | /pythonNet/ex11_re/regex.py | e958d05f261053f72450c4cb97517e4327dd014a | [] | no_license | wangredfei/nt_py | f68134977e6d1e05cf17cec727644509f084c462 | fedf03c0d52565f588e9b342d1c51df0b6dc2681 | refs/heads/master | 2020-04-08T07:55:08.302589 | 2018-11-23T09:53:48 | 2018-11-23T09:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | import re
# Sample inputs: `s` mixes Chinese text with digit runs; `s2` holds name:year pairs.
s = '2008年发生了很多大事,08奥运,512地震'
s2 = "zhang:1994 li:1993"
'''
pattern = r'(\w)+:(\d+)'
# [('g', '1994'), ('i', '1993')]
pattern = r'(\w+)+:(\d+)'
# [('zhang', '1994'), ('li', '1993')]
l = re.findall(pattern,s2)
print(l)
'''
# findall with pos/endpos bounds: only digit runs inside s[0:19] are matched.
pattern = r'\d+'
regex = re.compile(pattern)
l = regex.findall(s,0,19)
print(l)
# Split on runs of whitespace.
l = re.split(r'\s+',"Hello wo15 N44ao Beijing")
print(l)
# subn with count=1 replaces only the first whitespace run and returns
# the tuple (new_string, number_of_substitutions).
s = re.subn(r'\s+','##','hello word haha',1)
print(s)
| [
"289498360@qq.com"
] | 289498360@qq.com |
f98515e8184c3727c5d871f06b0705370ea3bfc8 | ce074998469af446e33d0fab7adb01320ccc77ed | /dst_procedures/Execute command writing output to local Admin Share.py | e8d4822c286ecd38edd7c220027f7a2e6937af2e | [] | no_license | parahaoer/detection_rules | 1341063568b0ccfa180da129a29aeec0a62e679e | c9f3408eccbcb4b61d1d441af31839872f9bb26c | refs/heads/master | 2023-02-09T13:54:40.254874 | 2020-12-28T09:25:31 | 2020-12-28T09:25:31 | 265,990,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | {"query": {"constant_score": {"filter": {"bool": {"must": [{"bool": {"must": [{"match_phrase": {"event_id": "5140"}}, {"match_phrase": {"share_name": "Admin$"}}]}}, {"bool": {"must_not": [{"bool": {"must": [{"wildcard": {"user_name.keyword": "*$"}}]}}]}}]}}}}}
# MITRE ATT&CK metadata describing the Elasticsearch detection query above.
tactic = "Lateral Movement"
technique = "Windows Admin Shares"
procedure = "Execute command writing output to local Admin Share"
tech_code = "T1077"  # ATT&CK technique id for Windows Admin Shares
| [
"33771109+parahaoer@users.noreply.github.com"
] | 33771109+parahaoer@users.noreply.github.com |
9b83b8e8c296e4803184fccdac2033c1537c7f51 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /nox_mesh_4_loop_repro_w_3_retries/interreplay_91_l.2/replay_config.py | c00a845bedd714e8a3156ea7c049b6468c541d45 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
# Replay an STS experiment trace against a single NOX controller (routing
# app, listening on ptcp:6635) over a 4-switch mesh topology.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./nox_core -v -i ptcp:6635 routing', address='127.0.0.1', port=6635, cwd='nox_classic/build/src')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=4",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False)
# Deterministic replay of the recorded event trace for this interreplay run.
control_flow = Replayer(simulation_config, "experiments/nox_mesh_4_loop_repro_w_3_retries/interreplay_91_l.2/events.trace",
                    wait_on_deterministic_values=False)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
2f551310aef9f44360448ad6ae031ec388a40da1 | 242733887bf50ffc91b2e305dfbea268fdfebae0 | /Regex/parseBytes.py | bdb567c5b6494ff4034cf86aff518e00bfb34981 | [] | no_license | aryabiju37/Python-mini-Proects | c2e4f59bf309b3d7e7696dfe92fb6ff63790114a | a84e03702665cf2e06a16637cfe24a6697163894 | refs/heads/master | 2023-07-25T18:56:54.649800 | 2021-08-27T06:49:32 | 2021-08-27T06:49:32 | 400,420,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | import re
def parse_bytes(inputStream):
byteStream = re.compile(r'[0-1]{4,}')
match = byteStream.findall(inputStream)
return match
print(parse_bytes("11010101 101 323"))
print(parse_bytes("my data is: 10101010 11100010"))
print(parse_bytes("asdsa"))
| [
"riyabee123@gmail.com"
] | riyabee123@gmail.com |
1c17c87fa534798f87e424fc1c749678ab1f35ab | 17d23f404a20c34a406dd086b0a89f956c4ecac0 | /Django-Tutorials/accounts/migrations/0008_auto_20190305_0920.py | aedc1d26206c204d22e2425f605c4b62f37425b5 | [] | no_license | apabhishek178/ieltsonline | 69df682862d96bc04b318262e962e22a0919fe88 | 42061efa8293c948342a670f0a62c90d3b31ebff | refs/heads/master | 2020-04-26T09:19:55.712217 | 2019-03-20T13:36:31 | 2019-03-20T13:36:31 | 173,451,873 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-05 03:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0007_userprofile_organisation'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='website',
field=models.CharField(default='', max_length=100),
),
]
| [
"apabhishek178@gmail.com"
] | apabhishek178@gmail.com |
dde041f8735a4a188e6da4d287558879d50e5e3a | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/core/svc/vstim/stimulus/position_tuple.py | a16384e53fbb82b460e1b846638146f31903f41b | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from pacu.util.spec.list import FloatListSpec
from pacu.core.svc.impl.pacu_attr import PacuAttr
from pacu.core.svc.impl.ember_attr import EmberAttr
class PositionTuple(PacuAttr, FloatListSpec):
component = 'x-svc-comp-input-array'
description = EmberAttr('2 floats in deg')
placeholder = EmberAttr('')
title = EmberAttr('Position')
tooltip = EmberAttr('')
| [
"jzeitoun@uci.edu"
] | jzeitoun@uci.edu |
675d92022d928d36636dbf7fb19c6956b8fcde33 | 6039142144cb221f04e29e2c7359dc5bed7bb830 | /atividade06/model/__init__.py | abc61a6a013de4d5b185c09456ed6bbd7e291e45 | [
"Apache-2.0"
] | permissive | Yuri-Santiago/yuri-mateus-poo-python-ifce-p7 | f5f245345c38b1e08a1ce6d142204b30868023d0 | edbf0e945e01430eb14dff3c0c7806582430d1c2 | refs/heads/master | 2023-06-12T18:48:08.950423 | 2021-07-09T17:04:17 | 2021-07-09T17:04:17 | 349,737,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | from atividade06.model.cliente import Cliente
from atividade06.model.produto import Produto
from atividade06.model.notafiscal import NotaFiscal
from atividade06.model.itemnotafiscal import ItemNotaFiscal
# banco de dados
# Clientes
cliente1 = Cliente(1, "Yuri Mateus", 100, '200.100.345-34', 'pessoa fisica')
cliente2 = Cliente(2, "Raquel Maciel", 200, '123.456.789-10', 'pessoa fisica')
cliente3 = Cliente(3, "Israel Leite", 300, '109.876.543-21', 'pessoa fisica')
clientes = [cliente1, cliente2, cliente3]
# Produtos
produto1 = Produto(1, 100, 'Arroz', 5.5)
produto2 = Produto(2, 200, 'Feijao', 4.5)
produto3 = Produto(3, 300, 'Batata', 6)
produtos = [produto1, produto2, produto3]
# Notas Fiscais
notafiscal1 = NotaFiscal(1, 100, cliente1)
notafiscal2 = NotaFiscal(2, 200, cliente2)
notafiscal3 = NotaFiscal(3, 300, cliente3)
notas = [notafiscal1, notafiscal2, notafiscal3]
# ItensNotaFiscal
item1 = ItemNotaFiscal(1, 1, 6, produto1)
item2 = ItemNotaFiscal(2, 1, 8, produto1)
item3 = ItemNotaFiscal(3, 2, 5, produto2)
item4 = ItemNotaFiscal(4, 1, 10, produto1)
item5 = ItemNotaFiscal(5, 2, 4, produto2)
item6 = ItemNotaFiscal(6, 3, 7, produto3)
itens = [item1, item2, item3, item4, item5, item6]
# Adicionando os produtos
notafiscal1.adicionarItem(item1)
notafiscal2.adicionarItem(item2)
notafiscal2.adicionarItem(item3)
notafiscal3.adicionarItem(item4)
notafiscal3.adicionarItem(item5)
notafiscal3.adicionarItem(item6)
| [
"yurimateussantiago@gmail.com"
] | yurimateussantiago@gmail.com |
dd97a9756058e4c5df2ac0b5327245d868e532b0 | fe856f232f21ee5f1b45d4c7c19d062b3b3261bc | /pyfr/solvers/navstokes/elements.py | 5c6de3d59176f9693955284cf792aadd3e4a498a | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | bartwozniak/PyFR | 5589c36e7dc0dcac9a7aed7c69c8964bda2c55d8 | d99120c1db245c7a2a35c72dae51ea72c49efef5 | refs/heads/master | 2021-01-20T16:03:21.409981 | 2013-12-05T18:08:48 | 2013-12-05T18:08:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | # -*- coding: utf-8 -*-
from pyfr.solvers.baseadvecdiff import BaseAdvectionDiffusionElements
from pyfr.solvers.euler.elements import BaseFluidElements
class NavierStokesElements(BaseFluidElements, BaseAdvectionDiffusionElements):
def set_backend(self, backend, nscalupts):
super(NavierStokesElements, self).set_backend(backend, nscalupts)
backend.pointwise.register('pyfr.solvers.navstokes.kernels.tflux')
def get_tdisf_upts_kern(self):
tplargs = dict(ndims=self.ndims, nvars=self.nvars,
c=self._cfg.items_as('constants', float))
return self._be.kernel('tflux', tplargs, dims=[self.nupts, self.neles],
u=self.scal_upts_inb, smats=self._smat_upts,
f=self._vect_upts[0])
| [
"freddie@witherden.org"
] | freddie@witherden.org |
6c2149f2e2e124af3166ea13c3d9579008857761 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/003_factories/examples/factory/Factory/autos/jeepsahara.py | 2deb7ee4bf417e533292c263531291cac3658182 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 227 | py | from .abs_auto import AbsAuto
class JeepSahara(AbsAuto):
def __init__(self, name):
self._name = name
def start(self):
print('%s running ruggedly.' % self.name)
def stop(self):
print('%s shutting down.' % self.name) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
6a9ea93a8f6c1c2225a44921839604c2585cc1a9 | e2afe7e135d4cf68fb88cbccf59a782c986fb171 | /getTwits.py | fac1fd1a4f85c8576b00c655b35f4bfee464c1f3 | [] | no_license | BUEC500C1/video-AIRICLEE | b09368ef9543c5f1599f80e5f21e7a0556a1c552 | f2a4ac0d8ce000673c1fe5e163eb841b56bab0c9 | refs/heads/master | 2020-12-29T09:30:30.298871 | 2020-02-25T01:29:33 | 2020-02-25T01:29:33 | 238,556,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | import tweepy #https://github.com/tweepy/tweepy
import os
from PIL import Image, ImageFont, ImageDraw
import twitterAPIKey
def getTwitsFeed():
#Twitter API credentials
consumer_key = twitterAPIKey.consumer_key
consumer_secret = twitterAPIKey.consumer_secret
access_key = twitterAPIKey.access_key
access_secret = twitterAPIKey.access_secret
# Authentification
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# key word to look for twitter
query = twitterAPIKey.query
language = "en"
# user_timeline function
results = api.search(q=query, lang=language)
list_tweet = []
for tweet in results:
list_tweet.append(tweet.text)
return list_tweet
def transTwits2Image(list_tweet):
# transform text to image
i = 0
for tweet in list_tweet:
text = tweet
im = Image.new('RGB',(1080,720),(255,255,255))
dr = ImageDraw.Draw(im)
ttf='/usr/share/fonts/truetype/myfonts/puhui.ttf'
font=ImageFont.truetype(ttf, 12)
dr.text((10, 5), text, font=font, fill="#000000")
# im.show()
im.save("images/t%d.png"%i)
i = i + 1;
def transImage2Video(usrName):
with open('videos.txt', 'w') as f:
for i in range(12):
command = "ffmpeg -loglevel quiet -y -ss 0 -t 3 -f lavfi -i color=c=0x000000:s=830x794:r=30 " \
"-i /home/lighao/EC500/assignment_3/images/t" + str(i+1) \
+ ".png -filter_complex \"[1:v]scale=830:794[v1];[0:v][v1]overlay=0:0[outv]\" " \
"-map [outv] -c:v libx264 /home/lighao/EC500/assignment_3/video" \
+ str(i+1) + ".mp4 -y"
p = os.popen(command)
p.close()
f.write("file video" + str(i+1) + ".mp4" + '\n')
f.close()
cd = "ffmpeg -loglevel quiet -y -f concat -i videos.txt -c copy OutputVideo" + "test" + usrName + ".mp4"
pp = os.popen(cd)
pp.close()
def getResult(usrName):
list_tweet = getTwitsFeed()
transTwits2Image(list_tweet)
transImage2Video(usrName)
if __name__ == '__main__':
getResult()
| [
"noreply@github.com"
] | BUEC500C1.noreply@github.com |
cf52fd255745206eefa21f318a7747bf99f10b47 | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /rllib/agents/a3c/a2c.py | 0a71a359c014b6d352bd73ae29c0b0ff8ea4351d | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 2,487 | py | import math
from ray.rllib.agents.a3c.a3c import DEFAULT_CONFIG as A3C_CONFIG, \
validate_config, get_policy_class
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches
from ray.rllib.execution.train_ops import ComputeGradients, AverageGradients, \
ApplyGradients, TrainOneStep
from ray.rllib.utils import merge_dicts
A2C_DEFAULT_CONFIG = merge_dicts(
A3C_CONFIG,
{
"rollout_fragment_length": 20,
"min_iter_time_s": 10,
"sample_async": False,
# A2C supports microbatching, in which we accumulate gradients over
# batch of this size until the train batch size is reached. This allows
# training with batch sizes much larger than can fit in GPU memory.
# To enable, set this to a value less than the train batch size.
"microbatch_size": None,
},
)
def execution_plan(workers, config):
rollouts = ParallelRollouts(workers, mode="bulk_sync")
if config["microbatch_size"]:
num_microbatches = math.ceil(
config["train_batch_size"] / config["microbatch_size"])
# In microbatch mode, we want to compute gradients on experience
# microbatches, average a number of these microbatches, and then apply
# the averaged gradient in one SGD step. This conserves GPU memory,
# allowing for extremely large experience batches to be used.
train_op = (
rollouts.combine(
ConcatBatches(min_batch_size=config["microbatch_size"]))
.for_each(ComputeGradients(workers)) # (grads, info)
.batch(num_microbatches) # List[(grads, info)]
.for_each(AverageGradients()) # (avg_grads, info)
.for_each(ApplyGradients(workers)))
else:
# In normal mode, we execute one SGD step per each train batch.
train_op = rollouts \
.combine(ConcatBatches(
min_batch_size=config["train_batch_size"])) \
.for_each(TrainOneStep(workers))
return StandardMetricsReporting(train_op, workers, config)
A2CTrainer = build_trainer(
name="A2C",
default_config=A2C_DEFAULT_CONFIG,
default_policy=A3CTFPolicy,
get_policy_class=get_policy_class,
validate_config=validate_config,
execution_plan=execution_plan)
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
7472d35af0f5726c029f1b53e6644e87d50bde12 | 9c47c55873e88d747bccb397c4b8197f42317c99 | /main.py | 8e86847052413cc452bb67b2ca961a4967c68a4a | [] | no_license | balubankudi/LinkedIn-Course | 29eefc7a86df3be5dc1d1b2953b9e1ef6077a158 | 26907301529db251d048f3b5ae129ab445c8bf6e | refs/heads/master | 2022-11-18T08:38:15.960284 | 2020-06-23T17:45:37 | 2020-06-23T17:45:37 | 274,471,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,049 | py |
x = 100
y = 42
if x == 5:
print("do five stuff")
elif x == 6:
print("do six stuff")
else:
print("do something else")
# Copyright 2009-2017 BHG http://bw.org/
words = ['one', 'two', 'three', 'four', 'five']
n = 0
while(n < 5):
print(words[n])
n += 1
# Copyright 2009-2017 BHG http://bw.org/
words = ['one', 'two', 'three', 'four', 'five']
for i in words:
print(i)
# simple fibonacci series
# the sum of two elements defines the next set
a, b = 0, 1
while b < 1000:
print(b, end=' ', flush=True)
a, b = b, a + b
print() # line ending
# Copyright 2009-2017 BHG http://bw.org/
def function(n):
print(n)
function(12)
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
def isprime(n):
if n <= 1:
return False
for x in range(2, n):
if n % x == 0:
return False
else:
return True
#in line 62 i don´t understand the use of Flush
def list_primes():
for n in range(100):
if isprime(n):
print(n,end=" ",flush=True)
print()
list_primes()
n = 6
if isprime(n):
print(f'{n} is prime')
else:
print(f'{n} not prime')
#defining even numbers
def iseven(n):
if n <= 1:
return False
for x in range(2, n):
if n % x == 2:
return False
else:
return True
def list_even():
for n in range(100):
if iseven(n):
print(n,end=" ",flush=True)
print()
list_even()
# knowing format
x = 7.8
print('x is {}'.format(x))
print(type(x))
x = [ 1, 2, 3, 4, 5 ]
x[2] = 42
#eventhough initially the amount of x[2] is supposed to be 3, it is changed in the next line
for i in x:
print('i is {}'.format(i))
#a tuple is like a list except that it is inmutable so we use parenthesis instead of brackets. So the x[2]= 42 will not execute since the initially list is inmutable. See what happens next. it gives error. Conclusion: for inmutable use parenthesis over brackets
#x = ( 1, 2, 3, 4, 5 )
#x[2] = 42
#for i in x:
# print('i is {}'.format(i))
#print a list of even numbers from 1 to 100 and later add them"
x = range (2, 101, 2)
for i in x:
print(i)
x = range (2, 101, 2)
sum = 0
for i in x:
sum += i
print(sum)
#defining even numbers type 2
def iseven(n):
if n <= 1:
return False
for x in range(1, n):
if n % 2 == 0:
return True
else:
return False
def list_even():
for n in range(101):
if iseven(n):
print(n,end=" ",flush=True)
print()
list_even()
def list_evensum():
sum = 0
for n in range(101):
if iseven(n):
sum += n
print(sum)
list_evensum()
#Conditional
x = 2
if x == 0:
print('zero true')
elif x == 1:
print('one 1 true')
elif x == 2:
print('elif 2 true')
elif x == 3:
print('elif 3 true')
elif False:
print('elif true')
else:
print('neither true')
hungry = True
x = 'Feed the bear now!' if hungry else 'Do not feed the bear.'
print(x)
infectedwithcovid19 = True
x = 'stay at home!' if infectedwithcovid19 else 'still stay at home safe'
print(x)
#operators
x = 5
y = 3
z = x + y
z = -z
print(f'result is {z}')
#bitwise operators
x = 0x0a
y = 0x02
z = x | y
print(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')
print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')
#boolean
a = True
b = False
x = ( 'bear', 'bunny', 'tree', 'sky', 'rain' )
y = 'bear'
if y in x:
print('expression is true')
else:
print('expression is false')
if y is x[0]:
print('expression is true')
else:
print('expression is false')
print(id(y))
print(id(x[0]))
#operator precedence: order in which orders are evaluates
#while loop
secret = 'swordfish'
pw = ''
#while pw != secret:
# pw = input("What's the secret word? ")
#for loop
animals = ( 'bear', 'bunny', 'dog', 'cat', 'velociraptor' )
for pet in animals:
print(pet)
animals = ( 'bear', 'bunny', 'dog', 'cat', 'velociraptor' )
for pet in range(5):
print(pet)
secret = 'swordfish'
pw = ''
auth = False
count = 0
max_attempt = 5
#while pw != secret:
#count += 1
#if count > max_attempt: break
#if count == 3: continue
#pw = input(f"{count}: What's the secret word?")
#else:
# auth = True
#print ("authorized" if auth else "Calling the FBI")
#normally the while function is built to be false and else is true
#defining functions
def main():
x = kitten(5, 6, 7)
print(x)
def kitten(a, b = 1, c = 0):
print ("Meow")
print (a, b, c)
if __name__ == '__main__': main()
#notabene: arguments without default should always be before arguement with default
def main():
x = 5
kitten (x)
print(f"in main x is {x}")
def kitten(a):
a = 3
print ("Meow")
print (a)
if __name__ == '__main__': main()
#this is what we call "call by value", and when you pass a variable to a f(x), the f(x) operates on a copy of the variable. the value is passed but not the object it self. A interger is not mutable,
#so this is important to understand: an integer is not mutable so it cannot change, so when you assign a new value to an integer, you're actually assigning an entirely different object to the name. The original integer is not changed, the name simply refers to a new object. Passing a value to a function acts exactly the same way. A reference to the object is passed and acts exactly like an assignment. So mutable objects may be changed, and those changes will be reflected in the caller. Immutable objects may not be changed. So function arguments in Python act exactly as assignments in Python, with all the quirks of Python's object model. For the most part, things will work as expected, but be careful with lists and other mutable objects.
#keyword arguments
def main():
kitten(Buffy = 'meow', Zilla = 'grr', Angel = 'rawr')
def kitten(**kwargs):
if len(kwargs):
for k in kwargs:
print('Kitten {} says {}'.format(k, kwargs[k]))
else: print('Meow.')
if __name__ == '__main__': main()
#GENERATORS: INCLUSIVE RANGE
def main():
for i in inclusive_range(25):
print(i, end = ' ')
print()
def inclusive_range(*args):
numargs = len(args)
start = 0
step = 1
# initialize parameters
if numargs < 1:
raise TypeError(f'expected at least 1 argument, got {numargs}')
elif numargs == 1:
stop = args[0]
elif numargs == 2:
(start, stop) = args
elif numargs == 3:
(start, stop, step) = args
else: raise TypeError(f'expected at most 3 arguments, got {numargs}')
# generator
i = start
while i <= stop:
yield i
i += step
if __name__ == '__main__': main()
def main():
seq = range(11)
print_list(seq)
def print_list(o):
for x in o: print(x, end = ' ')
print()
if __name__ == '__main__': main()
class RevStr(str):
def __str__(self):
return self[::-1]
def main():
hello = RevStr('Hello, World.')
print(hello)
if __name__ == '__main__': main()
print (chr(128406))
x = list(range(30))
print(x)
| [
"replituser@example.com"
] | replituser@example.com |
5377cede56b1c31c3c6e1d05f57ca234f77bbbf7 | 56afef87e593f4a09da95ebeceb3b04940d7069f | /Unified/stagor.py | 7efc149dc8a90ff89ef4b2494eeeff03c2cc2a03 | [] | no_license | lucacopa/WmAgentScripts | 8bf3d3377a84be6172eb2b13214d8e7b2596d130 | c6884f005574506c27b4a09099d987e70734c7e5 | refs/heads/master | 2021-01-18T07:21:55.940980 | 2015-07-13T19:16:48 | 2015-07-13T19:16:48 | 12,436,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py | #!/usr/bin/env python
from assignSession import *
from utils import checkTransferStatus, checkTransferApproval, approveSubscription, getWorkflowByInput
import sys
import itertools
import pprint
from htmlor import htmlor
def stagor(url,specific =None):
done_by_wf_id = {}
done_by_input = {}
completion_by_input = {}
good_enough = 100.0
for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
## implement the grace period for by-passing the transfer.
pass
for transfer in session.query(Transfer).all():
if specific and str(transfer.phedexid)!=str(specific): continue
skip=True
for wfid in transfer.workflows_id:
tr_wf = session.query(Workflow).get(wfid)
if tr_wf:
if tr_wf.status == 'staging':
skip=False
break
if skip: continue
if transfer.phedexid<0: continue
## check the status of transfers
checks = checkTransferApproval(url, transfer.phedexid)
approved = all(checks.values())
if not approved:
print transfer.phedexid,"is not yet approved"
approveSubscription(url, transfer.phedexid)
continue
## check on transfer completion
checks = checkTransferStatus(url, transfer.phedexid, nocollapse=True)
if not specific:
for dsname in checks:
if not dsname in done_by_input: done_by_input[dsname]={}
if not dsname in completion_by_input: completion_by_input[dsname] = {}
done_by_input[dsname][transfer.phedexid]=all(map(lambda i:i>=good_enough, checks[dsname].values()))
completion_by_input[dsname][transfer.phedexid]=checks[dsname].values()
if checks:
print "Checks for",transfer.phedexid,[node.values() for node in checks.values()]
done = all(map(lambda i:i>=good_enough,list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
else:
## it is empty, is that a sign that all is done and away ?
print "ERROR with the scubscriptions API of ",transfer.phedexid
print "Most likely something else is overiding the transfer request. Need to work on finding the replacement automatically, if the replacement exists"
done = False
## the thing above is NOT giving the right number
#done = False
for wfid in transfer.workflows_id:
tr_wf = session.query(Workflow).get(wfid)
if tr_wf:# and tr_wf.status == 'staging':
if not tr_wf.id in done_by_wf_id: done_by_wf_id[tr_wf.id]={}
done_by_wf_id[tr_wf.id][transfer.phedexid]=done
if done:
## transfer.status = 'done'
print transfer.phedexid,"is done"
else:
print transfer.phedexid,"not finished"
pprint.pprint( checks )
#print done_by_input
print "\n----\n"
for dsname in done_by_input:
fractions = None
if dsname in completion_by_input:
fractions = itertools.chain.from_iterable([check.values() for check in completion_by_input.values()])
## the workflows in the waiting room for the dataset
using_its = getWorkflowByInput(url, dsname)
#print using_its
using_wfos = []
for using_it in using_its:
wf = session.query(Workflow).filter(Workflow.name == using_it).first()
if wf:
using_wfos.append( wf )
#need_sites = int(len(done_by_input[dsname].values())*0.7)+1
need_sites = len(done_by_input[dsname].values())
if need_sites > 10:
need_sites = int(need_sites/2.)
got = done_by_input[dsname].values().count(True)
if all([wf.status != 'staging' for wf in using_wfos]):
## not a single ds-using wf is in staging => moved on already
## just forget about it
print "presence of",dsname,"does not matter anymore"
print "\t",done_by_input[dsname]
print "\t",[wf.status for wf in using_wfos]
print "\tneeds",need_sites
continue #??
## should the need_sites reduces with time ?
# with dataset choping, reducing that number might work as a block black-list.
if all(done_by_input[dsname].values()):
print dsname,"is everywhere we wanted"
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is with us. setting staged and move on"
wf.status = 'staged'
session.commit()
elif fractions and len(list(fractions))>1 and set(fractions)==1:
print dsname,"is everywhere at the same fraction"
print "We do not want this in the end. we want the data we asked for"
continue
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is with us everywhere the same. setting staged and move on"
wf.status = 'staged'
session.commit()
elif got >= need_sites:
print dsname,"is almost everywhere we wanted"
#print "We do not want this in the end. we want the data we asked for"
#continue
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is almost with us. setting staged and move on"
wf.status = 'staged'
session.commit()
else:
print dsname
print "\t",done_by_input[dsname]
print "\tneeds",need_sites
print "\tgot",got
for wfid in done_by_wf_id:
#print done_by_wf_id[wfid].values()
## ask that all related transfer get into a valid state
if all(done_by_wf_id[wfid].values()):
pass
#tr_wf = session.query(Workflow).get(wfid)
#print "setting",tr_wf.name,"to staged"
#tr_wf.status = 'staged'
#session.commit()
if __name__ == "__main__":
url = 'cmsweb.cern.ch'
spec=None
if len(sys.argv)>1:
spec = sys.argv[1]
stagor(url, spec)
htmlor()
| [
"vlimant@cern.ch"
] | vlimant@cern.ch |
28519e7591dcb13b25682c4d4c25774be9b499d1 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/largestTime_20200903122443.py | 84a256da28e4d575cd155ab30b135a196baa028c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from itertools import permutations
def Time(A):
# getting the different permutations
# get the one that falls between 0000 and 2359
# then place the semi colon in the proper place
# otherwise return an empty string
A = [str(i) for i in A]
perm = permutations(A)
time = ""
newArray = []
arr = []
for i in list(perm):
string = "".join(i)
newArray.append(string)
newArray = [int(i) for i in newArray]
for i in newArray:
if i > 0000 and i <= 2359:
arr.append(i)
newTime = arr[len(arr)-1]
newTime = str(newTime)
print(new)
Time([1,2,3,4])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
19f811ca33bd32efecc9c62adc05120fc3c75251 | 4ae3b27a1d782ae43bc786c841cafb3ace212d55 | /Test_Slen/PythonSelFramework/tests/emu_nvdia.py | 873ce775dcbd7be2d18d25684ee30b9ab8eb2b11 | [] | no_license | bopopescu/Py_projects | c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32 | a2fe4f198e3ca4026cf2e3e429ac09707d5a19de | refs/heads/master | 2022-09-29T20:50:57.354678 | 2020-04-28T05:23:14 | 2020-04-28T05:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | < form
autocomplete = "false"
class ="modules-login-components-SignIn-components-Form-___style__sign-up-form___2z3Ug" >
< div class ="core-forms-components-___input__input-wrapper___17k2k" > < div class ="ui textfield invalid
">
<label for="email">Email Address</label><input autocomplete="true
" id="
email
" type="
email
" class="
input
" placeholder="
user @ domain.com
" value="">" \
"" \
"<i aria-hidden="
true
" class="
ui
icon
close - circle -
fill
"></i></div><div class="
core - forms - components - ___input__error - message___3ZeWU
">Required field</div></div><div class="
ui
buttons
"><button class="
ui
button
primary
disabled
" disabled=""
type = "submit" > Next < / button > < a
class ="forgot-password ui button flat" href="/signin/password/forgot" > Forgot Password < / a > < a class ="sign-up ui button default" href="/signup" > Create an Account < / a > < / div > < / form >
# create driver
# load webdriver
# load webpage(login url)
# locator for email address
# send_keys (email address)
# click on email locator
# find locator of new page
# locate successful message welcome : assert
def test_email(url, email):
driver = webdriver.Chrome()
driver.get(url)
locator = driver.find_element_by_id("email")
locator.send_keys(email)
welcome_page = driver.find_element_by_id("welcome")
assert welcome.title == "welcome"
menu.json
{"menu": {
"id": "file",
"value": "File",
"popup": {
"menuitem": [
{"value": "New", "onclick": "CreateNewDoc()"},
{"value": "Open", "onclick": "OpenDoc()"},
{"value": "Close", "onclick": "CloseDoc()"}
]
}
}}
{[(<>)]}
True
# input is a string, consisting of {[(<>)]} and letters
{{]} False
load
json
a
python
dict
json.load(open("menu.json", 'r")
def match(string):
| [
"sunusd@yahoo.com"
] | sunusd@yahoo.com |
d697f737208e9caf8f79d0bba9cceecc106bba98 | 40c927ea44653c645c9540e68a8f5b439990fddd | /Chap 10/10.6.5 Exercise.py | afd2bc4c30edcf594d0113cb259cefb66ad026a9 | [] | no_license | simrit1/asimo | 211ff255434637ac6ad396e8ff5ed5cee6ea971d | 12564ab591129ebbb0c2daaa3c538cc6d39aee39 | refs/heads/master | 2022-02-25T07:53:05.034587 | 2019-09-22T15:34:11 | 2019-09-22T15:34:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | # Exercise 10.6.5
import turtle
turtle.setup(400,500)
wn = turtle.Screen()
wn.title("Tess becomes a traffic light!")
wn.bgcolor("lightgreen")
green = turtle.Turtle() # Create 3 turtles correspond with 3 traffic lights
orange = turtle.Turtle()
red = turtle.Turtle()
def draw_housing():
""" Draw a nice housing to hold the traffic lights """
green.pensize(3)
green.color("black", "darkgrey")
green.begin_fill()
green.forward(80)
green.left(90)
green.forward(200)
green.circle(40, 180)
green.forward(200)
green.left(90)
green.end_fill()
draw_housing()
green.penup()
# Position "green" onto the place where the green light should be
green.forward(40)
green.left(90)
green.forward(50)
# Turn "green" into a big green circle
green.shape("circle")
green.shapesize(3)
green.fillcolor("green")
a = green.position() # We get them to the same position
orange.goto(a)
red.goto(a)
orange.left(90) # Turn "orange" into a big orange circle
orange.forward(70)
orange.shape("circle")
orange.shapesize(3)
orange.fillcolor("orange")
red.left(90) # Turn "red" into a big red circle
red.forward(140)
red.shape("circle")
red.shapesize(3)
red.fillcolor("red")
# This variable holds the current state of the machine
state_num = 0
def advance_state_machine():
"""
Modify previous program then we can still realize
the rest of lights when they turn off.
Green, green and orange, orange, red. We number these states 0, 1, 2, 3.
With timer like exercise requisition.
"""
global state_num
if state_num == 0:
red.color("black")
orange.color("black")
green.color("green")
state_num = 1
wn.ontimer(advance_state_machine, 3000)
elif state_num == 1:
red.color("black")
orange.color("orange")
green.color("green")
state_num = 2
wn.ontimer(advance_state_machine, 1000)
elif state_num == 2:
red.color("black")
orange.color("orange")
green.color("black")
state_num = 3
wn.ontimer(advance_state_machine, 1000)
else:
red.color("red")
orange.color("black")
green.color("black") # Show up our green
state_num = 0
wn.ontimer(advance_state_machine, 2000)
advance_state_machine()
wn.exitonclick()
| [
"noreply@github.com"
] | simrit1.noreply@github.com |
27d0ef92e9aba96fde4dacfa572ed41d33a84647 | aba00d6272765b71397cd3eba105fc79b3a346e0 | /Digisig/digsigvenv/lib/python3.6/site-packages/ufl/objects.py | f34c47c6d4cba9fedb8e0c36d0e65630f1ee7ba7 | [] | no_license | JosteinGj/School | a2c7cc090571b867637003fe6c647898ba9d8d24 | 3b5f29846e443b97f042241237dbda3208b20831 | refs/heads/master | 2023-05-02T11:07:29.517669 | 2021-04-26T09:04:57 | 2021-04-26T09:04:57 | 295,340,194 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | # -*- coding: utf-8 -*-
"Utility objects for pretty syntax in user code."
# Copyright (C) 2008-2016 Martin Sandve Alnæs
#
# This file is part of UFL.
#
# UFL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UFL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with UFL. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg, 2008
# Modified by Kristian Oelgaard, 2009
from ufl.core.multiindex import indices
from ufl.cell import Cell
from ufl.measure import Measure
from ufl.measure import integral_type_to_measure_name
# Default free indices for tensor notation (i, j, k, l and p, q, r, s).
i, j, k, l = indices(4)  # noqa: E741
p, q, r, s = indices(4)
# Inject one default Measure object per integral type (dx, ds, dS, ...)
# into this module's namespace; that is why the uses below need noqa F821.
for integral_type, measure_name in integral_type_to_measure_name.items():
    globals()[measure_name] = Measure(integral_type)
# TODO: Firedrake hack, remove later
ds_tb = ds_b + ds_t  # noqa: F821
# Default measure dX including both uncut and cut cells
dX = dx + dC  # noqa: F821
# Create objects for the builtin known cell types (name, topological dim).
vertex = Cell("vertex", 0)
interval = Cell("interval", 1)
triangle = Cell("triangle", 2)
tetrahedron = Cell("tetrahedron", 3)
quadrilateral = Cell("quadrilateral", 2)
hexahedron = Cell("hexahedron", 3)
# Facet is just a dummy declaration for RestrictedElement
facet = "facet"
| [
"jostein.gj@gmail.com"
] | jostein.gj@gmail.com |
8e838aceaeca95e269ad225ea7c65c28c335810c | e3030bb29b8c713daf360953e27b7752c6f9daa2 | /bubble_sort_test.py | 9c31b843dcadcdfa4fb66c1413e45d0460c932f7 | [] | no_license | vicvv/python_scripts | 4941316c92ec0bc7ebf6d7011071a2a727606de8 | ebe2858f2164085f75bdb1e832f894aa4ee5e729 | refs/heads/master | 2022-11-13T13:40:27.440346 | 2022-10-09T02:21:30 | 2022-10-09T02:21:30 | 217,444,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | list = [3,1,9,2,9]
print ("Original list: " , list)
changed = True
while changed:
print("New Loop!")
changed = False
for i in range(0,len(list) - 1):
print(list)
if list[i] > list[i+1]:
(list[i+1], list[i]) = (list[i], list[i+1])
changed = True
| [
"xolosno270@gmail.com"
] | xolosno270@gmail.com |
cfc80e9ee07487642639f480fc81954aaf00149f | e9261678450fee1b9f05b6b03972c62c79c2bc2c | /tensorflow_compression/python/ops/round_ops_test.py | 6e9ad7ae3c8393047da725a7d9e23868e3053372 | [
"Apache-2.0"
] | permissive | tensorflow/compression | 46aa22462eded425ea66d9f006da924d330e142f | 80d962f8f8532d9a3dbdaf0a97e249b7be7c29f6 | refs/heads/master | 2023-08-21T01:11:34.129210 | 2023-08-11T15:54:52 | 2023-08-11T15:55:37 | 133,584,278 | 818 | 296 | Apache-2.0 | 2022-11-10T19:48:07 | 2018-05-15T23:32:19 | Python | UTF-8 | Python | false | false | 3,791 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for soft round."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_compression.python.ops import round_ops
class SoftRoundTest(tf.test.TestCase, parameterized.TestCase):
def test_soft_round_small_alpha_is_identity(self):
x = tf.linspace(-2., 2., 50)
y = round_ops.soft_round(x, alpha=1e-13)
self.assertAllClose(x, y)
def test_soft_round_large_alpha_is_round(self):
# We don't care what happens exactly near half-integer values:
for offset in range(-5, 5):
x = tf.linspace(offset - 0.499, offset + 0.499, 100)
y = round_ops.soft_round(x, alpha=2000.0)
self.assertAllClose(tf.round(x), y, atol=0.02)
def test_soft_inverse_round_small_alpha_is_identity(self):
x = tf.linspace(-2., 2., 50)
y = round_ops.soft_round_inverse(x, alpha=1e-13)
self.assertAllEqual(x, y)
def test_soft_inverse_is_actual_inverse(self):
x = tf.constant([-1.25, -0.75, 0.75, 1.25], dtype=tf.float32)
y = round_ops.soft_round(x, alpha=2.0)
x2 = round_ops.soft_round_inverse(y, alpha=2.0)
self.assertAllClose(x, x2)
def test_soft_round_inverse_large_alpha_is_ceil_minus_half(self):
# We don't care what happens exactly near integer values:
for offset in range(-5, 5):
x = tf.linspace(offset + 0.001, offset + 0.999, 100)
y = round_ops.soft_round_inverse(x, alpha=5000.0)
self.assertAllClose(tf.math.ceil(x) - 0.5, y, atol=0.001)
def test_conditional_mean_large_alpha_is_round(self):
# We don't care what happens exactly near integer values:
for offset in range(-5, 5):
x = tf.linspace(offset + 0.001, offset + 0.999, 100)
y = round_ops.soft_round_conditional_mean(x, alpha=5000.0)
self.assertAllClose(tf.math.round(x), y, atol=0.001)
@parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6)
def test_soft_round_values_and_gradients_are_finite(self, alpha):
x = tf.linspace(0., 1., 11) # covers exact integers and half-integers
with tf.GradientTape() as tape:
tape.watch(x)
y = round_ops.soft_round(x, alpha=alpha)
dy = tape.gradient(y, x)
self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool))
self.assertAllEqual(tf.math.is_finite(dy), tf.ones(x.shape, dtype=bool))
@parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6)
def test_soft_round_inverse_values_and_gradients_are_finite(self, alpha):
x = tf.linspace(-.5, .5, 11) # covers exact integers and half-integers
with tf.GradientTape() as tape:
tape.watch(x)
y = round_ops.soft_round_inverse(x, alpha=alpha)
dy = tape.gradient(y, x)
self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool))
is_finite = tf.math.is_finite(dy)
expected_finite = tf.ones(dy.shape, dtype=bool)
if alpha > 15:
# We allow non-finite values at 0 for large alphas, since the function
# simply is extremely steep there.
expected_finite = tf.tensor_scatter_nd_update(
expected_finite, [[5]], [is_finite[5]])
self.assertAllEqual(is_finite, expected_finite)
if __name__ == "__main__":
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
1dd995ecbf18a55f8656020e123f70bf4bbaca0e | 002ce67bd8b405ef097741165c16af3ef9c89b06 | /test1234_dev_3126/wsgi.py | 9db98b92f57c0333439ed44a259d3ec968dbcf55 | [] | no_license | crowdbotics-apps/test1234-dev-3126 | cd9bc5a3926546a92e9ab39de2726065b6854730 | 226847552ae1f8273977b005b395701a36dd3498 | refs/heads/master | 2023-03-30T00:23:04.619342 | 2020-04-20T04:40:19 | 2020-04-20T04:40:19 | 257,172,780 | 0 | 0 | null | 2021-04-09T18:25:46 | 2020-04-20T04:39:40 | Python | UTF-8 | Python | false | false | 411 | py | """
WSGI config for test1234_dev_3126 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test1234_dev_3126.settings')
# Module-level WSGI callable that application servers import and call.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
01aa1c46ef883e8b40c3d3a86289407768ff42aa | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/webdriver/tests/classic/new_session/create_alwaysMatch.py | 4c4610966a6447eda19413197a76f56166971d38 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 468 | py |
import pytest
from .conftest import product, flatten
from tests.support.asserts import assert_success
from tests.classic.new_session.support.create import valid_data
@pytest.mark.parametrize("key,value", flatten(product(*item) for item in valid_data))
def test_valid(new_session, add_browser_capabilities, key, value):
    # Build the capability set first, then request a session carrying it
    # under "alwaysMatch" and assert the session was created successfully.
    capabilities = add_browser_capabilities({key: value})
    response, _ = new_session(
        {"capabilities": {"alwaysMatch": capabilities}})
    assert_success(response)
| [
"mcastelluccio@mozilla.com"
] | mcastelluccio@mozilla.com |
65a09fdbf361363f147f0d6bb1e6b93b054e8390 | 559e336386e02c0e5ebc7316424c3b4a41380d99 | /fullstack/statistics/distro_evaluation_iso.py | e36f45cff0a3455c400f50e31fb62509daeaf52b | [] | no_license | maranemil/howto | edf1e294544ef6980894dcd345d73160d8aa9620 | f6270ed0affcdbd899dd8a2ff9b0b98625e63a5a | refs/heads/master | 2023-09-05T03:02:18.526914 | 2023-09-04T11:27:52 | 2023-09-04T11:27:52 | 22,177,757 | 48 | 26 | null | 2022-10-17T19:43:31 | 2014-07-23T21:04:50 | Python | UTF-8 | Python | false | false | 4,269 | py |
# https://www.onlinegdb.com/online_python_compiler
# https://ideone.com/
# https://www.tutorialspoint.com/execute_python_online.php - support numpy pandas matplotlib math
# https://repl.it/repls/DimLoathsomeTwintext - support numpy pandas matplotlib math sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as pt
from sklearn import linear_model
import math
# https://bigdata-madesimple.com/how-to-run-linear-regression-in-python-scikit-learn/
# https://www.programiz.com/python-programming/array#introduction
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.assign.html
# https://towardsdatascience.com/building-a-deployable-ml-classifier-in-python-46ba55e1d720
# https://data-science-blog.com/blog/2017/10/17/lineare-regression-in-python-scitkit-learn/
# https://pandas.pydata.org/pandas-docs/stable/basics.html
# https://www.kaggle.com/riteshdash/linear-regression-numpy-pandas-sklearn-matplotlib
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.polyfit.html
# https://www.ritchieng.com/pandas-selecting-multiple-rows-and-columns/
# https://pythonhow.com/accessing-dataframe-columns-rows-and-cells/
# https://bigdata-madesimple.com/how-to-run-linear-regression-in-python-scikit-learn/
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.html
# https://towardsdatascience.com/embedding-machine-learning-models-to-web-apps-part-1-6ab7b55ee428
# https://towardsdatascience.com/building-a-deployable-ml-classifier-in-python-46ba55e1d720
# https://dziganto.github.io/data%20science/online%20learning/python/scikit-learn/An-Introduction-To-Online-Machine-Learning/
# https://www.digitalocean.com/community/tutorials/how-to-build-a-machine-learning-classifier-in-python-with-scikit-learn
arDistro = [
("distro","Yes Features","no Features","Score Seconds","Total Score Yes + Time"),
("Manjaro Xfce minimal 0.8.9 x86 64", 5, 0, 10, 15),
("Pop os 18 04 amd64 intel 37", 5, 0, 9, 14),
("Lubuntu 16.04.5 desktop amd64", 4, 1, 9, 13),
("Ubuntu 14.04.5 LTS (Trusty Tahr)", 5, 0, 8, 13),
("Ubuntu 9.04 desktop i386", 3, 2, 10, 13),
("Elementary OS 0.4.1 stable", 5, 0, 7, 12),
("Fuduntu 2013 2 i686 LiteDVD", 3, 2, 9, 12),
("OpenSUSE 11 1 GNOME LiveCD i686", 3, 2, 9, 12),
("Trisquel mini 8.0 amd64", 3, 2, 9, 12),
("Ubuntu 10.04 desktop i386", 3, 2, 9, 12),
("Ubuntu 12.04.5 LTS (Precise Pangolin)", 3, 2, 9, 12),
("Ubuntu 16.04.5 LTS (Xenial Xerus)", 5, 0, 7, 12),
("Black Lab bll 8 unity x86 64", 4, 1, 7, 11),
("LinuxMint 19 xfce 64bit", 4, 1, 7, 11),
("Pure-OS 8 0 gnome 20180904 amd64", 5, 0, 6, 11),
("Ubuntu 18.04.1 desktop amd64", 5, 0, 6, 11),
("Ubuntu 18.10 (Cosmic Cuttlefish) amd64", 5, 0, 6, 11),
("Feren_OS_x64", 5, 0, 5, 10),
("FreeBSD 11 2 RELEASE amd64", 2, 3, 8, 10),
("Kali Linux light 2018 3 amd64", 3, 2, 7, 10),
("Kali linux light 2018 2 amd64", 3, 2, 7, 10),
("RaspberryPi Debian 2017 x86 stretch", 3, 2, 7, 10),
("Debian live 8.5.0 i386 xfce desktop", 2, 3, 7, 9),
("CentOS 7 x86 64 Minimal 1804", 2, 3, 6, 8),
("Debian live 9.5.0 amd64 xfce", 2, 3, 6, 8),
("Red-Hat rhel server 7 5 x86 64", 2, 3, 6, 8),
("Debian 7.11 0 i386 xfce CD 1", 3, 2, 4, 7),
("Fedora Workstation Live x86 64", 5, 0, 2, 7),
("Debian 9.5.0 amd64 xfce CD 1", 2, 3, 4, 6),
("Scientific Linux SL 7 5 x86 64 2018", 2, 3, 3, 5),
("Linux Mint 18 3 xfce 64bit", 4, 1, 0, 4)
]
#df = pd.DataFrame({'A': [1, 2, 3]})
# Build a DataFrame from the tuples above; note the first tuple is a
# header row but becomes a data row (columns get integer labels 0..4).
df = pd.DataFrame(arDistro)
#df["Distro"] = np.zeros(df.shape[0])
print(df.head())
print ("--------------------------------------------------------------------")
print(df.columns)
print ("--------------------------------------------------------------------")
#print(df.describe())
#print "--------------------------------------------------------------------"
#print(df.tail(6))
print(df.index)
print(df.loc[3,1]) # row 3, col 1
print ("--------------------------------------------------------------------")
# Feature (column 1, "Yes Features") and target (column 2, "no Features")
# for rows 1..22 — row 0 holds the header tuple, so it is skipped.
x = df.loc[1:22, 1]
y = df.loc[1:22, 2]

from sklearn.linear_model import LinearRegression

# BUGFIX: LinearRegression.fit() requires a 2-D feature matrix of shape
# (n_samples, n_features).  Passing the 1-D pandas Series raised
# "Reshape your data either using array.reshape(-1, 1) ..." — exactly the
# message quoted in the original comments — so reshape to one column.
X = x.values.reshape(-1, 1)

regr = LinearRegression()
# fit() returns the estimator itself, so this prints the fitted model repr.
print(regr.fit(X, y))
| [
"maran.emil@gmail.com"
] | maran.emil@gmail.com |
3f81577375d63aefb453286aabffd215d6e539a2 | 688df3b704d072fc05f5d23a432f30037e864d35 | /sorting/quick_sort.py | 793378ba8787b7ad85a239a51eb46683ffb51e4d | [] | no_license | mfuentesg/problem-solving | 87d459cc532b81ec0882bd88b4fd0d4d62357824 | ffb91831a303ce8670c04580a10dcc4b7dd35dcb | refs/heads/master | 2023-03-19T00:20:27.245732 | 2021-03-07T19:23:43 | 2021-03-07T19:23:43 | 315,674,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | def quick_sort(items):
def swap(i, j):
if i == j:
return
temp = items[i]
items[i] = items[j]
items[j] = temp
def partition(left, right):
pivot = items[right]
pointer = left - 1
for i in range(left, right):
if items[i] < pivot:
pointer += 1
swap(i, pointer)
swap(pointer + 1, right)
return pointer + 1
def qs(left, right):
if left >= right:
return
pi = partition(left, right)
qs(left, pi - 1)
qs(pi + 1, right)
qs(0, len(items) - 1)
# Demo: sort a sample list in place and show the result.
ll = [10, 80, 30, 90, 40, 50, 70]
quick_sort(ll)
print(ll)
| [
"marceloe.fuentes@gmail.com"
] | marceloe.fuentes@gmail.com |
b3ef8f8b0b2b71c3623b3d8b0ba12a381961635c | f7d47249f7e74bec51eacaa05f381674b92e3611 | /interview/19_多线程threading.Thread.py | 729ee1370c66c0b35d3a2915947de85fc312ba42 | [] | no_license | jinlijiang123/crawler | f96764bc5e7ae6f254e397189c4228336889a0d1 | cd3f16d04cc7c83b78d5a78afa7a57951399d490 | refs/heads/master | 2020-06-15T19:44:48.892953 | 2019-03-26T04:35:29 | 2019-03-26T04:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8-*-
import random
import time,threading
def thread_run(urls):
print "Current %s is running..."%threading.current_thread().name
for url in urls:
print '%s......%s'%(threading.current_thread().name,url)
time.sleep(random.random())
print "Current %s ends..."%threading.current_thread().name
print '%s is running...'%threading.currentThread().name
#通过threading.Thread()方法创建实例
t1 = threading.Thread(target = thread_run,name = 'Thread1',args = (['url1','url2','url3'],))#注意传入的args是一个tuole
t2 = threading.Thread(target = thread_run,name = 'Thread2',args = (['url4','url5','url6'],))
t1.start()
t2.start()
t1.join()
t2.join()
print '%s ends...'%threading.currentThread().name
| [
"648672371@qq.com"
] | 648672371@qq.com |
def004f06e653303926927e3b105f622f2d9984f | 9ca9cad46f2358717394f39e2cfac2af4a2f5aca | /Week04/01_basics/01_basics_LGY.py | f3d3afb393caffb65ae5b7cb0cc3a3bf563069da | [] | no_license | Artinto/Python_and_AI_Study | ddfd165d1598914e99a125c3019a740a7791f6f6 | 953ff3780287825afe9ed5f9b45017359707d07a | refs/heads/main | 2023-05-05T15:42:25.963855 | 2021-05-24T12:24:31 | 2021-05-24T12:24:31 | 325,218,591 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | #01_basics.py
import numpy as np # 행렬계산을 용이하게 해주는 라이브러리
import matplotlib.pyplot as plt # 시각적으로 볼 수 있도록 그래프를 만들어주는 라이브러리
x_data = [1.0, 2.0, 3.0] # 학습시킬 문제 x data
y_data = [2.0, 4.0, 6.0] # 학습시킬 답안 y data
# 함수실행시 실행되는 함수
# linear regression
# y_pred_val = forward(x_val) // line 실행시 x_val이 통과하는 forward함수
def forward(x):
return x * w
# l = loss(x_val, y_val) // line 실행시 x_val, y_val가 통과하는 loss함수
def loss(x, y): # Loss function
y_pred = forward(x) # forward(x_val) 실행
# y_pred = x_val * w
return (y_pred - y) * (y_pred - y) # (x_val * w - y_val)^2
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1): # Weight 값 : 0.0에서 4.1전까지 0.1씩 증가한 array만들기
# Print the weights and initialize the lost
print("w=", w)
l_sum = 0 #loss값들의 합 : x_data*w(예측값)과 y_data(실제값)과의 오차들의 합
for x_val, y_val in zip(x_data, y_data): # 각각의 학습데이터를 가져옴
y_pred_val = forward(x_val) # 학습데이터 x를 forward라는 함수에 넣어줌. (forward함수 실행)
# (return x * w) y_pred_val에는 x_val * W 값이 들어감.
l = loss(x_val, y_val) # 두개의 데이터가 loss함수를 거침. (loss함수 실행)
# return (y_pred - y) * (y_pred - y) # (x_val * w - y_val)^2
l_sum += l #loss값들의 합
print("\t", x_val, y_val, y_pred_val, l) # /t : tab
print("MSE=", l_sum / len(x_data)) # MSE값 print
w_list.append(w)
mse_list.append(l_sum / len(x_data))
print(w_list,"asdfasdf")
plt.plot(w_list, mse_list) # x축에 w_list, y축엔 mse_list을 나타내기
plt.ylabel('Loss') # x축 이름
plt.xlabel('w') # y축 이름
plt.show() # 그래프 그려라
| [
"noreply@github.com"
] | Artinto.noreply@github.com |
9f11a8a4e2e6119aaf6a992994dea49a32aebe0d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_myspace.py | 5aa68abf70dd96af4471607b3e717ee92bc74843 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py |
#calss header
class _MYSPACE():
    """Lexicon entry for the noun "MYSPACE".

    Instances expose the word's definitions plus empty taxonomy slots
    (parents/childen/properties) and a JSON payload returned by run().
    """

    def __init__(self):
        self.name = "MYSPACE"
        self.definitions = [u'a social media website used especially for sharing music, music videos, and information about musical artists ']
        self.parents = []
        self.childen = []  # NOTE: historic attribute name (sic) kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=None, obj2=None):
        """Return the entry's JSON payload.

        *obj1* and *obj2* are accepted for interface compatibility but are
        unused.  BUGFIX: the original defaults were mutable lists ([]),
        which Python shares across every call; None sentinels remove that
        hazard without changing behaviour.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1df2a55cfa73babf59f39d8d79273ee2f586ee42 | 20e3010608e40a6ec5ea56f69d122a62182e4bdb | /1 - Python-2/10 - unit tests/test_prime_numbers.py | 846a190019cc7098811faedf3ee6b8c99fe68923 | [] | no_license | LarisaOvchinnikova/Python | ee65eac221cd03563d60110118175692564c5b2d | 9cc86a260828662995dec59a6d69528f96d37e79 | refs/heads/master | 2021-08-22T21:41:02.351589 | 2021-05-25T18:37:09 | 2021-05-25T18:37:09 | 253,842,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from prime_numbers import is_prime, fibonacci
def test_is_prime():
    """Primes 2 and 3 are accepted; -1 and 4 are rejected."""
    cases = {2: True, 3: True, -1: False, 4: False}
    for number, expected in cases.items():
        assert is_prime(number) == expected
def test_fibonacci():
    """The first Fibonacci number is 1."""
    first = fibonacci(1)
    assert first == 1
| [
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
c7480f1b1bec354bb6248ee2a68df0ae9e94dbc2 | 462c56e7454c97e0541588b9be66a4e216ea20fd | /453.minimum-moves-to-equal-array-elements.py | 2e6d96729301360e54f20d7f08acd741bd43ac3d | [] | no_license | LouisYLWang/leetcode_python | d5ac6289e33c5d027f248aa3e7dd66291354941c | 2ecaeed38178819480388b5742bc2ea12009ae16 | refs/heads/master | 2020-05-27T08:38:48.532000 | 2019-12-28T07:08:57 | 2019-12-28T07:08:57 | 188,549,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #
# @lc app=leetcode id=453 lang=python
#
# [453] Minimum Moves to Equal Array Elements
#
class Solution:
    def minMoves(self, nums):
        """LC 453: incrementing n-1 elements by one is equivalent to
        decrementing a single element by one, so the minimum number of
        moves is the total distance of all elements to the minimum."""
        smallest = min(nums)
        return sum(value - smallest for value in nums)
| [
"louis.yl.wang@outlook.com"
] | louis.yl.wang@outlook.com |
492edc1fcfc189426a503a19e709f3bc7b819d30 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1245-042/sdB_pg_1245-042_lc.py | 91ce244322e683ec2e15ee1d01e0cc7f5bb72ef9 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
    # One gPhoton aperture-photometry run for sdB PG 1245-042: NUV band,
    # sky position in decimal degrees, 30 s time bins, 0.00555556 deg
    # aperture with a background annulus; light curve written to the CSV.
    gAperture(band="NUV", skypos=[192.058083,-4.513167], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1245-042/sdB_pg_1245-042_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
3b06e98026136575d9506393d4ebba3fb1fec542 | c1e46fb0a7d8d86fc52dfb54016e17d22f81c2b4 | /eterea_quickLocators/scripts/scale_selected_locators.py | 0bb1ed925f68f58a2d567fb23c92ac9f4ee4cd19 | [] | no_license | Tilapiatsu/modo-tila_customconfig | 130ac84397f87048c87cd670f152df74eefd6b26 | 749d02fcb4d05ec0dbe6895e3d415751f181592e | refs/heads/master | 2021-01-17T12:38:20.269540 | 2018-10-22T08:37:32 | 2018-10-22T08:37:32 | 59,156,743 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | #python
# scale_selected_locators.py
#
# Version 1.2 - By Cristobal Vila, 2013 - With the help of other members from Luxology Forums :-)
# Special thanks to MonkeybrotherJR
#
# To scale all channels in a selected Locators,
# no matter the kind of Locators and if there are some channels greyed
#
# www.etereaestudios.com
import lx
try:
    scene_svc = lx.Service("sceneservice")

    # Scale factor: the script's first (and only) argument.
    myargument = lx.args()[0]

    # Get the currently selected layers.
    selected_layers = lx.evalN("query sceneservice selection ? all")

    # Drop the selection so that we can work on one item at a time.
    lx.eval("select.drop item")

    # Collect the items that turn out to be locators.
    locators = []

    for item in selected_layers:
        # Select the layer so the item.channel commands below target it.
        scene_svc.select("item", str(item))
        lx.eval('select.item {%s} set' % item)

        # Only plain locator items are scaled.
        itemType = scene_svc.query("item.type")
        if itemType == 'locator':
            locators.append(item)

            # Does this locator use the default or a custom draw shape?
            lx.eval('item.channel locator$drawShape ?')
            locatorShape = lx.eval1('item.channel locator$drawShape ?')

            if locatorShape == 'default':
                # Scale the single size channel of a default locator.
                lx.eval("item.channel locator$size ?*" + myargument)
            elif locatorShape == 'custom':
                # Remember the current custom shape so it can be restored.
                lx.eval("item.channel locator$isShape ?")
                originalShape = lx.eval("item.channel locator$isShape ?")

                # Scale the generic size channel.
                lx.eval("item.channel locator$size ?*" + myargument)

                # Temporarily switch to Box so the XYZ size channels are
                # all available (some are greyed out for other shapes),
                # then scale them.
                lx.eval("item.channel locator$isShape box")
                lx.eval("item.channel locator$isSize.X ?*" + myargument)
                lx.eval("item.channel locator$isSize.Y ?*" + myargument)
                lx.eval("item.channel locator$isSize.Z ?*" + myargument)

                # Temporarily switch to Circle to reach the radius channel.
                lx.eval("item.channel locator$isShape circle")
                lx.eval("item.channel locator$isRadius ?*" + myargument)

                # Restore the shape the locator originally had.
                lx.eval("item.channel locator$isShape %s" % originalShape)

    # Re-select the layers the user originally had selected.
    for item in selected_layers:
        lx.eval('select.item {%s} add' % item)

except Exception:
    # BUGFIX: the old handler formatted ``sys.exc_value`` without ever
    # importing ``sys`` (and ``sys.exc_value`` is Python-2 only), so any
    # failure raised a NameError instead of being reported.  Log the
    # full traceback instead.
    import traceback
    lx.out(traceback.format_exc())
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
a5c615b39fbe692f7ddbc540eb34891cbe602283 | ad20495c8df427211dba51c93c507365f9fce319 | /init_topics.py | 3f60ed5eadea3515bdf1c8b64457c332722eb00d | [
"LicenseRef-scancode-public-domain"
] | permissive | tilejet/tilejet-server | 779398257c65138c906f3989c63e029dfe45587e | 7bd0caa18cde98a8fd80aeea6e06bbe8aa2fa1be | refs/heads/master | 2021-01-10T02:41:23.553939 | 2015-12-06T07:18:56 | 2015-12-06T07:19:59 | 43,448,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from django.conf import settings
from geowatchdjango.utils import provision_geowatch_client
verbose = True
# Master switch: everything below is a no-op unless GeoWatch is enabled.
enabled = settings.GEOWATCH_ENABLED
if not enabled:
    print "GeoWatch not enabled via settings"
# Topic names for the three tilejet message streams.
topic_requests = settings.TILEJET_GEOWATCH_TOPIC_REQUESTS
topic_logs = settings.TILEJET_GEOWATCH_TOPIC_LOGS
topic_stats = settings.TILEJET_GEOWATCH_TOPIC_STATS
if enabled:
    client = provision_geowatch_client()
    # Show (up to 100) existing topics before creating the new ones.
    topics = client.list_topics(limit=100, verbose=verbose)
    print topics
    client.create_topic(topic_requests)
    client.create_topic(topic_logs)
    client.create_topic(topic_stats)
    print "Final Check..."
    print client.list_topics(limit=100, verbose=verbose)
else:
    print "Missing settings"
| [
"pjdufour.dev@gmail.com"
] | pjdufour.dev@gmail.com |
0e828d914306ac83778d23de820a991cf5e6c1a2 | d257a3c9c96b919d7ba8ffe4b674437aea76afc7 | /zips/script.vistatv-installer/extractor.py | 7eedae1c68e2b89e9861ab34f4674f6ac0de2fe4 | [] | no_license | biglad/eptvinstall | 457053791684127c91bb847262d91cd76e9e0a12 | 4eaa522a7d9edc068e7824576147be190897fb09 | refs/heads/master | 2022-05-06T05:00:23.291801 | 2022-04-14T23:17:11 | 2022-04-14T23:17:11 | 230,219,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py |
import xbmcgui
import utils
import os
import xbmc
# Kodi major.minor version parsed from the build string, e.g. "18.1".
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
# Kodi 18+ uses the bundled zfile module in place of stdlib zipfile.
if KODIV > 17:
    import zfile as zipfile #FTG mod for Kodi 18
else:
    import zipfile
def extract(packedfile, unpackpath, dname, dp = None):
    """Unzip *packedfile* into *unpackpath* behind a Kodi progress dialog,
    then delete the archive.

    dname: dialog title (falls back to "CerebroTV" when empty).
    dp:    optional pre-created xbmcgui.DialogProgress; one is created
           (and shown) here when not supplied.
    """
    if not dname:
        dname = "CerebroTV"
    if not dp:
        dp = xbmcgui.DialogProgress()
        dp.create(dname,"Adding New Files to System",'[COLOR=black].[/COLOR]', 'DO NOT TURN OFF! ')
        dp.update(0)
    zfile = zipfile.ZipFile(packedfile, 'r')
    nItem = float(len(zfile.infolist()))
    index = 0
    # Extract entry by entry so the dialog can show percentage progress.
    for item in zfile.infolist():
        index += 1
        percent = int(index / nItem *100)
        filename = item.filename
        dp.update(percent)
        try:
            zfile.extract(item, unpackpath)
        except Exception, e:
            # Best-effort: log and continue with the remaining entries.
            utils.log('Changelog error in extractAll')
            utils.log(e)
    zfile.close()
    dp.close()
    dp.create("DOING HOUSE KEEPING",'[COLOR=black].[/COLOR]','CLEANING UP', ' ')
    xbmc.sleep(2500)
    # Remove the archive; each deletion strategy is tried best-effort.
    try: os.unlink(packedfile)
    except: pass
    xbmc.sleep(2500)
    try: utils.DeleteFile(packedfile)
    except: pass
    try: os.remove(packedfile)
    except: pass
    dp.close()
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
5d2c11912d326b2c3506e24f1f9f563969a58800 | d3239c2e5652378b17932553f80be1dbcbbdfdbf | /python/week14/p_00.py | eff8eaff11bcd268f2727c5834064ef89fe0e157 | [] | no_license | jorge-alvarado-revata/code_educa | 673a8b10817c24b3fc2c5792d216837c15a701aa | 241e1e3f43586e486b73cee8f385ab74dd99caf1 | refs/heads/main | 2022-12-25T21:37:36.988225 | 2020-10-13T18:59:38 | 2020-10-13T18:59:38 | 303,801,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | class Foo:
    def __init__(self, name: str):
        # Keep the name on a single-underscore ("non-public") attribute.
        self._name = name
    def get_name(self) -> str:
        """Return the name given at construction time."""
        return self._name
def __str__(self):
return 'object Foo: {}'.format(self._name)
# Demo: build an instance and exercise __str__ and get_name().
a = Foo('myfoo')
print(a)
print(a.get_name())
| [
"serviciosplusapp@gmail.com"
] | serviciosplusapp@gmail.com |
a7dc79a7ddb9579d57776b334612349c8cf06707 | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/tower/lib/python2.7/site-packages/azure/mgmt/logic/models/run_workflow_parameters.py | c0fe65944511e5100dc38f57793055e7c3a88f22 | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 1,043 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RunWorkflowParameters(Model):
    """Parameters used to trigger a Logic Apps workflow run.

    :param name: Gets or sets the name of workflow run trigger.
    :type name: str
    :param outputs: Gets or sets the outputs of workflow run trigger.
    :type outputs: object
    """

    # msrest (de)serialization map: attribute -> wire key and wire type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'outputs': {'key': 'outputs', 'type': 'object'},
    }

    def __init__(self, name=None, outputs=None):
        # NOTE(review): this generated code does not call
        # super().__init__(); presumably fine for this autorest version —
        # confirm against msrest.Model before changing.
        self.name = name
        self.outputs = outputs
| [
"admin@example.com"
] | admin@example.com |
85b27e1850e9ab3f662cc6e402360ba7f1a4fbbf | c2f85286d1e21fb803c35f6d996abc850b993e53 | /mystorage/views.py | bac4b48fe57ef1003f3a3b1a4254cadca2c86c66 | [] | no_license | devdw98/likelion_drf | dfeec1bf5ee153918807f99040c8c33240c4344c | 6d0171961bc93f4edd7998b7351034e0a936079d | refs/heads/master | 2020-07-29T20:38:29.041098 | 2019-10-27T07:22:53 | 2019-10-27T07:22:53 | 209,951,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | from rest_framework import viewsets
from .models import Essay, Album, Files
from .serializers import EssaySerializer, AlbumSerializer, FilesSerializer
from rest_framework.filters import SearchFilter # 검색
from rest_framework.parsers import MultiPartParser,FormParser
from rest_framework.response import Response
from rest_framework import status
class PostViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Essay objects, scoped to the requesting user.

    Supports ?search= over title and body via SearchFilter.
    """

    queryset = Essay.objects.all()
    serializer_class = EssaySerializer
    filter_backends = [SearchFilter]
    search_fields = ('title', 'body')

    def perform_create(self, serializer):
        # Automatically record the requesting user as the author.
        serializer.save(author=self.request.user)

    def get_queryset(self):
        # Authenticated users only ever see their own essays; anonymous
        # requests get an empty queryset.
        qs = super().get_queryset()
        if self.request.user.is_authenticated:
            qs = qs.filter(author=self.request.user)
        else:
            # BUGFIX: the original assigned the bound method (``qs.none``)
            # instead of calling it, so anonymous requests returned a
            # method object rather than an empty queryset.
            qs = qs.none()
        return qs
class ImgViewSet(viewsets.ModelViewSet):
    """Standard model CRUD endpoints for Album objects."""
    queryset = Album.objects.all()
    serializer_class = AlbumSerializer
class FileViewSet(viewsets.ModelViewSet):
    """Upload endpoint for Files objects."""

    queryset = Files.objects.all()
    serializer_class = FilesSerializer
    # Accept multipart and form payloads so various media files upload.
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request, *args, **kwargs):
        """Validate and store an uploaded file.

        BUGFIX: the original referenced ``HTTP_201_CREATED`` and
        ``HTTP_400_BAD_REQUEST`` as bare names even though only the
        ``status`` module is imported, and read ``serializer.error``
        instead of ``serializer.errors`` — all three raised NameError/
        AttributeError at runtime.
        """
        serializer = FilesSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
"devdw98@gmail.com"
] | devdw98@gmail.com |
76798c14c341150c22b20258d37b3a778b75999d | 88023c9a62994e91291c67088156a2894cc26e9e | /corral/run/alert.py | 158db6152901180cc09d4f126540afa3dd86c8dc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | toros-astro/corral | 41e9d0224d734c4268bf5161d472b3c0375842f0 | 75474b38ff366330d33644461a902d07374a5bbc | refs/heads/master | 2023-06-10T15:56:12.264725 | 2018-09-03T17:59:41 | 2018-09-03T17:59:41 | 44,282,921 | 6 | 5 | BSD-3-Clause | 2023-03-24T12:03:17 | 2015-10-14T23:56:40 | Python | UTF-8 | Python | false | false | 7,350 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import datetime
import collections
import six
from .. import db, util, exceptions
from ..db.default_models import Alerted
from ..core import logger
from .base import Processor, Runner
conf = util.dimport("corral.conf", lazy=True)
# =============================================================================
# CONSTANTS
# =============================================================================
# NOTE(review): the "-15s" after {now} is not a format spec here -- it is
# rendered literally in the message. It looks like a leftover printf-style
# width specifier; confirm intent before changing it.
ALERT_TEMPLATE = (
    "[{project_name}-ALERT @ {now}-15s] Check the object '{obj}'\n")
# =============================================================================
# ALERT CLASSES
# =============================================================================
class AlertRunner(Runner):
    """Runner that drives a single Alert: it validates the target class,
    then generates, processes and persists every matching object."""

    def validate_target(self, alert_cls):
        """Raise TypeError unless *alert_cls* is an Alert subclass."""
        if not (inspect.isclass(alert_cls) and issubclass(alert_cls, Alert)):
            raise TypeError(
                "alert_cls '{}' must be subclass of 'corral.run.Alert'".format(
                    alert_cls))

    def run(self):
        """Execute the target alert inside a single database session."""
        alert_cls = self.target
        logger.info("Executing alert '{}'".format(alert_cls))
        with db.session_scope() as session, alert_cls(session) as alert:
            for candidate in alert.generate():
                alert.validate(candidate)
                # process() may return nothing, a single object or an
                # iterable of objects; normalise to an iterable.
                produced = alert.process(candidate) or []
                if not hasattr(produced, "__iter__"):
                    produced = (produced,)
                for result in produced:
                    alert.validate(result)
                    alert.save(result)
        logger.info("Done Alert '{}'".format(alert_cls))
class Alert(Processor):
    """Processor that scans a model for objects matching ``conditions``
    and pushes each one through the configured endpoints (``alert_to``).

    Subclasses configure the default ``generate`` through the ``model``,
    ``conditions`` and optional ``ordering`` class attributes, or
    override ``generate`` entirely.  With ``auto_register`` True (the
    default) every processed object is recorded in the ``Alerted`` table
    and excluded from later runs; set it to False and implement
    ``filter_registered``/``register`` for custom bookkeeping.
    """
    runner_class = AlertRunner
    # Query configuration used by the default generate() implementation.
    model = None
    conditions = None
    ordering = None
    # Track processed objects automatically through the Alerted model.
    auto_register = True
    @classmethod
    def retrieve_python_path(cls):
        """Return the settings.ALERTS import string that resolves to cls."""
        for import_string in conf.settings.ALERTS:
            if cls == util.dimport(import_string):
                return import_string
    def setup(self):
        """Set up every configured endpoint before processing starts."""
        for ep in self.alert_to:
            ep.setup(self)
    def teardown(self, type, value, traceback):
        """Tear down every endpoint; arguments mirror a context-manager exit.

        NOTE(review): the parameter names shadow builtins; kept unchanged
        for backward compatibility with existing overrides.
        """
        for ep in self.alert_to:
            ep.teardown(type, value, traceback)
    def generate(self):
        """Return the query of objects that should trigger this alert.

        Raises NotImplementedError when a subclass relies on this default
        without defining the 'model' and 'conditions' class attributes.
        """
        if self.model is None or self.conditions is None:
            clsname = type(self).__name__
            raise NotImplementedError(
                "'{}' subclass with a default generate must redefine "
                "'model' and 'conditions' class-attributes".format(clsname))
        query = self.session.query(self.model).filter(*self.conditions)
        if self.auto_register:
            query = self._filter_auto_registered(query)
        else:
            query = self.filter_registered(query)
        if self.ordering is not None:
            query = query.order_by(*self.ordering)
        return query
    def _filter_auto_registered(self, query):
        """Exclude objects already recorded in Alerted for this alert/model."""
        filters = Alerted.alert_to_columns(type(self))
        filters.update(Alerted.model_class_to_column(self.model))
        alerteds = self.session.query(Alerted.model_ids).filter_by(**filters)
        if alerteds.count():
            # Group the recorded id values by column name...
            grouped_id = collections.defaultdict(set)
            for row in alerteds.all():
                for k, v in six.iteritems(row[0]):
                    grouped_id[k].add(v)
            # ...and filter out rows whose ids are all already recorded.
            exclude = []
            for k, v in grouped_id.items():
                exclude.append(getattr(self.model, k).in_(v))
            query = query.filter(~db.and_(*exclude))
        return query
    def _auto_register(self, obj):
        """Build (without persisting) the Alerted row marking obj as seen."""
        register = Alerted()
        register.alert = type(self)
        register.model = obj
        register.created_at = datetime.datetime.utcnow()
        return register
    def filter_registered(self, query):
        """Hook: exclude already-alerted objects when auto_register is False."""
        raise NotImplementedError()
    def register(self, obj):
        """Hook: record obj as alerted when auto_register is False."""
        raise NotImplementedError()
    def process(self, obj):
        """Send obj to every endpoint and return its registration record."""
        for ep in self.alert_to:
            ep.process(obj)
        if self.auto_register:
            return self._auto_register(obj)
        else:
            return self.register(obj)
    def render_alert(self, utcnow, endpoint, obj):
        """Format the default alert message for obj at time utcnow.

        NOTE(review): 'endpoint' is currently unused by this template.
        """
        return ALERT_TEMPLATE.format(
            project_name=conf.PACKAGE, now=utcnow.isoformat(), obj=obj)
# =============================================================================
# FUNCTIONS
# =============================================================================
def alerts_groups():
    """Return the sorted tuple of every group declared by loaded alerts."""
    all_groups = set()
    for alert_cls in load_alerts():
        all_groups |= set(alert_cls.get_groups())
    return tuple(sorted(all_groups))
def load_alerts(groups=None):
    """Import and return the Alert classes declared in settings.ALERTS.

    Parameters
    ----------
    groups : iterable or None
        When given, only alerts belonging to at least one of these
        groups are returned.

    Returns
    -------
    tuple
        Alert subclasses sorted by class name.

    Raises
    ------
    exceptions.ImproperlyConfigured
        If a configured import string does not resolve to an Alert
        subclass.
    """
    alerts = []
    logger.debug("Loading Alert Classes")
    for import_string in conf.settings.ALERTS:
        cls = util.dimport(import_string)
        if not (inspect.isclass(cls) and issubclass(cls, Alert)):
            # Fixed: the message previously said "STEP" (copy/paste from
            # the step loader); it now names the actual ALERT entry.
            msg = "ALERT '{}' must be subclass of 'corral.run.Alert'"
            raise exceptions.ImproperlyConfigured(msg.format(import_string))
        if groups is None or set(cls.get_groups()).intersection(groups):
            alerts.append(cls)
    alerts.sort(key=lambda cls: cls.__name__)
    return tuple(alerts)
def execute_alert(alert_cls, sync=False):
    """Run *alert_cls* synchronously or in a spawned runner process.

    Returns a tuple with the runner that was used (one element).
    """
    if not (inspect.isclass(alert_cls) and issubclass(alert_cls, Alert)):
        raise TypeError(
            "alert_cls '{}' must be subclass of 'corral.run.Alert'".format(
                alert_cls))
    alert_cls.class_setup()
    runner = alert_cls.runner_class()
    runner.setup(alert_cls)
    if sync:
        runner.run()
    else:
        # Drop pooled DB connections before forking so the child process
        # does not share sockets with the parent.
        db.engine.dispose()
        runner.start()
    procs = [runner]
    alert_cls.class_teardown()
    return tuple(procs)
| [
"jbc.develop@gmail.com"
] | jbc.develop@gmail.com |
9880d0bf8f032a96170410d474dea6707d70f473 | 15102eb2c657a296eb00821dc378225b79fbc17e | /Homework/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cache.py | 0fdcffdef8a23b2e8997bf89d25ee3f7bc7c5fe9 | [] | no_license | yuju13488/pyworkspace | 746446b3573fa6241d979b205e964e7d52af009b | 0c77836185237450ee446542e6ff3856c7cd7de1 | refs/heads/master | 2020-08-02T03:56:55.577735 | 2019-10-04T05:50:56 | 2019-10-04T05:50:56 | 211,226,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,690 | py | """Cache Management
"""
import errno
import hashlib
import logging
import os
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.download import path_to_url
from pip._internal.models.link import Link
from pip._internal.utils.compat import expanduser
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.wheel import InvalidWheelFilename, Wheel
# These imports are evaluated only by the type checker, never at runtime.
if MYPY_CHECK_RUNNING:
    from typing import Optional, Set, List, Any # noqa: F401
    from pip._internal.index import FormatControl # noqa: F401
logger = logging.getLogger(__name__)
class Cache(object):
    """Abstract base class providing cache directories for link data.

    :param cache_dir: root of the cache ("~" is expanded); falsy disables it.
    :param format_control: FormatControl limiting what may be read back.
    :param allowed_formats: subset of {"source", "binary"} this cache stores.
    """

    def __init__(self, cache_dir, format_control, allowed_formats):
        # type: (str, FormatControl, Set[str]) -> None
        super(Cache, self).__init__()
        self.cache_dir = expanduser(cache_dir) if cache_dir else None
        self.format_control = format_control
        self.allowed_formats = allowed_formats

        # allowed_formats must be a subset of the valid formats.
        _valid_formats = {"source", "binary"}
        assert self.allowed_formats.union(_valid_formats) == _valid_formats

    def _get_cache_path_parts(self, link):
        # type: (Link) -> List[str]
        """Return the path parts to join with cache_dir for *link*."""
        # The cache key is the URL without its fragment, plus the hash
        # (when available) so distinct artifacts never share a key.
        key_parts = [link.url_without_fragment]
        if link.hash_name is not None and link.hash is not None:
            key_parts.append("=".join([link.hash_name, link.hash]))
        key_url = "#".join(key_parts)

        # sha224 keeps keys short while remaining collision-resistant
        # enough for a local cache.
        hashed = hashlib.sha224(key_url.encode()).hexdigest()

        # Nest two-character directories so no single directory grows huge.
        return [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

    def _get_candidates(self, link, package_name):
        # type: (Link, Optional[str]) -> List[Any]
        """List cached entries for *link*, or [] when caching is disabled."""
        if not (self.cache_dir and package_name and link):
            return []

        canonical_name = canonicalize_name(package_name)
        formats = self.format_control.get_allowed_formats(canonical_name)
        if not self.allowed_formats.intersection(formats):
            return []

        root = self.get_path_for_link(link)
        try:
            return os.listdir(root)
        except OSError as err:
            # A missing cache directory simply means "no candidates".
            if err.errno in {errno.ENOENT, errno.ENOTDIR}:
                return []
            raise

    def get_path_for_link(self, link):
        # type: (Link) -> str
        """Return a directory to store cached items in for link."""
        raise NotImplementedError()

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        """Return a link to a cached item if one exists, else *link*."""
        raise NotImplementedError()

    def _link_for_candidate(self, link, candidate):
        # type: (Link, str) -> Link
        """Build a file:// Link for *candidate* inside link's cache dir."""
        root = self.get_path_for_link(link)
        return Link(path_to_url(os.path.join(root, candidate)))

    def cleanup(self):
        # type: () -> None
        pass
class SimpleWheelCache(Cache):
    """A cache of wheels for future installs."""

    def __init__(self, cache_dir, format_control):
        # type: (str, FormatControl) -> None
        super(SimpleWheelCache, self).__init__(
            cache_dir, format_control, {"binary"})

    def get_path_for_link(self, link):
        # type: (Link) -> str
        """Return the directory caching wheels built from *link*.

        Many wheels can exist for one sdist, so each sdist gets its own
        directory which lookups consult for usable wheels.  Only wheels
        with plausible version numbers are inserted, so repeatedly
        rebuilding an edited local source (e.g. ./package at version 0.0)
        cannot poison the cache with stale builds.
        """
        parts = self._get_cache_path_parts(link)
        # Layout: <cache_dir>/wheels/<aa>/<bb>/<cc>/<rest-of-hash>/
        return os.path.join(self.cache_dir, "wheels", *parts)

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        """Return the best supported cached wheel for *link*, else *link*."""
        best = None
        for wheel_name in self._get_candidates(link, package_name):
            try:
                wheel = Wheel(wheel_name)
            except InvalidWheelFilename:
                continue
            if not wheel.supported():
                # Built for a different python/abi/platform.
                continue
            rank = (wheel.support_index_min(), wheel_name)
            # Keep the running minimum (first one wins on ties, matching
            # the behaviour of min() over the full candidate list).
            if best is None or rank < best:
                best = rank
        if best is None:
            return link
        return self._link_for_candidate(link, best[1])
class EphemWheelCache(SimpleWheelCache):
    """A SimpleWheelCache backed by its own throwaway temp directory."""

    def __init__(self, format_control):
        # type: (FormatControl) -> None
        self._temp_dir = TempDirectory(kind="ephem-wheel-cache")
        self._temp_dir.create()
        super(EphemWheelCache, self).__init__(
            self._temp_dir.path, format_control)

    def cleanup(self):
        # type: () -> None
        """Remove the temporary cache directory."""
        self._temp_dir.cleanup()
class WheelCache(Cache):
    """Facade combining EphemWheelCache and SimpleWheelCache.

    Lookups degrade gracefully: the persistent wheel cache is consulted
    first and the ephemeral cache acts as a fallback.
    """

    def __init__(self, cache_dir, format_control):
        # type: (str, FormatControl) -> None
        super(WheelCache, self).__init__(cache_dir, format_control, {'binary'})
        self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
        self._ephem_cache = EphemWheelCache(format_control)

    def get_path_for_link(self, link):
        # type: (Link) -> str
        return self._wheel_cache.get_path_for_link(link)

    def get_ephem_path_for_link(self, link):
        # type: (Link) -> str
        return self._ephem_cache.get_path_for_link(link)

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        result = self._wheel_cache.get(link, package_name)
        if result is not link:
            return result
        # Not in the persistent cache: fall back to the ephemeral one.
        return self._ephem_cache.get(link, package_name)

    def cleanup(self):
        # type: () -> None
        for cache in (self._wheel_cache, self._ephem_cache):
            cache.cleanup()
| [
"shiyoo123@hotmail.com"
] | shiyoo123@hotmail.com |
9bf8399b0d96619d46fa8d08f62c9db0def0eaee | f842b77b50015456f1396b71e527180d48a2eadc | /demo/libdemo/write_names.py | 860bf2382a5dc182f36c923677e879b906d1374a | [] | no_license | srikanthpragada/PYTHON_16_JUNE_2020 | 75e4d2b42607e31e26d6a5df3ea0065df941c750 | 50c2d0c355eef94ed93c4e124796fe3add7a60d9 | refs/heads/master | 2022-11-24T12:42:28.594097 | 2020-07-29T02:28:21 | 2020-07-29T02:28:21 | 273,117,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | with open("names.txt", "wt") as f:
while True:
name = input("Enter name [end to stop] : ")
if name == 'end':
break
f.write(name + "\n")
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
21675dfebc1da3e403944ba6c29730499d64b6c6 | d9dbeafdcbe65f1121acb6f3d2ea789c33dc9edf | /data_structures/binary_tree.py | f1744e98626cbacd893c6a2a5cdc96cc357f4f4f | [] | no_license | Ethic41/LearningAlgorithms | 2227547064f0027a265e62a48d12923013cf2511 | 614fcf534344e643cda4867c0e45be507ebe46b8 | refs/heads/master | 2022-11-28T11:57:56.899894 | 2022-11-24T12:28:14 | 2022-11-24T12:28:14 | 192,438,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | # Author: Dahir Muhammad Dahir
# Date: 04-July-2019 10:14 PM
# About: Binary Tree is a non-linear, finite,
# connected, undirected graph with vertex
# or node of degree !> 3, data structure.
# this is my first implementation of a
# binary tree, note that due to it's varying
# nature, this is not the only possible implementation
from collections import deque
from tree import Tree # type: ignore
class BinaryTree(Tree):
    """Binary tree whose insertions fill the shallowest free child slot.

    Insertion is breadth-first: ``add_node`` attaches each new node to
    the first node found in level order that is missing a child, keeping
    the tree as complete as possible.  Traversals use parent pointers
    instead of recursion or an explicit stack.
    """
    def __init__(self):
        super().__init__()
    def add_node(self, node):
        """Add node to the first found leaf"""
        if self.root is None:
            current_node = self.root
        else:
            current_node = self.find_leaf()
        if current_node is None:
            # Empty tree: the new node becomes the root.
            self.root = node
        elif current_node.left is None:
            current_node.left = node
            current_node.left.parent = current_node
        elif current_node.right is None:
            current_node.right = node
            current_node.right.parent = current_node
    def find_leaf(self):
        """Return the first node in level order with a free child slot.

        NOTE(review): despite the name, the returned node is not
        necessarily a leaf -- it may have one child already.
        """
        que = deque()
        que.append(self.root)
        current_node = None
        while len(que) > 0:
            current_node = que.popleft()
            if current_node.left is not None:
                que.append(current_node.left)
            else:
                return current_node
            if current_node.right is not None:
                que.append(current_node.right)
            else:
                return current_node
        return current_node
    def get_size_dft(self):
        """Count the nodes using an iterative parent-pointer DFS.

        Each node is counted exactly once: on the first visit, i.e. when
        we arrive from its parent rather than from one of its children.
        """
        current_node = self.root
        previous_node = None
        node_count = 0
        while current_node is not None:
            if previous_node == current_node.parent: # we are at a new node
                node_count += 1
                if current_node.left is not None:
                    next_node = current_node.left
                elif current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            elif previous_node == current_node.left:
                # Coming back up from the left subtree.
                if current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            else:
                # Coming back up from the right subtree: keep ascending.
                next_node = current_node.parent
            previous_node = current_node
            current_node = next_node
        return node_count
    def breadth_first_traverse(self):
        """Walk the tree in level order.

        NOTE(review): this visits every node but produces no output,
        return value or side effect -- presumably a skeleton meant to be
        extended; confirm before relying on it.
        """
        que = deque()
        if self.root is not None:
            que.append(self.root)
        while len(que) > 0:
            current_node = que.popleft()
            if current_node.left is not None:
                que.append(current_node.left)
            if current_node.right is not None:
                que.append(current_node.right)
    def add_internal_node(self, internal_node, node):
        # Placeholder: not implemented.
        pass
    def print_tree_dfs(self):
        """Print each node's data (first-visit order) joined by "=>",
        ending the line with ``None``."""
        current_node = self.root
        previous_node = None
        while current_node is not None:
            if previous_node == current_node.parent:
                # First visit of this node: print its payload.
                print("{}=>".format(current_node.data), end="")
                if current_node.left is not None:
                    next_node = current_node.left
                elif current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            elif previous_node == current_node.left:
                if current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            else:
                next_node = current_node.parent
            previous_node = current_node
            current_node = next_node
        print(None)
| [
"dahirmuhammad3@gmail.com"
] | dahirmuhammad3@gmail.com |
133d9271892fc9ec2092352c9c8710004c210f59 | c69b8b1ac98f26de39292c5e058f8de5d6776204 | /ditto/utils/commands.py | 4bc2c63b8d5f0846600f8b3629a42eaf0ff5006d | [
"MIT"
] | permissive | Kuchenmampfer/Ditto | 1ba9c2909d43c4d7f36f62126523b510d76500db | 435d539c059a25207c70c74818233afe26ad4a38 | refs/heads/master | 2023-07-10T19:29:47.375307 | 2021-08-10T10:52:39 | 2021-08-10T10:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from typing import TypeVar
from discord.ext import commands
from .. import Context
__all__ = ("auto_help",)
# Type variables bound to discord.py's command types; _GT lets auto_help
# return the decorated group with its concrete type preserved.
_CT = TypeVar("_CT", bound=commands.Command)
_GT = TypeVar("_GT", bound=commands.Group)
async def _call_help(ctx: Context):
    """Show the help page of the parent group of the invoked command."""
    await ctx.send_help(ctx.command.parent)
def auto_help(group: _GT, *, cls: type[_CT] = commands.Command) -> _GT:
    """Attach a hidden ``help`` subcommand to *group* and return it.

    The generated subcommand simply displays the group's own help page;
    *cls* selects the command class used to build it.
    """
    if not isinstance(group, commands.Group):
        raise TypeError("Auto help can only be applied to groups.")
    help_command = cls(_call_help, name="help", hidden=True)
    group.add_command(help_command)
    return group
"josh.ja.butt@gmail.com"
] | josh.ja.butt@gmail.com |
4fbb695f1765d44bcbfa2e7b38fbc1edd9e9df08 | c719e7be97de57f4ffaebbf4edbd15ef0bebac75 | /webapp/__init__.py | 5adc2d1b61f9e4d194c416b843910fd80dd30945 | [] | no_license | jehiah/gtfs-data-exchange | 01feccec5da08afed61e78f51ae46fd2a9a650f6 | e9eb891d0067e20c2b157c6093c57654849c87e9 | refs/heads/master | 2021-01-21T12:11:54.754952 | 2016-04-04T00:58:58 | 2016-04-04T00:58:58 | 828,058 | 7 | 3 | null | 2015-04-28T00:32:09 | 2010-08-10T05:21:41 | Python | UTF-8 | Python | false | false | 4,133 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# import wsgiref.headers
# import wsgiref.util
from google.appengine.ext.webapp import*
class RequestHandler(RequestHandler):
    """webapp RequestHandler extended with template state and hooks.

    Subclasses get ``__before__``/``__after__`` hooks (invoked by
    WSGIApplication2 around the HTTP verb methods) plus template
    bookkeeping intended for a ``render`` helper.
    """
    def __init__(self):
        # Default template and the values that will be passed to it.
        self.template_file = 'index.html'
        self.template_vals = {}
    def __before__(self, *args):
        """
        Allows common code to be used for all get/post/delete methods
        """
        pass
    def __after__(self, *args):
        """
        This runs AFTER response is returned to browser.
        If you have follow up work that you don't want to do while
        browser is waiting put it here such as sending emails etc
        """
        pass
    def render(self, template_file=None, template_vals=None):
        """
        Helper method to render the appropriate template.

        Not implemented: the template-engine wiring was left commented
        out; subclasses must override or complete it.
        """
        # Fixed: previously ``raise NotImplemented()`` -- NotImplemented
        # is a constant, not an exception, so calling it raised a
        # confusing TypeError.  Also replaced the mutable default
        # argument ``template_vals={}`` with None.
        raise NotImplementedError()
        # if not template_file == None:
        #     self.template_file = template_file
        # self.template_vals.update(template_vals or {})
        # path = os.path.join(os.path.dirname(__file__), self.template_file)
        # self.response.out.write(template.render(path, self.template_vals))
class WSGIApplication2(WSGIApplication):
    """
    Modified to call the handler's __before__ hook before the
    get/post/delete/etc methods and __after__ AFTER THE RESPONSE has
    been written.  This matters because it means handlers can do follow
    up work after the response has been returned to the browser.

    NOTE(review): this is Python 2 code (``except Exception, e`` below);
    keep it on a Python 2 runtime or port the except clause first.
    """
    def __init__(self, url_mapping, debug=False):
        """Initializes this application with the given URL mapping.
        Args:
          url_mapping: list of (URI, RequestHandler) pairs (e.g., [('/', ReqHan)])
          debug: if true, we send Python stack traces to the browser on errors
        """
        self._init_url_mappings(url_mapping)
        self.__debug = debug
        WSGIApplication.active_instance = self
        self.current_request_args = ()
    def __call__(self, environ, start_response):
        """Called by WSGI when a request comes in."""
        request = Request(environ)
        response = Response()
        WSGIApplication.active_instance = self
        # Find the first URL pattern matching the request path.
        handler = None
        groups = ()
        for regexp, handler_class in self._url_mapping:
            match = regexp.match(request.path)
            if match:
                handler = handler_class()
                handler.initialize(request, response)
                groups = match.groups()
                break
        self.current_request_args = groups
        if handler:
            try:
                # Pre-dispatch hook, then the HTTP verb method.
                handler.__before__(*groups)
                method = environ['REQUEST_METHOD']
                if method == 'GET':
                    handler.get(*groups)
                elif method == 'POST':
                    handler.post(*groups)
                elif method == 'HEAD':
                    handler.head(*groups)
                elif method == 'OPTIONS':
                    handler.options(*groups)
                elif method == 'PUT':
                    handler.put(*groups)
                elif method == 'DELETE':
                    handler.delete(*groups)
                elif method == 'TRACE':
                    handler.trace(*groups)
                else:
                    handler.error(501)
                # The response is streamed to the client first, so
                # __after__ runs after the browser already has it.
                response.wsgi_write(start_response)
                handler.__after__(*groups)
            except Exception, e:
                handler.handle_exception(e, self.__debug)
        else:
            response.set_status(404)
            response.wsgi_write(start_response)
        return ['']
| [
"jehiah@gmail.com"
] | jehiah@gmail.com |
ae0e9dfa494523f579bf8050481c1ecf75a95d7c | 286c7b7dd9bd48c73fd94f8e89bde99a8d3f74c5 | /modelscript/base/brackets.py | 69de0b6d4787b90d2ada466c53d75e57287aad8b | [
"MIT"
] | permissive | ScribesZone/ModelScript | e7738471eff24a74ee59ec88d8b66a81aae16cdc | a36be1047283f2e470dc2dd4353f2a714377bb7d | refs/heads/master | 2023-03-18T02:43:57.953318 | 2021-03-08T15:26:40 | 2021-03-08T15:26:40 | 31,960,218 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,158 | py | # coding=utf-8
""" Conversion of a regular indented file into a bracketed file.
This module makes it possible to use regular parser like textX
with indented based language. Basically BracketedScript is a
preprocesssor that replace all indentations by some "brackets"
like { and } so that a parser can find these block markers.
The preprocessor is aware of comments (//) and documentation
lines (starting with |).
Consider for instance the following text ::
class model Cl_association01
class Elephant
class Banana
class Tree
abstract association class Meal
| Take record of the bananas eaten by elephants.
roles
eater : Elephant[0..1]
bananas : Banana[*]
The text above is bracketed as following. The character separator
are prefixed with special character to avoid confusion with the text
itself. ::
class model Cl_association01 ;
class Elephant ;
class Banana ;
class Tree ;
abstract association class Meal {
| Take record of the bananas eaten by elephants. | ;
roles {
eater : Elephant[0..1] ;
bananas : Banana[*] ; } ; } ;
For more examples see for instance testcases/cls/.mdl/*.clsb
"""
__all__ = (
'BracketError',
'BracketedScript'
)
from typing import Match, ClassVar, List
import re
# The following dependencies could be removed if necessary.
# The environment is used only to save bracketed file in a
# convenient way.
from modelscript.interfaces.environment import Environment
class BracketError(Exception):
    """Raised when a line's indentation is not a legal multiple of the
    indent unit.  ``line`` records the 1-based offending line number."""

    def __init__(self, message, line):
        super().__init__(message)
        # 1-based number of the line with the illegal indentation.
        self.line = line
class BracketedScript(object):
    """ Converter of an indented file into a bracketed file.

    Indentation structure is replaced by explicit NUL-prefixed markers
    (brackets / end-of-line tokens) so a regular parser can recognise
    block boundaries.
    """
    # -- input parameters ------------------------------------------
    SPACE_INDENT: int = 4
    """ Number of spaces for each indentation. """
    IS_BLANK_LINE: ClassVar[Match[str]] = \
        re.compile('^ *((--[^@|]*)|(//.*))?$')
    """ Regular expressions matching blank lines (includes comments).
    Comments are just ignored for indentation purposes but they are still
    taken into account for regular parsing.
    The definition of comments is also implemented in the grammar
    textblocks/parser/grammar.tx
    """
    # TODO:2 remove support for ModelScript. See below
    # ModelScript1:
    # added [^@\|] so that --@ and --| are not treated as comment
    IS_DOC_LINE_REGEX: ClassVar[Match[str]] = re.compile('^ *\|')
    """ Regular expression for a documentation line. """
    # NOTE(review): the ClassVar annotations above say Match[str] but the
    # values are compiled patterns (Pattern[str]); left unchanged because
    # Pattern is not imported and annotations are evaluated here.
    # -- output parameters -----------------------------------------
    OPENING_BRACKET: ClassVar[str] = '\000{'
    """ Opening bracket string. """
    CLOSING_BRACKET: ClassVar[str] = '\000}'
    """ Closing bracket string. """
    EOL: ClassVar[str] = '\000;'
    """ End of line string. """
    CLOSING_DOC_LINE: ClassVar[str] = '\000|'
    """ Closing documentation line string. """
    DOC_LINE_CONTENT: ClassVar[Match[str]] = \
        re.compile(' *\| ?(?P<content>.*)\000\|\000;(\000}\000;)*$')
    """ Regular expression for a documentation line. """
    # -- instance attributes ---------------------------------------
    file: str
    """ Name of the input file. """
    lines: List[str]
    """ Content of the input file represented as list of lines. """
    bracketedLines: List[str]
    """ Bracketed version of `lines`, filled by the `text` property. """
    targetFilename: str
    """ Name of the output file.
    The location of the output file is computed by the Environment.
    See modelscript.interfaces.environment. """
    def __init__(self, file: str) -> None:
        """Load *file* into memory and compute the worker output path.

        NOTE(review): the handle returned by open() is never explicitly
        closed; it is only released by garbage collection.
        """
        self.file = file
        self.lines = [line.rstrip('\n') for line in open(file)]
        self.bracketedLines = []
        basic_file_name = self.file+'b'
        self.targetFilename = Environment.getWorkerFileName(basic_file_name)
    def _is_blank_line(self, index: int) -> bool:
        """ Check if the line is blank or a comment line """
        m = re.match(self.IS_BLANK_LINE, self.lines[index])
        return m is not None
    def _is_doc_line(self, index: int) -> bool:
        """Check whether the line at *index* is a documentation (|) line."""
        m = re.match(self.IS_DOC_LINE_REGEX, self.lines[index])
        return m is not None
    def _terminate_doc_line(self, docLine: str) -> str:
        """Append the closing doc-line marker to *docLine*."""
        return docLine + self.CLOSING_DOC_LINE
    @classmethod
    def extractDocLineText(cls, docLine: str) -> str:
        """Return the text content of a bracketed documentation line."""
        m = re.match(cls.DOC_LINE_CONTENT, docLine)
        assert m is not None
        return m.group('content')
    def _nb_spaces(self, index: int) -> int:
        """Number of leading spaces on the line at *index*."""
        m = re.match(' *', self.lines[index])
        if m:
            return len(m.group(0))
        else:
            # Unreachable in practice: ' *' always matches (possibly empty).
            return 0
    def _line_indent(self, index: int) -> int:
        """Indentation level of the line; raise BracketError when the
        leading spaces are not a multiple of SPACE_INDENT."""
        blanks = self._nb_spaces(index)
        if blanks % self.SPACE_INDENT == 0:
            return blanks // self.SPACE_INDENT
        else:
            raise BracketError( # raise:OK
                message = '%i spaces found. Multiple of %i expected.'
                % (blanks, self.SPACE_INDENT),
                line = index+1)
    def _suffix(self, delta: int) -> str:
        """Markers to append to the previous significant line for an
        indentation change *delta* (new minus previous level):
        +1 opens a block, 0 ends the line, negative closes -delta blocks."""
        if delta == 1:
            return self.OPENING_BRACKET
        elif delta == 0:
            return self.EOL
        else:
            return (
                self.EOL
                + (self.CLOSING_BRACKET+self.EOL) * - delta
            )
    @property
    def text(self) -> str:
        """ Returns the bracketed text. """
        self.bracketedLines = list(self.lines)
        # LNBL = Last Non Blank Line
        lnbl_index = -1
        lnbl_indent = 0
        # Take all lines; the final close below acts as an extra virtual
        # line that closes every block still open.
        for (index, line) in enumerate(self.lines):
            if not self._is_blank_line(index):
                indent = self._line_indent(index)
                delta = indent-lnbl_indent
                if self._is_doc_line(index):
                    self.bracketedLines[index] = (
                        self._terminate_doc_line(self.bracketedLines[index])
                    )
                if delta > 1:
                    # Jumping more than one level deeper is illegal.
                    # This will never happen for the last line.
                    raise BracketError( # raise:OK
                        message = '"%s"' % line,
                        line=index+1)
                else:
                    if lnbl_index != -1:
                        self.bracketedLines[lnbl_index] \
                            += self._suffix(delta)
                    lnbl_index = index
                    lnbl_indent = indent
        # close the last line if any
        if lnbl_index != -1:
            delta = 0-lnbl_indent
            self.bracketedLines[lnbl_index] += self._suffix(delta)
        return '\n'.join(self.bracketedLines)
    def save(self) -> str:
        """ Save the bracketed text into the output file.
        :return: the name of the output file
        """
        f = open(self.targetFilename, "w")
        f.write(self.text)
        f.close()
        return self.targetFilename
import sys

if __name__ == "__main__":
    # Bracket the file named on the command line.  save() returns the
    # path of the generated worker file, not its text, so the result is
    # named accordingly (previously it was misleadingly called "text").
    source = sys.argv[1]
    target_file = BracketedScript(source).save()
| [
"escribis@users.noreply.github.com"
] | escribis@users.noreply.github.com |
da328818e0e708e72e7445267b75ac3eacc9d658 | 67b3a18730887046d67b4930ffc6fa0793a28011 | /integration_tests/test_drawing_matplotlib_backend.py | defd0e53a041e686422c1ea694239173d19faffd | [
"MIT"
] | permissive | hh-wu/ezdxf | 38eeef4e4498411758ef87039532d9df2d5bb178 | 62509ba39b826ee9b36f19c0a5abad7f3518186a | refs/heads/master | 2022-11-11T17:53:15.144144 | 2020-07-02T10:31:36 | 2020-07-02T10:31:36 | 266,539,503 | 0 | 0 | NOASSERTION | 2020-07-02T10:31:37 | 2020-05-24T12:48:03 | null | UTF-8 | Python | false | false | 443 | py | # Created: 06.2020
# Copyright (c) 2020, Matthew Broadway
# License: MIT License
import pytest
plt = pytest.importorskip('matplotlib.pyplot')
from ezdxf.addons.drawing.matplotlib_backend import MatplotlibBackend
@pytest.fixture()
def backend():
    # Fresh MatplotlibBackend drawing onto a brand-new Axes per test.
    fig, ax = plt.subplots()
    return MatplotlibBackend(ax)
def test_get_text_width(backend):
    # A leading space must widen the measured line: ' abc' wider than 'abc'.
    assert backend.get_text_line_width(' abc', 100) > backend.get_text_line_width('abc', 100)
| [
"me@mozman.at"
] | me@mozman.at |
c459f9c7006657b277a0ba92551c3cbb372ba3a9 | 78d5a6e0846cb6b03544e4f717651ca59dfc620c | /treasury-admin/treasury/migrations/0024_auto_20180327_1612.py | c9a2524b3f70c92aa04a2a876672520105bf4e78 | [] | no_license | bsca-bank/treasury-admin | 8952788a9a6e25a1c59aae0a35bbee357d94e685 | 5167d6c4517028856701066dd5ed6ac9534a9151 | refs/heads/master | 2023-02-05T12:45:52.945279 | 2020-12-13T08:07:41 | 2020-12-13T08:07:41 | 320,323,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-27 15:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); the
    # operations record schema history and must not be edited by hand.
    dependencies = [
        ('util', '0011_auto_20180326_1157'),
        ('contenttypes', '0002_remove_content_type_name'),
        ('treasury', '0023_auto_20180325_1515'),
    ]
    operations = [
        # Point fxcli.content_type at the contenttypes framework with
        # cascading deletes.
        migrations.AlterField(
            model_name='fxcli',
            name='content_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
        ),
        migrations.AlterField(
            model_name='fxcli',
            name='object_id',
            field=models.PositiveIntegerField(),
        ),
        # One fxcli row per (product type, generic target object).
        migrations.AlterUniqueTogether(
            name='fxcli',
            unique_together=set([('type_product', 'content_type', 'object_id')]),
        ),
        # Composite index to speed up generic-relation lookups.
        migrations.AlterIndexTogether(
            name='fxcli',
            index_together=set([('content_type', 'object_id')]),
        ),
    ]
| [
"cn.makodo@gmail.com"
] | cn.makodo@gmail.com |
4042a684d5c1f62a2bcd39fd6130f48e889cee75 | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2019/13/common.py | 671c2ca179b9cdb8fcb560b79faf104e864337eb | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from enum import Enum
import pandas as pd
class Tile(Enum):
    """Arcade screen tile identifiers emitted by the game program.

    (Removed a stray ``pass`` that followed the members.)
    """
    EMPTY = 0
    WALL = 1
    BLOCK = 2
    PADDLE = 3
    BALL = 4
# Glyph used to render each tile kind in the console display.
tile_content = {
    Tile.EMPTY: " ",
    Tile.WALL: "|",
    Tile.BLOCK: "#",
    Tile.PADDLE: "_",
    Tile.BALL: "o",
}
def print_game(tiles):
    """Render (x, y, tile_id) triples as an ASCII grid on stdout.

    *tiles* is an iterable of (x, y, tile_id) rows; when the same cell
    appears more than once the most recent value wins (aggfunc="last").
    (Removed three no-op ``pass`` statements from the original.)
    """
    display = pd.DataFrame(tiles, columns=["x", "y", "c"])
    display["cell"] = display["c"].map(Tile)
    grid = display.pivot_table(
        index="y",
        columns="x",
        values="cell",
        aggfunc="last",
    ).values
    for row in grid:
        for cell in row:
            print(tile_content[cell], end="")
        print()
pass
| [
"jimhendy88@gmail.com"
] | jimhendy88@gmail.com |
c2dcce0e83d9778b483b0c915a696f05e3d0666b | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/LeetCode518.py | e1374f7eb905e1a098517a80b3af1c7968d5d55b | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | '''
518. Coin Change 2
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Note: You can assume that
0 <= amount <= 5000
1 <= coin <= 5000
the number of coins is less than 500
the answer is guaranteed to fit into signed 32-bit integer
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
'''
import collections
def LeetCode518(coins, amt):
    """Return every distinct combination of *coins* summing to *amt*.

    Each combination is returned as a sorted list; *coins* is assumed to
    be sorted in ascending order.
    """
    found = []

    def search(remaining, chosen):
        # Same exhaustive search as the module-level helper, inlined so
        # this function is self-contained: sorted duplicates are skipped.
        if remaining == 0:
            combo = sorted(chosen)
            if combo not in found:
                found.append(combo)
        for coin in coins:
            if coin > remaining:
                break
            chosen.append(coin)
            search(remaining - coin, chosen)
            chosen.pop()

    search(amt, [])
    return found
def Combinations_recur(coins, fnl_lst, tmp, amt, start=0):
    """Append to *fnl_lst* every distinct sorted combination of *coins*
    (with repetition) that sums to *amt*.

    *coins* must be sorted ascending (the early ``break`` relies on it);
    *tmp* holds the current partial combination.  The new keyword *start*
    (default 0, backward compatible) is the smallest coin index still
    usable, which keeps the search non-decreasing so each combination is
    generated exactly once -- the original restarted at index 0 on every
    level, generating every permutation and filtering duplicates with an
    O(k) list-membership test (exponential duplicate work).
    """
    if amt == 0:
        combo = sorted(tmp)
        # Duplicate guard kept for callers that pre-seed fnl_lst; with
        # the start index it never fires during a normal search.
        if combo not in fnl_lst:
            fnl_lst.append(combo)
        return
    for i in range(start, len(coins)):
        if coins[i] > amt:
            break  # all later coins are larger, since coins is sorted
        tmp.append(coins[i])
        Combinations_recur(coins, fnl_lst, tmp, amt - coins[i], i)
        tmp.pop()
def main():
    """Exercise LeetCode518 with the two examples from the problem."""
    for coins, amount in (([1, 2, 5], 5), ([2], 3)):
        print(LeetCode518(coins, amount))


if __name__ == '__main__':
    main()
"kartiksayee@gmail.com"
] | kartiksayee@gmail.com |
062a1ea6d662fa8571cfb07a3a76e6dd8640867c | 64d8d80c9a292f1552190af17cf1fe984968d5dc | /python/8kyu/8kyu - Calculate BMI.py | a2730d1915df136e63922afb1959c1b6dedf05a2 | [] | no_license | zurgis/codewars | 3acc880e0f3a40fc77532bcac537452d419fc268 | 045d74d6a36f4bc8a69a76dd3f21fef22c338ca2 | refs/heads/master | 2021-04-20T22:47:45.833147 | 2020-05-19T14:01:55 | 2020-05-19T14:01:55 | 249,723,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Write function bmi that calculates body mass index (bmi = weight / height ^ 2).
# if bmi <= 18.5 return "Underweight"
# if bmi <= 25.0 return "Normal"
# if bmi <= 30.0 return "Overweight"
# if bmi > 30 return "Obese"
def bmi(weight, height):
    """Classify body mass index (weight / height**2) into a category.

    <= 18.5 -> "Underweight", <= 25.0 -> "Normal",
    <= 30.0 -> "Overweight", otherwise -> "Obese".
    """
    index = weight / (height ** 2)
    for upper_bound, category in ((18.5, "Underweight"),
                                  (25.0, "Normal"),
                                  (30.0, "Overweight")):
        if index <= upper_bound:
            return category
    return "Obese"
"khdr437@gmail.com"
] | khdr437@gmail.com |
70b39f393505ebedcff0c5d67930ca3c6fb34989 | 0b3e9b3bd28a611ac4081931c8434590eba2898c | /DiabeticRetinopathyApp/DiabeticRetinopathy/settings.py | 06b2f24f286c039994631c25961621a5a61b2eda | [] | no_license | Ram-Aditya/Diabetic-Retinopathy-Application | 4b8d2fdf95cd554b1bd9305dcff2f719e3326f95 | aafd1b858a213f53d5f3bb80f216533b23f1d004 | refs/heads/master | 2022-12-03T03:23:23.340165 | 2019-11-25T02:43:26 | 2019-11-25T02:43:26 | 223,847,563 | 0 | 0 | null | 2022-11-22T04:50:56 | 2019-11-25T02:42:09 | Python | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for DiabeticRetinopathy project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'yr^xgob_*_1n*$873^6-y_4m4gr_+e=$i145xfx1)_65du6pjv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header -- development only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'api',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DiabeticRetinopathy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DiabeticRetinopathy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'api/static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
bd93eb04763d5659ef3de8964ef2be3b14b6a7d4 | cec0cdfbd057c2d2ba153aa6f163adb250565e9a | /Core_Python_Programming/chapter-2/simple/tsUclnt.py | fecf61afe58d25842614847570dd8ee0473fa0d1 | [] | no_license | Best1s/python_re | 91117cd5b1f896c2b2f3987f1625663aa1952354 | abd526743c67a1bf72ddce39a0268b8e9fe15d26 | refs/heads/master | 2020-05-05T13:37:41.428881 | 2020-02-25T03:41:00 | 2020-02-25T03:41:00 | 180,086,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/env python
from socket import *
# Simple UDP client (Python 2 syntax: raw_input and the print statement).
HOST = 'localhost'
PORT = 22223
BUFSIZ = 1024  # max bytes to read per server reply
ADDR = (HOST,PORT)
udpCliSock = socket(AF_INET,SOCK_DGRAM)
while True:
    data = raw_input('> ')
    if not data:
        # An empty input line ends the session.
        break
    udpCliSock.sendto(data,ADDR)
    # NOTE(review): this rebinds ADDR to the address the reply came from,
    # so subsequent sends go to the replying peer -- confirm intended.
    data,ADDR = udpCliSock.recvfrom(BUFSIZ)
    if not data:
        break
    print data
udpCliSock.close()
"best.oneself@foxmail.com"
] | best.oneself@foxmail.com |
2680ecba6fb8682cdeb0d6871c0bba8c11da6300 | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /day06/函数.py | 3ae295c7e48faa9b12c2b531cf7a0362c7e36bf6 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: Wayne.Chen
'''
函数的定义:
在项目开发的过程中,有些功能,我们会反复的使用,为了方便使用,我们将这些功能封装成一个函数,在需要使用此功能的时候,调用即可。
优点:
1、简化代码结构,增加代码的复用性
2、增加代码的可维护性,若想修改某个bug或者某个功能,找到对应的函数更改即可
'''
'''
语法:
def 函数名(参数列表):
语句块
return 表达式
def:关键字来声明函数
函数名:标识符
参数列表:参数与参数之间使用逗号隔开
: 标识函数语句块的开始
语句块:函数要实现的功能
return:函数的返回值
return的结果可以是数值也可以是表达式
【注意:return可以写也可以不写,具体由函数功能开决定,当return不写的时候,默认return None】
注意:return的时候,函数体已经结束,因此return后边的语句不会执行到
'''
# 最简单的函数:无参无返回值
def myPrint():
    """Simplest kind of function: no parameters, no return value.

    Prints the same greeting five times.
    """
    for _ in range(5):
        print("hello, world!!!")
'''
函数的调用:
函数名(参数列表)
参数列表的作用:函数的调用者,给函数传递信息的
实质:实参给形参赋值的过程
'''
myPrint()
'''
需求:1+2+3+……+100
'''
def sum1():
    """Return the sum 1 + 2 + ... + 100 (demonstrates a return value)."""
    total = 0
    n = 1
    while n <= 100:
        total += n
        n += 1
    return total
print(sum1())
| [
"waynechen1994@163.com"
] | waynechen1994@163.com |
a9b8e43931d529c548c0a4547aff0caa186cbc3a | 8b68fb2eeb5d10082fc2083bc6323aca5b4378b7 | /Server/app/views/__init__.py | c23988213ce9d89d5948846467aa10feceffa455 | [
"MIT"
] | permissive | JoMingyu/BookCheck-Backend | edf7529db95f9183939c0f81ef0ef0906b3c2318 | fbe71a39e385a3c739e7e40ab1153efbe7835576 | refs/heads/master | 2021-09-10T00:07:29.923714 | 2018-03-20T09:55:25 | 2018-03-20T09:55:25 | 113,041,979 | 1 | 0 | MIT | 2017-12-09T13:18:35 | 2017-12-04T12:51:21 | null | UTF-8 | Python | false | false | 783 | py | from flask_restful import Api
from flasgger import Swagger
from app.docs import TEMPLATE
from app.views.user import *
from app.views.library.book import *
from app.views.library.borrow import *
from app.views.library.library import *
class ViewInjector(object):
    """Registers Swagger documentation and all REST endpoints on an app.

    Follows the common extension pattern: construct with the app directly,
    or bind it later via init_app().
    """

    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        Swagger(app, template=TEMPLATE)
        api = Api(app)
        # (resource class, URL rule) pairs, registered in declaration order.
        routes = (
            (Signup, '/signup'),
            (AuthCommonUser, '/auth/common'),
            (AuthAdmin, '/auth/admin'),
            (Refresh, '/refresh'),
            (Book, '/book'),
            (Borrow, '/borrow'),
            (Library, '/library'),
        )
        for resource, rule in routes:
            api.add_resource(resource, rule)
| [
"city7310@naver.com"
] | city7310@naver.com |
6a55346eb4a479e9cfec583f80024042ffc59f42 | e8d5471bd4a47794d66162060343f740e0febca4 | /server/src/uds/core/jobs/__init__.py | a36a52b16819d2d15be2ddb8f0a4383942b120aa | [] | no_license | git38438/openuds | ef939c2196d6877e00e92416609335d57dd1bd55 | 7d66d92f85f01ad1ffd549304672dd31008ecc12 | refs/heads/master | 2020-06-22T14:07:33.227703 | 2019-07-18T11:03:56 | 2019-07-18T11:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
UDS jobs related modules
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
from uds.core.jobs.Job import Job
from uds.core.jobs.DelayedTask import DelayedTask
def factory():
    """Return the singleton jobs factory.

    The import is done inside the function to avoid a circular import
    between this package and JobsFactory at module load time.
    """
    from uds.core.jobs.JobsFactory import JobsFactory as _JobsFactory
    return _JobsFactory.factory()
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
5ba702808ada3c9cd1aae54e283990fe3232d401 | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/build/geographic_msgs/catkin_generated/pkg.develspace.context.pc.py | eb6b51c3a7776951fe341bcd8fb40bc99747dfcb | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context; values are machine-specific paths.
CATKIN_PACKAGE_PREFIX = ""
# Include dirs for this package's generated headers (empty list when unset).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/rishabh/catkin_ws/devel/.private/geographic_msgs/include".split(';') if "/home/rishabh/catkin_ws/devel/.private/geographic_msgs/include" != "" else []
# Run-time catkin dependencies, converted to space-separated for pkg-config.
PROJECT_CATKIN_DEPENDS = "message_runtime;geometry_msgs;uuid_msgs;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "geographic_msgs"
PROJECT_SPACE_DIR = "/home/rishabh/catkin_ws/devel/.private/geographic_msgs"
PROJECT_VERSION = "0.5.5"
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
fce89a0524d511165b52d279a419cfc86ad5c216 | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdccs/models/DescribeAlarm.py | 99d362a38e2debde465939d0cae11309536c8aa1 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeAlarm(object):
    # Auto-generated JD Cloud SDK model describing one monitoring alarm rule.
    def __init__(self, alarmId=None, name=None, idc=None, idcName=None, resourceType=None, resourceId=None, resourceName=None, metric=None, metricName=None, period=None, statisticMethod=None, operator=None, threshold=None, times=None, noticePeriod=None, status=None):
        """
        :param alarmId: (Optional) Alarm rule instance ID
        :param name: (Optional) Rule name
        :param idc: (Optional) Data center (IDC) English identifier
        :param idcName: (Optional) Data center (IDC) display name
        :param resourceType: (Optional) Resource type; "bandwidth" = bandwidth
        :param resourceId: (Optional) Resource ID
        :param resourceName: (Optional) Resource name
        :param metric: (Optional) Metric English identifier
        :param metricName: (Optional) Metric display name
        :param period: (Optional) Statistics period, in minutes
        :param statisticMethod: (Optional) Statistic method: average=avg, maximum=max, minimum=min
        :param operator: (Optional) Comparison operator: >=, >, <, <=, =, !=
        :param threshold: (Optional) Alarm threshold
        :param times: (Optional) Number of consecutive breaches before alarming
        :param noticePeriod: (Optional) Notification period, in hours
        :param status: (Optional) Rule status: disabled, enabled
        """
        self.alarmId = alarmId
        self.name = name
        self.idc = idc
        self.idcName = idcName
        self.resourceType = resourceType
        self.resourceId = resourceId
        self.resourceName = resourceName
        self.metric = metric
        self.metricName = metricName
        self.period = period
        self.statisticMethod = statisticMethod
        self.operator = operator
        self.threshold = threshold
        self.times = times
        self.noticePeriod = noticePeriod
        self.status = status
| [
"a1090693441@163.com"
] | a1090693441@163.com |
0c0025b66e84787c79a192d52506191ad76db35a | ad372f7753c70e3997d035097ee03f740a5fb068 | /trace_challenge/admin.py | f7a8a726f1094caab378bda4e82f5e630813155d | [] | no_license | Insper/servidor-de-desafios | a5f09fe9368887b06b98800f2bb8f35ff13f80a9 | 9875e9b9248c14237161ca73983595f7d929e963 | refs/heads/master | 2022-12-14T17:28:42.963112 | 2022-09-12T19:18:36 | 2022-09-12T19:18:36 | 167,026,050 | 3 | 42 | null | 2022-12-08T07:36:47 | 2019-01-22T16:19:46 | Python | UTF-8 | Python | false | false | 267 | py | from django.contrib import admin
from trace_challenge.models import TraceChallenge, TraceStateSubmission, UserTraceChallengeInteraction
# Expose the trace-challenge models in the Django admin with default options.
admin.site.register(TraceChallenge)
admin.site.register(TraceStateSubmission)
admin.site.register(UserTraceChallengeInteraction)
| [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
46f0fbed19875c78825487e43ce6a3c1936dc4b7 | c2ddadd3cf14dfc56ec1e4b8d52b8c1a23ea1e61 | /index/models.py | 0e00f72ef97a2fe38f9fa7e54588261bdab74a29 | [] | no_license | ashimmitra/Varsity-Final-Project-by-Django | 09f944a9f1aae7be4212f0c09cfe5d2c596bd848 | 6274d966f09d9ead2344542b56576a77e0758d5a | refs/heads/main | 2023-07-17T15:50:04.414565 | 2021-08-20T12:31:24 | 2021-08-20T12:31:24 | 342,790,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from django.db import models
class AboutSite(models.Model):
    """Title + body text record (used for the site's 'about' content)."""
    title=models.CharField(max_length=150,blank=False)
    description=models.TextField(max_length=800,blank=False)
    def __str__(self):
        return self.title
class Slider(models.Model):
    """Image-plus-caption record; images upload under 'slider/'.

    NOTE(review): presumably rendered as a homepage slider -- confirm in
    the templates.
    """
    title=models.CharField(max_length=150,blank=False)
    description=models.TextField(max_length=800,blank=False)
    image=models.ImageField(upload_to='slider/',blank=False)
    def __str__(self):
        return self.title
class Contact(models.Model):
    """A contact message: sender name, email, subject and body."""
    name=models.CharField(max_length=100,blank=False)
    email=models.EmailField(max_length=100,blank=False)
    subject=models.CharField(max_length=200,blank=False)
    message=models.TextField(max_length=800,blank=False)
    def __str__(self):
        return self.subject
class Books(models.Model):
    """A book record: title, cover image, and PDF file.

    NOTE(review): files upload under 'static/img/' and 'static/media'
    rather than MEDIA_ROOT subdirectories -- verify against the project's
    MEDIA settings before changing (field changes require migrations).
    """
    title = models.CharField(max_length=50)
    image=models.ImageField(upload_to='static/img/',blank=False)
    pdf = models.FileField(upload_to='static/media')
    def __str__(self):
        return self.title
class Notice(models.Model):
    """A notice/announcement: title, image, and description text."""
    title = models.CharField(max_length=50)
    image=models.ImageField(upload_to='static/img/',blank=False)
    description=models.TextField(max_length=800,blank=False)
    def __str__(self):
        return self.title
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
29e7404835a884d2b0f0858126f3b4f4788249c0 | 1996b0e9252362d91c809c4e7f95e7075f13816b | /test/test_tfidf.py | bfa334d962d609402c8a406da134ce352d1a2522 | [] | no_license | scheeloong/MovieQA_benchmark | 846c63d8a361bd4f630e31e3120772bc4965f999 | fb3e1b8fe9ddc6084b3c93206e7dfaed5ad42149 | refs/heads/master | 2020-05-30T08:41:44.764034 | 2017-05-03T05:30:19 | 2017-05-03T05:30:19 | 70,122,910 | 1 | 1 | null | 2016-10-06T04:33:34 | 2016-10-06T04:33:34 | null | UTF-8 | Python | false | false | 796 | py | """
Test for Term Frequency Inverse Document Frequency
TODO(scheeloong): Implement test
"""
import unittest
# Import the package (which is made by having a file called __init__.py
import src
import MovieQA
# Import the module tfidf.py
from src import tfidf
# From tfidf.py, import the class TfIdf
from src.tfidf import TfIdf
class TestTfIdf(unittest.TestCase):
    """Smoke test: TfIdf construction over the MovieQA training plots."""

    def test_nothing(self):
        # Trivial sanity assertion; the real check is that data loading and
        # TfIdf construction below complete without raising.
        self.assertEqual('lala', 'lala')
        dL = MovieQA.DataLoader()
        # Use training data for training
        [story, qa] = dL.get_story_qa_data('train', 'plot')
        # Use test data for testing
        # NOTE(review): qa, story2 and qa2 are loaded but never used, which
        # makes this "nothing" test expensive -- consider removing.
        [story2, qa2] = dL.get_story_qa_data('test', 'plot')
        # TODO: Uncomment this once done questions
        tfidf_ = TfIdf(story)


if __name__ == '__main__':
    unittest.main()
| [
"scheeloong@gmail.com"
] | scheeloong@gmail.com |
4c65e49cfbd1385e17184100805b7ad76143d4e5 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/client/postprocessing/effects/distortiontransfer.py | fba89b3f70bcc804cbf5dba56a150d3693d47d9d | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,958 | py | # 2015.11.10 21:32:32 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/PostProcessing/Effects/DistortionTransfer.py
from PostProcessing.RenderTargets import *
from PostProcessing import Effect
from PostProcessing.Phases import *
from PostProcessing.FilterKernels import *
from PostProcessing import getEffect
from PostProcessing.Effects.Properties import *
from PostProcessing.Effects import implementEffectFactory
import Math
# Tweakable material properties exposed on the 'Fisheye' effect.
# NOTE(review): the MaterialFloatProperty argument semantics come from the
# PostProcessing engine API -- confirm (effect name, index, property, default).
alpha = MaterialFloatProperty('Fisheye', -1, 'alpha', 1, primary=True)
scale = MaterialFloatProperty('Fisheye', -1, 'scale', 1)
tile = MaterialFloatProperty('Fisheye', -1, 'tile', 1)
@implementEffectFactory('Distortion transfer', 'Redraw the scene, using a normal map to distort the image.', 'system/maps/post_processing/hexagonal_norms.bmp')
def distortionTransfer(distortionTexture):
    """This method creates and returns a post-process effect that redraws
    the screen, using a normal map to distort the image. Use this for
    a fish-eye effect, full-screen shimmer/distort etc.

    :param distortionTexture: resource path of the normal map assigned to
        the phase material's distortionTexture slot.
    :return: an Effect with two phases: back-buffer copy, then the
        distorted, alpha-blended transfer.
    """
    backBufferCopy = rt('PostProcessing/backBufferCopy')
    # Phase 1: snapshot the current back buffer into a render target.
    c = buildBackBufferCopyPhase(backBufferCopy)
    # Phase 2: redraw the snapshot through the distortion shader.
    r = buildPhase(backBufferCopy.texture, None, 'shaders/post_processing/legacy/transfer_distort.fx', straightTransfer4Tap, BW_BLEND_SRCALPHA, BW_BLEND_INVSRCALPHA)
    r.name = 'distort and transfer'
    r.material.distortionTexture = distortionTexture
    e = Effect()
    e.name = 'Distort and Transfer'
    e.phases = [c, r]
    return e
@implementEffectFactory('Fisheye', 'Distortion transfer that defaults to a fisheye lens effect.')
def fisheye():
    """Preset distortion transfer driven by a fisheye-lens normal map."""
    e = distortionTransfer('system/maps/post_processing/fisheye_norms.bmp')
    e.name = 'Fisheye'
    return e
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\client\postprocessing\effects\distortiontransfer.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:32:32 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
281dece3986aa0e47c5f5d16610e3fa153dcd132 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Spacy/source2.7/thinc/extra/wrappers.py | 7e51741312749e804eaf2d8a6a8439286f23079c | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,346 | py | from ..compat import BytesIO
from ..neural._classes.model import Model
try:
import torch.autograd
import torch
except ImportError:
pass
class PytorchWrapper(Model):
    '''Wrap a PyTorch model, so that it has the same API as Thinc models.
    To optimize the model, you'll need to create a PyTorch optimizer and call
    optimizer.step() after each batch --- see examples/wrap_pytorch.py
    '''
    def __init__(self, model):
        Model.__init__(self)
        self._model = model

    def begin_update(self, x_data, drop=0.):
        '''Return the output of the wrapped PyTorch model for the given input,
        along with a callback to handle the backward pass.

        :param x_data: array-like input, converted to a torch.Tensor.
        :param drop: dropout rate; accepted for Thinc API compatibility but
            unused here -- apply dropout inside the wrapped module instead.
        :return: (output array, backward callback) pair.
        '''
        x_var = torch.autograd.Variable(torch.Tensor(x_data),
                                        requires_grad=True)
        # Make prediction
        y_var = self._model(x_var)

        def backward_pytorch(dy_data, sgd=None):
            # Backprop the output gradient through the autograd graph.
            dy_var = torch.autograd.Variable(torch.Tensor(dy_data))
            # Pass the gradients positionally: the keyword was renamed from
            # grad_variables to grad_tensors across torch versions.
            torch.autograd.backward((y_var,), (dy_var,))
            dX = self.ops.asarray(x_var.grad.data)
            # Parameter updates are the caller's responsibility (see the
            # class docstring).  The previous code called an undefined
            # `optimizer` name here when sgd was given, raising NameError;
            # `sgd` is now accepted for API compatibility only.
            return dX

        # Bug fix: previously returned the undefined name `backward`.
        return self.ops.asarray(y_var.data), backward_pytorch

    def to_disk(self, path):
        '''Serialize the wrapped model's parameters to a file path.'''
        # TODO: Untested
        torch.save(self._model.state_dict(), str(path))

    def from_disk(self, path):
        '''Load the wrapped model's parameters from a file path.'''
        # TODO: Untested
        self._model.load_state_dict(torch.load(path))

    def to_bytes(self):
        '''Serialize the wrapped model's parameters to a bytes object.'''
        # TODO: Untested
        filelike = BytesIO()
        torch.save(self._model.state_dict(), filelike)
        # Bug fix: after torch.save() the stream position is at the end,
        # so read() returned b''; getvalue() returns the whole buffer.
        return filelike.getvalue()

    def from_bytes(self, data):
        '''Load the wrapped model's parameters from a bytes object.'''
        # TODO: Untested
        filelike = BytesIO(data)
        self._model.load_state_dict(torch.load(filelike))

    def to_gpu(self, device_num):
        # TODO: Implement
        raise NotImplementedError

    def to_cpu(self):
        # TODO: Implement
        raise NotImplementedError

    def resize_output(self):
        # TODO: Required for spaCy add label
        raise NotImplementedError

    def resize_input(self):
        # TODO: Not required yet, but should be useful
        raise NotImplementedError

    @contextlib.contextmanager
    def use_params(self, params):  # pragma: no cover
        # TODO: Implement
        raise NotImplementedError
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.