blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d293c66679798c3156d7d4d81887dd17635b22c | e21f7d14e564d7fb921277a329ff078e86ad86a2 | /2018/Day 03/day3_part1.py | 9608667c4f614f00c804d40b776d5d7de8749d61 | [] | no_license | MrGallo/advent-of-code-solutions | 31456a0718303cca6790cf1227831bcb14649e27 | 28e0331e663443ffa2638188437cc7e46d09f465 | refs/heads/master | 2022-07-07T08:49:30.460166 | 2020-12-17T17:22:24 | 2020-12-17T17:22:24 | 160,988,019 | 0 | 1 | null | 2022-06-21T22:26:19 | 2018-12-08T23:34:51 | Python | UTF-8 | Python | false | false | 523 | py | import numpy as np
# Advent of Code 2018, day 3 part 1: count fabric squares claimed more than once.
with open('input.txt') as f:
    claims = []
    # Each claim line looks like "#<id> @ <x>,<y>: <w>x<h>".
    for claim in f:
        c_id = int(claim[1:claim.index(" ")])  # id digits sit between '#' and the first space
        at_i = claim.index("@")
        colon_i = claim.index(":")
        x, y = [int(n) for n in claim[at_i+2:colon_i].split(",")]  # skip "@ " before the coords
        w, h = [int(n) for n in claim[colon_i+2:].split("x")]  # skip ": " before the size
        claims.append((c_id, x, y, w, h))
# 1000x1000 grid of fabric; each cell counts how many claims cover it.
grid = np.zeros((1000, 1000), dtype=int)
# Place claims on grid
for c_id, x, y, w, h in claims:
    grid[y:y+h, x:x+w] += 1
# Answer: number of cells covered by more than one claim.
print((grid > 1).sum())
| [
"daniel.gallo@ycdsbk12.ca"
] | daniel.gallo@ycdsbk12.ca |
db71f979debd03e7f83d2826f8d2638ade23dd94 | 32b0f6aa3f52db84099205a3f6b59464469617c8 | /aa/unit/unit.py | 9bb67ee8fe20a7ea6515a6293b06f499bf44489c | [
"MIT"
] | permissive | projectweekend/aa | 9eed3b75153cee74990640e9a40475172d5175b3 | 5c6da28121306b1125b2734d5a96677b3e3786e0 | refs/heads/master | 2022-01-29T11:34:22.467002 | 2018-09-04T02:33:43 | 2018-09-04T02:33:43 | 126,966,057 | 0 | 0 | MIT | 2022-01-21T19:18:28 | 2018-03-27T09:56:16 | Python | UTF-8 | Python | false | false | 2,248 | py | from random import randint
from .info import *
class Bonus:
    """A combat bonus: overrides one attribute value for a set of targets."""

    def __init__(self, targets, boosted_attribute, boost_value):
        # Entities this bonus applies to.
        self.targets = targets
        # Key of the attribute that gets overridden (e.g. attack/defense).
        self.boosted_attribute = boosted_attribute
        # Replacement value used while the bonus is active.
        self.boost_value = boost_value
class Roll:
    """Dice rolls for a unit: a d6 succeeds when it is <= the relevant stat."""

    def __init__(self, unit):
        self._unit = unit

    def _roll(self):
        # Single six-sided die.
        return randint(1, 6)

    def attack(self):
        """Return (die value, 1 if the attack hit else 0)."""
        value = self._roll()
        hit = 1 if value <= self._unit.attack else 0
        return value, hit

    def defense(self):
        """Return (die value, 1 if the defense succeeded else 0)."""
        value = self._roll()
        return value, 1 if value <= self._unit.defense else 0
class Rank:
    """Derived ranking score for a unit: stat + cost + bonus-granting premium."""

    def __init__(self, unit):
        self._unit = unit

    @property
    def attack(self):
        unit = self._unit
        return unit.attack + unit.cost + self.bonus

    @property
    def defense(self):
        unit = self._unit
        return unit.defense + unit.cost + self.bonus

    @property
    def bonus(self):
        # Units that grant bonuses to others are worth a flat 1.5 premium.
        if self._unit.bonuses_granted:
            return 1.5
        return 0
class Unit:
    """A game unit with combat stats, movement and optional active bonus.

    ``roll`` and ``rank`` helper objects are attached per instance so callers
    can write ``unit.roll.attack()`` / ``unit.rank.defense``.
    """

    def __init__(self, name, attack, defense, cost, movement, type,
                 bonuses_granted, active_bonus=None):
        self.name = name
        self._attack = attack
        self._defense = defense
        self.cost = cost
        self.movement = movement
        self.type = type
        self.bonuses_granted = bonuses_granted
        self.active_bonus = active_bonus
        self.roll = Roll(unit=self)
        self.rank = Rank(unit=self)

    def __repr__(self):
        return self.name

    @classmethod
    def build_by_name(cls, name):
        """Alternate constructor: look the unit spec up in UNIT_INFO by name."""
        spec = dict(UNIT_INFO[name.title()])
        # Raw bonus dicts become Bonus instances before construction.
        spec[BONUSES] = [Bonus(**raw) for raw in spec[BONUSES]]
        return cls(**spec)

    def _boosted(self, attribute, base):
        """Return *base* unless the active bonus overrides *attribute*."""
        bonus = self.active_bonus
        if bonus is not None and bonus.boosted_attribute == attribute:
            return bonus.boost_value
        return base

    @property
    def attack(self):
        return self._boosted(ATTACK, self._attack)

    @property
    def defense(self):
        return self._boosted(DEFENSE, self._defense)
| [
"brian@projectweekend.net"
] | brian@projectweekend.net |
a57790abe4d9ce8e9cc97d8ff297729b1c3ab13c | b1892cfaba853689e6db9bff2cc930e7472e4a67 | /src/Calculating_Synthetic_Networks_Organize_Runs.py | c4b2538e7428c0ee41c0da5948e6d6850c392a69 | [] | no_license | jeffalstott/technologyspace | 0cb01655cec4d8c18c5496ecb1b621eeac4a3648 | 34185de439d58830392aaeadc61c2c59ccd84afa | refs/heads/master | 2020-12-09T13:56:54.386249 | 2018-04-30T13:56:43 | 2018-04-30T13:56:43 | 46,704,128 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py |
# coding: utf-8
# In[2]:
import pandas as pd
from pylab import *
# In[3]:
# class_system = 'IPC'
# n_controls = 1000
# target_year = 2010
# In[4]:
# n_years = 'cumulative'
# Label fragment embedded in output file names; empty for the cumulative case.
# NOTE(review): n_years, data_directory and class_system are expected to be
# injected before this (notebook-converted) script runs -- their own
# definitions below are commented out.
if n_years is None or n_years=='all' or n_years=='cumulative':
    n_years_label = ''
else:
    n_years_label = '%i_years_'%n_years
# In[5]:
# occurrence_entities = {'Firm': ('occurrences_organized.h5', 'entity_classes_Firm'),
#                        'Inventor': ('occurrences_organized.h5', 'entity_classes_Inventor'),
#                        'Country': ('occurrences_organized.h5', 'entity_classes_Country'),
#                        'PID': ('classifications_organized.h5', 'patent_classes'),
#                        }
# entity_types = list(occurrence_entities.keys())
# In[6]:
# cooccurrence_base_file_name = 'synthetic_control_cooccurrence_'+n_years_label+'%s_preserve_years_%s'
# Template: '%s' is filled with the classification system name later.
citations_base_file_name = 'synthetic_control_citations_'+n_years_label+'%s'
# In[7]:
# data_directory = '../data/'
# Directories holding the per-run synthetic-control HDF5 files.
citations_controls_directory = data_directory+'Class_Relatedness_Networks/citations/controls/%s/'%class_system
coocurrence_controls_directory = data_directory+'Class_Relatedness_Networks/cooccurrence/controls/%s/'%class_system
# In[8]:
import gc
from time import time
# In[9]:
def organize_runs(df_name,
                  file_name,
                  controls_directory=citations_controls_directory,
                  n_controls=n_controls,
                  target_year=target_year,
                  controls=None,
                  multiple_metrics=True,
                  target_metric=None
                  ):
    """Collect the `target_year` slice of every synthetic-control run into
    one Panel4D indexed by (metric, run id, class, class).

    df_name: HDF5 key of the DataFrame/Panel inside each control file.
    file_name: prefix; '<file_name>_<run>.h5' is read for each run id.
    controls_directory: directory holding the per-run control files.
    n_controls: number of runs to read.
    target_year: year to extract from each run.
    controls: existing Panel4D to fill in-place; built lazily from the
        first successfully-read run when None.
    multiple_metrics: True when each file holds a Panel of several metrics;
        False when it holds a single series written under `target_metric`.
    target_metric: label written into when multiple_metrics is False.

    NOTE(review): relies on pd.Panel4D and .ix, both removed in modern
    pandas (pandas < 0.25 required) -- kept as-is for compatibility.
    """
    t = time()
    for randomization_id in range(n_controls):
        if not randomization_id % 100:
            # Lightweight progress report every 100 runs.
            print(randomization_id)
            print("%.0f seconds" % (time() - t))
            t = time()
        f = '%s_%i.h5' % (file_name, randomization_id)
        try:
            if multiple_metrics:
                x = pd.read_hdf(controls_directory + f, df_name).ix[:, target_year]
            else:
                x = pd.read_hdf(controls_directory + f, df_name).ix[target_year]
        except Exception:
            # Skip unreadable/missing runs instead of aborting the sweep.
            # (Was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit, making long sweeps un-interruptible.)
            print("Data not loading for %s. Continuing." % f)
            continue
        if controls is None:
            # Lazily size the container from the first successfully read run.
            controls = pd.Panel4D(labels=x.items, items=arange(n_controls),
                                  major_axis=x.major_axis, minor_axis=x.minor_axis)
        if multiple_metrics:
            controls.ix[x.items, randomization_id] = x.values
        else:
            controls.ix[target_metric, randomization_id] = x
        gc.collect()
    return controls
# In[10]:
# First pass: citation-based synthetic controls seed the Panel4D.
controls = organize_runs('synthetic_citations_%s' % class_system,
                         citations_base_file_name % class_system,
                         citations_controls_directory,
                         controls=None
                         )
# In[18]:
# Add one co-occurrence metric per entity type into the same container.
# NOTE(review): entity_types and cooccurrence_base_file_name must be injected
# before this runs -- their definitions above are commented out.
for entity in entity_types:
    controls = organize_runs('synthetic_cooccurrence_%s_%s' % (entity, class_system),
                             cooccurrence_base_file_name % (entity, class_system),
                             coocurrence_controls_directory,
                             controls=controls,
                             multiple_metrics=False,
                             target_metric='Class_CoOccurrence_Count_%s' % entity)
# In[21]:
# Persist the organized controls.
# BUG FIX: a stray 'store.close()' (leftover notebook cell "In[20]") used to
# run here *before* the store was opened, raising NameError; it was removed.
store = pd.HDFStore(data_directory+'Class_Relatedness_Networks/class_relatedness_networks_controls_organized_%s.h5'%class_system,
                    mode='a', table=True)
store.put('/controls_%s' % class_system, controls, 'table', append=False)
store.close()
| [
"jeffalstott@gmail.com"
] | jeffalstott@gmail.com |
e85cf3dcfa4ce7b1ff0c56d2b706b9c987245f76 | 0d8d40eba3eb0b54e6e7346c5e8160d922b0953f | /alalala_17403/urls.py | cc63fdcba3efd3f3eabe93788f5d84dcb48ae98d | [] | no_license | crowdbotics-apps/alalala-17403 | e96a2e1197965e43105873efd82c37e9649b9d4f | bae7b1f3b1fd7a12a6a8ac34073565b5f3ddcfa2 | refs/heads/master | 2023-05-19T02:29:20.073969 | 2020-05-26T09:36:07 | 2020-05-26T09:36:07 | 267,001,767 | 0 | 0 | null | 2021-06-12T13:03:48 | 2020-05-26T09:34:37 | Python | UTF-8 | Python | false | false | 2,046 | py | """alalala_17403 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Root URL configuration: app routes, auth endpoints and the admin.
# NOTE(review): "api/v1/" is mounted twice (home first, then dating) and
# home.urls is included at both "" and "home/" -- Django resolves patterns
# in list order, so the earlier mount wins for overlapping paths.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("dating.api.v1.urls")),
    path("dating/", include("dating.urls")),
    path("home/", include("home.urls")),
]
# Admin site branding.
admin.site.site_header = "alalala"
admin.site.site_title = "alalala Admin Portal"
admin.site.index_title = "alalala Admin"
# swagger
# OpenAPI schema view; the generated docs require an authenticated user.
schema_view = get_schema_view(
    openapi.Info(
        title="alalala API",
        default_version="v1",
        description="API documentation for alalala App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
# Interactive Swagger UI for the API schema above.
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
80f631cf3d47346c28c011a1d97a2ee7e6ad5594 | 7cad80770718e655766dd038bc654ebe1ad2ab3e | /network/fedoracoin.py | 4daa642227ed53cbb62e9be465e0740e0ab9d2c5 | [] | no_license | kindanew/clove_bounty | d0e1f917dff5b3a23eae6a05d6449af068fb4dc9 | e707eebde301ac3728a5835d10d927aa83d27ab5 | refs/heads/master | 2021-04-29T00:29:37.698092 | 2018-02-24T02:34:33 | 2018-02-24T02:34:33 | 121,830,043 | 0 | 0 | null | 2018-02-17T05:34:54 | 2018-02-17T05:26:51 | Python | UTF-8 | Python | false | false | 822 | py | from clove.network.bitcoin import Bitcoin
class FedoraCoin(Bitcoin):
    """
    Class with all the necessary FedoraCoin (TIPS) network information based on
    https://github.com/fedoracoin-dev/fedoracoin/blob/master-0.9/src/chainparams.cpp
    (date of access: 02/17/2018)
    """
    name = 'fedoracoin'  # clove-internal network identifier
    symbols = ('TIPS', )  # ticker symbol(s) for this network
    # Seed hosts/IPs used for initial peer discovery (from chainparams.cpp).
    seeds = ('seed.fedoracoin.net', '45.55.250.196', 'tips1.netcraft.ch' , 'tips2.netcraft.ch')
    port = 44890  # mainnet P2P port
class FedoraCoinTestNet(FedoraCoin):
    """
    Class with all the necessary FedoraCoin (TIPS) testing network information based on
    https://github.com/fedoracoin-dev/fedoracoin/blob/master-0.9/src/chainparams.cpp
    (date of access: 02/17/2018)
    """
    name = 'test-fedoracoin'  # overrides mainnet; symbols are inherited from FedoraCoin
    # Testnet seed hosts/IPs.
    seeds = ('115.29.37.248', 'testnet-dnsseed.fedoracoin.com')
    port = 19336  # testnet P2P port
| [
"noreply@github.com"
] | kindanew.noreply@github.com |
d7e4f73db84e320d839e02f46066f13f6057272f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy2986.py | e015914e151e5d9651cefff1550ef4347ec47356 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR bit strings s and t position-wise and return the reversed result."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product (mod 2) of bit strings s and t, returned as "0"/"1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f on n control qubits plus one target.

    For every n-bit string rep with f(rep) == "1", X gates conjugate the
    zero-bits so the multi-controlled Toffoli flips the target exactly when
    the controls encode rep.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map the all-ones control condition onto this bit pattern.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the conjugating X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit experiment circuit around the oracle O_f.

    NOTE(review): the gate list (with numbered trailing comments) appears to
    be auto-generated; gate order is significant and is left untouched.
    Every qubit is measured into its classical bit at the end.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=31
    prog.cz(input_qubit[0],input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=33
    prog.h(input_qubit[3]) # number=30
    prog.x(input_qubit[3]) # number=11
    prog.h(input_qubit[3]) # number=13
    prog.cz(input_qubit[0],input_qubit[3]) # number=14
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[3],input_qubit[1]) # number=19
    prog.z(input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=36
    prog.x(input_qubit[3]) # number=37
    prog.cx(input_qubit[0],input_qubit[3]) # number=38
    prog.h(input_qubit[1]) # number=20
    prog.rx(-3.141592653589793,input_qubit[3]) # number=26
    prog.h(input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[2]) # number=17
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    # Append the oracle over n-1 controls plus the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=27
    prog.cz(input_qubit[1],input_qubit[0]) # number=28
    prog.h(input_qubit[0]) # number=29
    prog.cx(input_qubit[1],input_qubit[0]) # number=22
    prog.cx(input_qubit[2],input_qubit[1]) # number=34
    prog.x(input_qubit[1]) # number=23
    prog.x(input_qubit[1]) # number=24
    # circuit end
    # Measure every qubit into its classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden-string function: f(rep) = (a . rep) XOR b (inner product mod 2).
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = FakeVigo()
    sample_shot =8000
    # Run on the noisy fake backend and collect measurement counts.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump the counts, instruction count and transpiled circuit to CSV.
    writefile = open("../data/startQiskit_noisy2986.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
848a1c59e318fe3cedaa0d7702194ef1a1d6b211 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N538_Convert_BST_To_Greater_Tree.py | 0cf76aad40516b7710da549a39a22762f8d82918 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | class Solution(object):
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
stack = [root]
prev = 0
visited = set()
visited.add(root)
visited.add(None)
while stack:
node = stack[-1]
if node.left in visited and node.right in visited:
node.val += prev
prev = node.val
stack.pop()
if node.right and node.right not in visited:
stack.append(node.right)
visited.add(node.right)
continue
if node.left and node.left not in visited:
node.val += prev
prev = node.val
stack.pop()
stack.append(node.left)
visited.add(node.left)
continue
return root
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
5976b48f4a882bac42ead1b05645367c90e2f149 | 69bcc45028038351a7f891025df1f8e7d4b855f1 | /unsupervised_learning/0x02-hmm/4-viterbi.py | d1b2dad6af9348540262ec79bcaa71f9e3174c85 | [] | no_license | linkjavier/holbertonschool-machine_learning | 6db799844821d450fed2a33a8819cb8df0fef911 | c7b6ea4c37b7c5dc41e63cdb8142b3cdfb3e1d23 | refs/heads/main | 2023-08-17T21:00:24.182003 | 2021-09-09T05:47:06 | 2021-09-09T05:47:06 | 304,503,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | #!/usr/bin/env python3
""" The Viretbi Algorithm """
import numpy as np
def viterbi(Observation, Emission, Transition, Initial):
    """Most likely hidden-state sequence for an HMM (Viterbi algorithm).

    Observation: 1-D ndarray of T observation indices.
    Emission:    (N, M) ndarray, P(observation | state).
    Transition:  (N, N) ndarray, P(next state | state).
    Initial:     (N, 1) ndarray of starting-state probabilities.

    Returns (path, P): `path` is the list of T most likely state indices
    and `P` the probability of that path, or (None, None) on invalid input.
    """
    if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
        return None, None
    if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
        return None, None
    if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2 or \
            Transition.shape[0] != Transition.shape[1]:
        return None, None
    if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
        return None, None
    # BUG FIX: the original chained comparison included 'x != x' (always
    # False), so mismatched dimensions were never rejected.
    if Emission.shape[0] != Transition.shape[0] or \
            Transition.shape[0] != Initial.shape[0]:
        return None, None
    if Initial.shape[1] != 1:
        return None, None
    T = Observation.shape[0]
    N, _ = Emission.shape
    # vit[i, t]: probability of the best path ending in state i at time t.
    vit = np.zeros([N, T])
    # back[i, t]: predecessor state on that best path (valid for t >= 1).
    back = np.empty([N, T], dtype=int)
    vit[:, 0] = np.multiply(Initial.T, Emission[:, Observation[0]])
    for t in range(1, T):
        for i in range(N):
            scores = vit[:, t - 1] * Transition[:, i]
            vit[i, t] = np.max(scores) * Emission[i, Observation[t]]
            back[i, t] = np.argmax(scores)
    # Backtrack from the most probable final state.
    path = np.zeros(T)
    path[T - 1] = np.argmax(vit[:, T - 1])
    for i in range(T - 2, -1, -1):
        path[i] = back[int(path[i + 1]), i + 1]
    P = np.max(vit[:, T - 1:], axis=0)[0]
    path = [int(i) for i in path]
    return (path, P)
| [
"linkjavier@hotmail.com"
] | linkjavier@hotmail.com |
d574c0c5bff1863e1788e44fe024613dc1f96569 | 1f21f836d8eaf9e68ef102612787912011dafa4b | /bc_study/two_stock_datafeed.py | e104c8f48858dec4b210c9d1ffb5b05f877e576b | [] | no_license | abcijkxyz/think_in_backtrader | 2461b20dc24bff11c1cd2693a74da1a1c28b7bd1 | 1645a7527d24929a900755186c18efb65b3a1672 | refs/heads/main | 2023-03-09T06:46:05.943303 | 2021-02-28T10:26:52 | 2021-02-28T10:26:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : two_stock_datafeed.py
@Time : 2020/11/27 13:33:27
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc    :   Example of reading two stock DataFeeds at the same time:
             loads daily data for two stocks and prints each day's close price
'''
import backtrader as bt
import bc_study.tushare_csv_datafeed as ts_df
# Demo strategy: logs each day's closing price (the original comment said "opening price", but the code prints close)
class DemoStrategy(bt.Strategy):
    """Demo strategy: logs the closing price of both data feeds every bar."""

    def log(self, txt, dt=None):
        """Print *txt* prefixed with a date (defaults to feed 0's current date)."""
        if not dt:
            dt = self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))

    def __init__(self):
        # Keep handles to each feed's close-price line.
        self.dataclose_A = self.datas[0].close
        self.dataclose_B = self.datas[1].close

    def next(self):
        # Called once per bar: report both feeds' closes.
        self.log('A Close={0}'.format(self.dataclose_A[0]))
        self.log('B Close={0}'.format(self.dataclose_B[0]))
# Run the backtest
def engine_run():
    """Wire up Cerebro with two daily data feeds and run the backtest."""
    # Initialize the engine.
    cerebro = bt.Cerebro()
    # Attach the demo strategy to the Cerebro engine.
    cerebro.addstrategy(DemoStrategy)
    # Set the starting cash.
    cerebro.broker.setcash(200000.0)
    # Load daily data for two stocks (a five-day window).
    data1 = ts_df.get_csv_daily_data(stock_id="600016.SH", start="20190101", end="20190105")
    cerebro.adddata(data1)
    data2 = ts_df.get_csv_daily_data(stock_id="000001.SZ", start="20190101", end="20190105")
    cerebro.adddata(data2)
    print('初始市值: %.2f' % cerebro.broker.getvalue())
    # Start the backtest run.
    result = cerebro.run()
    print("回测运行返回值 = {0}".format(result))
    print('期末市值: %.2f' % cerebro.broker.getvalue())
if __name__ == '__main__':
    engine_run()
| [
"shwangjj@163.com"
] | shwangjj@163.com |
534a4e3a572af24eb566340df6d9ff2f6a779074 | e1dd0997239951d4d459b1ba0229493512b0b331 | /mds_py/mds-env/lib/python3.11/site-packages/cleo/io/inputs/token_parser.py | 1b0f702287034dd015189b66f64297578fa1aa06 | [] | no_license | alexmy21/Octopus | bd17777cf66654c1e7959654f63ca82b716865b5 | 7844ec616376ec6cd9c1a8b73dbcad9c729557ae | refs/heads/master | 2022-12-22T22:42:29.473433 | 2022-12-21T16:52:09 | 2022-12-21T16:52:09 | 61,543,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | from typing import List
from typing import Optional
class TokenParser(object):
    """
    Splits a raw argument string into shell-like tokens for StringArgs.

    Understands single and double quotes (including nesting inside a quoted
    region) and backslash escape sequences.
    """

    def __init__(self) -> None:
        self._string = ""  # type: str
        self._cursor = 0  # type: int
        self._current = None  # type: Optional[str]
        self._next_ = None  # type: Optional[str]

    def parse(self, string: str) -> List[str]:
        """Return the list of tokens found in *string*."""
        self._string = string
        self._cursor = 0
        self._current = string[0] if string else None
        self._next_ = string[1] if len(string) > 1 else None

        return self._split_tokens()

    def _split_tokens(self) -> List[str]:
        tokens = []  # type: List[str]

        while self._current is not None:
            if self._current.isspace():
                # Swallow whitespace between tokens.
                self._advance()
                continue

            tokens.append(self._read_token())

        return tokens

    def _advance(self) -> None:
        """
        Move the cursor one character forward.
        """
        if self._current is None:
            return

        self._cursor += 1
        self._current = self._next_

        if self._cursor + 1 < len(self._string):
            self._next_ = self._string[self._cursor + 1]
        else:
            self._next_ = None

    def _read_token(self) -> str:
        token = ""

        while self._current is not None:
            char = self._current

            if char.isspace():
                # Token boundary; consume the space and stop.
                self._advance()
                break

            if char == "\\":
                token += self._read_escape()
            elif char in ("'", '"'):
                token += self._read_quoted()
            else:
                token += char
                self._advance()

        return token

    def _read_quoted(self) -> str:
        result = ""
        delimiter = self._current

        # Consume the opening quote.
        self._advance()

        while self._current is not None:
            char = self._current

            if char == delimiter:
                # Consume the closing quote.
                self._advance()
                break

            if char == "\\":
                result += self._read_escape()
            elif char == '"':
                # A differently-quoted region nested inside keeps its quotes.
                result += '"{}"'.format(self._read_quoted())
            elif char == "'":
                result += "'{}'".format(self._read_quoted())
            else:
                result += char
                self._advance()

        return result

    def _read_escape(self) -> str:
        # Backslash before a quote yields the bare quote; anything else
        # keeps the backslash.
        if self._next_ in ('"', "'"):
            sequence = self._next_
        else:
            sequence = "\\" + self._next_

        self._advance()
        self._advance()

        return sequence
| [
"alex.mylnikov@hitachivantara.com"
] | alex.mylnikov@hitachivantara.com |
a4cab5589801580f49853730f9ebb47d44b78a6a | dc7dc1ab85403a4467044d4c0c936c17fff5225a | /fstmerge/examples/Fail2ban/rev579-732/base-trunk-579/server/datetemplate.py | a495df29c03ed39cdbb248484605dae12d27a53e | [] | no_license | RoDaniel/featurehouse | d2dcb5f896bbce2c5154d0ba5622a908db4c5d99 | df89ce54ddadfba742508aa2ff3ba919a4a598dc | refs/heads/master | 2020-12-25T13:45:44.511719 | 2012-01-20T17:43:15 | 2012-01-20T17:43:15 | 1,919,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | __author__ = "Cyril Jaquier"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2010-07-25 12:46:34 $"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re
class DateTemplate:
def __init__(self):
self.__name = ""
self.__regex = ""
self.__cRegex = None
self.__pattern = ""
self.__hits = 0
def setName(self, name):
self.__name = name
def getName(self):
return self.__name
def setRegex(self, regex):
self.__regex = regex.strip()
self.__cRegex = re.compile(regex)
def getRegex(self):
return self.__regex
def setPattern(self, pattern):
self.__pattern = pattern.strip()
def getPattern(self):
return self.__pattern
def isValid(self):
return self.__regex != "" and self.__pattern != ""
def incHits(self):
self.__hits = self.__hits + 1
def getHits(self):
return self.__hits
def matchDate(self, line):
dateMatch = self.__cRegex.search(line)
return dateMatch
def getDate(self, line):
raise Exception("matchDate() is abstract")
| [
"joliebig"
] | joliebig |
4211a9ccc3f5e8c8113247b6341eec5da41389f9 | 14ea45983fdcabe18f948ee45c388e10d825f53d | /SolarSystem.py | 0237a9a3620a71e8edd2fee5698fa9789b946dd5 | [] | no_license | aodarc/Galileo | f4d6cbc3b8b3dbc9f0af70fe1874013d8d981b5d | f201a2ba3c560d893206ba29a1eefcf793025103 | refs/heads/master | 2020-12-28T20:30:31.899852 | 2016-01-14T10:29:27 | 2016-01-14T10:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | from tkinter import *
from time import sleep
from math import sqrt, pi, sin, cos
class Element(object):
    """A sprite in the solar-system window: a Tk Label showing an image."""
    _x = int()            # last placed x coordinate
    _y = int()            # last placed y coordinate
    _mass = float()       # mass; stored but not used in any physics yet
    _is_movable = False   # guards move(); instances/subclasses may enable it
    speed = 1.            # NOTE(review): set by main() but never read here
    img = 'atom50x50.png' # sprite image file under image/
    def __init__(self, root, mass=0.):
        self._mass = mass
        self._root = root
        # NOTE(review): Label has no explicit parent, so it attaches to the
        # default Tk root -- fine here since main() creates exactly one Tk().
        self.photo = PhotoImage(file='image/'+self.img)
        self.body = Label(background='#0F2A33', image=self.photo)
        self.body.image = self.photo # keep a reference so the image is not garbage-collected
        self.body.pack()
    def move(self, x=0, y=0):
        """Place the sprite at (x % 600, y % 600) and refresh the window."""
        if not self._is_movable:
            return # later add raise (silently ignores moves on static elements)
        self.body.place_configure(x=x % 600, y=y % 600)
        self._root.update()
        self._x = x
        self._y = y
class Sun(Element):
    """The central, immovable sun sprite, centered in the window."""
    def __init__(self, root, mass=0):
        # NOTE(review): `mass` is accepted but not forwarded to
        # Element.__init__, so the base class keeps its default of 0.
        # img must be set before Element.__init__ loads the image.
        self.img = 'sun50x50.png'
        Element.__init__(self, root)
        # Center the 50x50 sprite in the window.
        self._x = self._root.winfo_width()/2-25
        self._y = self._root.winfo_height()/2-25
        self.body.place_configure(x=self._x, y=self._y)
def main():
    """Build the window, create a sun and an orbiting atom, and animate it."""
    root = Tk()
    root.title(u'Сонячна система')
    root.configure(background='#0F2A33')
    root.geometry('600x600+600+200')
    root.minsize(600, 600)
    root.maxsize(1000, 1000)
    root.protocol('WM_DELETE_WINDOW', lambda: root.quit())
    root.resizable(False, False)  # disable window resizing in width and height
    root.update()
    b = Sun(root)
    b.speed = 10.
    c = Element(root)
    c._is_movable = True
    c.speed = 2.
    radius = 190
    # Pre-compute one full circular orbit around (275, 275), one point per degree.
    cs = [(275 + cos(pi*x/180) * radius, 275 + sin(pi*x/180) * radius) for x in range(360)]
    # Animate four complete orbits.
    for x, y in cs*4:
        c.move(x, y)
        sleep(0.001)
    root.mainloop()
main()
| [
"odarchenko@ex.ua"
] | odarchenko@ex.ua |
c622faefc313d7d0db83653fc9d302fc1d646689 | 658ab464e9c796f819ad85f569ad06ab6e66992e | /src/oop/student.py | 65c635d844e4dc672fcce29362f2c86fcd0be550 | [] | no_license | huowolf/python-demo | 03e5731ba632caada819dd70d0f9dc07c98308a1 | e3b80dcc0e0bc2437a0b2882e17563c8171460a2 | refs/heads/master | 2020-03-23T22:00:57.515258 | 2018-09-07T15:33:22 | 2018-09-07T15:33:22 | 142,147,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | class student(object):
    def __init__(self,name,age):
        """Create a student, storing name and age as private attributes."""
        self.__name = name #private attribute (name-mangled)
        self.__age = age
def print_student(self):
print('%s : %s' % (self.__name,self.__age))
    def get_name(self):
        """Accessor for the private name attribute."""
        return self.__name
    def get_age(self):
        """Accessor for the private age attribute."""
        return self.__age
    def set_name(self,name):
        """Mutator for the private name attribute."""
        self.__name=name
# Demo: create two students and exercise the accessor/mutator methods.
zhangsan=student('zhangsan',25)
lisi=student('lisi',22)
zhangsan.print_student()
lisi.print_student()
# Illegal operations: private (name-mangled) attributes cannot be accessed directly
# print(lisi.__name)
# lisi.__name='wangwu'
# lisi.print_student()
print(zhangsan.get_name())
print(zhangsan.get_age())
# Rename via the mutator and confirm the change.
zhangsan.set_name("zhangsan2")
print(zhangsan.get_name())
| [
"274956285@qq.com"
] | 274956285@qq.com |
def ndom_to_decimal(a):
    """Convert an ndom (base-6) numeral, written with decimal digits, to base 10.

    E.g. ndom 12 means 1*6 + 2 = 8.  Uses Horner's scheme; replaces the
    original's eval() on string slices and its multiply-everything-then-
    floor-divide-by-6 dance with plain int() arithmetic (same results).
    """
    value = 0
    for digit in str(a):
        value = value * 6 + int(digit)
    return value
def decimal_to_ndom(a):
    """Convert a non-negative base-10 integer to its ndom (base-6) numeral.

    E.g. 8 -> ndom 12.  Removes the original's no-op round() calls on
    integer values and its commented-out debug prints; same results.
    """
    digits = ""
    while True:
        digits = str(a % 6) + digits  # extract least-significant base-6 digit
        a //= 6
        if a == 0:
            break
    return int(digits)
def ndom_add(a,b):
    """Add two ndom (base-6) numerals and return the sum as an ndom numeral."""
    total = ndom_to_decimal(a) + ndom_to_decimal(b)
    return decimal_to_ndom(total)
def ndom_multiply(a,b):
    """Multiply two ndom (base-6) numerals and return the product as ndom."""
    product = ndom_to_decimal(a) * ndom_to_decimal(b)
    return decimal_to_ndom(product)
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
768a967f1b7f95199fec72a2acc7d628d7aba565 | e0b0abc1e8d442adb63cf0e5b8641f4185c5611c | /apps/myapp/migrations/0003_auto_20190416_1503.py | 0afc907d80ed6c5f380233840e2ef2b88fb44147 | [] | no_license | nramiscal/petDemo | f7d0c149941b2efabdb74a4e10717f540b6d7eee | c4d341105c1b1f14e868f198199fe37cf3687c8f | refs/heads/master | 2020-05-14T08:21:23.669095 | 2019-04-17T23:02:24 | 2019-04-17T23:02:24 | 181,721,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | # Generated by Django 2.1.4 on 2019-04-16 15:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('myapp', '0002_auto_20190416_1503'),
]
operations = [
migrations.CreateModel(
name='Pet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('breed', models.CharField(max_length=255)),
('species', models.CharField(max_length=255)),
('age', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=255)),
('lname', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('phone', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='pet',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pets', to='myapp.User'),
),
]
| [
"nramiscal@gmail.com"
] | nramiscal@gmail.com |
fe0b74f15b5d93ae0b7558f4334f7d75135b18f6 | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/ec/e0e81f3a218f001414849ea2c9622fd7 | 62f7a07fa6b5d81c8ef0610651721bffd9527d76 | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 4,108 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import gras
import numpy
import serial
import time
import sys
from sbhs import *
from scan_machines import *
import IN
class sbfan(gras.Block):
def __init__(self,window,fan_value,heat_value):
self.n = window
self.fan = fan_value
self.heat = heat_value
#self.set_fan_heat(self.fan,self.heat)
#self.fan1 = 0
#self.heat1 = 0
gras.Block.__init__(self,
name="sbfan",
in_sig=[numpy.float32,numpy.float32],
out_sig=[numpy.float32])
#from scan_machines import *
print "Scanning Machines"
scan_machines()
# SBHS init
self.new_device = Sbhs()
self.new_device.connect(1)
self.new_device.connect_device(0)
def set_fan_heat(self,fan_value,heat_value):
self.fan = fan_value
self.heat = heat_value
#self.new_device.setFan(self.fan)
#self.new_device.setHeat(self.heat)
return (self.fan,self.heat)
"""def set_parameters(self,window,fan_value,heat_value):
self.n = window
self.fan = fan_value
self.heat = heat_value"""
def isIntegralWin(self,input_item,window):
if(len(input_item) % window):
raise Exception(" Value of Window should be an integral value of length of input items")
def work(self, input_items, output_items):
#for heat_items in input_items[0]:
#print "Heat Written", heat_items
# Set heat as 0 for negative values of heat
#if heat_items < 0:
#self.new_device.setHeat(0)
#else:
# self.new_device.setHeat(heat_items)
#in0 = []
#in1 = []
out = output_items[0]
"""
in0 = input_items[0][0]
print "Input Zero : ",in0
in1 = input_items[1][0]
print "Input One : ",in1
self.new_device.setFan(in0)
self.new_device.setHeat(in1)
"""
print "INPUT_LENGTH",len(input_items)
for i in range(0,len(input_items-1)):
print " I ",i
in0 = input_items[i][0]
print "IN0 :",in0
#in1 = input_items[i][0]
self.new_device.setFan(in0)
#self.new_device.setHeat(in1)
for i in range(1,len(input_items-1)):
print " I ",i
in1 = input_items[i][0]
print "IN0 :",in1
#in1 = input_items[i][0]
self.new_device.setHeat(in1)
#self.new_device.setHeat(in1)
#self.set_fan_heat(self.fan,self.heat)
#new_fan,new_heat = self.set_fan_heat(in0,in1)
#self.new_device.setFan(self.fan)
#self.new_device.setFan(new_fan)
#time.sleep(0.5)
#self.new_device.setHeat(self.heat)
#self.new_device.setHeat(new_heat)
#time.sleep(0.5)
#For zero Temperatures
if not self.new_device.getTemp():
raise Exception(" Check SBHS conection try relogging it and run scan_machines.py")
#get temperature
a = self.new_device.getTemp()
#out = output_items[:]
print "A : ",a
out[:] = a
#out[:1] = a
print "Temperature:" ,output_items
#print "temperature:" ,out
#self.consume(0,1) #consume from port 0
self.consume(0,1)
self.consume(1,1)
self.produce(0,1)
| [
"imushir@gmail.com"
] | imushir@gmail.com | |
32c0b16bc6274f687e88566938c5cf3eb5a7cbf1 | 94575dcfd3699f4654fa6102cc908e929306b964 | /search/templatetags/search_tags.py | 11993c2a81d9f2ff40b9781ef7fa1d3f3e9be82c | [] | no_license | ViktorOgnev/tradenplay | 7c0f0535edd25dee17aacbb0bda0f28987547003 | b9d303ec4b1db3a97f04b8e3a17445a48ea8a9e9 | refs/heads/master | 2020-12-24T15:41:03.589087 | 2013-11-12T21:06:26 | 2013-11-12T21:06:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import urllib
from django import template
from search.forms import SearchForm
register = template.Library()
@register.inclusion_tag("tags/search_box.html")
def search_box(request):
    """Render the search-box inclusion tag, pre-filling the form with the
    current ``q`` query-string value (empty string when absent)."""
    q = request.GET.get('q', '')
    form = SearchForm({'q': q})
    return {'form': form}
@register.inclusion_tag("tags/pagination_links.html")
def pagination_links(request, paginator):
    """Render pagination links, preserving every query-string parameter
    except ``page`` so each link can append its own page number."""
    raw_params = request.GET.copy()
    page = raw_params.get('page', 1)
    p = paginator.page(page)
    try:
        # Drop the current page number; the template re-adds it per link.
        del raw_params['page']
    except KeyError:
        pass
    # NOTE(review): urllib.urlencode is Python 2 only — under Python 3 this
    # would need urllib.parse.urlencode.  Confirm the target runtime.
    params = urllib.urlencode(raw_params)
    return {'request': request,
            'paginator':paginator,
            'p': p,
            'params': params
            }
| [
"ognev.victor@gmail.com"
] | ognev.victor@gmail.com |
9e859f7f3957aec59205b030144805f23ffc59c5 | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/submariner/python/setup.py | c44ed5e96d91acc0c0fa554327e991420f75af4c | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
class InstallPluginCommand(install):
    """setuptools ``install`` command that additionally installs the Pulumi
    resource-provider plugin after the normal package install."""
    def run(self):
        """Run the standard install, then shell out to the Pulumi CLI."""
        install.run(self)
        try:
            check_call(['pulumi', 'plugin', 'install', 'resource', 'pulumi_kubernetes_crds_operators_submariner', '${PLUGIN_VERSION}'])
        except OSError as error:
            # ENOENT means the `pulumi` executable itself was not found.
            if error.errno == errno.ENOENT:
                print("""
                There was an error installing the pulumi_kubernetes_crds_operators_submariner resource provider plugin.
                It looks like `pulumi` is not installed on your system.
                Please visit https://pulumi.com/ to install the Pulumi CLI.
                You may try manually installing the plugin by running
                `pulumi plugin install resource pulumi_kubernetes_crds_operators_submariner ${PLUGIN_VERSION}`
                """)
            else:
                # Any other OS-level failure is unexpected: propagate it.
                raise
def readme():
    """Read and return the project README (used as the long description)."""
    with open('README.md', encoding='utf-8') as readme_file:
        contents = readme_file.read()
    return contents
setup(name='pulumi_pulumi_kubernetes_crds_operators_submariner',
version='${VERSION}',
long_description=readme(),
long_description_content_type='text/markdown',
cmdclass={
'install': InstallPluginCommand,
},
packages=find_packages(),
package_data={
'pulumi_pulumi_kubernetes_crds_operators_submariner': [
'py.typed'
]
},
install_requires=[
'parver>=0.2.1',
'pulumi>=2.0.0,<3.0.0',
'pyyaml>=5.1,<5.2',
'requests>=2.21.0,<2.22.0',
'semver>=2.8.1'
],
zip_safe=False)
| [
"albertzhong0@gmail.com"
] | albertzhong0@gmail.com |
699b70eb414da2b57563595e6a27cd4605d03fff | fa44abffdfe00e5a44fffe6610dce25a325ee93e | /instagram_clone/users/migrations/0013_auto_20180815_1540.py | e22b26aaf1bcb563d48860f3fb74a24da6fe6537 | [
"MIT"
] | permissive | devjinius/IG_clone | e539e44318cdf9baf5137057a0c671e8748c36bf | 6a525524ec357d5617b09e013b449df2ec9336ad | refs/heads/master | 2020-03-21T08:55:23.803771 | 2018-12-07T13:55:25 | 2018-12-07T13:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # Generated by Django 2.0.6 on 2018-08-15 06:40
from django.db import migrations, models
# Auto-generated migration: widens the gender choices on users.User.
class Migration(migrations.Migration):
    dependencies = [
        ('users', '0012_auto_20180811_1614'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='gender',
            # NOTE(review): the stored value 'femail' is a typo for 'female',
            # but it mirrors the model definition at generation time — do not
            # change it here without a matching model change + data migration.
            field=models.CharField(choices=[('not-specified', 'Not Specified'), ('femail', 'Female'), ('male', 'Male')], max_length=80, null=True),
        ),
    ]
| [
"eugenekang94@gmail.com"
] | eugenekang94@gmail.com |
a1ce1f81eb074822e1b694d7c4fbd4c00ab933b6 | a03c75b3a47c3d0f4662b5115162074802698bec | /w3/tests/tests.py | 133f0638b446cf0250123b92da0338ccac9e1133 | [] | no_license | shants/fss18 | 0ea88a4a41684a8b47a8b4aadafe285d90d172ef | 4e78ed3529934ca2f2d3f984ce4ba2df77b5b300 | refs/heads/master | 2020-03-26T22:37:40.406379 | 2018-11-01T20:24:03 | 2018-11-01T20:24:03 | 145,474,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | #ACK Did refer to code https://github.com/FahmidMorshed/fss18/tree/master/Week%203
# Once or twice to get doubt cleared, did not copy
import testingModule
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from Sample import Sample
from Sym import Sym
from Num import Num
from testingModule import O
import random
@O.k
def testing_Sample():
    """Fill reservoir samples of growing capacity (32..512) with 10k random
    numbers and check the median estimate stays near 0.5."""
    random.seed(1)
    s = []
    #-- create some samples holding 32,64,128... max items
    for i in range(5,10):
        o = Sample(2**i)
        s.append(o)
    #-- 10,000 store the same number in all samples
    for i in range(1,10000):
        y= random.random()
        for t in s:
            t.sampleInc(y)
    #-- check if any of them are +/- 0.2 of 0.5
    for t in s:
        o = t.nth(0.5)
        print(t.max, o)
        # NOTE(review): ".05-.33" looks like a typo for "0.5-.33" (a band
        # symmetric around 0.5, matching the upper bound) — confirm intent.
        assert (( o >= .05-.33) and ( o <= 0.5+.33 ))
    #assert 1 == 1
@O.k
def testing_Sym():
    """Entropy of a 9-'y' / 5-'n' symbol column should be ~0.9403 bits."""
    s1 = Sym()
    s1 = s1.syms([ 'y','y','y','y','y','y','y','y','y', 'n','n','n','n','n'])
    print(s1.symEnt())
    assert round(s1.symEnt(),4) == 0.9403
    #assert 1 == 1
@O.k
def testing_Num():
    """Mean/standard deviation of a fixed numeric column: mu=270.3, sd~231.946."""
    n1 = Num()
    n1 = n1.nums([ 4,10,15,38,54,57,62,83,100,100,174,190,215,225,
                233,250,260,270,299,300,306,333,350,375,443,475,
                525,583,780,1000])
    print(n1.mu, n1.sd)
    assert(n1.mu == 270.3 )
    assert(round(n1.sd,3) == 231.946)
if __name__== "__main__":
O.report()
| [
"mailtoshants@gmail.com"
] | mailtoshants@gmail.com |
a12920082831e56c19bf3a6ab7bca2d8d737aa11 | 53181572c4b22df4b569a9901bcd5347a3459499 | /ceit_190910_zhuhaotian/quiz2_1025/q3_sort_words.py | 2b92084de924b1aa44ec88047e5d2dd08b3b85e5 | [] | no_license | edu-athensoft/ceit4101python_student | 80ef067b77421fce76d04f778d5c6de8b12f676c | 33cfa438c062d45e8d246b853e93d3c14b92ff2d | refs/heads/master | 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # sorting words
# input 5 words from keyboard
#
word_list = []
word_list.append('aaa')
word_list.append('bab')
word_list.append('aba')
word_list.append('baa')
word_list.append('abb')
# word_list = ['aaa','bab','aba','baa','abb']
print(word_list)
word_list.sort();
print(word_list) | [
"inf.athensoft@hotmail.com"
] | inf.athensoft@hotmail.com |
bf0b4282f32489a6113d59a0f489ec203840d4e7 | 9f91fa2910d13273a50ae416c116e16385a4eb95 | /natvenv/env/bin/futurize | f4d2e794f7d69d3d86a7f87393fb0f58566e13ad | [] | no_license | natrayanp/mysb_v2 | cac811e7f66670f8546cccdbca386ba6ff4f8cd6 | 24dea04e2a631ca6b465b3f62077a83a5dce9758 | refs/heads/master | 2022-11-20T16:49:30.341095 | 2018-07-31T17:18:04 | 2018-07-31T17:18:04 | 116,319,931 | 0 | 1 | null | 2022-11-14T21:09:17 | 2018-01-05T00:05:13 | Python | UTF-8 | Python | false | false | 457 | #!/home/natrayan/project/AwsProject/Python/Tradingapp/tradingapp5/natvenv/env/bin/python3.6
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','futurize'
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.16.0', 'console_scripts', 'futurize')()
)
| [
"natrayan@localhost.localdomain"
] | natrayan@localhost.localdomain | |
c27e5d09683ac47879f811cb0cae11bcab8ea895 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/eBay/Trading/GetMemberMessages.py | d16e5be16b3903621ecda033d6f17f043b859ee1 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,062 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetMemberMessages
# Retrieves a list of the messages that buyers have posted about your active item listings.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetMemberMessages(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetMemberMessages Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/eBay/Trading/GetMemberMessages')
def new_input_set(self):
return GetMemberMessagesInputSet()
def _make_result_set(self, result, path):
return GetMemberMessagesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetMemberMessagesChoreographyExecution(session, exec_id, path)
class GetMemberMessagesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetMemberMessages
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_DisplayToPublic(self, value):
"""
Set the value of the DisplayToPublic input for this Choreo. ((optional, boolean) When set to true, only public messages (viewable in the Item listing) are returned.)
"""
InputSet._set_input(self, 'DisplayToPublic', value)
def set_EndCreationTime(self, value):
"""
Set the value of the EndCreationTime input for this Choreo. ((optional, date) Used to filter by date range (e.g., 2013-02-08T00:00:00.000Z).)
"""
InputSet._set_input(self, 'EndCreationTime', value)
def set_EntriesPerPage(self, value):
"""
Set the value of the EntriesPerPage input for this Choreo. ((optional, integer) The maximum number of records to return in the result.)
"""
InputSet._set_input(self, 'EntriesPerPage', value)
def set_ItemID(self, value):
"""
Set the value of the ItemID input for this Choreo. ((optional, string) The ID of the item the message is about.)
"""
InputSet._set_input(self, 'ItemID', value)
def set_MailMessageType(self, value):
"""
Set the value of the MailMessageType input for this Choreo. ((required, string) The type of message to retrieve. Valid values are: All and AskSellerQuestion. When set to AskSellerQuestion, ItemID or a date range filter must be specified.)
"""
InputSet._set_input(self, 'MailMessageType', value)
def set_MemberMessageID(self, value):
"""
Set the value of the MemberMessageID input for this Choreo. ((optional, string) An ID that uniquely identifies the message for a given user to be retrieved.)
"""
InputSet._set_input(self, 'MemberMessageID', value)
def set_MessageStatus(self, value):
"""
Set the value of the MessageStatus input for this Choreo. ((optional, string) The status of the message. Valid values are: Answered and Unanswered.)
"""
InputSet._set_input(self, 'MessageStatus', value)
def set_PageNumber(self, value):
"""
Set the value of the PageNumber input for this Choreo. ((optional, integer) Specifies the page number of the results to return.)
"""
InputSet._set_input(self, 'PageNumber', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
InputSet._set_input(self, 'ResponseFormat', value)
def set_SandboxMode(self, value):
"""
Set the value of the SandboxMode input for this Choreo. ((optional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
"""
InputSet._set_input(self, 'SandboxMode', value)
def set_SenderID(self, value):
"""
Set the value of the SenderID input for this Choreo. ((optional, string) The seller's UserID.)
"""
InputSet._set_input(self, 'SenderID', value)
def set_SiteID(self, value):
"""
Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
"""
InputSet._set_input(self, 'SiteID', value)
def set_StartCreationTime(self, value):
"""
Set the value of the StartCreationTime input for this Choreo. ((optional, date) Used to filter by date range (e.g., 2013-02-08T00:00:00.000Z).)
"""
InputSet._set_input(self, 'StartCreationTime', value)
def set_UserToken(self, value):
"""
Set the value of the UserToken input for this Choreo. ((required, string) A valid eBay Auth Token.)
"""
InputSet._set_input(self, 'UserToken', value)
class GetMemberMessagesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetMemberMessages Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
"""
return self._output.get('Response', None)
class GetMemberMessagesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetMemberMessagesResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
cbec3524bde8a5ee537bdc6d6c7858463522890b | 199677a3a5b4d205f03f39ef517f17a0f1fd450d | /transformer/Optim.py | 9f1a30ccc4f6ba35e173be280643bbdbcc4abc8f | [] | no_license | RRisto/jyri_bot | b304b834db3cb059afd63246fecd80569fea8f01 | 0018235436331cf733051a0125069cf052c1d28a | refs/heads/master | 2023-05-14T12:19:40.672041 | 2020-03-15T16:11:13 | 2020-03-15T16:11:13 | 243,827,027 | 0 | 0 | null | 2023-05-01T21:21:31 | 2020-02-28T18:18:04 | Jupyter Notebook | UTF-8 | Python | false | false | 2,329 | py | import torch
import numpy as np
# code from AllenNLP
class CosineWithRestarts(torch.optim.lr_scheduler._LRScheduler):
    """
    Cosine annealing with restarts.
    Parameters
    ----------
    optimizer : torch.optim.Optimizer
    T_max : int
        The maximum number of iterations within the first cycle.
    eta_min : float, optional (default: 0)
        The minimum learning rate.
    last_epoch : int, optional (default: -1)
        The index of the last epoch.
    factor : float, optional (default: 1)
        Each new cycle's length is the previous cycle length times this factor.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 T_max: int,
                 eta_min: float = 0.,
                 last_epoch: int = -1,
                 factor: float = 1.) -> None:
        # pylint: disable=invalid-name
        self.T_max = T_max
        self.eta_min = eta_min
        self.factor = factor
        # Bookkeeping for restarts: step index of the last restart, position
        # within the current cycle, cumulative length multiplier, and the
        # current cycle length.
        self._last_restart: int = 0
        self._cycle_counter: int = 0
        self._cycle_factor: float = 1.
        self._updated_cycle_len: int = T_max
        self._initialized: bool = False
        super(CosineWithRestarts, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Get updated learning rate."""
        # HACK: We need to check if this is the first time get_lr() was called, since
        # we want to start with step = 0, but _LRScheduler calls get_lr with
        # last_epoch + 1 when initialized.
        if not self._initialized:
            self._initialized = True
            return self.base_lrs
        step = self.last_epoch + 1
        self._cycle_counter = step - self._last_restart
        # Standard cosine schedule evaluated at the position within the
        # current cycle: eta_min + (lr - eta_min)/2 * (cos(pi * t/T) + 1).
        lrs = [
            (
                self.eta_min + ((lr - self.eta_min) / 2) *
                (
                    np.cos(
                        np.pi *
                        ((self._cycle_counter) % self._updated_cycle_len) /
                        self._updated_cycle_len
                    ) + 1
                )
            ) for lr in self.base_lrs
        ]
        # End of a cycle: restart, scaling the next cycle length by `factor`.
        if self._cycle_counter % self._updated_cycle_len == 0:
            # Adjust the cycle length.
            self._cycle_factor *= self.factor
            self._cycle_counter = 0
            self._updated_cycle_len = int(self._cycle_factor * self.T_max)
            self._last_restart = step
        return lrs
| [
"ristohinno@gmail.com"
] | ristohinno@gmail.com |
6315f499af64fbc9b68fcc99493d8765b4c830e6 | db2a6433d6cbc0652308ad31f0ceab12a32cc37e | /data/base_data_manager.py | 623a8cbaab49ffe755b3ce77b17aa97e46a13c5d | [] | no_license | ablewald/RLDP | c42e6657024410753c770614feaad5ee505eb2a5 | 6d39348e94ec86f3f9f43fc0eaba112c5b2e13c7 | refs/heads/master | 2020-04-26T20:49:05.565537 | 2019-03-11T16:52:08 | 2019-03-11T16:52:08 | 173,822,474 | 1 | 0 | null | 2019-03-04T21:14:36 | 2019-03-04T21:14:36 | null | UTF-8 | Python | false | false | 1,969 | py | import logging
from typing import NamedTuple, Optional
import pandas as pd
from carball.generated.api.game_pb2 import Game
from data.utils.utils import normalise_df
logger = logging.getLogger(__name__)
class GameData(NamedTuple):
    """Container pairing a replay's protobuf with its DataFrame; either
    field may be None depending on what the DataManager was asked to load."""
    # Parsed game protobuf, or None when need_proto was False.
    proto: Optional[Game]
    # Frame-level DataFrame, or None when need_df was False.
    df: Optional[pd.DataFrame]
class DataManager:
    """
    Abstract class that implements get_data() and the need_proto, need_df, and normalise_df attributes.
    Also implements the various methods required from subclasses.
    """
    def __init__(self, need_proto: bool = False, need_df: bool = False, normalise_df: bool = True):
        """
        :param need_proto: Whether to load the .proto attribute when get_data is called.
        :param need_df: Whether to load the .df attribute when get_data is called.
        :param normalise_df: Whether to normalise the df when get_data is called.
        """
        self.need_proto = need_proto
        self.need_df = need_df
        self.normalise_df = normalise_df
    def get_data(self, id_: str) -> GameData:
        """
        Returns a GameData object which has a .proto and .df attribute.
        Both default to None, unless self.need_proto or self.need_df are True respectively.
        If self.normalise_df is True, the returned GameData.df would be normalised.
        :param id_: Replay id
        :return: GameData object which has a .proto and .df attribute.
        """
        proto = self.get_proto(id_) if self.need_proto else None
        df = self.get_df(id_) if self.need_df else None
        if self.normalise_df:
            # NOTE(review): this normalises even when need_df is False, i.e.
            # normalise_df(None) — confirm normalise_df handles None input.
            df = normalise_df(df)
        logger.info(f"Got data for replay: {id_}")
        return GameData(proto, df)
    def get_replay_list(self, num: int = 50):
        """Return a list of replay ids; must be implemented by subclasses."""
        raise NotImplementedError()
    def get_df(self, id_: str) -> pd.DataFrame:
        """Return the frame DataFrame for a replay; subclass responsibility."""
        raise NotImplementedError()
    def get_proto(self, id_: str) -> Game:
        """Return the parsed Game protobuf for a replay; subclass responsibility."""
        raise NotImplementedError()
class BrokenDataError(Exception):
    """Raised by data managers when a replay's data cannot be used."""
    pass
| [
"harry1996@gmail.com"
] | harry1996@gmail.com |
476f7760da9d24dd776d5c5707f96d54ad244c15 | 1d05ebcbfcf806f5459990a6a64e73b48ba9892d | /docs/conf.py | cd2d2b71b2982fd6fa2387e27e3f406013990318 | [
"MIT"
] | permissive | asmodehn/caerbannog | 2da3cafb5c0323a4802467b1d42573ad450ed1e7 | 47bb1138190748041a4c0d02e522c0924a9af962 | refs/heads/master | 2023-07-09T15:34:04.100983 | 2019-04-03T09:15:19 | 2019-04-03T09:15:19 | 167,014,660 | 0 | 0 | MIT | 2019-03-10T11:12:01 | 2019-01-22T15:10:09 | Python | UTF-8 | Python | false | false | 5,490 | py | # -*- coding: utf-8 -*-
#
# Caerbannog documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 9 19:32:44 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(1, os.path.abspath('../01'))
sys.path.insert(1, os.path.abspath('../02'))
sys.path.insert(1, os.path.abspath('../03'))
sys.path.insert(1, os.path.abspath('../04'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Caerbannog'
copyright = u'2019, AlexV'
author = u'AlexV'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Link to python documentation
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Caerbannogdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Caerbannog.tex', u'Caerbannog Documentation',
u'AlexV', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'caerbannog', u'Caerbannog Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Caerbannog', u'Caerbannog Documentation',
author, 'Caerbannog', 'Follow the white rabbit.',
'Miscellaneous'),
]
| [
"asmodehn@gmail.com"
] | asmodehn@gmail.com |
d95b0548f2e6420f3ccee5b833c033df4a965499 | 6e12c2e6d453ea1caf64c0eafaf3410b30f434e0 | /shop/migrations/0034_supplier_show_in_order.py | fe6c4a45d5be5bbddeb3d730c251d63778ff5f7d | [] | no_license | andreynovikov/django-shop | 43b66ec639037473cd72f7480f83811d911104fb | 8f843b0b15354903a335c324daa65714bfb2f8cc | refs/heads/master | 2023-04-28T01:26:16.938227 | 2023-04-22T15:33:29 | 2023-04-22T15:33:29 | 43,815,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated migration: adds Supplier.show_in_order
# (Russian verbose_name: "show in order"), defaulting to False, indexed.
class Migration(migrations.Migration):
    dependencies = [
        ('shop', '0033_auto_20161125_1809'),
    ]
    operations = [
        migrations.AddField(
            model_name='supplier',
            name='show_in_order',
            field=models.BooleanField(db_index=True, verbose_name='показывать в заказе', default=False),
        ),
    ]
| [
"novikov@gmail.com"
] | novikov@gmail.com |
36db238a5b5abe3f0914813641510bb82143db4f | 51888119e10cdff12dafb060a54824632edccf3f | /Folders/Python/expressionMy.py | 14699ba1a009fe68ceae66e06157b87238811cde | [
"BSD-2-Clause"
] | permissive | kuchinal/lamakaha | b64511ad8c6d2b36da5a84a266b9e7a69acd3106 | 24e3b2ff53bcac2ad1c0e5a3b9afd4593d85f22d | refs/heads/master | 2023-09-01T17:55:56.551183 | 2023-07-31T19:32:04 | 2023-07-31T19:32:04 | 182,849,747 | 0 | 0 | null | 2021-09-10T06:34:22 | 2019-04-22T19:00:02 | Python | UTF-8 | Python | false | false | 370 | py | import nuke
def expressionMy():
    """Create a DeepExpression node when the selected node is a Deep node
    (excluding DeepHoldout and DeepToImage nodes), otherwise create a plain
    Expression node.

    Falls back to a plain Expression node when nothing is selected (nuke
    raises in that case) or when the lookup fails for any other reason.
    """
    try:
        # Look the selected node up once instead of re-querying per check.
        node = nuke.selectedNode()
        name = node['name'].value()
        if 'Deep' in node.Class() and "DeepHoldout" not in name and "DeepToImage" not in name:
            nuke.createNode("DeepExpression")
        else:
            nuke.createNode("Expression")
    except Exception:
        # Bug fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt.  Catch Exception and default to Expression.
        nuke.createNode("Expression")
"lamakaha@gmail.com"
] | lamakaha@gmail.com |
4aaf88b9b7c2ae1f4dd6a2157c55eae876572c46 | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/programmers/Level3/정수 삼각형.py | 799f7bd3aaebc37d94b122073b679fc586cec6c8 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 625 | py | def solution(data):
n = len(data)
arr = [[num for num in data[idx]] + [0] * (n - idx - 1) for idx in range(n)]
DP = [[0] * n for _ in range(n)]
DP[0][0] = arr[0][0]
if n == 1:
return DP[0][0]
for i in range(1, n):
for j in range(i + 1):
if j == 0:
DP[i][j] = DP[i - 1][j] + arr[i][j]
elif j == i:
DP[i][j] = DP[i - 1][j - 1] + arr[i][j]
else:
DP[i][j] = max(DP[i - 1][j - 1], DP[i - 1][j]) + arr[i][j]
return max(DP[n - 1])
print(solution([[7], [3, 8], [8, 1, 0], [2, 7, 4, 4], [4, 5, 2, 6, 5]])) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
bcbf988f50e81324829f054a398cd2482060cfc2 | 640d26baa9322b92ea5d247280668b4ad7475f8d | /robot_assignment_ws/build/turtlebot/turtlebot_see/catkin_generated/pkg.installspace.context.pc.py | 87d357364c348496a3a8053c19362e393f8311c9 | [] | no_license | JulianVJacobs/Robotics-Project-2021 | 6baa5a6423a28cc278b84d831f2d8c9f5239da90 | 18a58cee8e2793bd05e5e158c0c998099fc62d5c | refs/heads/main | 2023-06-03T02:47:15.579120 | 2021-06-25T19:56:32 | 2021-06-25T19:56:32 | 374,733,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_see"
PROJECT_SPACE_DIR = "/home/julian/robot_assignment_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"1605267@students.wits.ac.za"
] | 1605267@students.wits.ac.za |
6ebe306e963ff722a39f95e97c0e7e0176f232db | 61fc42a1411551a023426705825692b570209f24 | /seleniumbase/translate/japanese.py | 54afb345ddc23d76b3f31a9e51052c5a96d40412 | [
"MIT"
] | permissive | dE1l/SeleniumBase | 9c33be707cd9773f9c4a53439eed74583ef14f43 | 6020f9fb3dd25700636b3837d5be192387d23acd | refs/heads/master | 2022-04-23T23:22:07.196648 | 2020-04-22T07:18:31 | 2020-04-22T07:18:31 | 257,905,752 | 0 | 0 | null | 2020-04-22T13:13:03 | 2020-04-22T13:13:02 | null | UTF-8 | Python | false | false | 8,569 | py | # Japanese / 日本語 - Translations - Python 3 Only!
from seleniumbase import BaseCase
class セレニウムテストケース(BaseCase): # noqa
def URLを開く(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def クリックして(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def ダブルクリックして(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def ゆっくりクリックして(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def リンクテキストをクリックします(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def テキストを更新(self, *args, **kwargs):
# update_text(selector, new_value)
return self.update_text(*args, **kwargs)
def テキストを追加(self, *args, **kwargs):
# add_text(selector, new_value)
return self.add_text(*args, **kwargs)
def テキストを取得(self, *args, **kwargs):
# get_text(selector, new_value)
return self.get_text(*args, **kwargs)
def テキストを確認する(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def 正確なテキストを確認する(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def 要素を確認する(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def タイトルを確認(self, *args, **kwargs):
# assert_title(title)
return self.assert_title(*args, **kwargs)
def 検証が正しい(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def 検証は偽です(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def 検証が等しい(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def 検証が等しくない(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def ページを更新する(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def 現在のURLを取得(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def ページのソースコードを取得する(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def 戻る(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def 進む(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def テキストが表示されています(self, *args, **kwargs):
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def 要素は表示されますか(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def 要素が存在するかどうか(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def テキストを待つ(self, *args, **kwargs):
# wait_for_text(text, selector)
return self.wait_for_text(*args, **kwargs)
def 要素を待つ(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def 眠る(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def を提出す(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def JSクリックして(self, *args, **kwargs):
# js_click(selector)
return self.js_click(*args, **kwargs)
def HTMLをチェック(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def スクリーンショットを保存(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def ファイルを選択(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def スクリプトを実行する(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def ブロック広告(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def スキップ(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def リンク切れを確認する(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def JSエラーを確認する(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def フレームに切り替え(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(*args, **kwargs)
def デフォルトのコンテンツに切り替える(self, *args, **kwargs):
# switch_to_default_content()
return self.switch_to_default_content(*args, **kwargs)
def 新しいウィンドウを開く(self, *args, **kwargs):
# open_new_window()
return self.open_new_window(*args, **kwargs)
def ウィンドウに切り替え(self, *args, **kwargs):
# switch_to_window(window)
return self.switch_to_window(*args, **kwargs)
def デフォルトのウィンドウに切り替える(self, *args, **kwargs):
# switch_to_default_window()
return self.switch_to_default_window(*args, **kwargs)
def ハイライト(self, *args, **kwargs):
# highlight(selector)
return self.highlight(*args, **kwargs)
def ハイライトしてクリックして(self, *args, **kwargs):
# highlight_click(selector)
return self.highlight_click(*args, **kwargs)
def スクロールして(self, *args, **kwargs):
# scroll_to(selector)
return self.scroll_to(*args, **kwargs)
def 一番上までスクロール(self, *args, **kwargs):
# scroll_to_top()
return self.scroll_to_top(*args, **kwargs)
def 一番下までスクロール(self, *args, **kwargs):
# scroll_to_bottom()
return self.scroll_to_bottom(*args, **kwargs)
def 上にマウスを移動しクリック(self, *args, **kwargs):
# hover_and_click(hover_selector, click_selector)
return self.hover_and_click(*args, **kwargs)
def 選択されていることを(self, *args, **kwargs):
# is_selected(selector)
return self.is_selected(*args, **kwargs)
def 上矢印を押します(self, *args, **kwargs):
# press_up_arrow(selector="html", times=1)
return self.press_up_arrow(*args, **kwargs)
def 下矢印を押します(self, *args, **kwargs):
# press_down_arrow(selector="html", times=1)
return self.press_down_arrow(*args, **kwargs)
def 左矢印を押します(self, *args, **kwargs):
# press_left_arrow(selector="html", times=1)
return self.press_left_arrow(*args, **kwargs)
def 右矢印を押します(self, *args, **kwargs):
# press_right_arrow(selector="html", times=1)
return self.press_right_arrow(*args, **kwargs)
def 表示要素をクリックします(self, *args, **kwargs):
# click_visible_elements(selector)
return self.click_visible_elements(*args, **kwargs)
def テキストでオプションを選択(self, *args, **kwargs):
# select_option_by_text(dropdown_selector, option)
return self.select_option_by_text(*args, **kwargs)
def インデックスでオプションを選択(self, *args, **kwargs):
# select_option_by_index(dropdown_selector, option)
return self.select_option_by_index(*args, **kwargs)
def 値でオプションを選択(self, *args, **kwargs):
# select_option_by_value(dropdown_selector, option)
return self.select_option_by_value(*args, **kwargs)
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
4f82d7c8c1484dc4f051d1bbc2e00c5af99f5175 | 7b750c5c9df2fb05e92b16a43767c444404de7ae | /src/leetcode/python3/leetcode831.py | b5df08e76e5758d1e9c9d370b21a13f7000a71bc | [] | no_license | renaissance-codes/leetcode | a68c0203fe4f006fa250122614079adfe6582d78 | de6db120a1e709809d26e3e317c66612e681fb70 | refs/heads/master | 2022-08-18T15:05:19.622014 | 2022-08-05T03:34:01 | 2022-08-05T03:34:01 | 200,180,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
隐藏个人信息
"""
class Solution:
def maskPII(self, S: str) -> str:
if "@" in S:
sarray = S.split("@")
firstname = sarray[0].lower()
lastname = sarray[1].lower()
return firstname[0] + "*****" + firstname[-1] + "@" + lastname
else:
nums = []
for s in S:
if s.isdigit():
nums.append(s)
if len(nums) == 10:
return "***-***-" + "".join(nums[-4:])
else:
return "+" + "*" * (len(nums) - 10) + "-***-***-" + "".join(nums[-4:])
| [
"jack.li@eisoo.com"
] | jack.li@eisoo.com |
a28b5082bb8fc7c6b6d6ed95d0d894e1c957d6dd | 7f9dfa2cccf77764940ffcbbf92939e37c138c43 | /crawl_file/file_path/pylab_examples/colours.py | e405f6214b0577b7746527d95670fbfdc7c89f73 | [] | no_license | zhangmman/scrapy_spider | f80bd8d213edde0dea083babe610ca7b1bc449a3 | 2bda4aa29f2550c649c939045ce4fcdea2736187 | refs/heads/master | 2020-09-11T13:58:49.930929 | 2019-12-21T08:40:56 | 2019-12-21T08:43:43 | 222,080,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # -*- noplot -*-
"""
Some simple functions to generate colours.
"""
import numpy as np
from matplotlib import colors as mcolors
def pastel(colour, weight=2.4):
""" Convert colour into a nice pastel shade"""
rgb = np.asarray(mcolors.to_rgba(colour)[:3])
# scale colour
maxc = max(rgb)
if maxc < 1.0 and maxc > 0:
# scale colour
scale = 1.0 / maxc
rgb = rgb * scale
# now decrease saturation
total = rgb.sum()
slack = 0
for x in rgb:
slack += 1.0 - x
# want to increase weight from total to weight
# pick x s.t. slack * x == weight - total
# x = (weight - total) / slack
x = (weight - total) / slack
rgb = [c + (x * (1.0 - c)) for c in rgb]
return rgb
def get_colours(n):
""" Return n pastel colours. """
base = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if n <= 3:
return base[0:n]
# how many new colours to we need to insert between
# red and green and between green and blue?
needed = (((n - 3) + 1) / 2, (n - 3) / 2)
colours = []
for start in (0, 1):
for x in np.linspace(0, 1, needed[start] + 2):
colours.append((base[start] * (1.0 - x)) +
(base[start + 1] * x))
return [pastel(c) for c in colours[0:n]]
| [
"zhangman@ncepu.cn"
] | zhangman@ncepu.cn |
431403eaa0ceb56b26d30a9e7c72a61329582028 | 8ce0fd5e5c5b858fa24e388f2114885160421c03 | /python/socket/socket_communication_server.py | 0a170df96928af5a20e86ab71ac48d87ae3483c5 | [] | no_license | kong-ling/scripts | 266e9975ae0156d6fdddf43b8f1d7ee20469b388 | 3c41c49646358d46871c8fd8ebe1ba52bdea046c | refs/heads/master | 2021-01-10T08:29:34.772634 | 2020-01-03T09:04:57 | 2020-01-03T09:04:57 | 43,275,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | # this is the program for socket communication
# it can be used both for server and client
# python socket_communication.py -server for server
# python socket_communication.py -client for client
# sys.argv[0] : script name
# sys.argv[1] : -server or -client
# sys.argv[2] : string to send to server
import socket
import time
import sys
import os
import datetime
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
#ec_sise = os.environ['EC_SITE']
ec_sise = ''
#host = host + '.' + ec_sise + '.intel.com'
port = 1234
print('host name is: %s' % host)
# for server point
if '-server' in sys.argv: #server
print('server program running')
sock.bind((host, port))
sock.listen(10)
#connection.settimeout(5)
while True:
try:
print('\n\n')
print(datetime.datetime.now())
print('Wait for new request ...')
print('\n\n')
connection, address = sock.accept()
received = connection.recv(1024)
#send back to counerpart
print('Got connection from %s: %s' % (address, received))
connection.send(received)
#convert the command to sting for host to run
cmd_seq = received.split(' ')
cmd = ''
for seq in cmd_seq:
if '@' not in seq and 'from' not in seq:
cmd = cmd + ' ' + seq
print('received=[%s]' % received)
cmd_strip = cmd.strip()
print('cmd=[%s]' % cmd.strip)
os.system(cmd_strip)
except socket.timeout:
print('time out')
# for client point
if '-client' in sys.argv: #client
#print 'client program running'
sock.connect((host, port))
loop = 1
#send sys.argv[1], and wait for response from server
#if the recevied from the server is the same as sys.argv[1], exit the program
#otherwise, retransmit the string
while True:
#print 'Send \'%s\'' % (sys.argv[2])
#print 'User Name: %s' % os.environ.get('USER')
#request_to_send = sys.argv[2] + ' from ' + os.environ.get('USER') + '@' + host
request_to_send = sys.argv[2]
sock.send(request_to_send) #send to server
response_received = sock.recv(1024) #receive the response from server
if (response_received == request_to_send):
print('Your request is: [%s]' % response_received)
#time.sleep(1);
break
loop += 1
#both server point and client point need this operation
sock.close
| [
"kong.ling@outlook.com"
] | kong.ling@outlook.com |
a8c5736efe67d43d9082343c8675d35c51e02d45 | eaf4027bfff9c063af70ac8393ccf2a960ea4485 | /app/views.py | 31b8262e10f072baefaac074d14e8c10df814e1a | [] | no_license | williamjohngardner/movie_api | de062040fef7cdf8796040aa992600f18a88d0cd | 1b452b3da9cb19f97864f34f5c590f6f11798192 | refs/heads/master | 2020-12-19T04:42:10.354866 | 2016-06-28T19:25:55 | 2016-06-28T19:25:55 | 62,158,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from django.shortcuts import render
from app.models import Movie, Rater, Rating
from django.views.generic import View
from rest_framework import generics
from app.serializers import MovieSerializer, RaterSerializer, RatingSerializer
class MovieListAPIView(generics.ListCreateAPIView):
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class MovieDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class RaterListAPIView(generics.ListCreateAPIView):
queryset = Rater.objects.all()
serializer_class = RaterSerializer
class RaterDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Rater.objects.all()
serializer_class = RaterSerializer
class RatingListAPIView(generics.ListCreateAPIView):
queryset = Rating.objects.all()
serializer_class = RatingSerializer
class RatingDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Rating.objects.all()
serializer_class = RatingSerializer
| [
"bill@williamjohngardner.com"
] | bill@williamjohngardner.com |
c4a68abfea2dc3f7dff0da1407aeeddae4027912 | f83f5cef508f0356e33e9cca83dce176249090e2 | /apps/calculate_percentage.py | 5437777e461de4e23769c716d917dbf5f6280bfa | [] | no_license | AnupJoseph/topic_centrality | 1bcb75b4eaee4536c28dc0fec9927421412f6463 | 21811e8a500f74b4032b0fea5d664320b6f335e8 | refs/heads/master | 2023-01-19T11:26:59.218020 | 2020-11-28T09:13:50 | 2020-11-28T09:13:50 | 280,427,036 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | import pandas as pd
from collections import Counter
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
def calculate_percentage():
politicians = ['SenSanders', 'realDonaldTrump', 'JoeBiden', 'andrewcuomo', 'TeamPelosi',
'NikkiHaley', 'MittRomney', 'Mike_Pence', 'SenatorCollins', 'PeteButtigieg']
COLS = ['id', 'created_at', 'original_text', 'clean_text',
'retweet_count', 'hashtags', 'mentions', 'original_author']
data = pd.DataFrame(columns=COLS)
for politician in politicians:
df = pd.read_csv(f"data/{politician}/{politician}_data_temp.csv")
# df.drop(labels=['Unnamed: 0','Unnamed: 0.1'],inplace=True)
df.drop('Unnamed: 0',inplace=True,axis=1)
df.drop('Unnamed: 0.1',inplace=True,axis=1)
data = pd.concat([data,df])
percentage_data = Counter(data['lda_cluster'])
total = sum(percentage_data.values())
return[(item/total)*100 for item in percentage_data.values()]
| [
"anup20joseph@gmail.com"
] | anup20joseph@gmail.com |
98bbe7cbbd152d0babe84eee89c53b25ed74ea83 | 78f50f3777963500aa78e0d98314a54c46ceed07 | /cupy/random/__init__.py | 08fbaac10a4b123afc442d5a291a276548fb9b48 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | KillEdision/chainer | 78d360f713936de9ed9200b0bc08dc27435f8c27 | 3f70edd67db4d9b687bd4b5f9cc21e426ad58beb | refs/heads/master | 2020-12-24T18:22:51.794784 | 2015-09-12T12:35:05 | 2015-09-12T12:35:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import numpy
from cupy.random import distributions
from cupy.random import generator
from cupy.random import sample
rand = sample.rand
randn = sample.randn
random_sample = sample.random_sample
random = random_sample
ranf = random_sample
sample = random_sample
bytes = numpy.random.bytes
lognormal = distributions.lognormal
normal = distributions.normal
standard_normal = distributions.standard_normal
uniform = distributions.uniform
RandomState = generator.RandomState
get_random_state = generator.get_random_state
seed = generator.seed
reset_states = generator.reset_states
| [
"beam.web@gmail.com"
] | beam.web@gmail.com |
64d00c72230a28b3a56922bc825e9b10434e3e27 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/3071.py | f0ee9ea5da1cad7eb9ae4cc51b642c17264b5911 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | file=open("E:/q1.in")
ansfile=open("E:/ans01.txt","w")
lines=file.readlines();
noitems=int(lines[0])
l=[]
c=0
ans=0
for i in range (1,noitems+1):
c=0
ans=0
l=list(lines[i].strip("\n").split(" ")[1])
for j in range(0,len(l)):
if c<j:
ans+=j-c
c=j
c+=int(l[j])
ansfile.write("case #"+str(i)+": "+str(ans)+"\n");
file.close()
ansfile.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5d9b4ae82cb669a73d1b4b521ff3accb5d759d9e | 5b9a7423f4f52b905be652cb8bbd10072bf9ffcd | /brute_http_form.py | cdb6b030d7886562eb7bd8b7e5c4af268ea95e3f | [] | no_license | obnosis/scripts | f411722253215c73bfc467dfa107912980d52e45 | df9db475ab5920823e4a11faf4a880de7e633cc6 | refs/heads/master | 2021-01-16T18:14:29.109986 | 2013-04-05T15:55:32 | 2013-04-05T15:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | import requests
URL = ''
USER_VAR = 'user_id'
PASS_VAR = 'user_pw'
def get_users():
return ['admin']
def get_pwds():
return ['Test1', 'Test2', 'Camera1']
for user in get_users():
for pwd in get_pwds():
auth = {USER_VAR: user, PASS_VAR: pwd}
resp = requests.post(URL, data=auth)
print resp.text
| [
"stephen@averagesecurityguy.info"
] | stephen@averagesecurityguy.info |
f0ca5cacf63bb531c1973663118d4212e89662e0 | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0418_factorisation_triples.py | bf71bc166aae2762f714f628cc860fd4ee8c29c9 | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # Solution of;
# Project Euler Problem 418: Factorisation triples
# https://projecteuler.net/problem=418
#
# Let n be a positive integer. An integer triple (a, b, c) is called a
# factorisation triple of n if: 1 ≤ a ≤ b ≤ c a·b·c = n. Define f(n) to be a +
# b + c for the factorisation triple (a, b, c) of n which minimises c / a. One
# can show that this triple is unique. For example, f(165) = 19, f(100100) =
# 142 and f(20!) = 4034872. Find f(43!).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 418
timed.caller(dummy, n, i, prob_id)
| [
"lcsm29@outlook.com"
] | lcsm29@outlook.com |
d8a5ceb674eade3bb97a5b46c8118190fc310a72 | ce74ed4ad6834168b81d6ec5e53c80935f247fe1 | /scripts/evaluate.simple.py | 131baf5404548192c5e940fb3814493809dd7554 | [] | no_license | chenghuige/melt | 6b6984243c71a85ec343cfaa67a66e3d1b48c180 | d2646ffe84eabab464b4bef6b31d218abdbf6ce5 | refs/heads/master | 2021-01-25T16:46:57.567890 | 2017-08-26T04:30:13 | 2017-08-26T04:30:13 | 101,304,210 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,796 | py | #!/usr/bin/env python
#coding=gbk
# ==============================================================================
# \file evaluate.py
# \author chenghuige
# \date 2014-01-04 08:58:40.965360
# \Description similary to TLC show : confusion matrix , pic of auc
# input is one file : instance,true,probability,assigned,..
# for libsvm test, need to file as input feature(.libsvm) and result(.predict) ->svm-evluate.py or svm-gen-evaluate.py first
# for tlc the header format is: instance,true, assigned,output, probability
# TODO understancd other output of tlc and add more
# ==============================================================================
import sys,os,glob
from gflags import *
#hack for some machine sklearn/externals/joblib/parallel.py:41: UserWarning: This platform lacks a functioning sem_open implementation, therefore, the required synchronization primitives needed will not function, see issue 3770.. joblib will operate in serial mode
import warnings
warnings.filterwarnings("ignore")
#hack for cxfreeze
import sklearn.utils.sparsetools._graph_validation
from scipy.sparse.csgraph import _validation
from sklearn.utils import lgamma
#import pylab as pl
#import matplotlib.pyplot as pl
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
DEFINE_boolean('show', False, 'wehter to show the roc pic')
DEFINE_float('thre', 0.5, 'thre for desciding predict')
DEFINE_string('image', 'temp.roc.pr.png', 'output image')
DEFINE_integer('max_num', 20, 'most to deal')
DEFINE_string('regex', '', 'use regex to find files to deal')
DEFINE_string('column', 'probability', 'score index name')
#confusion matrix, auc, roc curve
def evaluate(label_list, predicts, predict_list, file_name):
#---------------------------------confusion table
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(label_list)):
if (predict_list[i] == 1):
if (label_list[i] == 1):
tp += 1
else:
fp += 1
else:
if (label_list[i] == 1):
fn += 1
else:
tn += 1
num_pos = tp + fn
num_neg = fp + tn
total_instance = num_pos + num_neg
pratio = num_pos * 1.0 / total_instance
#true positive rate
tpr = tp * 1.0 / num_pos
tnr = tn * 1.0 / num_neg
#num of predicted positive
num_pp = tp + fp
num_pn = fn + tn
#tur postive accuracy
tpa = 1
tna = 1
if num_pp != 0:
tpa = tp * 1.0 / num_pp
if num_pn != 0:
tna = tn * 1.0 / num_pn
ok_num = tp + tn
accuracy = ok_num * 1.0 / total_instance
print """
TEST POSITIVE RATIO: %.4f (%d/(%d+%d))
Confusion table:
||===============================|
|| PREDICTED |
TRUTH || positive | negative | RECALL
||===============================|
positive|| %-5d | %-5d | [%.4f] (%d / %d)
negative|| %-5d | %-5d | %.4f (%d / %d) wushang:[%.4f]
||===============================|
PRECISION [%.4f] (%d/%d) %.4f(%d/%d)
OVERALL 0/1 ACCURACY: %.4f (%d/%d)
"""%(pratio, num_pos, num_pos, num_neg, tp, fn, tpr, tp, num_pos, fp, tn, tnr, tn, num_neg, 1 - tnr, tpa, tp, num_pp, tna, tn, num_pn, accuracy, ok_num, total_instance)
#----------------------------------------------------- auc area
#from sklearn.metrics import roc_auc_score
#auc = roc_auc_score(label_list, predicts)
fpr_, tpr_, thresholds = roc_curve(label_list, predicts)
roc_auc = auc(fpr_, tpr_)
print """
ACCURACY: %.4f
POS. PRECISION: %.4f
POS. RECALL: %.4f
NEG. PRECISION: %.4f
NEG. RECALL: %.4f
AUC: [%.4f]
"""%(accuracy, tpa, tpr, tna, tnr, roc_auc)
#------------------------------------------------------roc curve
#pl.clf()
#pl.plot(fpr_, tpr_, label='%s: (area = %0.4f)' % (file_name, roc_auc))
#pl.plot([0, 1], [0, 1], 'k--')
#pl.xlim([0.0, 1.0])
#pl.ylim([0.0, 1.0])
#pl.xlabel('False Positive Rate')
#pl.ylabel('True Positive Rate')
#pl.title('Roc Curve:')
#pl.legend(loc="upper right")
def parse_input(input):
lines = open(input).readlines()
header = lines[0]
lines = lines[1:]
label_idx = 1
output_idx = 3
probability_idx = 4
names = header.split()
for i in range(len(names)):
if (names[i].lower() == 'label' or names[i].lower() == 'true'):
label_idx = i
if (names[i].lower() == 'output'):
output_idx = i
if (names[i].lower() == FLAGS.column.lower()):
probability_idx = i
try:
line_list = [line.strip().split() for line in lines]
label_list = [int(float((l[label_idx]))) for l in line_list]
predicts = [float(l[probability_idx]) for l in line_list]
#predicts = [float(l[output_idx]) for l in line_list]
predict_list = [int(item >= FLAGS.thre) for item in predicts]
return label_list, predicts, predict_list
except Exception:
print "label_idx: " + str(label_idx) + " prob_idx: " + str(probability_idx)
exit(1)
def precision_recall(label_list, predicts, file_name):
# Compute Precision-Recall and plot curve
precision, recall, thresholds = precision_recall_curve(label_list, predicts)
area = auc(recall, precision)
#print("Area Under Curve: %0.2f" % area)
#pl.clf()
#pl.plot(recall, precision, label='%s (area = %0.4f)'%(file_name, area))
#pl.xlabel('Recall')
#pl.ylabel('Precision')
#pl.ylim([0.0, 1.05])
#pl.xlim([0.0, 1.0])
#pl.title('Precision-Recall curve')
#pl.legend(loc="lower left")
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
pos = len(argv) - 1
try:
FLAGS.thre = float(argv[-1])
pos -= 1
except Exception:
pass
#---------------------------------thre
print "Thre: %.4f"%FLAGS.thre
#---------------------------------deal input
l = []
if (FLAGS.regex != ""):
print "regex: " + FLAGS.regex
l = glob.glob(FLAGS.regex)
print l
else:
input = argv[1]
l = input.split()
if (len(l) > 1):
FLAGS.show = True
if (len(l) > FLAGS.max_num):
l = l[:FLAGS.max_num]
#deal with more than 1 input
#f = pl.figure("Model Evaluation",figsize=(32,12), dpi = 100)
#f.add_subplot(1, 2, 1)
for input in l:
print "--------------- " + input
label_list, predicts, predict_list = parse_input(input)
evaluate(label_list, predicts, predict_list, input)
#f.add_subplot(1, 2, 0)
for input in l:
label_list, predicts, predict_list = parse_input(input)
precision_recall(label_list, predicts, input)
else:
input2 = ""
if (pos > 1):
input2 = argv[2]
#FLAGS.show = True
print "--------------- " + input
label_list, predicts, predict_list = parse_input(input)
#f = pl.figure(figsize=(32,12))
#f.add_subplot(1, 2, 1)
evaluate(label_list, predicts, predict_list, input)
print "--------------- " + input2
label_list2 = []
predicts2 = []
predict_list2 = []
if (input2 != ""):
label_list2, predicts2, predict_list2 = parse_input(input2)
evaluate(label_list2, predicts2, predict_list2, input2)
#f.add_subplot(1, 2, 0)
precision_recall(label_list, predicts, input)
if (input2 != ""):
precision_recall(label_list2, predicts2, input2)
#pl.savefig(FLAGS.image)
#if (FLAGS.show):
# pl.show()
if __name__ == "__main__":
main(sys.argv)
| [
"chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97"
] | chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97 |
b1f21c6a67c048f668ee6bd826fcfd903544ad41 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /third_party/blink/web_tests/external/wpt/mathml/tools/xHeight.py | 724352bf91679ea9a7e4d862ad43b5707f9cfb7b | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/python
from utils import mathfont
import fontforge
v = mathfont.em / 2
f = mathfont.create("xheight%d" % v)
g = f.createChar(ord('x'))
mathfont.drawRectangleGlyph(g, mathfont.em, v, 0)
assert f.xHeight == v, "Bad x-height value!"
mathfont.save(f)
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
76e8517821c4f615d7904b26dc91fa843601111f | e16fbfdf1e3074d59b70902073c3024bafa77235 | /spider/dongguan/dongguan/pipelines.py | 28c993d4e8d12f03a0724243a46208c56c7fa9da | [] | no_license | haha479/Scrapy | aa52a0999ef3b2c8570696a85cc6dfd95ebd1d03 | d6c2c12b94e2ecf4846d20bfe8349e3bd09a3beb | refs/heads/master | 2021-09-04T14:29:38.555015 | 2018-01-19T14:42:07 | 2018-01-19T14:42:07 | 117,560,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
class DongguanPipeline(object):
def __init__(self):
self.filename = codecs.open("donggguan.json", "w", encoding="utf-8")
def process_item(self, item, spider):
text = json.dumps(dict(item),ensure_ascii=False) + "\n"
self.filename.write(text)
return item
def close_spider(self,spider):
self.filename.close() | [
"283275935@qq.com"
] | 283275935@qq.com |
424f0c0c5072d11b2dae9aff8f3df1b4504f7a0b | 6163c502cba634a922c448219afb6021e88f2747 | /Data Visualization/Seaborn/faceting with Seaborn.py | d1d558c9b2b05a87ac10d7ae795d3d571f1d9c16 | [] | no_license | Okroshiashvili/Data-Science-Lab | 9cddf8ff7dae41dabe321fa8804a9b041c24596b | 796d4f5c0ec8c90d373bde2bfbc0cf244f62e69b | refs/heads/master | 2023-04-05T05:36:48.088628 | 2023-03-22T18:52:04 | 2023-03-22T18:52:04 | 130,345,437 | 12 | 2 | null | 2022-11-21T23:41:31 | 2018-04-20T10:08:23 | Python | UTF-8 | Python | false | false | 2,018 | py |
"""
Data Link:
https://www.kaggle.com/thec03u5/fifa-18-demo-player-dataset
Faceting is the act of breaking data variables up across multiple subplots,
and combining those subplots into a single figure.
"""
import pandas as pd
import numpy as np
import re
import seaborn as sns
pd.set_option('max_columns',None)
# Read data
footballers = pd.read_csv('data/CompleteDataset.csv', index_col=0)
### Some data pre-processing steps.
# Make a copy
df = footballers.copy(deep=True)
df['Unit'] = footballers['Value'].str[-1]
df['Value (M)'] = np.where(df['Unit'] == '0',0,
df['Value'].str[1:-1].replace(r'[a-zA-Z]',''))
df['Value (M)'] = df['Value (M)'].astype(float)
df['Value (M)'] = np.where(df['Unit'] == 'M',
df['Value (M)'],
df['Value (M)']/1000)
df = df.assign(Value=df['Value (M)'],
position=df['Preferred Positions'].str.split().str[0])
### The FacetGrid ###
# We're interested in comparing strikers with goalkeepers in some way.
data = df[df['position'].isin(['ST','GK'])]
g = sns.FacetGrid(data, col='position')
# We can use map object to plot the data into laid-out grid
g.map(sns.kdeplot, "Overall")
# FacetGrid for all positions
g = sns.FacetGrid(df, col='position', col_wrap=6)
g.map(sns.kdeplot, 'Overall')
# Suppose we're interested in comparing the talent distribution across rival clubs
data = df[df['position'].isin(['ST', 'GK'])]
data = data[data['Club'].isin(['Real Madrid CF', 'FC Barcelona','Atlético Madrid'])]
g = sns.FacetGrid(df, row='position', col='Club')
g.map(sns.violinplot, 'Overall')
# We can order subplots
g = sns.FacetGrid(df, row='position', col='Club',
row_order=['GK', 'ST'],
col_order=['Atlético Madrid', 'FC Barcelona', 'Real Madrid CF'])
g.map(sns.violinplot, 'Overall')
### Pairplot ###
sns.pairplot(df[['Overall','Potential','Value']])
| [
"n.okroshiashvili@gmail.com"
] | n.okroshiashvili@gmail.com |
dd074b8b470a687ed2c08f6fea742702f346f792 | b610b21ad9645bf099ad9ab0c024ccd212c36b53 | /AdvancedBlogAPI/Todo/models.py | 640f45a4439954103cdd19680e2772ee35ff14dd | [] | no_license | co-codin/AdvancedBlogAPI | 7c98aa36a2c9da3df6b23a2fbf6803d5f4ca3f43 | f478eb272ed067f175dbcfc20efdc10601d5c8bc | refs/heads/master | 2022-02-03T04:35:48.212984 | 2018-08-09T14:23:30 | 2018-08-09T14:23:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from django.db import models
# Create your models here.
class Todo(models.Model):
text = models.CharField(max_length=120)
complete = models.BooleanField(default=False)
def __str__(self):
return self.text
| [
"cuiyeqing960904@gmail.com"
] | cuiyeqing960904@gmail.com |
80e47ba9061751983d8ba2a0898defab0b7c9e12 | 94c8dd4126da6e9fe9acb2d1769e1c24abe195d3 | /test/python/visualization/timeline/test_events.py | b799fc7a36f5508ab72eec1b8df9e9c969e5d531 | [
"Apache-2.0"
] | permissive | levbishop/qiskit-terra | a75c2f96586768c12b51a117f9ccb7398b52843d | 98130dd6158d1f1474e44dd5aeacbc619174ad63 | refs/heads/master | 2023-07-19T19:00:53.483204 | 2021-04-20T16:30:16 | 2021-04-20T16:30:16 | 181,052,828 | 1 | 0 | Apache-2.0 | 2019-06-05T15:32:13 | 2019-04-12T17:20:54 | Python | UTF-8 | Python | false | false | 6,989 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for event of timeline drawer."""
import qiskit
from qiskit import QuantumCircuit, transpile
from qiskit.circuit import library
from qiskit.test import QiskitTestCase
from qiskit.visualization.timeline import events, types
class TestLoadScheduledCircuit(QiskitTestCase):
"""Test for loading program."""
def setUp(self) -> None:
"""Setup."""
super().setUp()
circ = QuantumCircuit(3)
circ.delay(100, 2)
circ.barrier(0, 1, 2)
circ.h(0)
circ.cx(0, 1)
self.circ = transpile(circ,
scheduling_method='alap',
basis_gates=['h', 'cx'],
instruction_durations=[('h', 0, 200), ('cx', [0, 1], 1000)],
optimization_level=0)
def test_create_from_program(self):
"""Test factory method."""
bit_event_q0 = events.BitEvents.load_program(self.circ, self.circ.qregs[0][0])
bit_event_q1 = events.BitEvents.load_program(self.circ, self.circ.qregs[0][1])
bit_event_q2 = events.BitEvents.load_program(self.circ, self.circ.qregs[0][2])
gates_q0 = list(bit_event_q0.get_gates())
links_q0 = list(bit_event_q0.get_gate_links())
barriers_q0 = list(bit_event_q0.get_barriers())
self.assertEqual(len(gates_q0), 3)
self.assertEqual(len(links_q0), 1)
self.assertEqual(len(barriers_q0), 1)
# h gate
self.assertEqual(gates_q0[1].t0, 100)
# cx gate
self.assertEqual(gates_q0[2].t0, 300)
# link
self.assertEqual(links_q0[0].t0, 800)
# barrier
self.assertEqual(barriers_q0[0].t0, 100)
gates_q1 = list(bit_event_q1.get_gates())
links_q1 = list(bit_event_q1.get_gate_links())
barriers_q1 = list(bit_event_q1.get_barriers())
self.assertEqual(len(gates_q1), 3)
self.assertEqual(len(links_q1), 0)
self.assertEqual(len(barriers_q1), 1)
# cx gate
self.assertEqual(gates_q0[2].t0, 300)
# barrier
self.assertEqual(barriers_q1[0].t0, 100)
gates_q2 = list(bit_event_q2.get_gates())
links_q2 = list(bit_event_q2.get_gate_links())
barriers_q2 = list(bit_event_q2.get_barriers())
self.assertEqual(len(gates_q2), 2)
self.assertEqual(len(links_q2), 0)
self.assertEqual(len(barriers_q2), 1)
# barrier
self.assertEqual(barriers_q2[0].t0, 100)
class TestBitEvents(QiskitTestCase):
"""Tests for bit events."""
def setUp(self) -> None:
"""Setup."""
super().setUp()
self.qubits = list(qiskit.QuantumRegister(2))
self.clbits = list(qiskit.ClassicalRegister(2))
self.instructions = [
types.ScheduledGate(t0=0, operand=library.U1Gate(0),
duration=0, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=0, operand=library.U2Gate(0, 0),
duration=10, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=10, operand=library.CXGate(),
duration=50, bits=[self.qubits[0], self.qubits[1]],
bit_position=0),
types.ScheduledGate(t0=100, operand=library.U3Gate(0, 0, 0),
duration=20, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=120, operand=library.Barrier(2),
duration=0, bits=[self.qubits[0], self.qubits[1]],
bit_position=0),
types.ScheduledGate(t0=120, operand=library.CXGate(),
duration=50, bits=[self.qubits[1], self.qubits[0]],
bit_position=1),
types.ScheduledGate(t0=200, operand=library.Barrier(1),
duration=0, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=200, operand=library.Measure(),
duration=100, bits=[self.qubits[0], self.clbits[0]],
bit_position=0),
]
def test_gate_output(self):
"""Test gate output."""
bit_event = events.BitEvents(self.qubits[0], self.instructions, 300)
gates = list(bit_event.get_gates())
ref_list = [
types.ScheduledGate(t0=0, operand=library.U1Gate(0),
duration=0, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=0, operand=library.U2Gate(0, 0),
duration=10, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=10, operand=library.CXGate(),
duration=50, bits=[self.qubits[0], self.qubits[1]],
bit_position=0),
types.ScheduledGate(t0=100, operand=library.U3Gate(0, 0, 0),
duration=20, bits=[self.qubits[0]], bit_position=0),
types.ScheduledGate(t0=120, operand=library.CXGate(),
duration=50, bits=[self.qubits[1], self.qubits[0]],
bit_position=1),
types.ScheduledGate(t0=200, operand=library.Measure(),
duration=100, bits=[self.qubits[0], self.clbits[0]],
bit_position=0)
]
self.assertListEqual(gates, ref_list)
def test_barrier_output(self):
"""Test barrier output."""
bit_event = events.BitEvents(self.qubits[0], self.instructions, 200)
barriers = list(bit_event.get_barriers())
ref_list = [
types.Barrier(t0=120, bits=[self.qubits[0], self.qubits[1]], bit_position=0),
types.Barrier(t0=200, bits=[self.qubits[0]], bit_position=0)
]
self.assertListEqual(barriers, ref_list)
def test_bit_link_output(self):
"""Test link output."""
bit_event = events.BitEvents(self.qubits[0], self.instructions, 250)
links = list(bit_event.get_gate_links())
ref_list = [
types.GateLink(t0=35.0, opname=library.CXGate().name,
bits=[self.qubits[0], self.qubits[1]]),
types.GateLink(t0=250.0, opname=library.Measure().name,
bits=[self.qubits[0], self.clbits[0]])
]
self.assertListEqual(links, ref_list)
| [
"noreply@github.com"
] | levbishop.noreply@github.com |
4ae64eb893a2bc7e08bc9513e6d66cb09bb75910 | d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86 | /disturbance/migrations/0028_auto_20200505_1219.py | 64dd3427825f2d5b508840eb281397175b84262b | [
"Apache-2.0"
] | permissive | Djandwich/disturbance | cb1d25701b23414cd91e3ac5b0207618cd03a7e5 | b1ba1404b9ca7c941891ea42c00b9ff9bcc41237 | refs/heads/master | 2023-05-05T19:52:36.124923 | 2021-06-03T06:37:53 | 2021-06-03T06:37:53 | 259,816,629 | 1 | 1 | NOASSERTION | 2021-06-03T09:46:46 | 2020-04-29T03:39:33 | Python | UTF-8 | Python | false | false | 1,225 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-05-05 04:19
from __future__ import unicode_literals
import disturbance.components.compliances.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('disturbance', '0027_auto_20200505_1156'),
]
operations = [
migrations.CreateModel(
name='OnSiteInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period_from', models.DateField(blank=True, null=True)),
('period_to', models.DateField(blank=True, null=True)),
('comments', models.TextField(blank=True)),
('apiary_site', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='disturbance.ApiarySite')),
],
),
migrations.AlterField(
model_name='compliancedocument',
name='_file',
field=models.FileField(max_length=500, upload_to=disturbance.components.compliances.models.update_proposal_complaince_filename),
),
]
| [
"katsufumi.shibata@dbca.wa.gov.au"
] | katsufumi.shibata@dbca.wa.gov.au |
8583f5bb6cababfed20301df5b0a8835b294325b | c7330806e61bb03e69e859b2ed33ae42fc7916e6 | /Discovery/Content/CollectionBook.py | 74ca10f7499a72757dade2cde554ef893670d284 | [] | no_license | ixfalia/ZeroProjects | ec2f91000a5ce014f7413f32873b10fb01a3ed20 | 17bd2c0f9c3a5ef3705b008f6b128d589aef4168 | refs/heads/master | 2020-06-03T05:20:44.370134 | 2019-06-24T05:01:18 | 2019-06-24T05:01:18 | 191,456,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,520 | py | import Zero
import Events
import Property
import VectorMath
class Marvel:
def __init__(self, name, text, discovered = False, color = None):
self.Name = name
self.Text = text
self.Discovered = discovered
#endclass
class Collectable:
def __init__(self, name, amount, text = None):
self.Name = name
self.Amount = amount
self.Text = text
class CollectionBook:
def DefineProperties(self):
#self.Lives = Property.Int(9)
self.CollectionResource = Property.ResourceTable()
pass
def Initialize(self, initializer):
self.CollectionTable = {}
self.MarvelTable = {}
self.Marvels = {}
Zero.Connect(self.Owner, "CollectionEvent", self.onCollection)
Zero.Connect(self.Owner, "MarvelEvent", self.onMarvel)
pass
def addMarvel(self, name, text, discovered = False):
if not discovered:
discovered = False
self.MarvelTable[name] = Marvel(name, text, discovered)
m = self.getMarvel(name)
print(m.Name, m.Discovered, m.Text)
print(self.MarvelTable)
def discoverMarvel(self, name, discoverState = True):
last = self.MarvelTable[name].Discovered
self.Marvels[name] = discoverState
self.MarvelTable[name].Discovered = discoverState
if not last and discoverState:
self.makeMarvelMessage("marvel")
else:
self.makeMarvelMessage("gotit")
def makeMarvelMessage(self, name):
children = self.Owner.PlayerTracker.Camera.Children
for child in children:
if child.Name == name:
child.Celebration.onActivation(None)
def getMarvel(self, name):
if name in self.MarvelTable:
return self.MarvelTable[name]
else:
return None
def getMarvelCount(self):
count = 0
for m in self.MarvelTable:
if m.Discovered:
count += 1
return count
def getTotalMarvels(self):
return len(self.MarvelTable)
def addCollection(self, name, amount = 1, description = None):
print(amount)
raise
if not name in self.CollectionTable:
self.CollectionTable[name] = 0
self.CollectionTable[name] += amount #to just add entry put amount = 0, to subtract just put amount = -#
def getCollection(self, name):
if name in self.CollectionTable:
return self.CollectionTable[name]
else:
return None
def onCollection(self, CollectionEvent):
name = CollectionEvent.Name
if CollectionEvent.Amount:
amount = CollectionEvent.Amount
else:
amount = 1
if self.CollectionEvent.TextBlock:
description = CollectionEvent.TextBlock
self.addCollection(name, CollectionEvent.Amount, CollectionEvent.TextBlock)
def onMarvel(self, MarvelEvent):
name = MarvelEvent.Name
discovered = MarvelEvent.Discovered
print("onMarvel:", name in self.MarvelTable, self.MarvelTable)
if not name in self.MarvelTable:
self.addMarvel(name, MarvelEvent.Text)
print("onMarvel after Add:", self.MarvelTable)
else:
self.discoverMarvel(name, discovered)
Zero.RegisterComponent("CollectionBook", CollectionBook) | [
"systematisoperandi@gmail.com"
] | systematisoperandi@gmail.com |
326d0c7de68e073287f3a396f8a137a169102766 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /PYTHON OOP/Previous Exams/Python OOP Exam - 10 Apr 2021/exam_skeleton/project/aquarium/aquarium/freshwater_aquarium.py | d117fdaea034b8266d55f12414f36f536bf8cbd0 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from project.aquarium.base_aquarium import BaseAquarium
class FreshwaterAquarium(BaseAquarium):
def __init__(self, name):
super().__init__(name, capacity=50)
self.aquarium_fish_type = "FreshwaterFish"
| [
"noreply@github.com"
] | MiroVatov.noreply@github.com |
60cfc9014c29b483d349ca6430bea75baef9e675 | 5164dc11387ac2bab5d4bcabf7c1ce0e63cdbaaa | /appenlight_client/ext/logging/logbook.py | 5ec8ef3a02b434ec8892e436aead2f68d0adc3fe | [
"BSD-3-Clause"
] | permissive | jpwilliams/appenlight-client-python | ba472035794556c6624284150b5724c280eae7b0 | 9f3b0fd7d7035bcafb11e686c218dd1393909912 | refs/heads/master | 2020-12-24T16:15:00.770966 | 2015-09-01T15:31:45 | 2015-09-01T15:31:45 | 41,746,574 | 0 | 0 | null | 2015-09-01T15:23:01 | 2015-09-01T15:23:00 | null | UTF-8 | Python | false | false | 3,050 | py | from __future__ import absolute_import
import logbook
import logging
import threading
import datetime
import time
from appenlight_client.ext.logging import EXCLUDED_LOG_VARS
from appenlight_client.timing import get_local_storage
from appenlight_client.utils import asbool, parse_tag, PY3
log = logging.getLogger(__name__)
class ThreadLocalHandler(logbook.Handler):
def __init__(self, client_config=None, *args, **kwargs):
logbook.Handler.__init__(self, *args, **kwargs)
self.ae_client_config = client_config
def emit(self, record):
appenlight_storage = get_local_storage()
r_dict = convert_record_to_dict(record, self.ae_client_config)
if r_dict:
if r_dict not in appenlight_storage.logs:
appenlight_storage.logs.append(r_dict)
def get_records(self, thread=None):
"""
Returns a list of records for the current thread.
"""
appenlight_storage = get_local_storage()
return appenlight_storage.logs
def clear_records(self, thread=None):
""" Clears ALL logs from AE storage """
appenlight_storage = get_local_storage()
appenlight_storage.logs = []
def convert_record_to_dict(record, client_config):
if record.channel in client_config.get('log_namespace_blacklist', []):
return None
if not getattr(record, 'time'):
time_string = datetime.datetime.utcnow().isoformat()
else:
time_string = record.time.isoformat()
try:
message = record.msg
tags_list = []
log_dict = {'log_level': record.level_name,
"namespace": record.channel,
'server': client_config.get('server_name', 'unknown'),
'date': time_string,
'request_id': None}
if PY3:
log_dict['message'] = '%s' % message
else:
msg = message.encode('utf8') if isinstance(message,
unicode) else message
log_dict['message'] = '%s' % msg
if client_config.get('logging_attach_exc_text'):
pass
# populate tags from extra
for k, v in record.extra.iteritems():
if k not in EXCLUDED_LOG_VARS:
try:
tags_list.append(parse_tag(k, v))
if k == 'ae_primary_key':
log_dict['primary_key'] = unicode(v)
if k == 'ae_permanent':
try:
log_dict['permanent'] = asbool(v)
except Exception:
log_dict['permanent'] = True
except Exception as e:
log.info(u'Couldn\'t convert attached tag %s' % e)
if tags_list:
log_dict['tags'] = tags_list
return log_dict
except (TypeError, UnicodeDecodeError, UnicodeEncodeError) as e:
# handle some weird case where record.getMessage() fails
log.warning(e) | [
"info@webreactor.eu"
] | info@webreactor.eu |
da196e42ba4f3cf221f3ee5836971d6489da71ed | 95d1dd5758076c0a9740d545a6ef2b5e5bb8c120 | /PY/algorithm/expression_evaluation.py | 897fa67d8b5d6359098f536be23c4e30788a29fd | [] | no_license | icoding2016/study | 639cb0ad2fe80f43b6c93c4415dc6e8a11390c85 | 11618c34156544f26b3b27886b55c771305b2328 | refs/heads/master | 2023-08-31T14:15:42.796754 | 2023-08-31T05:28:38 | 2023-08-31T05:28:38 | 117,061,872 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,254 | py | # Expression Evaluation (infix)
# Medium
#
# to support basic calculation with:
# - operator: + - * / ^
# - operand: int/float
# - Parenthesis are permitted
# - blank(s) allowed in the expression
#
#
# Solution include 2 parts:
# 1) parse the str, identify operators and operands
# 2) Expression Evaluate
# Method ref: http://csis.pace.edu/~murthy/ProgrammingProblems/Programming_Problems.htm#16_Evaluation_of_infix_expressions
#
# Expression Evaluate:
# Use two stacks:
# Operand stack: to keep values (numbers) and
# Operator stack: to keep operators (+, -, *, . and ^).
# In the following, “process” means,
# (i) pop operand stack once (value2)
# (ii) pop operator stack once (operator)
# (iii) pop operand stack again (value1)
# (iv) compute value1 operator value2
# (v) push the value obtained in operand stack.
# Algorithm:
# Until the end of the expression is reached, get one character and perform only one of the steps (a) through (f):
# (a) If the character is an operand, push it onto the operand stack.
# (b) If the character is an operator, and the operator stack is empty then push it onto the operator stack.
# (c) If the character is an operator and the operator stack is not empty, and the character's precedence is
# greater than the precedence of the stack top of operator stack, then push the character onto the operator stack.
# (d) If the character is "(", then push it onto operator stack.
# (e) If the character is ")", then "process" as explained above until the corresponding "(" is encountered in operator stack.
# At this stage POP the operator stack and ignore "(."
# (f) If cases (a), (b), (c), (d) and (e) do not apply, then process as explained above.
# When there are no more input characters, keep processing until the operator stack becomes empty.
# The values left in the operand stack is the final result of the expression.
#
# e.g.
# 10*3-((6+5)-2*4)^2+20/4 =
#
class InvalidInput(Exception):
pass
class ExpressionEvaluator(object):
OPERATOR = ['+', '-', '*', '/', '^', '(', ')']
PRIO = {'(':4, ')':4, '^':3, '*':2, '/':2, '+':1, '-':1}
def __init__(self):
#self._init_evaluator()
pass
def _init_evaluator(self):
self.operators = []
self.operands = []
def parse(self, expression: str) -> list[str]:
"""Prase the expression str, split the operators and operand into a list."""
output = []
number = ''
number_exp = [str(i) for i in range(0,10)] + ['.']
expression.replace(' ', '') # remove blanks
for c in expression:
if c in self.OPERATOR:
if number:
output.append(number)
number = ''
output.append(c)
elif c in number_exp:
number += c
else:
raise InvalidInput('Expression contains invalid operator/number')
if number:
output.append(number)
# print(output)
return output
def evaluate(self, expression: str):
self._init_evaluator()
if len(expression) < 3:
raise InvalidInput('Invalid input.')
for c in self.parse(expression):
if c not in self.OPERATOR:
self.operands.append(c)
elif not self.operators:
self.operators.append(c)
elif c == '(':
self.operators.append(c)
elif c == ')':
# process until reach '('
while self.operators[-1]!='(':
self.process()
if self.operators[-1]=='(':
self.operators.pop() # pop '('
elif self.PRIO[c] > self.PRIO[self.operators[-1]]:
self.operators.append(c)
elif self.PRIO[c] <= self.PRIO[self.operators[-1]]:
while self.operators and self.operators[-1]!='(':
self.process()
self.operators.append(c)
while self.operators and self.operators[-1]!='(':
self.process()
return self.operands[0]
def process(self):
if not self.operands or not self.operators:
return
v2 = self.operands.pop()
op = self.operators.pop()
v1 = self.operands.pop()
if op == '^':
exp = f'int({v1})**int({v2})'
else:
exp = f'{v1}{op}{v2}'
self.operands.append(str(eval(exp)))
def test_fixture(s:ExpressionEvaluator):
testdata = [ # (input, expect),
(('3+5*2-1',), 12),
(('2*3-((6+5)-2*4)+2',), 5),
(('2+3-(2^2+3*(5-6/2)^2)*2+20',),-7),
(('5-6/2',),2.0),
(('3-2',),1),
(('4*5',),20),
(('3.0*2+(4.0+2.5*(4/2)+3.5*2)/2.0',),14.0),
]
for i in range(len(testdata)):
ret = s.evaluate(*testdata[i][0])
exp = str(testdata[i][1])
#exp = s.maxProfit_bf(*testdata[i][0])
print("{} -> \t{} \t expect {}".format("testdata[i][0]", ret, exp), end='\t')
print("{}".format('pass' if ret==exp else 'fail'))
import timeit
def test():
s = ExpressionEvaluator()
test_fixture(s)
test()
| [
"icoding2016@gmail.com"
] | icoding2016@gmail.com |
cd7fa9170f10eae6dfe847fde231d58eef59b1ef | db302f4f35f9c9df55ae9bbaf95f53116b77a7a8 | /specviz/core/hub.py | d893da489fe0acb1960325d07b8ab8321fe02157 | [
"BSD-3-Clause"
] | permissive | imagineagents/specviz | c8c9ef033397a20029cfbd5972594a1d1cf0ee06 | 099e05ed40a7db56f338c3b89e3a8ec646586ac7 | refs/heads/master | 2020-04-04T22:57:21.131095 | 2018-11-05T21:46:48 | 2018-11-05T21:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,169 | py | import logging
from .items import DataItem
class Hub:
def __init__(self, workspace, *args, **kwargs):
self._workspace = workspace
@property
def workspace(self):
"""The active workspace."""
return self._workspace
@property
def model(self):
"""The data item model of the active workspace."""
return self.workspace.model
@property
def proxy_model(self):
"""The proxy model of the active workspace."""
return self.workspace.proxy_model
@property
def plot_window(self):
"""The currently selected plot window of the workspace."""
return self.workspace.current_plot_window
@property
def plot_windows(self):
"""The currently selected plot window of the workspace."""
return self.workspace.mdi_area.subWindowList()
@property
def plot_widget(self):
"""The plot widget of the currently active plot window."""
return self.workspace.current_plot_window.plot_widget
@property
def plot_item(self):
"""The currently selected plot item."""
if self.workspace is not None:
return self.workspace.current_item
@property
def plot_items(self):
"""Returns the currently selected plot item."""
return self.proxy_model.items
@property
def visible_plot_items(self):
"""Plotted data that are currently visible."""
if self.plot_widget is not None:
return self.plot_widget.listDataItems()
@property
def selected_region(self):
"""The currently active ROI on the plot."""
return self.plot_window.plot_widget.selected_region
@property
def selected_region_bounds(self):
"""The bounds of currently active ROI on the plot."""
return self.plot_window.plot_widget.selected_region_bounds
@property
def data_item(self):
"""The data item of the currently selected plot item."""
if self.plot_item is not None:
return self.plot_item.data_item
@property
def data_items(self):
"""List of all data items held in the data item model."""
return self.model.items
def append_data_item(self, data_item):
"""
Adds a new data item object to appear in the left data list view.
Parameters
----------
data_item : :class:`~specviz.core.items.PlotDataItem`
The data item to be added to the list view.
"""
if isinstance(data_item, DataItem):
self.workspace.model.appendRow(data_item)
else:
logging.error("Data item model only accepts items of class "
"'DataItem', received '{}'.".format(type(data_item)))
def plot_data_item_from_data_item(self, data_item):
"""
Returns the PlotDataItem associated with the provided DataItem.
Parameters
----------
data_item : :class:`~specviz.core.items.PlotDataItem`
The DataItem from which the associated PlotDataItem will be
returned.
Returns
-------
plot_data_item : :class:`~specviz.core.items.PlotDataItem`
The PlotDataItem wrapping the DataItem.
"""
plot_data_item = self.workspace.proxy_model.item_from_id(
data_item.identifier)
return plot_data_item
def set_active_plugin_bar(self, name=None, index=None):
"""
Sets the currently displayed widget in the plugin side panel.
Parameters
----------
name : str, optional
The displayed name of the widget in the tab title.
index : int, optional
The index of the widget in the plugin tab widget.
"""
if name is None and index is None:
return
elif index is not None:
self.workspace.plugin_tab_widget.setCurrentIndex(index)
elif name is not None:
for i in range(self.workspace.plugin_tab_widget.count()):
if self.workspace.plugin_tab_widget.tabText(i) == name:
self.workspace.plugin_tab_widget.setCurrentIndex(i) | [
"contact@nicholasearl.me"
] | contact@nicholasearl.me |
2a45ff408fd370aab4729c41774fcc2437b0919b | 62fe26b309b402d0c304624bf63a8e9b9c025148 | /backend/src/food/migrations/0008_auto_20160526_1948.py | c5a13f42d2b89acff07771bcad8c733195ccddf5 | [] | no_license | brmley/fuchtard | 20c9c8229debaf54897faabffa43d87bff1f0995 | 833bcb9655fff9ff733f798d19561d9b7e6c476c | refs/heads/master | 2021-09-06T15:13:13.317894 | 2018-02-07T22:08:54 | 2018-02-07T22:08:54 | 120,173,994 | 0 | 0 | null | 2018-02-04T10:12:36 | 2018-02-04T10:12:35 | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-26 12:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('food', '0007_foodcategory_slug'),
]
operations = [
migrations.AlterModelOptions(
name='discount',
options={'verbose_name': 'Скидка', 'verbose_name_plural': 'Скидки'},
),
]
| [
"roboxv@gmail.com"
] | roboxv@gmail.com |
32089584dd27fac775fd61e0b1956be135b7f3ec | 8ad9faa828ce54cddc38dc86eef30e6635babd0c | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/type.py | be7ed87f72c7869b64748c8b290d96533fa14e13 | [
"MIT"
] | permissive | ralfjon/IxNetwork | d1a50069bc5a211f062b2b257cb6775e7cae8689 | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | refs/heads/master | 2020-04-04T00:36:24.956925 | 2018-10-26T16:37:13 | 2018-10-26T16:37:13 | 155,655,988 | 0 | 0 | MIT | 2018-11-01T03:19:30 | 2018-11-01T03:19:30 | null | UTF-8 | Python | false | false | 2,820 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Type(Base):
"""The Type class encapsulates a required type node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Type property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'type'
def __init__(self, parent):
super(Type, self).__init__(parent)
@property
def Object(self):
"""An instance of the Object class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.object.Object)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.object import Object
return Object(self)
@property
def IsEditable(self):
"""Indicates whether this is editable or not
Returns:
bool
"""
return self._get_attribute('isEditable')
@IsEditable.setter
def IsEditable(self, value):
self._set_attribute('isEditable', value)
@property
def IsRequired(self):
"""Indicates whether this is required or not
Returns:
bool
"""
return self._get_attribute('isRequired')
@IsRequired.setter
def IsRequired(self, value):
self._set_attribute('isRequired', value)
@property
def Name(self):
"""Name of the node
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
17524959bbf4502b8a5089c5ceb2e5302c989256 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/189/29214/submittedfiles/swamee.py | 68f4b3257c60ef461e7b8c0b735fb769787a3d60 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import math
f= float(input('digite f:'))
l= float(input('digite l:'))
q= float(input('digite q:'))
deltaH= float(input('digite deltaH:'))
v= float(input('digite v:'))
d=(8*f*l)*(q*q)/((3.14159*3.14159)*(9.81*deltaH))
rey=(4*q)/(math.pi*d*v)
k=0.25/(math.log10(0.000002/3.7*d+5.74/rey**0.9))**2
print('O valor de D é %.4f' %d)
print('O valor de Rey é %.4f' %rey)
print('O valor de K é %.4f' %k) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
736153dda8380835b19679e18d2ea20c3f6b2f72 | ca41bc15576624f4be22c777833b6dbf80a3d5f9 | /dolly/usuarios/migrations/0002_auto_20201217_0129.py | 967d04b2b9a94870212d7fcd8ef2a34be0324eac | [] | no_license | aris-osorio/dolly | 74840477e01a020dfaaaf3a4e94c4f95f48f690e | 256042bae4d4253fbc93f50aa125047e5090b68c | refs/heads/main | 2023-02-01T14:48:19.840785 | 2020-12-17T07:30:34 | 2020-12-17T07:30:34 | 321,873,299 | 0 | 0 | null | 2020-12-17T06:51:59 | 2020-12-16T04:58:55 | Python | UTF-8 | Python | false | false | 442 | py | # Generated by Django 2.2.14 on 2020-12-17 07:29
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('usuarios', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='usuario',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
]
| [
"aris.osorio@alumnos.udg.mx"
] | aris.osorio@alumnos.udg.mx |
6f8c0cd76c7b346759c4b53b13862f9053534529 | 7bed80964437032d9e1faf801153d79e089adff5 | /_downloads/plot_haxby_anova_svm1.py | 5d707d298495ba5eb28e885ad254c9dcd6e25dd0 | [] | no_license | MartinPerez/nilearn.github.io | 56fccaf0997cbf34e7929fac1da7eac26b453537 | f4f2438b5dae7aafe12ab96b24a941f5e0dde4b1 | refs/heads/master | 2021-01-22T10:25:49.288164 | 2015-07-13T21:31:42 | 2015-07-13T21:31:42 | 39,084,452 | 0 | 0 | null | 2015-07-14T15:46:53 | 2015-07-14T15:46:53 | null | UTF-8 | Python | false | false | 4,392 | py | """
The haxby dataset: face vs house in object recognition
=======================================================
A significant part of the running time of this example is actually spent
in loading the data: we load all the data but only use the face and
houses conditions.
"""
### Load Haxby dataset ########################################################
from nilearn import datasets
import numpy as np
dataset_files = datasets.fetch_haxby_simple()
y, session = np.loadtxt(dataset_files.session_target).astype("int").T
conditions = np.recfromtxt(dataset_files.conditions_target)['f0']
mask = dataset_files.mask
# fmri_data.shape is (40, 64, 64, 1452)
# and mask.shape is (40, 64, 64)
### Preprocess data ###########################################################
### Restrict to faces and houses ##############################################
# Keep only data corresponding to faces or houses
condition_mask = np.logical_or(conditions == 'face', conditions == 'house')
y = y[condition_mask]
session = session[condition_mask]
conditions = conditions[condition_mask]
# We have 2 conditions
n_conditions = np.size(np.unique(y))
### Loading step ##############################################################
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
# NOTE(review): tail of a nilearn Haxby decoding example, Python 2 era
# (`print` statements, sklearn.cross_validation).  `mask`, `session`,
# `dataset_files`, `condition_mask`, `y` and `n_conditions` are defined
# earlier in the script, outside this chunk.
nifti_masker = NiftiMasker(mask=mask, sessions=session, smoothing_fwhm=4,
                           standardize=True, memory="nilearn_cache",
                           memory_level=1)
# Mask the fMRI volumes down to a 2-D (samples x voxels) matrix.
X = nifti_masker.fit_transform(dataset_files.func)
# Apply our condition_mask
X = X[condition_mask]
### Prediction function #######################################################
### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
svc = SVC(kernel='linear')
### Dimension reduction #######################################################
from sklearn.feature_selection import SelectKBest, f_classif
### Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely Anova. We set the number of features to be selected to 500
feature_selection = SelectKBest(f_classif, k=500)
# We have our classifier (SVC), our feature selection (SelectKBest), and now,
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])
### Fit and predict ###########################################################
# In-sample fit/predict: training-set accuracy only; the unbiased estimate
# comes from the cross-validation loop below.
anova_svc.fit(X, y)
y_pred = anova_svc.predict(X)
### Visualisation #############################################################
### Look at the SVC's discriminating weights
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = nifti_masker.inverse_transform(coef)
### Create the figure
from nilearn import image
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
# Plot the mean image because we have no anatomic data
mean_img = image.mean_img(dataset_files.func)
plot_stat_map(weight_img, mean_img, title='SVM weights')
### Saving the results as a Nifti file may also be important
import nibabel
nibabel.save(weight_img, 'haxby_face_vs_house.nii')
### Cross validation ##########################################################
from sklearn.cross_validation import LeaveOneLabelOut
### Define the cross-validation scheme used for validation.
# Here we use a LeaveOneLabelOut cross-validation on the session label
# divided by 2, which corresponds to a leave-two-session-out
cv = LeaveOneLabelOut(session // 2)
### Compute the prediction accuracy for the different folds (i.e. session)
cv_scores = []
for train, test in cv:
    anova_svc.fit(X[train], y[train])
    y_pred = anova_svc.predict(X[test])
    # Fraction of correctly classified samples in this held-out fold.
    cv_scores.append(np.sum(y_pred == y[test]) / float(np.size(y[test])))
### Print results #############################################################
### Return the corresponding mean prediction accuracy
classification_accuracy = np.mean(cv_scores)
### Printing the results
print "=== ANOVA ==="
print "Classification accuracy: %f" % classification_accuracy, \
    " / Chance level: %f" % (1. / n_conditions)
# Classification accuracy: 0.986111 / Chance level: 0.500000
plt.show()
| [
"gael.varoquaux@normalesup.org"
] | gael.varoquaux@normalesup.org |
f694f3dae80fbbf984c2319b63e7c6a577940e56 | 33a0d5ec6ca440986f22b010ffb310bf34c4fcac | /Basic_grammar/文件读写/当前目录下文件.py | cb5b5c8f00f064aa786aafd82a96d58b531d59c2 | [] | no_license | zhaozongzhao/learngit | c3de619f07840839819ffee4bbacb590caba8dbe | a6471e6d63b298882ceed020cc3b56e457ed2ca0 | refs/heads/master | 2023-07-21T23:04:51.249300 | 2021-09-17T10:29:54 | 2021-09-17T10:29:54 | 100,707,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #能在当前目录以及当前目录的所有子目录下查找文件名包含指定字符串的文件,并打印出相对路径
import os
def get_walk(patah):
    """Return all ``os.walk`` entries under *patah* as a list of
    ``(dirpath, dirnames, filenames)`` tuples.

    The parameter keeps its original (misspelled) name so keyword callers
    are unaffected.
    """
    # Materialise the generator directly; the original also shadowed the
    # ``list`` builtin with a local variable of the same name.
    return list(os.walk(patah))
def get_file_path(zzz):
    """Print the path of every file under the current working directory
    (recursively) whose file name contains *zzz*.

    BUG FIX: the original joined each matching file name onto the *parent*
    of the current directory (``os.path.split(cwd)[0]``), so it printed
    paths that generally do not exist.  Join with the directory the file
    actually lives in instead.
    """
    for dirpath, _dirnames, filenames in os.walk(os.getcwd()):
        for name in filenames:
            if zzz in name:
                print(os.path.join(dirpath, name))
get_file_path('file') | [
"2206321864@qq.com"
] | 2206321864@qq.com |
1120dbc166dbe15daef9175ed8f8fb4716705a95 | 0d0afd1dce972b4748ce8faccd992c019794ad9e | /integra/integra_crm/models/__init__.py | 9975a438862c880f17b75251bec25d7b121421bd | [] | no_license | danimaribeiro/odoo-erp | e2ca2cfe3629fbedf413e85f7c3c0453fd16941e | d12577bf7f5266b571cbedeb930720d653320e96 | refs/heads/master | 2020-01-23T21:32:16.149716 | 2016-11-05T15:35:40 | 2016-11-05T15:35:40 | 67,892,809 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # -*- coding: utf-8 -*-
#from __future__ import division, print_function, unicode_literals
from crm_motivo import crm_motivo
from crm_lead import crm_lead
from crm_lead_report import crm_lead_report
from sale_report import sale_report
| [
"danimaribeiro@gmail.com"
] | danimaribeiro@gmail.com |
0f2e65ef53114eaa498af4a3c30172d850c94f92 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_detailed_energy2.py | 8b2ddffbe58ddb7a150d834a20b889d29d47662a | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._each181 import _each181
class _detailed_energy2(InputSection):
    """CP2K ``DETAILED_ENERGY`` print-key input section."""

    def __init__(self):
        InputSection.__init__(self)
        # Section parameter plus the standard print-key options.
        self.Section_parameters = None
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each181()
        self._name = "DETAILED_ENERGY"
        # CP2K keyword names are simply the upper-cased attribute names.
        keyword_attrs = ('Add_last', 'Common_iteration_levels', 'Filename', 'Log_print_key')
        self._keywords = {attr: attr.upper() for attr in keyword_attrs}
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5afadda78affa9fc07b1ed5f8c8dfb417881723f | 37433c8f7ec4ff9fded3c7bcc9403e2293436552 | /blog/admin.py | 247a86c6e199c98ee1a1d100ca4f05337c70cdf6 | [] | no_license | FreeGodCode/TYCarry_Blog | 4420d896e735789ac9104568e7bf59a85b796373 | 9be47be8ff1e33980f237227786bc9d472155114 | refs/heads/master | 2023-03-22T09:36:35.476398 | 2021-03-17T07:29:44 | 2021-03-17T07:29:44 | 314,501,947 | 1 | 0 | null | 2021-03-17T07:29:45 | 2020-11-20T09:10:48 | Python | UTF-8 | Python | false | false | 1,040 | py | from django.contrib import admin
from blog.models import Article, Category, Tag, User, ArticleComment
# Register your models here.
from django_summernote.admin import SummernoteModelAdmin
class PostAdmin(SummernoteModelAdmin):
    """Admin for Article posts with a Summernote rich-text editor on ``content``."""

    # BUG FIX: must be an iterable of field names.  The original
    # ``('content')`` is just the string 'content' (missing trailing
    # comma), which django-summernote would iterate character by character.
    summernote_fields = ('content',)
    list_display = ['article_id', 'title', 'created_time']  # change-list columns
    search_fields = ['title']                                # admin search box
    list_filter = ['created_time']                           # sidebar filter
class CommentAdmin(admin.ModelAdmin):
    """Admin for ArticleComment: list commenter, body and title; search by title."""

    list_display = ['username', 'body', 'title']
    search_fields = ['title']
# Wire the blog models into the Django admin site, using the customised
# admin classes where one is defined above.
admin.site.register(Article, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(User)
admin.site.register(ArticleComment, CommentAdmin)
| [
"2501160661@qq.com"
] | 2501160661@qq.com |
6ebc87ec541d5938b401e1d177cedd46d683529b | ded0b1a8a91fd9af7bae98cce3cfadbb6d03f84d | /examples/upload.py | 6c3908b418a0cf28ac0b73c0d7e2511f435ff923 | [
"MIT"
] | permissive | rjw57/bdfu | d38b3871ff60703f971b6cef4fae298dfa3faf73 | 386d800738e6943ed9063f1bf904ece86410c7c7 | refs/heads/master | 2021-01-10T21:05:08.602572 | 2015-02-26T13:58:35 | 2015-02-26T13:58:35 | 31,363,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
from bdfu.client import Client
def main():
    """Upload the file named on the command line to a BDFU endpoint.

    Usage: ``upload.py <endpoint> <token> <file>``.  Returns a process
    exit code: 0 on success, 1 on bad usage.
    """
    if len(sys.argv) != 4:
        prog = os.path.basename(sys.argv[0])
        sys.stderr.write('usage: {0} <endpoint> <token> <file>\n'.format(prog))
        return 1

    endpoint, token, filename = sys.argv[1:]
    client = Client(endpoint, token)
    with open(filename, 'rb') as stream:
        file_id = client.upload(stream)
    print('uploaded file with id: ' + str(file_id))
    return 0


if __name__ == '__main__':
    sys.exit(main())
| [
"rjw57@cam.ac.uk"
] | rjw57@cam.ac.uk |
b1e3f6259a636db56b537dc1e6d558ffccfe0925 | e2f9d506dcc3fee7dbbbce370c7e2c3f48275828 | /tests/test_helpers.py | f817a336beaeddf6adbd9c6a2bf097ba88fc2d9f | [
"MIT"
] | permissive | MacHu-GWU/s3splitmerge-project | d33829f1ff6aed9cc77c9b4bec30601ce4570f60 | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | refs/heads/main | 2023-08-30T09:07:32.312453 | 2021-11-07T16:08:24 | 2021-11-07T16:08:24 | 394,803,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | # -*- coding: utf-8 -*-
import pytest
from pytest import raises, approx
from s3splitmerge import exc
from s3splitmerge.helpers import (
b64encode_str, b64decode_str,
get_s3_object_metadata,
is_s3_object_exists,
count_lines_in_s3_object,
)
from s3splitmerge.tests import s3_client, bucket, prefix
def test_b64_encode_decode():
    """Round-trip: decoding an encoded string must return the original.

    BUG FIX: the original asserted ``b64encode_str(b64encode_str(s)) == s``
    -- double *encoding* can never reproduce the input.  ``b64decode_str``
    was imported but unused, which makes the intended round-trip clear.
    """
    s = "s3://bucket/key"
    assert b64decode_str(b64encode_str(s)) == s
def test_get_s3_object_metadata():
    """Metadata lookup raises for a missing key and reports size for a real one."""
    # A key that was never written must raise S3ObjectNotFound.
    with raises(exc.S3ObjectNotFound):
        get_s3_object_metadata(
            s3_client=s3_client,
            bucket=bucket,
            key=f"{prefix}/helpers/{get_s3_object_metadata.__name__}/not-exists-object.json",
        )

    s3_client.put_object(
        Bucket=bucket,
        Key=f"{prefix}/helpers/{get_s3_object_metadata.__name__}/existing-object.json",
        Body='{"id": 1}',
    )
    metadata = get_s3_object_metadata(
        s3_client=s3_client,
        bucket=bucket,
        # BUG FIX: the original key was missing the '/' before the file
        # name, so it pointed at a key that was never created.
        key=f"{prefix}/helpers/{get_s3_object_metadata.__name__}/existing-object.json",
    )
    assert metadata.size == 9  # len('{"id": 1}')
def test_is_s3_object_exists():
    """Existence check is False for a missing key and True after a put."""
    assert is_s3_object_exists(
        s3_client=s3_client,
        bucket=bucket,
        key=f"{prefix}/helpers/{is_s3_object_exists.__name__}/not-exists-object.json",
    ) is False

    s3_client.put_object(
        Bucket=bucket,
        Key=f"{prefix}/helpers/{is_s3_object_exists.__name__}/existing-object.json",
        Body='{"id": 1}',
    )
    # BUG FIX: the original queried a different key ("exists-object.json")
    # and asserted False, so it never verified the object it had written.
    assert is_s3_object_exists(
        s3_client=s3_client,
        bucket=bucket,
        key=f"{prefix}/helpers/{is_s3_object_exists.__name__}/existing-object.json",
    ) is True
# def test_count_lines_in_s3_object():
# count_lines_in_s3_object(
# s3_client=s3_client,
# bucket=bucket
# )
if __name__ == "__main__":
    # Run only this test module under pytest when executed directly.
    import os

    pytest.main([os.path.basename(__file__), "-s", "--tb=native"])
| [
"MacHu-GWU@users.noreply.github.com"
] | MacHu-GWU@users.noreply.github.com |
e39b793d65bd411519cedbdc9c917e80ada47a62 | 7868c0496a96f51e602641de99e2c5d85f478c09 | /src/anomalydetection/inqmeasurement.py | f060bd647e518bc4865e6293d42d71f6665de1dd | [
"Apache-2.0"
] | permissive | Joaggi/Incremental-Anomaly-Detection-using-Quantum-Measurements | 7e05a13f62bb867ded02f4bbfad075693bbddca4 | c53cba3691f6a7af8b4b061be4a03f05121a2db9 | refs/heads/main | 2023-07-08T05:48:58.483713 | 2023-06-30T17:00:31 | 2023-06-30T17:00:31 | 522,575,822 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,371 | py | import jax
from jax import jit
import jax.numpy as jnp
import numpy as np
from functools import partial
from sklearn.kernel_approximation import RBFSampler
from tqdm import tqdm
class QFeatureMap_rff():
    """Random Fourier Feature (RFF) quantum feature map.

    Maps input vectors to L2-normalised cosine features that approximate
    an RBF kernel with parameter ``gamma``, using random weights drawn by
    sklearn's RBFSampler.
    """

    def __init__(
            self,
            input_dim: int,
            dim: int = 100,
            gamma: float = 1,
            random_state=None,
            **kwargs
    ):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.dim = dim
        self.gamma = gamma
        self.random_state = random_state
        # Jitted, batched version of `compute`: vmapped over the first axis
        # of X only; weights/offset/dim are passed through unchanged.
        self.vmap_compute = jax.jit(jax.vmap(self.compute, in_axes=(0, None, None, None), out_axes=0))

    def build(self):
        """Draw the random projection (weights and offset) via RBFSampler."""
        rbf_sampler = RBFSampler(
            gamma=self.gamma,
            n_components=self.dim,
            random_state=self.random_state)
        # fit() only needs the input dimensionality; the data values are unused.
        x = np.zeros(shape=(1, self.input_dim))
        rbf_sampler.fit(x)
        self.rbf_sampler = rbf_sampler
        self.weights = jnp.array(rbf_sampler.random_weights_)
        self.offset = jnp.array(rbf_sampler.random_offset_)
        self.dim = rbf_sampler.get_params()['n_components']

    def update_rff(self, weights, offset):
        # Replace the random projection in place (e.g. to share parameters).
        self.weights = jnp.array(weights)
        self.offset = jnp.array(offset)

    def get_dim(self, num_features):
        # `num_features` is ignored: the output dimension is fixed at build time.
        return self.dim

    @staticmethod
    def compute(X, weights, offset, dim):
        """RFF transform of one sample: sqrt(2/dim) * cos(X @ W + b)."""
        vals = jnp.dot(X, weights) + offset
        #vals = jnp.einsum('i,ik->k', X, weights) + offset
        vals = jnp.cos(vals)
        vals *= jnp.sqrt(2.) / jnp.sqrt(dim)
        return vals

    @partial(jit, static_argnums=(0,))
    def __call__(self, X):
        """Map a batch X of shape (n_samples, input_dim) to unit-norm rows."""
        vals = self.vmap_compute(X, self.weights, self.offset, self.dim)
        norms = jnp.linalg.norm(vals, axis=1)
        # Normalise each row so the features are valid state amplitudes.
        psi = vals / norms[:, jnp.newaxis]
        return psi
class InqMeasurement():
    """Incremental anomaly detector based on quantum measurements.

    Accumulates a density matrix ``rho_res`` from RFF-mapped training
    batches (`initial_train`) and scores new samples by collapsing them
    against the normalised density matrix (`predict`).
    """

    def __init__(self, input_shape, dim_x, gamma, random_state=None, batch_size = 300):
        self.gamma = gamma
        self.dim_x = dim_x
        self.fm_x = QFeatureMap_rff( input_dim=input_shape, dim = dim_x, gamma = gamma, random_state = random_state)
        self.fm_x.build()
        # Running count of training samples folded into rho_res.
        self.num_samples = 0
        # Jitted batched helpers; `collapse` broadcasts rho_res across the batch.
        self.train_pure_batch = jax.jit(jax.vmap(self.train_pure, in_axes=(0)))
        self.collapse_batch = jax.jit(jax.vmap(self.collapse, in_axes=(0, None)))
        self.sum_batch = jax.jit(self.sum)
        self.key = jax.random.PRNGKey(random_state)
        self.batch_size = batch_size

    @staticmethod
    def train_pure(inputs):
        """Outer product |psi><psi| of one feature vector."""
        oper = jnp.einsum(
            '...i,...j->...ij',
            inputs, jnp.conj(inputs),
            optimize='optimal') # shape (b, nx, nx)
        return oper

    @staticmethod
    def sum(rho_res):
        # Sum the per-sample projectors over the batch axis.
        return jnp.sum(rho_res, axis=0)

    @staticmethod
    @partial(jit, static_argnums=(1,2,3,4))
    def compute_training_jit(batch, alpha, fm_x, train_pure_batch, sum_batch, rho):
        """Map a batch to features and blend its summed projector into rho.

        `alpha`, `fm_x` and the batched helpers are static so JAX retraces
        only when they change; `rho is None` selects the bootstrap path.
        """
        inputs = fm_x(batch)
        rho_res = train_pure_batch(inputs)
        rho_res = sum_batch(rho_res)
        # Exponential blend of new batch with the accumulated density matrix.
        return jnp.add((alpha)*rho_res, (1-alpha)*rho) if rho is not None else rho_res
        #return jnp.add(rho_res, rho) if rho is not None else rho_res

    @staticmethod
    def compute_training(values, alpha, perm, i, batch_size, fm_x, train_pure_batch, sum_batch, rho, compute_training_jit):
        """Slice batch *i* out of *values* and hand it to the jitted kernel."""
        batch_idx = perm[i * batch_size: (i + 1)*batch_size]
        batch = values[batch_idx, :]
        return compute_training_jit(batch, alpha, fm_x, train_pure_batch, sum_batch, rho)

    def initial_train(self, values, alpha):
        """Fold the samples in *values* (rows) into rho_res, batch by batch.

        On the first call ``rho_res`` does not exist yet, hence the
        hasattr-driven bootstrap inside the loop.
        """
        num_batches = InqMeasurement.obtain_params_batches(values, self.batch_size, self.key)
        #print('Time obtain_params_batches: ', stop - start)
        num_train = values.shape[0]
        perm = jnp.arange(num_train)
        for i in range(num_batches):
            #start = timeit.default_timer()
            #batch_idx = perm[i * self.batch_size: (i + 1)*self.batch_size]
            #stop = timeit.default_timer()
            #print('Time batch_idx: ', stop - start)
            #
            #start = timeit.default_timer()
            #batch = values[batch_idx, :]
            ##batch = values
            #stop = timeit.default_timer()
            #print('Time capture data: ', stop - start)
            #
            #
            #start = timeit.default_timer()
            #inputs = self.fm_x(batch)
            #stop = timeit.default_timer()
            #print('Time fm_x: ', stop - start)
            #
            #start = timeit.default_timer()
            #rho_res = self.train_pure_batch(inputs)
            #stop = timeit.default_timer()
            #print('Time rho_res: ', stop - start)
            #
            #start = timeit.default_timer()
            #rho_res = self.sum_batch(rho_res)
            #stop = timeit.default_timer()
            #print('Time sum rho_res: ', stop - start)
            #print(self.fm_x.weights.shape)
            if hasattr(self, "rho_res"):
                self.rho_res = self.compute_training(values, alpha, perm, i, self.batch_size, self.fm_x,
                        self.train_pure_batch, self.sum_batch, self.rho_res, self.compute_training_jit)
            else:
                self.rho_res = self.compute_training(values, alpha, perm, i, self.batch_size, self.fm_x,
                        self.train_pure_batch, self.sum_batch, None, self.compute_training_jit)
        #print('Time sum rho_res and self: ', stop_initial_train - start_initial_train)
        self.num_samples += values.shape[0]
        #print('Time initial_training: ', stop_initial_train - start_initial_train)

    @staticmethod
    def collapse(inputs, rho_res):
        """Probability <psi|rho|psi> of one feature vector under rho_res."""
        rho_h = jnp.matmul(jnp.conj(inputs), rho_res)
        rho_res = jnp.einsum(
            '...i, ...i -> ...',
            rho_h, jnp.conj(rho_h),
            optimize='optimal') # shape (b,)
        #rho_res = jnp.dot(rho_h, jnp.conj(rho_h))
        return rho_res

    @staticmethod
    def obtain_params_batches(values, batch_size, key):
        """Number of batches needed to cover *values* (last may be partial)."""
        num_train = values.shape[0]
        num_complete_batches, leftover = divmod(num_train, batch_size)
        num_batches = num_complete_batches + bool(leftover)
        return num_batches

    @partial(jit, static_argnums=(0,))
    def predict(self, values):
        """Score each row of *values* against the normalised density matrix."""
        num_batches = InqMeasurement.obtain_params_batches(values, self.batch_size, self.key)
        results = None
        # Normalise by the number of training samples accumulated so far.
        rho_res = self.rho_res / self.num_samples
        num_train = values.shape[0]
        perm = jnp.arange(num_train)
        for i in range(num_batches):
            batch_idx = perm[i * self.batch_size: (i + 1)*self.batch_size]
            batch = values[batch_idx, :]
            inputs = self.fm_x(batch)
            batch_probs = self.collapse_batch(inputs, rho_res)
            results = jnp.concatenate([results, batch_probs], axis=0) if results is not None else batch_probs
        return results
| [
"joaggi@gmail.com"
] | joaggi@gmail.com |
b1c8b3133b7b734b65f124e5a32c497835087c81 | a6b1f5ac26861dc7efd002cf3795e70f58eb76fe | /train/train_mnist.py | 053bd48b3ce0c55507dc14da6b01f32919eed938 | [] | no_license | BingzheWu/BayesNet | 6ad0e2acb1f901aaa4cd758fc815bf6cfb03742b | 7c3f87486c67e42d3c2a64548dde4a0edcb73bb3 | refs/heads/master | 2020-03-29T22:34:58.261930 | 2019-07-12T09:31:07 | 2019-07-12T09:31:07 | 150,428,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | import torch
import sys
sys.path.append('.')
from utils import write_loss_scalars
from utils import write_weight_histograms
from models.minist_models import BayesLeNet
from dataset import data_factory
from train.train_utils import parse_args, bayes_batch_processor, get_logger
from mmcv import Config
from mmcv.runner import Runner
from torch.nn.parallel import DataParallel
from dataset.data_factory import make_dataset, make_dataloader
from models.model_factory import model_creator
def main():
    """Train a Bayesian LeNet on MNIST with an mmcv Runner.

    Reads the config file named on the command line, builds the train/val
    dataloaders and the model, registers training hooks and runs the
    train/val workflow for ``cfg.total_epochs`` epochs.
    """
    args = parse_args()
    cfg = Config.fromfile(args.cfg_file)
    print(cfg)
    logger = get_logger(cfg.log_level)

    # BUG FIX: the original left ``dist`` unbound for any launcher other
    # than 'none', which crashed later with a NameError.  Distributed
    # training is not implemented, so fail early and clearly instead.
    dist = args.launcher != 'none'
    if dist:
        raise NotImplementedError('distributed training is not supported yet')

    num_workers = cfg.data_workers
    batch_size = cfg.batch_size
    train_sampler = None
    val_sampler = None
    shuffle = True
    train_dataset = make_dataset(cfg, True)
    train_loader = make_dataloader(train_dataset, batch_size, num_workers, shuffle, train_sampler)
    val_dataset = make_dataset(cfg, False)
    val_loader = make_dataloader(val_dataset, batch_size, num_workers, shuffle, val_sampler)

    model = model_creator(cfg)
    # Single-device training; the DataParallel variants stay disabled as in
    # the original.
    device = 'cuda'
    model = model.to(device)

    runner = Runner(
        model,
        bayes_batch_processor,
        None,
        cfg.work_dir,
        log_level=cfg.log_level
    )
    log_config = dict(
        interval=50,
        hooks=[
            dict(type='TextLoggerHook'),
        ]
    )
    runner.register_training_hooks(
        lr_config=cfg.lr_config,
        optimizer_config=None,
        checkpoint_config=cfg.checkpoint_config,
        log_config=log_config
    )
    # Alternate one training epoch with one validation epoch.
    workflow = [('train', 1), ('val', 1)]
    runner.run([train_loader, val_loader], workflow, cfg.total_epochs)


if __name__ == '__main__':
    main()
| [
"wubingzheagent@gmail.com"
] | wubingzheagent@gmail.com |
887b1abd80116fb103cbdd1166213d2360b0017e | 0e3d1d4107a2664663c6906697faa3a41c7d4f57 | /src/ui/view/opalview/aui2.py | 2f91af53c015e51a85cd8dc3d4d5b752f2743740 | [] | no_license | struts2spring/Opal | c0915452fd4eab4c0bd4690cac346be8e6dc3f60 | c59c03baa10c915ca7c05196ed411da4a26ff49d | refs/heads/master | 2021-01-23T21:01:39.977412 | 2017-07-09T16:49:44 | 2017-07-09T16:49:44 | 48,582,382 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | import wx
import wx.aui
# import images # contains toolbar icons
class MyFrame(wx.Frame):
    """Top-level frame demonstrating wx.aui: a notebook pane stacked above
    a toolbar/text panel, both managed by an AuiManager."""

    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY,
                          "AUI Tutorial",
                          size=(600,400))
        # The AUI manager owns the pane layout of this frame.
        self._mgr = wx.aui.AuiManager()
        self._mgr.SetManagedWindow(self)
        notebook = wx.aui.AuiNotebook(self)
        nb_panel = TabPanel(notebook)
        my_panel = MyPanel(self)
        notebook.AddPage(nb_panel, "First Tab", False)
        self._mgr.AddPane(notebook,
                          wx.aui.AuiPaneInfo().Name("notebook-content").
                          CenterPane().PaneBorder(False))
        self._mgr.AddPane(my_panel,
                          wx.aui.AuiPaneInfo().Name("txtctrl-content").
                          CenterPane().PaneBorder(False))
        # Dock the notebook on top and the text panel below it, then commit.
        self._mgr.GetPane("notebook-content").Show().Top().Layer(0).Row(0).Position(0)
        self._mgr.GetPane("txtctrl-content").Show().Bottom().Layer(1).Row(0).Position(0)
        self._mgr.Update()
class MyPanel(wx.Panel):
    """Panel with a toolbar (quit tool) above a read-only text control."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
        sizer = wx.BoxSizer(wx.VERTICAL)
        toolbar = wx.ToolBar(self, -1)
        # BUG FIX: ``wx.art_q`` does not exist (AttributeError at runtime);
        # use the stock "quit" art from wx.ArtProvider for the tool bitmap.
        quit_bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT, wx.ART_TOOLBAR)
        toolbar.AddLabelTool(wx.ID_EXIT, '', quit_bmp)
        self.Bind(wx.EVT_TOOL, self.OnExit, id=wx.ID_EXIT)
        toolbar.Realize()
        sizer.Add(toolbar, proportion=0, flag=wx.ALL | wx.ALIGN_TOP)
        text = ""
        txtctrl = wx.TextCtrl(self, -1, text, wx.Point(0, 0), wx.Size(150, 90),
                              wx.NO_BORDER | wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
        sizer.Add(txtctrl, proportion=0, flag=wx.EXPAND)
        self.SetSizer(sizer)

    def OnExit(self, event):
        """Close the panel when the quit tool is clicked."""
        self.Close()
class TabPanel(wx.Panel):
    """Notebook page holding two text entry fields."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
        txtOne = wx.TextCtrl(self, wx.ID_ANY, "")
        txtTwo = wx.TextCtrl(self, wx.ID_ANY, "")
        # The original created a first BoxSizer and immediately discarded
        # it; a single sizer is sufficient.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(txtOne, 0, wx.ALL, 5)
        sizer.Add(txtTwo, 0, wx.ALL, 5)
        self.SetSizer(sizer)
if __name__ == "__main__":
    # Run the demo application (classic wxPython entry point).
    # Fix: removed dataset-extraction junk that was fused onto the final line.
    app = wx.PySimpleApp()
    frame = MyFrame()
    frame.Show()
    app.MainLoop()
"struts2spring@gmail.com"
] | struts2spring@gmail.com |
f5b0d6e2ad6c275feacd9fd0fdd544bd3a7e7b88 | 12e27bcea0c43655f3c0c4690c67de2feaf8edad | /apptools/apptools-android-tests/apptools/manifest_xwalk_permissions.py | db57069587b5188da4e69dc2d3524a9f0d699666 | [
"BSD-3-Clause"
] | permissive | xzhan96/crosswalk-test-suite | b24288443463698cd60f74ff25b0e9b262d8d640 | 47710b138f4ed3498b40c2480811e24ff8d0435a | refs/heads/master | 2021-01-20T23:05:45.268897 | 2016-02-04T06:34:19 | 2016-02-04T06:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,960 | py | #!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import shutil
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """End-to-end checks that ``crosswalk-pkg`` writes the expected Android
    permissions into the generated AndroidManifest.xml.

    NOTE(review): Python 2 style (``assertEquals``, ``dict.items()[0]``);
    the tests shell out to the crosswalk packaging tool via the ``comm``
    helpers and need a working packaging environment.
    """

    def test_permission_default(self):
        """A plain packaging run declares exactly the three default
        network-related permissions."""
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        os.mkdir("org.xwalk.test")
        os.chdir('org.xwalk.test')
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --manifest="org.xwalk.test" --keep --crosswalk=' + comm.crosswalkzip + " ./"
        (return_code, output) = comm.getstatusoutput(cmd)
        # The project directory is the last token of the final " * " line
        # of the tool's output.
        projectDir = output[0].split(" * " + os.linesep)[-1].split(' ')[-1].strip(os.linesep)
        root = ElementTree.parse(projectDir + "/prj/android/AndroidManifest.xml").getroot()
        permission_attributes = root.findall('uses-permission')
        name = []
        for x in permission_attributes:
            name.append(x.attrib.items()[0][1])
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)
        self.assertEquals(len(permission_attributes), 3)
        self.assertIn("android.permission.ACCESS_NETWORK_STATE", name)
        self.assertIn("android.permission.ACCESS_WIFI_STATE", name)
        self.assertIn("android.permission.INTERNET", name)

    def test_permission_name(self):
        """An app manifest requesting the camera adds CAMERA on top of the
        three defaults."""
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        os.mkdir("org.xwalk.test")
        os.chdir('org.xwalk.test')
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + " --keep --crosswalk=" + comm.crosswalkzip + " " + comm.ConstPath + "/../testapp/camera_permissions_enable/"
        (return_code, output) = comm.getstatusoutput(cmd)
        projectDir = output[0].split(" * " + os.linesep)[-1].split(' ')[-1].strip(os.linesep)
        root = ElementTree.parse(projectDir + "/prj/android/AndroidManifest.xml").getroot()
        permission_attributes = root.findall('uses-permission')
        name = []
        for x in permission_attributes:
            name.append(x.attrib.items()[0][1])
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)
        self.assertEquals(len(permission_attributes), 4)
        self.assertIn("android.permission.CAMERA", name)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"yunx.liu@intel.com"
] | yunx.liu@intel.com |
fa9e8b9e1d471cf9ad80bcbe1a6ef93e1748b3e1 | f1a9769b3589d802a4c26adfbe67915b920f3b49 | /dj_rest_security/rest_demo/views.py | ba976ed64928bb0036c3eb37278aa73f5fbb6bd1 | [] | no_license | chavhanpunamchand/Django_REST_framework | ac8b04102439f153ee77f8572dded20aac02121f | c4d602c201dff4caec645049c733127f20c0fa57 | refs/heads/main | 2023-03-18T07:42:37.072865 | 2021-03-04T13:34:05 | 2021-03-04T13:34:05 | 336,339,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #ReadOnlyModelViewSet
from rest_framework.viewsets import ModelViewSet
from .models import Emp
from .empserializer import EmpToJson
# from rest_framework.viewsets import GenericViewSet
# from rest_framework.mixins import DestroyModelMixin,UpdateModelMixin
# class MyOwnViewSet(GenericViewSet,DestroyModelMixin,UpdateModelMixin):
# pass
# model to json
class EmpCrudAPIs(ModelViewSet):
    """Full CRUD REST API for Emp records (list/retrieve/create/update/delete)."""

    queryset = Emp.objects.all()
    serializer_class = EmpToJson
"chavhanpunamchand@gmail.com"
] | chavhanpunamchand@gmail.com |
892dc7ef6d0384a18b2c2d612fb7032dad0acffe | 03a9facf029d3b63ef996f34395e407b2a9bafe0 | /3rdParty/waf/waflib/extras/cabal.py | 1604c753e68af733d796904002b8181b13744cd2 | [
"MIT"
] | permissive | MDudek-ICS/peach | 737b29346685106c50035177f3e656661342d5b6 | 2bea2d524707a98d007ca39455a3db175b44e8ed | refs/heads/main | 2023-03-06T19:10:48.312671 | 2021-02-17T22:00:09 | 2021-02-17T22:00:09 | 372,488,843 | 1 | 0 | NOASSERTION | 2021-05-31T11:51:38 | 2021-05-31T11:51:37 | null | UTF-8 | Python | false | false | 5,307 | py | #!/usr/bin/env python
# encoding: utf-8
# Anton Feldmann, 2012
# "Base for cabal"
import re
import time
from waflib import TaskGen, Task, Utils
from waflib.Configure import conf
from waflib.Task import always_run
from waflib.TaskGen import extension, feature, after, before, before_method
from waflib.Utils import threading
from shutil import rmtree
lock = threading.Lock()
registering = False
def configure(self):
    """waf configure step: locate cabal/ghc-pkg and (re)create the
    project-local GHC package database under the build directory."""
    self.find_program('cabal', var='CABAL')
    self.find_program('ghc-pkg', var='GHCPKG')
    pkgconfd = self.bldnode.abspath() + '/package.conf.d'
    self.env.PREFIX = self.bldnode.abspath() + '/dist'
    self.env.PKGCONFD = pkgconfd
    if self.root.find_node(pkgconfd + '/package.cache'):
        self.msg('Using existing package database', pkgconfd, color='CYAN')
    else:
        pkgdir = self.root.find_dir(pkgconfd)
        if pkgdir:
            # A package.conf.d without package.cache is treated as corrupt.
            self.msg('Deleting corrupt package database', pkgdir.abspath(), color ='RED')
            rmtree(pkgdir.abspath())
            pkgdir = None
        self.cmd_and_log([self.env.GHCPKG, 'init', pkgconfd])
        # NOTE(review): pkgdir was just reset to None above, so this always
        # prints GREEN -- presumably intentional "fresh database" colour.
        self.msg('Created package database', pkgconfd, color = 'YELLOW' if pkgdir else 'GREEN')
@extension('.cabal')
def process_cabal(self, node):
    """Build the waf task chain for one .cabal file.

    Creates configure -> build -> copy tasks (plus register tasks when the
    task generator sets ``register``), finishing with a touch task that
    produces the ``.package`` stamp node other targets can depend on.
    Returns the list of created tasks.
    """
    out_dir_node = self.bld.root.find_dir(self.bld.out_dir)
    # Stamp file marking this package as built/installed.
    package_node = node.change_ext('.package')
    package_node = out_dir_node.find_or_declare(package_node.name)
    build_node = node.parent.get_bld()
    build_path = build_node.abspath()
    config_node = build_node.find_or_declare('setup-config')
    inplace_node = build_node.find_or_declare('package.conf.inplace')
    config_task = self.create_task('cabal_configure', node)
    config_task.cwd = node.parent.abspath()
    config_task.depends_on = getattr(self, 'depends_on', '')
    config_task.build_path = build_path
    config_task.set_outputs(config_node)
    build_task = self.create_task('cabal_build', config_node)
    build_task.cwd = node.parent.abspath()
    build_task.build_path = build_path
    build_task.set_outputs(inplace_node)
    copy_task = self.create_task('cabal_copy', inplace_node)
    copy_task.cwd = node.parent.abspath()
    copy_task.depends_on = getattr(self, 'depends_on', '')
    copy_task.build_path = build_path
    last_task = copy_task
    task_list = [config_task, build_task, copy_task]
    # Optionally register the package in the local GHC package database.
    if (getattr(self, 'register', False)):
        register_task = self.create_task('cabal_register', inplace_node)
        register_task.cwd = node.parent.abspath()
        register_task.set_run_after(copy_task)
        register_task.build_path = build_path
        pkgreg_task = self.create_task('ghcpkg_register', inplace_node)
        pkgreg_task.cwd = node.parent.abspath()
        pkgreg_task.set_run_after(register_task)
        pkgreg_task.build_path = build_path
        last_task = pkgreg_task
        task_list += [register_task, pkgreg_task]
    touch_task = self.create_task('cabal_touch', inplace_node)
    touch_task.set_run_after(last_task)
    touch_task.set_outputs(package_node)
    touch_task.build_path = build_path
    return task_list
def get_all_src_deps(node):
    """Collect every Haskell/C/C++/protobuf source file below *node*.

    Returns one flat list, globbed pattern by pattern in a fixed order.
    """
    patterns = ('**/*.hs', '**/*.hsc', '**/*.lhs', '**/*.c', '**/*.cpp', '**/*.proto')
    deps = []
    for pattern in patterns:
        deps.extend(node.ant_glob(pattern))
    return deps
class Cabal(Task.Task):
    """Base class for cabal tasks: depend on every source under the
    task generator's directory (no extra raw dependencies)."""

    def scan(self):
        return (get_all_src_deps(self.generator.path), ())
class cabal_configure(Cabal):
    """Run ``cabal configure`` against the project-local package database."""

    run_str = '${CABAL} configure -v0 --prefix=${PREFIX} --global --user --package-db=${PKGCONFD} --builddir=${tsk.build_path}'
    shell = True

    def scan(self):
        # Depend on the .package stamp of every package listed in depends_on.
        out_node = self.generator.bld.root.find_dir(self.generator.bld.out_dir)
        deps = [out_node.find_or_declare(dep).change_ext('.package') for dep in Utils.to_list(self.depends_on)]
        return (deps, ())
class cabal_build(Cabal):
    """Run ``cabal build`` in the task's build directory."""

    run_str = '${CABAL} build -v1 --builddir=${tsk.build_path}/'
    shell = True
class cabal_copy(Cabal):
    """Run ``cabal copy`` to install the built artefacts under PREFIX."""

    run_str = '${CABAL} copy -v0 --builddir=${tsk.build_path}'
    shell = True
class cabal_register(Cabal):
    """Run ``cabal register`` to generate the pkg.config registration file."""

    run_str = '${CABAL} register -v0 --gen-pkg-config=${tsk.build_path}/pkg.config --builddir=${tsk.build_path}'
    shell = True
class ghcpkg_register(Cabal):
    """Register the package in the local GHC database via ``ghc-pkg update``.

    The module-level ``lock``/``registering`` pair serialises these tasks:
    ghc-pkg mutates a shared package database, so only one registration may
    run at a time even when waf executes tasks in parallel.
    """

    run_str = '${GHCPKG} update -v0 --global --user --package-conf=${PKGCONFD} ${tsk.build_path}/pkg.config'
    shell = True

    def runnable_status(self):
        global lock, registering
        val = False
        lock.acquire()
        val = registering
        lock.release()
        if val:
            # Another registration is in flight; ask the scheduler to retry.
            return Task.ASK_LATER
        ret = Task.Task.runnable_status(self)
        if ret == Task.RUN_ME:
            # Claim the registration slot before the task starts running.
            lock.acquire()
            registering = True
            lock.release()
        return ret

    def post_run(self):
        global lock, registering
        # Release the registration slot whatever the task outcome was.
        lock.acquire()
        registering = False
        lock.release()
        return Task.Task.post_run(self)
class cabal_touch(Cabal):
    """Touch the .package stamp node so dependants see the build as done."""

    run_str = 'touch ${TGT}'
| [
"justin@controlthings.io"
] | justin@controlthings.io |
59cec4e635106d98c8f73c90d259cf476ea169bc | e5d059896640e25a57f29f5ec972c114f8ef5866 | /src/scs_analysis/histo_chart.py | 0911cb33aa5dba10c5b7846450c692df04dd9084 | [
"MIT"
] | permissive | tonybushido/scs_analysis | 10add7b13cee29e1445ea18240bdb08e3bc908a4 | 1121be19c83b0d616772da42ea90623d6f6573c4 | refs/heads/master | 2021-01-03T03:11:31.474595 | 2020-02-11T14:27:32 | 2020-02-11T14:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | #!/usr/bin/env python3
"""
Created on 3 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
source repo: scs_analysis
DESCRIPTION
The histo_chart utility is used to create Matplotlib histogram charts and comma-separated value (CSV) histogram files.
The utility analyses a given path to a leaf node of the input JSON data stream.
An optional "batch" ("-b") flag can be set, causing the plotting only to take place when all data points have been
received.
Depending on operating system, it may be necessary to edit the matplotlibrc file, which specifies the Matplotlib
back-end graphics system.
SYNOPSIS
histo_chart.py [-b] [-x MIN MAX] [-c BIN_COUNT] [-o FILENAME] [-e] [-v] PATH
EXAMPLES
socket_receiver.py | histo_chart.py val.CO2.cnc -x -10 10 -e -o CO2.csv
FILES
~/SCS/scs_analysis/src/scs_analysis/matplotlibrc
SEE ALSO
scs_analysis/multi_chart
scs_analysis/single_chart
"""
import sys
import warnings
from scs_analysis.chart.histo_chart import HistoChart
from scs_analysis.cmd.cmd_histo_chart import CmdHistoChart
from scs_core.data.json import JSONify
from scs_core.data.path_dict import PathDict
from scs_core.sync.line_reader import LineReader
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Matplotlib can be noisy with back-end warnings; silence them for CLI use.
    warnings.filterwarnings("ignore", module="matplotlib")
    # ----------------------------------------------------------------------------------------------------------------
    # cmd: parse and validate the command line (path, x range, bin count, ...).
    cmd = CmdHistoChart()
    if not cmd.is_valid():
        cmd.print_help(sys.stderr)
        exit(2)
    if cmd.verbose:
        print("histo_chart: %s" % cmd, file=sys.stderr)
    # Pre-declare so the finally block can test whether setup completed.
    chart = None
    proc = None
    try:
        # ------------------------------------------------------------------------------------------------------------
        # resources...
        # reader: line-oriented source wrapping stdin (runs in its own process).
        reader = LineReader(sys.stdin.fileno())
        if cmd.verbose:
            print("histo_chart: %s" % reader, file=sys.stderr)
        # chart: the Matplotlib histogram, configured from the command line.
        chart = HistoChart(cmd.batch_mode, cmd.x[0], cmd.x[1], cmd.bin_count, cmd.path, cmd.outfile)
        if cmd.verbose:
            print("histo_chart: %s" % chart, file=sys.stderr)
        sys.stderr.flush()
        # ------------------------------------------------------------------------------------------------------------
        # run: consume JSON documents line-by-line and plot the addressed value.
        proc = reader.start()
        for line in reader.lines:
            if chart.closed:
                break
            if line is None:
                # No input available yet - give the chart a chance to redraw.
                chart.pause()
                continue
            datum = PathDict.construct_from_jstr(line)
            if datum is None:
                # Not valid JSON - skip silently.
                continue
            if cmd.echo:
                print(JSONify.dumps(datum))
                sys.stdout.flush()
            chart.plot(datum)
    # ----------------------------------------------------------------------------------------------------------------
    # end...
    except KeyboardInterrupt:
        if cmd.verbose:
            print("histo_chart: KeyboardInterrupt", file=sys.stderr)
    # ----------------------------------------------------------------------------------------------------------------
    # close: stop the reader process, then keep the chart window open if alive.
    finally:
        if proc:
            proc.terminate()
        if chart is not None and not chart.closed:
            if cmd.verbose:
                print("histo_chart: holding", file=sys.stderr)
            # noinspection PyBroadException
            try:
                chart.hold()
            except Exception:
                pass
| [
"bruno.beloff@southcoastscience.com"
] | bruno.beloff@southcoastscience.com |
3afbf51c838358e1e7c6a436752ee852c8429703 | a298d0b4a3e9e12170651a6bf728093b4badfac7 | /LeetCode/bstToGst.py | 7de288787fbea1fb42cae497b5d058f68fcff994 | [] | no_license | gavinz0228/AlgoPractice | fc8ecd194ea2d26de59df45909838161c802b8cd | 1cb183a326a0612a5cd941778500a8265e1d7255 | refs/heads/master | 2022-07-27T11:42:06.887668 | 2022-07-18T20:38:31 | 2022-07-18T20:38:31 | 172,929,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def bstToGst(self, root: TreeNode) -> TreeNode:
        """Return a copy of the BST where each node's value is replaced by the
        sum of all values greater than or equal to it (a "greater sum tree")."""
        self.s = 0  # running suffix sum, built up in reverse in-order
        return self.aux(root)
    def aux(self, root):
        """Copy the subtree in right-node-left order, accumulating self.s."""
        if root is None:
            return None
        # Larger values are visited first, so self.s already holds their total
        # by the time this node is materialised.
        right_copy = self.aux(root.right)
        self.s += root.val
        node = TreeNode(self.s)
        node.right = right_copy
        node.left = self.aux(root.left)
        return node
| [
"gavinz0228@gmail.com"
] | gavinz0228@gmail.com |
2fdc9014f5baed53ee73acdfc52cbc28aebab88b | 657aef335ad3cd75d8355aaf9bc2db6641f0ee0e | /10.modules/lol.py | 7490a9c21a9a0865eb870529e3f75cd5df69f8e0 | [] | no_license | petershan1119/Python-practice | 73179baaa662ecf1a6861d440049f71ff5685d21 | 2234f301f2324514ac23304d181c21c0a125d0dc | refs/heads/master | 2020-04-13T00:41:01.306943 | 2017-09-21T05:45:30 | 2017-09-21T05:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #from functions.game import play_game
#from functions.shop import buy_item
#import functions
from functions import play_game, buy_item, abc
from friends.chat import send_message
def turn_on():
    """Run the game's main menu loop until the player chooses to quit."""
    print('= Turn on game =')
    # Menu choice -> handler; '0' (quit) is handled separately below.
    actions = {'1': buy_item, '2': play_game, '3': send_message}
    while True:
        choice = input('뭐할래요\n 1: 상점, 2: 게임하기, 3: 친구에게 메시지 전송, 0: 나가기\n 선택 : ')
        if choice == '0':
            break
        handler = actions.get(choice)
        if handler is not None:
            handler()
        else:
            print('있는번호로 선택하세요')
        print('--------')
    print('= Turn off game =')
if __name__ == '__main__':
    # Start the interactive menu only when run as a script, not on import.
    turn_on()
| [
"dev@azelf.com"
] | dev@azelf.com |
eeafb1939c0530403221403c3254a0aef2b343df | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/lag82/426-tideGauge.py | 48f8dd1ce1973a730042bae1b65a4df4e76305b7 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,984 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'
def lag():
    """Build 0-30 h (6-hourly) time-lagged copies of one predictor CSV.

    For the single tide-gauge file selected by the hard-coded index range
    below, read its combined-predictor CSV from ``dir_in``, create six
    time-shifted versions of the daily series (0, 6, 12, 18, 24 and 30 hours)
    and write the merged, lagged table to ``dir_out`` under the same name.
    """
    os.chdir(dir_in)
    # Get the tide-gauge file names.
    tg_list_name = os.listdir()
    # Hard-coded slice: this job processes only file index 426.
    x = 426
    y = 427
    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')
        # #check if the file exists
        # os.chdir(dir_out)
        # if (os.path.isfile(tg_name)):
        #     print('file already exists')
        #     continue
        # cd to where the actual file is
        os.chdir(dir_in)
        pred = pd.read_csv(tg_name)
        pred.sort_values(by = 'date', inplace=True)
        pred.reset_index(inplace = True)
        pred.drop('index', axis = 1, inplace = True)
        # Create a daily date_range spanning the data, using only the
        # year-month-day portion of the first and last timestamps.
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        # Lambdas that render timestamps as strings (used for the merge keys).
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """
        # Prepare the lagged time series (dates only, one column per lag).
        time_lagged = pd.DataFrame()
        lag_hrs = [0, 6, 12, 18, 24, 30]
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        # Dataframe that contains all lagged time series (time columns only).
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): # loop through the lagged time series
            print(time_all.columns[ii])
            # Extract the corresponding lagged time column.
            lag_ts = pd.DataFrame(time_all.iloc[:,ii])
            lag_ts.columns = ['date']
            # Merge the selected lagged time with the predictor on "date".
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            # Sometimes NaN values go to the bottom of the dataframe:
            # sort df by date -> reset the index -> remove the old index.
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            # Concatenate the lagged dataframes column-wise (skip the shared
            # date column after the first block).
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
        # cd to the saving directory and write the result.
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)
# run script
lag()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
db3020dac85e7558a8c3c49c190f2af1eae54733 | 417d1065b90d3647c8cf83b89f0eb1b1a93f6b58 | /eventsourcing/tests/datastore_tests/test_sqlalchemy.py | b87d159862d47372c6f27fcc632744598dd76545 | [
"BSD-3-Clause"
] | permissive | AlanFoster/eventsourcing-1 | d725513c9f8e48b4fd40a8d449ae26acc4c6b6dc | 964f9473043da81f80d2e9407ef7aefee02aae11 | refs/heads/master | 2020-08-06T12:57:27.096010 | 2019-10-01T20:21:52 | 2019-10-01T20:21:52 | 212,983,685 | 0 | 0 | BSD-3-Clause | 2019-10-05T10:50:13 | 2019-10-05T10:50:13 | null | UTF-8 | Python | false | false | 2,949 | py | from tempfile import NamedTemporaryFile
from uuid import uuid4
from sqlalchemy.exc import OperationalError, ProgrammingError
from eventsourcing.infrastructure.datastore import DatastoreTableError
from eventsourcing.infrastructure.sqlalchemy.datastore import DEFAULT_SQLALCHEMY_DB_URI, SQLAlchemyDatastore, \
SQLAlchemySettings
from eventsourcing.infrastructure.sqlalchemy.factory import SQLAlchemyInfrastructureFactory
from eventsourcing.infrastructure.sqlalchemy.records import Base, IntegerSequencedNoIDRecord, \
IntegerSequencedWithIDRecord, SnapshotRecord, TimestampSequencedNoIDRecord, TimestampSequencedWithIDRecord
from eventsourcing.tests.datastore_tests import base
class SQLAlchemyDatastoreTestCase(base.AbstractDatastoreTestCase):
    """
    Base class for test cases that use an SQLAlchemy datastore.
    """
    # When True, back the datastore with a named temporary file rather than
    # the default SQLAlchemy URI (in-memory SQLite).
    use_named_temporary_file = False
    connection_strategy = 'plain'
    infrastructure_factory_class = SQLAlchemyInfrastructureFactory
    contiguous_record_ids = True
    def construct_datastore(self):
        """Build an SQLAlchemyDatastore wired up with every record table."""
        if self.use_named_temporary_file:
            # Keep a reference on self so the temp file is not deleted early.
            self.temp_file = NamedTemporaryFile('a', delete=True)
            uri = 'sqlite:///' + self.temp_file.name
        else:
            uri = DEFAULT_SQLALCHEMY_DB_URI
        # kwargs = {}
        # if not self.use_named_temporary_file:
        #     kwargs['connect_args'] = {'check_same_thread':False}
        #     kwargs['poolclass'] = StaticPool
        return SQLAlchemyDatastore(
            base=Base,
            settings=SQLAlchemySettings(uri=uri),
            tables=(
                IntegerSequencedWithIDRecord,
                IntegerSequencedNoIDRecord,
                TimestampSequencedWithIDRecord,
                TimestampSequencedNoIDRecord,
                SnapshotRecord
            ),
            connection_strategy=self.connection_strategy,
            # **kwargs
        )
class TestSQLAlchemyDatastore(SQLAlchemyDatastoreTestCase, base.DatastoreTestCase):
    """
    Test case for SQLAlchemy datastore.
    """
    def list_records(self):
        """Return all IntegerSequencedNoIDRecord rows, closing the session after."""
        try:
            rows = self.datastore.session.query(IntegerSequencedNoIDRecord)
            return list(rows)
        except (OperationalError, ProgrammingError) as exc:
            # OperationalError from sqlite, ProgrammingError from psycopg2.
            self.datastore.session.rollback()
            raise DatastoreTableError(exc)
        finally:
            self.datastore.session.close()
    def create_record(self):
        """Insert one integer-sequenced record and return it."""
        try:
            new_record = IntegerSequencedNoIDRecord(
                sequence_id=uuid4(),
                position=0,
                topic='topic',
                state='{}'
            )
            session = self.datastore.session
            session.add(new_record)
            session.commit()
        except (OperationalError, ProgrammingError) as exc:
            self.datastore.session.rollback()
            raise DatastoreTableError(exc)
        return new_record
| [
"john.bywater@appropriatesoftware.net"
] | john.bywater@appropriatesoftware.net |
73bd1399a667fde2ebc55ea845c58e86949ac808 | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1227-number-of-equivalent-domino-pairs/number-of-equivalent-domino-pairs.py | 50269f16fcfe4f0d8b0cc7b72998c9203a01c939 | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # Given a list of dominoes, dominoes[i] = [a, b] is equivalent to dominoes[j] = [c, d] if and only if either (a==c and b==d), or (a==d and b==c) - that is, one domino can be rotated to be equal to another domino.
#
# Return the number of pairs (i, j) for which 0 <= i < j < dominoes.length, and dominoes[i] is equivalent to dominoes[j].
#
#
# Example 1:
# Input: dominoes = [[1,2],[2,1],[3,4],[5,6]]
# Output: 1
#
#
# Constraints:
#
#
# 1 <= dominoes.length <= 40000
# 1 <= dominoes[i][j] <= 9
#
class Solution:
    def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
        """Count index pairs (i, j), i < j, whose dominoes are equal up to rotation.

        Each domino is normalised to an order-independent key; every earlier
        occurrence of the same key forms exactly one new pair with the current
        domino, which is equivalent to summing v*(v-1)/2 over the key counts.
        """
        counts = {}
        pairs = 0
        for a, b in dominoes:
            key = (a, b) if a <= b else (b, a)
            seen = counts.get(key, 0)
            pairs += seen
            counts[key] = seen + 1
        return pairs
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
f88f886baeef5abcf18ab0d0cbffcb906bd5e6e1 | d458b72b4d0e5c51446bb8b9f8a6276015dfb594 | /supervised_learning/0x0E-time_series/main.py | 138a3132081e49e51e938fc6a075feb8c63d734a | [] | no_license | mecomontes/Machine-Learning-projects | d6588cfaa7d020d3fae0fb74f6550c9e84500578 | 50e1828b58bb58eecfd3a142501b37fe701f4e49 | refs/heads/main | 2023-07-14T12:30:19.792332 | 2021-08-29T15:33:16 | 2021-08-29T15:33:16 | 376,129,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 4 5:40:12 2021
@author: Robinson Montes
"""
import pandas as pd
import datetime as dt
preprocess = __import__('preprocess_data').preprocessing
if __name__ == "__main__":
file_path = '../data/coinbaseUSD_1-min_data_2014-12-01_to_2019-01-09.csv'
train, validation, test = preprocess(file_path)
print('Train values: ')
print(train.head())
print('Validation values:')
print(validation.head())
print('Test values')
print(test.head())
| [
"1574@holbertonschool.com"
] | 1574@holbertonschool.com |
745e8be5553cabb487ef2d2a32d9fd05f5ba9c87 | 6e00e1ad30e19635c943b370a1aaf9b7eab4beb8 | /backend/chat_user_profile/migrations/0001_initial.py | f41e67e03a512f175dd8bb3a41235a0793f1b06d | [] | no_license | crowdbotics-apps/chat-28286 | c06bafd9ba1bd8b821fd6b76f7580cf88caae44d | 3aa33ad493ab36e00f136654aa96e6ddc4b57135 | refs/heads/master | 2023-06-22T01:56:22.263016 | 2021-06-29T06:53:41 | 2021-06-29T06:53:41 | 381,143,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | # Generated by Django 2.2.20 on 2021-06-28 19:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Profile, VerificationCode and Contact tables for the
    chat_user_profile app (auto-generated by Django's makemigrations)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('pin', models.CharField(max_length=100)),
                ('photo', models.URLField()),
                ('status', models.CharField(max_length=50)),
                ('birthdate', models.DateField()),
                ('gender', models.CharField(max_length=1)),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='VerificationCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=255)),
                ('is_verified', models.BooleanField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('timestamp_verified', models.DateTimeField()),
                ('sent_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='verificationcode_sent_to', to='chat_user_profile.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_blocked', models.BooleanField()),
                ('is_favorite', models.BooleanField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('added_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contact_added_by', to=settings.AUTH_USER_MODEL)),
                ('added_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contact_added_profile', to='chat_user_profile.Profile')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
abb119c8b27fc4e320f4d2c5e27a2aabd4ad94d0 | 23ef5d0633595817341b73c706db4a4c46e12354 | /it/structures/python2/default_naming-default/upper_snake.py | 4db7ebbc4fd69b28715f397a5b3bde174990a84c | [
"MIT",
"Apache-2.0"
] | permissive | c0ding/reproto | 707eb25c8d28e6d052da6d428ca00bcd5617bb91 | 92f0a4b258095bc2f8a394d0bd44209e3a599c4f | refs/heads/master | 2022-11-18T08:13:23.789214 | 2020-07-18T10:28:12 | 2020-07-18T10:28:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | class Value:
    def __init__(self, _foo_bar):
        # Store the FOO_BAR field value as given (validated only on decode/encode).
        self._foo_bar = _foo_bar
    @property
    def foo_bar(self):
        # Read-only accessor for the FOO_BAR field.
        return self._foo_bar
@staticmethod
def decode(data):
f_foo_bar = data["FOO_BAR"]
if not isinstance(f_foo_bar, unicode):
raise Exception("not a string")
return Value(f_foo_bar)
def encode(self):
data = dict()
if self._foo_bar is None:
raise Exception("FOO_BAR: is a required field")
data["FOO_BAR"] = self._foo_bar
return data
    def __repr__(self):
        # Debug representation, e.g. <Value foo_bar:'x'>.
        return "<Value foo_bar:{!r}>".format(self._foo_bar)
| [
"udoprog@tedro.se"
] | udoprog@tedro.se |
55d9f917d9b308fe78ca6904fd50850bdeeef740 | a206e5d97bf4da00722ee8841fe2e0c2884d1c92 | /feature.py | 36107eba78f7ffc5f82ecfc9665070df891c70c6 | [] | no_license | mikbuch/eeg_02_interface | 969bb925758a330dbd287ad71e787bd0faa5ddf8 | 7efd6649c2176fba50c3113570697dc7f95ef9a4 | refs/heads/master | 2016-09-06T14:29:22.842641 | 2015-04-23T23:21:10 | 2015-04-23T23:21:10 | 34,334,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | #!/usr/bin/env python
from statlib import stats
# zero crossing and negative sum
def zero_negative(package):
    """Return (sample stdev, sum of non-positive samples, zero-crossing count).

    A crossing is counted whenever a positive sample is followed by a negative
    one, or a non-positive sample is followed by a positive one.
    """
    negative_sum = 0
    crossings = 0
    last_index = len(package) - 1
    for idx, value in enumerate(package):
        if value > 0:
            if idx != last_index and package[idx + 1] < 0:
                crossings += 1
        else:
            negative_sum += value
            if idx != last_index and package[idx + 1] > 0:
                crossings += 1
    return stats.stdev(package), negative_sum, crossings
| [
"mikolaj.buchwald@gmail.com"
] | mikolaj.buchwald@gmail.com |
06637c07f994b92112d2115b4c12d9bd35b01444 | 9703641c14b7c19f2fcf937150204ab85b4151a2 | /code pratice/设计指定长度随机密码.py | 6631266c946ef35f46c0ea5935dca28c3dfc7679 | [] | no_license | walkmiao/Little_Case | 8effbea554c930e0eb32d4335ecbd5541a9c1251 | ab445659e19c85ecfd9b99f8d615c33f900662f8 | refs/heads/master | 2021-06-11T05:30:39.415720 | 2019-05-14T10:37:29 | 2019-05-14T10:37:29 | 128,582,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 设计指定长度随机密码.py
# @Author: lch
# @Date : 2018/10/10
# @Desc :
"""
题目2: 设计一个函数,生成指定长度的验证码(由数字和大小写英文字母构成的随机字符串)
"""
import random
def generate_code(length=6):
    """Build a verification code of *length* items mixing lowercase letters,
    uppercase letters and digits, with at least one item of each category.

    Note: returns a list (lowercase chars, then uppercase chars, then int
    digits), not a joined string; digit entries are ints, not characters.
    """
    lowers = [chr(c) for c in range(ord('a'), ord('z') + 1)]
    uppers = [chr(c) for c in range(ord('A'), ord('Z') + 1)]
    digits = list(range(10))
    # Split the requested length into three non-empty category counts.
    n_lower = random.randint(1, length - 2)
    n_upper = random.randint(1, length - n_lower - 1)
    n_digit = length - n_lower - n_upper
    return (random.sample(lowers, n_lower)
            + random.sample(uppers, n_upper)
            + random.sample(digits, n_digit))
# Demo: show one sample 6-item verification code when run as a script/module.
print(generate_code())
| [
"372815340@qq.com"
] | 372815340@qq.com |
1b48fa35782b5a093d246a4325888a0a5482ada6 | 780fe51f58008bc901aef74baccacb22a796b308 | /thrift/perf/py3/py3_server.py | 8131991cc4b305b002cfb7b0bdf21e4e587a4f74 | [
"Apache-2.0"
] | permissive | joseroubert08/fbthrift | f2e9f3adb9cca4c5248070383310d4573136fbb5 | 8edc86b4c3b991039e110f378cfa1d8a19665b55 | refs/heads/master | 2021-01-16T19:00:49.105359 | 2017-01-15T04:42:45 | 2017-01-15T04:47:30 | 79,018,635 | 1 | 0 | null | 2017-01-15T09:15:45 | 2017-01-15T09:15:45 | null | UTF-8 | Python | false | false | 1,603 | py | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from argparse import ArgumentParser
import asyncio
import signal
import sys
from thrift.lib.py3.thrift_server import ThriftServer
from apache.thrift.test.py3.load_handler import LoadTestHandler
def main():
    """Parse the --port option, start the py3 LoadTest Thrift server and serve."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        '--port',
        default=1234,
        type=int,
        help='Port to run on'
    )
    args = arg_parser.parse_args()
    event_loop = asyncio.get_event_loop()
    load_handler = LoadTestHandler(event_loop)
    thrift_server = ThriftServer(load_handler, args.port, loop=event_loop)
    # Shut the server down cleanly on Ctrl-C or SIGTERM.
    for sig in (signal.SIGINT, signal.SIGTERM):
        event_loop.add_signal_handler(sig, thrift_server.stop)
    print("Running Py3 server on port {}".format(args.port))
    event_loop.run_until_complete(thrift_server.serve())
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code (None -> 0).
    sys.exit(main())
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
a333ccba45a929a80bcfc1e647c92d5977fe109c | da0a7446122a44887fa2c4f391e9630ae033daa2 | /python/ray/air/tests/test_resource_changing.py | 2299c5dabb9a112d5349433b2a4a5c95bfbd96ac | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | whiledoing/ray | d8d9ba09b7545e8fd00cca5cfad451278e61fffd | 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | refs/heads/master | 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 | Apache-2.0 | 2023-03-04T08:57:20 | 2020-04-02T10:07:23 | Python | UTF-8 | Python | false | false | 4,697 | py | from ray.air import session
from ray.air.checkpoint import Checkpoint
from ray.air.config import FailureConfig, RunConfig, ScalingConfig
from ray.air.constants import TRAIN_DATASET_KEY
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.xgboost import XGBoostTrainer
from sklearn.datasets import load_breast_cancer
import pandas as pd
import pytest
import ray
from ray import tune
from ray.tune.schedulers.resource_changing_scheduler import (
DistributeResources,
ResourceChangingScheduler,
)
from ray.tune.schedulers.async_hyperband import ASHAScheduler
@pytest.fixture
def ray_start_8_cpus():
    """Start a local 8-CPU Ray cluster for a test and shut it down afterwards."""
    context = ray.init(num_cpus=8)
    yield context
    # Teardown (runs after the test body): stop Ray so tests stay isolated.
    ray.shutdown()
def train_fn(config):
    """Toy training loop: each epoch reports metric*epoch, the epoch index and
    the trial's current CPU allocation, checkpointing the epoch number."""
    print(session.get_trial_resources())
    resume_epoch = 0
    ckpt = session.get_checkpoint()
    if ckpt:
        # Resume one past the epoch recorded in the checkpoint (see the
        # session.report example in the Ray AIR docs).
        resume_epoch = ckpt.to_dict().get("epoch", -1) + 1
    for epoch in range(resume_epoch, config["num_epochs"]):
        session.report(
            {
                "metric": config["metric"] * epoch,
                "epoch": epoch,
                "num_cpus": session.get_trial_resources().required_resources["CPU"],
            },
            checkpoint=Checkpoint.from_dict(dict(epoch=epoch)),
        )
class AssertingDataParallelTrainer(DataParallelTrainer):
    """DataParallelTrainer that checks the trial's live resources match the
    (validated) scaling config before running the normal training loop."""
    def training_loop(self) -> None:
        validated = self._validate_scaling_config(self.scaling_config)
        expected = validated.as_placement_group_factory()
        actual = session.get_trial_resources()
        assert expected == actual, (expected, actual)
        return super().training_loop()
class AssertingXGBoostTrainer(XGBoostTrainer):
    """XGBoostTrainer that verifies the trial's resources match its scaling
    config every time the Ray params are requested."""
    @property
    def _ray_params(self):
        validated = self._validate_scaling_config(self.scaling_config)
        # The placement group the trial runs under must match the config.
        assert validated.as_placement_group_factory() == session.get_trial_resources()
        return super()._ray_params
def test_data_parallel_trainer(ray_start_8_cpus):
    """ResourceChangingScheduler should grow DataParallelTrainer trials beyond
    their initial worker count (checked via the reported num_cpus metric)."""
    num_workers = 2
    trainer = AssertingDataParallelTrainer(
        train_fn, scaling_config=ScalingConfig(num_workers=num_workers)
    )
    tuner = Tuner(
        trainer,
        param_space={
            "train_loop_config": {
                "num_epochs": 100,
                "metric": tune.grid_search([1, 2, 3, 4, 5]),
            }
        },
        tune_config=TuneConfig(
            mode="max",
            metric="metric",
            scheduler=ResourceChangingScheduler(
                ASHAScheduler(),
                resources_allocation_function=DistributeResources(
                    add_bundles=True, reserve_resources={"CPU": 1}
                ),
            ),
        ),
        run_config=RunConfig(failure_config=FailureConfig(fail_fast=True)),
    )
    result_grid = tuner.fit()
    # No trial may have errored (the Asserting* trainer raises on mismatch).
    assert not any(x.error for x in result_grid)
    # + 1 for Trainable
    assert result_grid.get_dataframe()["num_cpus"].max() > num_workers + 1
def test_gbdt_trainer(ray_start_8_cpus):
    """ResourceChangingScheduler should run XGBoostTrainer trials without
    errors while their resources are being reallocated mid-run."""
    # Build a small binary-classification dataset from sklearn's breast cancer data.
    data_raw = load_breast_cancer()
    dataset_df = pd.DataFrame(data_raw["data"], columns=data_raw["feature_names"])
    dataset_df["target"] = data_raw["target"]
    train_ds = ray.data.from_pandas(dataset_df).repartition(16)
    trainer = AssertingXGBoostTrainer(
        datasets={TRAIN_DATASET_KEY: train_ds},
        label_column="target",
        scaling_config=ScalingConfig(num_workers=2),
        params={
            "objective": "binary:logistic",
            "eval_metric": ["logloss"],
        },
    )
    tuner = Tuner(
        trainer,
        param_space={
            "num_boost_round": 100,
            "params": {
                "eta": tune.grid_search([0.28, 0.29, 0.3, 0.31, 0.32]),
            },
        },
        tune_config=TuneConfig(
            mode="min",
            metric="train-logloss",
            scheduler=ResourceChangingScheduler(
                ASHAScheduler(),
                resources_allocation_function=DistributeResources(
                    add_bundles=True, reserve_resources={"CPU": 1}
                ),
            ),
        ),
        run_config=RunConfig(failure_config=FailureConfig(fail_fast=True)),
    )
    result_grid = tuner.fit()
    # No trial may have errored (the Asserting* trainer raises on mismatch).
    assert not any(x.error for x in result_grid)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| [
"noreply@github.com"
] | whiledoing.noreply@github.com |
71dbebb820dbd7f4b8c24505f4b638a923e62c66 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2475/60758/282446.py | 47d5f768676492e85fb963899c2835b0bc9fed29 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | k=int(input())
# For each of the k test cases: read n and n integers, then report the length
# (in +1 steps) of the longest run of consecutive integers after sorting.
for qqq in range(0,k):
    n=int(input())
    num=list(map(int,input().split()))
    num.sort()
    out=0    # longest consecutive run seen so far, counted in steps
    count=0  # current consecutive run, counted in steps
    for i in range(n-1):
        if(num[i]+1==num[i+1]):
            count+=1
            out=max(out,count)
        else:
            count=0
print(out) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
89a87ed681efbb6b832737666b35f803561484c1 | 33226b2cf373cb2d1ceac94fbc2a1558c1fd4f65 | /Simple_BBS/Regiest_models.py | e9072fef27185d2d6b34304fe880859e4fe39411 | [] | no_license | cnbjljf/simple_project | af54c6b494aa7f807e0bcfdd4fabfebf1a15cd76 | 95287682d7406be76c6dcd2974174fc2c1f4a372 | refs/heads/master | 2020-04-02T21:02:22.292978 | 2016-07-03T06:35:02 | 2016-07-03T06:35:02 | 62,478,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python
'''
'''
import os
import sys
path = os.path.dirname( os.path.dirname( __file__ ) )
sys.path.append( path )
from django.contrib import admin
# register your models here
from django.apps import apps
from django.contrib.admin.sites import AlreadyRegistered
# Register every model of the Simple_BBS app with the Django admin site,
# skipping any model that some other module has already registered.
app_models = apps.get_app_config('Simple_BBS').get_models()
for model in app_models:
    try:
        admin.site.register(model)
    except AlreadyRegistered:
        pass
| [
"1403208717@qq.com"
] | 1403208717@qq.com |
4831effd489528b9694cd5a1b45cb52a10ed6b7b | 0f089307fe04a68569fe20bdb78be6ba43a589ea | /tests/helpers/test_restore_state.py | 3a4c058f8534fdffa7677a0ae4d11a4d24057fec | [
"Apache-2.0"
] | permissive | wanman/home-alone | 6983d4e25bbf6b046cc6eaf426816c2a2dca4eea | 633aaed22b0de0129d1e72e23bcd974b9ce13656 | refs/heads/master | 2020-05-21T08:19:00.077989 | 2017-03-10T22:14:31 | 2017-03-10T22:14:31 | 84,603,132 | 1 | 0 | null | 2017-03-10T22:21:20 | 2017-03-10T21:44:26 | Python | UTF-8 | Python | false | false | 3,475 | py | """The tests for the Restore component."""
import asyncio
from datetime import timedelta
from unittest.mock import patch, MagicMock
from homeassistant.bootstrap import setup_component
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.core import CoreState, split_entity_id, State
import homeassistant.util.dt as dt_util
from homeassistant.components import input_boolean, recorder
from homeassistant.helpers.restore_state import (
async_get_last_state, DATA_RESTORE_CACHE)
from tests.common import (
get_test_home_assistant, mock_coro, init_recorder_component)
@asyncio.coroutine
def test_caching_data(hass):
    """Test that we cache data."""
    hass.config.components.add('recorder')
    hass.state = CoreState.starting
    states = [
        State('input_boolean.b0', 'on'),
        State('input_boolean.b1', 'on'),
        State('input_boolean.b2', 'on'),
    ]
    # Patch the recorder access points so the cache is filled from our fake
    # states instead of a real database.
    with patch('homeassistant.helpers.restore_state.last_recorder_run',
               return_value=MagicMock(end=dt_util.utcnow())), \
            patch('homeassistant.helpers.restore_state.get_states',
                  return_value=states), \
            patch('homeassistant.helpers.restore_state.async_get_instance',
                  return_value=mock_coro()):
        state = yield from async_get_last_state(hass, 'input_boolean.b1')
    # All fetched states should now sit in the restore cache, keyed by entity.
    assert DATA_RESTORE_CACHE in hass.data
    assert hass.data[DATA_RESTORE_CACHE] == {st.entity_id: st for st in states}
    assert state is not None
    assert state.entity_id == 'input_boolean.b1'
    assert state.state == 'on'
    # Once Home Assistant has started, the cache must be dropped.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    yield from hass.async_block_till_done()
    assert DATA_RESTORE_CACHE not in hass.data
def _add_data_in_last_run(entities):
    """Add test data in the last recorder_run.

    ``entities`` maps entity_id -> state string; each entry is written as a
    States row inside a RecorderRuns row that ended 10 minutes ago.
    """
    # pylint: disable=protected-access
    t_now = dt_util.utcnow() - timedelta(minutes=10)
    t_min_1 = t_now - timedelta(minutes=20)
    t_min_2 = t_now - timedelta(minutes=30)
    recorder_runs = recorder.get_model('RecorderRuns')
    states = recorder.get_model('States')
    with recorder.session_scope() as session:
        run = recorder_runs(
            start=t_min_2,
            end=t_now,
            created=t_min_2
        )
        recorder._INSTANCE._commit(session, run)
        for entity_id, state in entities.items():
            dbstate = states(
                entity_id=entity_id,
                domain=split_entity_id(entity_id)[0],
                state=state,
                attributes='{}',
                last_changed=t_min_1,
                last_updated=t_min_1,
                created=t_min_1)
            recorder._INSTANCE._commit(session, dbstate)
def test_filling_the_cache():
    """Test filling the cache from the DB."""
    test_entity_id1 = 'input_boolean.b1'
    test_entity_id2 = 'input_boolean.b2'
    hass = get_test_home_assistant()
    hass.state = CoreState.starting
    # Seed the recorder DB with states from a previous run, then set up the
    # input_boolean component so those states are restored on start.
    init_recorder_component(hass)
    _add_data_in_last_run({
        test_entity_id1: 'on',
        test_entity_id2: 'off',
    })
    hass.block_till_done()
    setup_component(hass, input_boolean.DOMAIN, {
        input_boolean.DOMAIN: {
            'b1': None,
            'b2': None,
        }})
    hass.start()
    # Both entities should come back with the states stored in the DB.
    state = hass.states.get('input_boolean.b1')
    assert state
    assert state.state == 'on'
    state = hass.states.get('input_boolean.b2')
    assert state
    assert state.state == 'off'
    hass.stop()
| [
"paulus@paulusschoutsen.nl"
] | paulus@paulusschoutsen.nl |
98a6e00741c6fdbf71c95bdee694690cc1d91e5d | 8ebc3925894d4f796efb703cdf3254fc56724c3a | /aws-apigateway-py-routes/__main__.py | c243fc1ce23aa47798dadf5ad13b3cd563bd256e | [
"Apache-2.0"
] | permissive | pulumi/examples | 8db27b8847f8c05bcc8d99cdec8eb6c7b7ffa2a3 | 26ffb4bb327f00457796c96676e7db5e25e2bbd6 | refs/heads/master | 2023-09-04T04:56:53.098380 | 2023-08-31T14:33:12 | 2023-08-31T14:33:12 | 108,589,232 | 2,156 | 974 | Apache-2.0 | 2023-09-13T23:27:18 | 2017-10-27T19:50:31 | TypeScript | UTF-8 | Python | false | false | 5,573 | py | # Copyright 2016-2021, Pulumi Corporation.
import json
import pulumi
import pulumi_aws as aws
import pulumi_aws_apigateway as apigateway
import lambdas
from dns import configure_dns
# Create a Cognito User Pool of authorized users
user_pool = aws.cognito.UserPool("user-pool")
user_pool_client = aws.cognito.UserPoolClient(
"user-pool-client", user_pool_id=user_pool.id, explicit_auth_flows=["ADMIN_NO_SRP_AUTH"])
# Define an endpoint that invokes a lambda to handle requests
api = apigateway.RestAPI('api', routes=[
# Serve an entire directory of static content
apigateway.RouteArgs(path="static", local_path="www"),
# Invoke our Lambda to handle a single route
apigateway.RouteArgs(path="lambda", method="GET",
event_handler=lambdas.hello_handler),
# Proxy requests to another service
apigateway.RouteArgs(path="proxy", target=apigateway.TargetArgs(
uri="https://www.google.com", type="http_proxy")),
# Use Swagger to define an HTTP proxy route
apigateway.RouteArgs(path="swagger", method="GET", data={
"x-amazon-apigateway-integration": {
"httpMethod": "GET",
"passthroughBehavior": "when_no_match",
"type": "http_proxy",
"uri": "https://httpbin.org/uuid",
},
}),
# Authorize requests using Cognito
apigateway.RouteArgs(
path="cognito-authorized",
method="GET",
event_handler=lambdas.hello_handler,
# Define an authorizer which uses Cognito to validate the token from the Authorization header
authorizers=[apigateway.AuthorizerArgs(
parameter_name="Authorization",
identity_source=["method.request.header.Authorization"],
provider_arns=[user_pool.arn]
)]
),
# Authorize requests using a Lambda function
apigateway.RouteArgs(path="lambda-authorized", method="GET", event_handler=lambdas.hello_handler,
authorizers=[apigateway.AuthorizerArgs(
auth_type="custom",
parameter_name="Authorization",
type="request",
identity_source=[
"method.request.header.Authorization"],
handler=lambdas.auth_lambda
)]),
apigateway.RouteArgs(path="key-authorized", method="GET",
event_handler=lambdas.hello_handler,
api_key_required=True)
])
# Define whole API using swagger (OpenAPI)
swagger_api = apigateway.RestAPI("swagger-api",
swagger_string=json.dumps({
"swagger": "2.0",
"info": {
"title": "example",
"version": "1.0",
},
"paths": {
"/": {
"get": {
"x-amazon-apigateway-integration": {
"httpMethod": "GET",
"passthroughBehavior": "when_no_match",
"type": "http_proxy",
"uri": "https://httpbin.org/uuid",
},
},
},
},
"x-amazon-apigateway-binary-media-types": ["*/*"],
})
)
# Create an API key to manage usage
api_key = aws.apigateway.ApiKey("api-key")
# Define usage plan for an API stage
usage_plan = aws.apigateway.UsagePlan("usage-plan",
api_stages=[aws.apigateway.UsagePlanApiStageArgs(
api_id=api.api.id,
stage=api.stage.stage_name)])
# Associate the key to the plan
aws.apigateway.UsagePlanKey('usage-plan-key',
key_id=api_key.id,
key_type="API_KEY",
usage_plan_id=usage_plan.id)
# Set up DNS if a domain name has been configured
config = pulumi.Config()
domain = config.get("domain")
if domain != None:
# Load DNS zone for the domain
zone = aws.route53.get_zone_output(name=config.require("dns-zone"))
# Create SSL Certificate and DNS entries
api_domain_name = configure_dns(domain=domain, zone_id=zone.id)
# Tell API Gateway what to serve on our custom domain
base_path_mapping = aws.apigateway.BasePathMapping("api-domain-mapping",
rest_api=api.api.id,
stage_name=api.stage.stage_name,
domain_name=api_domain_name.domain_name)
pulumi.export(
"custom-url", base_path_mapping.domain_name.apply(lambda domain: f'https://{domain}/'))
pulumi.export("url", api.url)
pulumi.export("user-pool-id", user_pool.id)
pulumi.export("user-pool-client-id", user_pool_client.id)
pulumi.export("swagger-url", swagger_api.url)
pulumi.export("api-key-value", api_key.value)
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
99fa9801cdae9f52933c03149abc616dfe5cdadb | d418edb92b92d35a32a198d8675defb21448f513 | /Assignment/4.py | 4e6f781c3c332785be89906032891ee2fef22146 | [] | no_license | JAntonioMarin/CursoPython3 | a75ce49696e23903398fc186c81b5fb7c2116c21 | ba04888eb6e5495b5180cbc5ed7a5c804ee8dbaf | refs/heads/master | 2020-12-27T04:18:21.010039 | 2020-02-17T12:09:37 | 2020-02-17T12:09:37 | 237,762,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | def num_max(enteros):
valorInicial = enteros[0]
valorMaximo = valorInicial
for valor in range (1, len(enteros)):
if(valorMaximo<enteros[valor]):
valorMaximo = enteros[valor]
elif(valorMaximo==enteros[valor]):
valorInicial = valorMaximo
if(valorInicial==valorMaximo):
print("Existen 2 o mas máximos")
else:
print("El valor maximo es", valorMaximo)
enteros = input("Por favor introduce enteros separados por espacio:")
cadenas = enteros.split(' ')
cadenaEnteros = []
for cadena in cadenas:
cadenaEnteros.append(int(cadena))
if(len(cadenaEnteros)>1):
num_max(cadenaEnteros)
else:
print("El valor maximo es", cadenaEnteros[0])
| [
"avalanch.psp@gmail.com"
] | avalanch.psp@gmail.com |
769fdc01c08347489e610c53259a047ffcf4ba3e | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/data/region_HU.py | 92c290b8f89c02dee52458951bc4d2c5236316bb | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | """Auto-generated file, do not edit by hand. HU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{7,8}', possible_number_pattern='\\d{6,9}', possible_length=(8, 9), possible_length_local_only=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1\\d|2[2-9]|3[2-7]|4[24-9]|5[2-79]|6[23689]|7[2-9]|8[2-57-9]|9[2-69])\\d{6}', possible_number_pattern='\\d{6,8}', example_number='12345678', possible_length=(8,), possible_length_local_only=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[257]0|3[01])\\d{7}', possible_number_pattern='\\d{9}', example_number='201234567', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='[48]0\\d{6}', possible_number_pattern='\\d{8}', example_number='80123456', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='9[01]\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456', possible_length=(8,)),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='21\\d{7}', possible_number_pattern='\\d{9}', example_number='211234567', possible_length=(9,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='38\\d{7}', possible_number_pattern='\\d{6,9}', example_number='381234567', possible_length=(9,)),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(national_number_pattern='[48]0\\d{6}', possible_number_pattern='\\d{8}', example_number='80123456', possible_length=(8,)),
national_prefix='06',
national_prefix_for_parsing='06',
number_format=[NumberFormat(pattern='(1)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='(\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2-9]'], national_prefix_formatting_rule='(\\1)')],
mobile_number_portable_region=True)
| [
"james.brace@mail.mcgill.ca"
] | james.brace@mail.mcgill.ca |
00fbff804c306f16c0d80cab257452d1511c1d7a | f8f6dadfb4215776ee40f022265d6c87ad2cc65b | /examples/dev/coord_transform.py | b0f390b9fd0a2c8d78a51c7f10bfcf4361189195 | [] | no_license | oldbay/raster_tools | 559112431146ccf3e0ed29a4a8ee3e3ac8adf025 | 171ed1313a0805cc2d7e8f8049914848ebee8331 | refs/heads/master | 2021-01-10T20:38:49.075348 | 2019-04-08T18:49:17 | 2019-04-08T18:49:17 | 31,228,764 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from rtools import geom_conv
coords = [
(296153.369,7137678.937),
(296203.959,7137570.986),
(296256.938,7137645.476)
]
conv = geom_conv(32638)
print conv.coords_reproj(4326, *coords) | [
"old_bay@mail.ru"
] | old_bay@mail.ru |
807a837d4cf69d4aa7173c4051e4c3c14d413ad2 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/datamgr/metadata/metadata/backend/mysql/replica_base.py | 3ab330a878a927334f7842a1f2dc037e3d41de13 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 3,555 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# 用于生成映射表的ReplicaBase。会自动合入MixIn信息。
from collections import Sequence
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from metadata.db_models.meta_service.replica_conf import replica_mixins
class ReplicaMixIn(object):
"""
映射表补充字段MixIn父类。
"""
pass
replica_mixin_classes_info = {}
for module in replica_mixins:
for attr in dir(module):
item = getattr(module, attr)
if isinstance(item, type) and issubclass(item, ReplicaMixIn) and item is not ReplicaMixIn:
replica_mixin_classes_info[attr.split('MixIn')[0]] = item
class ReplicaMeta(DeclarativeMeta):
"""自动生成映射表Model的元类。"""
def __new__(mcs, name, bases, namespace):
# 自动添加mixin
if name == 'Base':
return super(ReplicaMeta, mcs).__new__(mcs, name, bases, namespace)
else:
namespace[str('__abstract__')] = True
table_args = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8', 'mysql_collate': 'utf8_general_ci'}
if namespace.get('__table_args__'):
if isinstance(namespace['__table_args__'], Sequence):
table_args_lst = list(namespace['__table_args__'])
if isinstance(table_args_lst[-1], dict):
table_args_lst[-1].update(table_args)
else:
table_args_lst.append(table_args)
else:
namespace['__table_args__'].update(table_args)
namespace['__table_args__'] = table_args
cls = super(ReplicaMeta, mcs).__new__(mcs, name, tuple(bases), namespace)
mix_bases = [cls]
if name in replica_mixin_classes_info:
mix_bases.insert(0, replica_mixin_classes_info[name])
mixed_cls = super(ReplicaMeta, mcs).__new__(mcs, str('Replica') + name, tuple(mix_bases), {})
return mixed_cls
ReplicaBase = declarative_base(metaclass=ReplicaMeta)
metadata = ReplicaBase.metadata
ReplicaBase.db_name = ReplicaBase._db_name = 'bkdata_meta'
| [
"terrencehan@tencent.com"
] | terrencehan@tencent.com |
bf79e6abe3010bd150292be1c8375b3bc68486c1 | 56939ddccf722903ef6cc7ebc6aa85ee6b80b6dc | /orders/migrations/0003_alter_ordermodel_user.py | ffe024e4fa843e2440cd5c3d6357c752c5ace8d0 | [] | no_license | Colibri7/felix_shop | d519b038446a49fc33cb1ce1c9108291e949c848 | d9d635ad7797505503b56df2b36b4333bfa86b31 | refs/heads/master | 2023-07-21T03:30:36.714548 | 2021-08-30T11:03:53 | 2021-08-30T11:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # Generated by Django 3.2.4 on 2021-07-22 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0002_auto_20210717_2208'),
]
operations = [
migrations.AlterField(
model_name='ordermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
| [
"sarikkenjaev2002@gmail.com"
] | sarikkenjaev2002@gmail.com |
db2dbeaf40c8d5ecc580df89a0eff4a1cd09dca8 | a3ddd9a1cae3271d285daf272d733b67758d5cc7 | /award/models.py | e54c876cf57587d41ec24e8352cfc90a1e61dccd | [] | no_license | peroh/memba-api | 21edf7757b1dfcd2a42b6f52ed3bc25d0780106a | e6e5cc5bbd2afdcba5c2b7e9ee45ff717e4b75f1 | refs/heads/master | 2021-09-08T05:09:56.948766 | 2018-03-07T11:00:34 | 2018-03-07T11:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class AwardCategory(models.Model):
title = models.CharField(max_length=128)
description = models.TextField()
def __str__(self):
return self.title
class Award(models.Model):
award_category = models.ForeignKey('award.AwardCategory')
member = models.ForeignKey('member.Member')
attained = models.DateField()
def __str__(self):
return self.award_category.__str__()
| [
"mperrott6@gmail.com"
] | mperrott6@gmail.com |
62669ff92a52f59f2282192d2d60739662712041 | 98b76260f5c31563aa40e76c412be514c1844fc2 | /fHDHR_web/files/__init__.py | 905566b59642e2809bef5b2c68fc79c8b3236bfc | [
"WTFPL"
] | permissive | DanAustinGH/fHDHR_Locast | cb54b235200a6123213853a133d6231df3b3a1ea | 002117b666ad650c523aedb0209f1c996d576169 | refs/heads/main | 2023-02-15T20:27:21.141592 | 2021-01-05T19:09:35 | 2021-01-05T19:09:35 | 327,135,296 | 0 | 0 | WTFPL | 2021-01-05T22:28:13 | 2021-01-05T22:28:13 | null | UTF-8 | Python | false | false | 314 | py |
from .favicon_ico import Favicon_ICO
from .style_css import Style_CSS
from .device_xml import Device_XML
class fHDHR_Files():
def __init__(self, fhdhr):
self.fhdhr = fhdhr
self.favicon = Favicon_ICO(fhdhr)
self.style = Style_CSS(fhdhr)
self.device_xml = Device_XML(fhdhr)
| [
"github@deathbybandaid.net"
] | github@deathbybandaid.net |
0b8eb9d2a021eff067d7027dde5cd87766102069 | a93fe443762b5183c10bfa2ea71dc1b000708f34 | /spacecode/ibanpy/models.py | 304714c7bd02f6d91ac422a7999168109ae8e4e2 | [] | no_license | zoleikha-mousavipak/Django_Advance-Models | 06d89fe63d1210224c478cbd3e2c0a19987ade0b | c238a69134232b7d54c69233c082787913c7eabd | refs/heads/master | 2020-11-25T00:04:22.553911 | 2019-12-16T14:29:31 | 2019-12-16T14:29:31 | 228,400,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | from django.db import models
class MyIBANField(models.Field):
def db_type(self, connection):
return 'char(25)'
def from_db_value(self, value, expression, connection):
return value
def to_python(self, value):
return value
def value_to_string(self, obj):
value = self.value_from_object(obj)
return self.get_prep_value(value)
class MyIBANModel(models.Model):
_iban = MyIBANField()
@property
def iban(self):
return self._iban
@iban.setter
def iban(self, value):
self._iban = value
def __str__(self):
return self.iban
| [
"zmpak2000@gmail.com"
] | zmpak2000@gmail.com |
1e3302bdc5b0746f1f6287f3c77657b08e6f1931 | 11c8dbe77dce5616e8e7ff5df647192887c279d3 | /Sodda dasturlar/Sutkani boshidan secungacha necha minut va soat borligini aniqlash.py | bb0df7b1b966e4c76ca39f992fc06c7ad76bea7e | [] | no_license | extremums/the-simplest-programs | 1fe9be078dc53260c47e7c1e3b2a2300b287e862 | 71d9be72fac1013c7b8ee8c0f792d24fc54e854a | refs/heads/main | 2023-04-27T07:20:13.836780 | 2021-05-11T11:03:47 | 2021-05-11T11:03:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Sutkani boshidan secungacha necha minut va soat borligini aniqlash
sekund = input("Sekundni kiriting ")
if sekund.isdigit():
sekund = int(sekund)
soat = sekund//3600
sekund %= 3600
minut = sekund//60
sekund %= 60
print("Sutkani boshidan {} soat {} minut va {} secund o'tdi".format(soat,minut,sekund))
else :
print('Natural son kiritish lozim')
| [
"84010165+Nurullox@users.noreply.github.com"
] | 84010165+Nurullox@users.noreply.github.com |
8c96ef0652543325ee68e9afa0303c98f18ad8fb | e5c56d6340edd36167f53605e8d766972fe706ca | /sen/tui/widgets/graph.py | c019a34c1cb52dc740435f933d58b52881a6cbc6 | [
"MIT"
] | permissive | TomasTomecek/sen | 47090e7edcbf7f9358cb256d5d9d6885ee662840 | ec292b5a723cd59818e3a36a7ea5091625fb3258 | refs/heads/master | 2023-06-21T14:19:45.138546 | 2023-04-12T07:21:42 | 2023-04-12T07:21:42 | 43,115,475 | 1,011 | 78 | MIT | 2023-04-12T07:21:43 | 2015-09-25T07:13:43 | Python | UTF-8 | Python | false | false | 1,686 | py | import math
import logging
import urwid
logger = logging.getLogger(__name__)
def find_max(list_of_lists):
list_of_ints = [x[0] for x in list_of_lists]
m = max(list_of_ints)
try:
return 2 ** int(math.log2(m) + 1)
except ValueError:
return 1
class ContainerInfoGraph(urwid.BarGraph):
def __init__(self, fg, bg, graph_bg="graph_bg", bar_width=None):
"""
create a very simple graph
:param fg: attr for smoothing (fg needs to be set)
:param bg: attr for bars (bg needs to be set)
:param graph_bg: attr for graph background
:param bar_width: int, width of bars
"""
# satt smoothes graph lines
satt = {(1, 0): fg}
super().__init__(
[graph_bg, bg],
hatt=[fg],
satt=satt,
)
if bar_width is not None:
# breaks badly when set too high
self.set_bar_width(bar_width)
def render(self, size, focus=False):
data, top, hlines = self._get_data(size)
maxcol, maxrow = size
if len(data) < maxcol:
data += [[0] for x in range(maxcol - len(data))]
self.set_data(data, top, hlines)
logger.debug(data)
return super().render(size, focus)
def rotate_value(self, val, max_val=None, adaptive_max=False):
"""
"""
data, _, _ = self.data
data = data[1:] + [[int(val)]]
if adaptive_max:
max_val = find_max(data)
self.set_data(data, max_val)
return max_val
def set_max(self, value):
data, top, hlines = self.data
self.set_data(data, value, hlines)
| [
"ttomecek@redhat.com"
] | ttomecek@redhat.com |
9724ad7aa8b657a2fb8995698be8b31bb74da85b | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/common/test_run/cos_run.py | bff15552b892ebb77b63bcedbb20fc31b913efe3 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 1,779 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cos run function."""
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import cos
from base import get_rtol_atol
from gen_random import random_gaussian
def cos_run(shape, dtype, attrs):
# Generate data for testing the op
inputs = random_gaussian(shape, miu=0, sigma=0.1).astype(dtype)
expect = np.cos(inputs)
# inputs and output to hold the data
output = np.full(shape, np.nan, dtype)
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(cos.cos, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
return mod, expect, (inputs, output)
else:
return mod
else:
mod = utils.op_build_test(cos.cos, [shape], [dtype], kernel_name='cos', attrs=attrs)
# result_tvm
output = utils.mod_launch(mod, (inputs, output))
# compare result
rtol, atol = get_rtol_atol("cos", dtype)
TestCase_Result = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=False)
return inputs, output, expect, TestCase_Result | [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
1d4f134c14d925405ea4bbb418b726779acaf34c | 20e4b646c2b17483d5bd298b2a0ae0361534bd75 | /Book_python3_web_spider/6_Ajax结果提取/test.py | ea0c67459127673c8c005fbf760199bfda6a34b5 | [] | no_license | zzf531/WebSpider | fd68080fe9847d0a781024916a09959c58ef0ce3 | 6cd150aeeacd1bc6ec42c80579b2a4f25a39acce | refs/heads/master | 2020-08-09T23:16:05.585782 | 2020-03-01T03:46:58 | 2020-03-01T03:46:58 | 214,198,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """
https://www.toutiao.com/api/search/content/?
aid=24&app_name=web_search
&offset=20&format=json
&keyword=%E8%A1%97%E6%8B%8D
&autoload=true
&count=20
&en_qc=1
&cur_tab=1
&from=search_tab
&pd=synthesis
×tamp=1582979073578"""
params = {
'aid': '24',
'app_name': 'web_search',
'offset': offset,
'format': 'json',
'keyword': '街拍',
'autoload': 'true',
'count': '20',
'en_qc': '1',
'cur_tab': '1',
'from': 'search_tab',
'pd': 'synthesis',
} | [
"2315519934@qq.com"
] | 2315519934@qq.com |
adb73ef3cb0ce41f0e0a48c8f32809f66c3f59e6 | 77f6f49fbb71a51c77b169d2e902507e68e33615 | /1373_3.py | dba9b02d1759cf11ed2e6455eb67e65386094acd | [] | no_license | goodsosbva/BOJ_theothers | 1d39657b34f11cc940f0013011d83f5b3dfdc474 | cf3da17e488b50d3d7cff65d403ba9dc58b8170e | refs/heads/main | 2023-07-17T22:06:42.964302 | 2023-07-11T09:31:53 | 2023-07-11T09:31:53 | 334,390,930 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | print(oct(int(input(), 2))[2:]) # (int(input(), 2) input받은 값은 이진수 값이고, 이값을 십진수로 바꾸겠다는 뜻
# 십진수 값을 8진수로 바꾸고 출력 2번째부터 출력([2:])
| [
"noreply@github.com"
] | goodsosbva.noreply@github.com |
d126ba6aec22a916b3e5a0f5725922602063046c | cefa560ae84e4bcb7a8f2828357ffd3ba8a88b49 | /setup.py | 9549c17fef0d9acac16f246cb4c2586a92b74d8d | [] | no_license | paultag/python-jackpot | eb294240925e598873598c96edb65cb191863e67 | f4a832153a2eb71d2fed4e929b944093181a4d19 | refs/heads/master | 2021-04-03T09:07:54.113177 | 2018-03-18T15:59:06 | 2018-03-18T15:59:20 | 124,565,769 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
from jackpot import __version__
long_description = ''
setup(name='jackpot',
version=__version__,
packages=find_packages(),
description='',
long_description=long_description,
platforms=['any'],
install_requires=[
'jsonschema==2.6.0',
],
extras_require={},
)
| [
"tag@pault.ag"
] | tag@pault.ag |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.