blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77e3a994f5aac16a79d46f57d851b6e0b920b3ba | eaa68c471c333336a7facad1ecb42f97aeca74f5 | /backend/msm_gtfrd051101_dev_14630/urls.py | 3c48dcc5318541339f45d7dca954cd710ed6dfa9 | [] | no_license | crowdbotics-apps/msm-gtfrd051101-dev-14630 | d1ba14f914db0ba5eb55a27f2828fa172c0c2a3a | 9e1ef45aec6d690b8279aac71242e664cd4055d9 | refs/heads/master | 2023-01-04T19:29:24.277234 | 2020-11-05T05:49:43 | 2020-11-05T05:49:43 | 310,202,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | """msm_gtfrd051101_dev_14630 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "MSM-gtfrd051101"
admin.site.site_title = "MSM-gtfrd051101 Admin Portal"
admin.site.index_title = "MSM-gtfrd051101 Admin"
# swagger
api_info = openapi.Info(
title="MSM-gtfrd051101 API",
default_version="v1",
description="API documentation for MSM-gtfrd051101 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
92fad4e71acbdd48c5803a52796be580d18dab58 | 407e2e0448c92cb258c4b8c57f7b023efcbbd878 | /MachineLearning/DeepLearningNN_KerasTensorFlow/env4keras2/lib/python3.9/site-packages/qtpy/QtWebEngineWidgets.py | 69f40d9e067a6ce159fdb5a7f338eccaec47aa68 | [] | no_license | KPAdhikari/PythonStuff | f017aa8aa1ad26673263e7dc31761c46039df8c4 | bea3a58792270650b5df4da7367686e2a9a76dbf | refs/heads/master | 2022-10-19T05:04:40.454436 | 2022-02-28T06:02:11 | 2022-02-28T06:02:11 | 98,236,478 | 1 | 1 | null | 2022-09-30T18:59:18 | 2017-07-24T21:28:31 | Python | UTF-8 | Python | false | false | 1,846 | py | #
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtWebEngineWidgets classes and functions.
"""
from . import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
# To test if we are using WebEngine or WebKit
WEBENGINE = True
if PYQT5:
try:
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
# Based on the work at https://github.com/spyder-ide/qtpy/pull/203
from PyQt5.QtWebEngineWidgets import QWebEngineProfile
except ImportError:
from PyQt5.QtWebKitWidgets import QWebPage as QWebEnginePage
from PyQt5.QtWebKitWidgets import QWebView as QWebEngineView
from PyQt5.QtWebKit import QWebSettings as QWebEngineSettings
WEBENGINE = False
elif PYQT6:
from PyQt6.QtWebEngineWidgets import *
from PyQt6.QtWebEngineCore import QWebEnginePage
from PyQt6.QtWebEngineCore import QWebEngineSettings
from PyQt6.QtWebEngineCore import QWebEngineProfile
elif PYSIDE6:
from PySide6.QtWebEngineWidgets import *
from PySide6.QtWebEngineCore import QWebEnginePage
from PySide6.QtWebEngineCore import QWebEngineSettings
from PySide6.QtWebEngineCore import QWebEngineProfile
elif PYSIDE2:
from PySide2.QtWebEngineWidgets import QWebEnginePage
from PySide2.QtWebEngineWidgets import QWebEngineView
from PySide2.QtWebEngineWidgets import QWebEngineSettings
# Based on the work at https://github.com/spyder-ide/qtpy/pull/203
from PySide2.QtWebEngineWidgets import QWebEngineProfile
else:
raise PythonQtError('No Qt bindings could be found')
| [
"kpadhikari@MyMacs-MacBook-Air.local"
] | kpadhikari@MyMacs-MacBook-Air.local |
0e0887a54e76dea68a4c769949c2d9f6ddbe309c | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/runtime/drug_sensitivity_gdsc/gaussian_laplace_inversegaussian.py | f9bb4292ab74f6fba25423f0efab7ffe599636c1 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 905 | py | '''
Measure runtime on the GDSC drug sensitivity dataset, with the Gaussian + Laplace + IG model.
'''
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_laplace_inversegaussian import BMF_Gaussian_Laplace_IG
from BMF_Priors.data.drug_sensitivity.load_data import load_gdsc_ic50_integer
from BMF_Priors.experiments.runtime.runtime_experiment import measure_runtime
''' Run the experiment. '''
R, M = load_gdsc_ic50_integer()
model_class = BMF_Gaussian_Laplace_IG
values_K = [5, 10, 20, 50]
settings = {
'R': R,
'M': M,
'hyperparameters': { 'alpha':1., 'beta':1. },
'init': 'random',
'iterations': 100,
}
fout = './results/times_gaussian_laplace_ig.txt'
times_per_iteration = measure_runtime(values_K, model_class, settings, fout)
print zip(values_K, times_per_iteration) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
691326cf620d757d749fd0cea91b4c52d295a97c | 78d17c3a7332be85078b513eee02f7ae4f18b3db | /lintcode/best_time_to_buy_and_sell_stockIII.py | 98aa9d43dbefced47f890d948e90f8c6c0446edd | [] | no_license | yuhanlyu/coding-challenge | c28f6e26acedf41cef85519aea93e554b43c7e8e | 9ff860c38751f5f80dfb177aa0d1f250692c0500 | refs/heads/master | 2021-01-22T21:59:27.278815 | 2017-11-26T07:34:04 | 2017-11-26T07:34:04 | 85,498,747 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | class Solution:
"""
@param prices: Given an integer array
@return: Maximum profit
"""
def maxProfit(self, prices):
buy1, buy2, sell1, sell2 = -2 ** 32, -2 ** 32, 0, 0
for price in prices:
buy2, sell2 = max(buy2, sell1 - price), max(sell2, buy2 + price)
buy1, sell1 = max(buy1, - price), max(sell1, buy1 + price)
return sell2
| [
"yuhanlyu@gmail.com"
] | yuhanlyu@gmail.com |
f8ac9401d36296f4a1f0612d0736446bc3cdb4ca | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2849/60673/267341.py | eaf3ff20eb6312f521c762fbe44ab3b471ce01c9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | inp = int(input())
nums = input().split(" ")
res = -1
for i in range(inp):
nums[i]=int(nums[i])
allposs = []
for i in range(1,min(nums)):
res = i
for j in range(inp):
if(j%i!=0):
res = -1
break
if(res!=-1):
allposs.append(res)
print (max(allposs)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7643cfd178b387b8b06cff656e50bb6d918ebaaa | 471c3ad9912423763295c353c3dcbb7e2e74b818 | /seqmod/modules/encoder.py | d9a24cc8d3e8309c5359bdf3fba1972041ed9d17 | [] | no_license | mikekestemont/seqmod | a3bfcbf4c7418005e71cb5381c30a837cff88aec | 7d8c976a03836fcf347395c192987dba531c2144 | refs/heads/master | 2021-01-21T22:05:18.518306 | 2017-06-21T10:26:39 | 2017-06-21T10:26:39 | 95,155,787 | 0 | 0 | null | 2017-06-22T20:55:51 | 2017-06-22T20:55:51 | null | UTF-8 | Python | false | false | 3,168 | py |
import torch
import torch.nn as nn
from torch.autograd import Variable
import seqmod.utils as u
class Encoder(nn.Module):
"""
RNN Encoder that computes a sentence matrix representation
of the input using an RNN.
"""
def __init__(self, in_dim, hid_dim, num_layers, cell,
dropout=0.0, bidi=True):
self.cell = cell
self.num_layers = num_layers
self.num_dirs = 2 if bidi else 1
self.bidi = bidi
self.hid_dim = hid_dim // self.num_dirs
assert hid_dim % self.num_dirs == 0, \
"Hidden dimension must be even for BiRNNs"
super(Encoder, self).__init__()
self.rnn = getattr(nn, cell)(in_dim, self.hid_dim,
num_layers=self.num_layers,
dropout=dropout,
bidirectional=self.bidi)
def init_hidden_for(self, inp):
batch = inp.size(1)
size = (self.num_dirs * self.num_layers, batch, self.hid_dim)
h_0 = Variable(inp.data.new(*size).zero_(), requires_grad=False)
if self.cell.startswith('LSTM'):
c_0 = Variable(inp.data.new(*size).zero_(), requires_grad=False)
return h_0, c_0
else:
return h_0
def forward(self, inp, hidden=None, compute_mask=False, mask_symbol=None):
"""
Paremeters:
-----------
inp: torch.Tensor (seq_len x batch x emb_dim)
hidden: tuple (h_0, c_0)
h_0: ((num_layers * num_dirs) x batch x hid_dim)
n_0: ((num_layers * num_dirs) x batch x hid_dim)
Returns: output, (h_t, c_t)
--------
output: (seq_len x batch x hidden_size * num_directions)
h_t: (num_layers x batch x hidden_size * num_directions)
c_t: (num_layers x batch x hidden_size * num_directions)
"""
if compute_mask: # fixme, somehow not working
seqlen, batch, _ = inp.size()
outs, hidden = [], hidden or self.init_hidden_for(inp)
for inp_t in inp.chunk(seqlen):
out_t, hidden = self.rnn(inp_t, hidden)
mask_t = inp_t.data.squeeze(0).eq(mask_symbol).nonzero()
if mask_t.nelement() > 0:
mask_t = mask_t.squeeze(1)
if self.cell.startswith('LSTM'):
hidden[0].data.index_fill_(1, mask_t, 0)
hidden[1].data.index_fill_(1, mask_t, 0)
else:
hidden.data.index_fill_(1, mask_t, 0)
outs.append(out_t)
outs = torch.cat(outs)
else:
outs, hidden = self.rnn(inp, hidden or self.init_hidden_for(inp))
if self.bidi:
# BiRNN encoder outputs (num_layers * 2 x batch x hid_dim)
# but decoder expects (num_layers x batch x hid_dim * 2)
if self.cell.startswith('LSTM'):
hidden = (u.repackage_bidi(hidden[0]),
u.repackage_bidi(hidden[1]))
else:
hidden = u.repackage_bidi(hidden)
return outs, hidden
| [
"enrique.manjavacas@gmail.com"
] | enrique.manjavacas@gmail.com |
c72dcbc745508edb37b6bfd454e99223aa888a0d | 94a511b7b31858d383be63ee5a3c1d3272bb6bf3 | /week_2/2_12.py | c2c728ccca1175842f85ad3c14238b72a14c012f | [] | no_license | kolevatov/python_lessons | 90bb3c2139e23cfc0f25f993c4ee636737e7daa8 | ff9290d87a5bdc20ddfb7109015ddb48429a4dd8 | refs/heads/master | 2021-01-13T01:18:15.997795 | 2018-03-06T15:03:03 | 2018-03-06T15:03:03 | 81,450,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | # Шашки
# На доске стоит белая шашка. Требуется определить,
# может ли она попасть в заданную клетку, делая ходы по правилам
# (не превращаясь в дамку). Белые шашки могут ходить по черным клеткам
# по диагонали вверх-влево или вверх-вправо. Ходов может быть несколько!
# Формат ввода
# Вводится клетка, где стоит шашка, а затем клетка, куда шашка должна попасть
# Каждая клетка описывается номером вертикали, а затем номером горизонтали.
# Формат вывода
# Выведите слово YES (заглавными буквами), если шашка может попасть из
# начальной клетки в указанную, и NO в противном случае.
# Примечания
# Доска имеет размер 8x8, вертикали и горизонтали нумеруются числами от 1 до 8
# начиная с левого нижнего угла. Исходная и конечная клетки не совпадают.
X1 = int(input())
Y1 = int(input())
X2 = int(input())
Y2 = int(input())
blackX = ((X1 + Y1) % 2 == 0) and ((X2 + Y2) % 2 == 0)
blackY = (((X1 + Y1) % 2 != 0) and ((X2 + Y2) % 2 != 0))
if blackX or blackY:
if (Y2 > Y1):
if (Y2 - Y1) >= (X2 - X1):
print('YES')
else:
print('NO')
else:
print('NO')
else:
print('NO')
| [
"kolevatov@bpcbt.com"
] | kolevatov@bpcbt.com |
396f618507110469fb3700af03b351713222ae05 | dded9fb6567928952a283fc1c6db6a5a860bc1a6 | /nerodia/elements/d_list.py | a4897b8ded9bf4603dc2794d59fdbd088c35962b | [
"MIT"
] | permissive | watir/nerodia | 08b84aca4b72eae37e983006c15b824412335553 | 7e020f115b324ad62fe7800f3e1ec9cc8b25fcfe | refs/heads/master | 2023-04-15T20:02:34.833489 | 2023-04-06T23:46:14 | 2023-04-06T23:46:14 | 87,383,565 | 88 | 14 | MIT | 2023-04-06T23:42:29 | 2017-04-06T03:43:47 | Python | UTF-8 | Python | false | false | 311 | py | import six
from .html_elements import HTMLElement
from ..meta_elements import MetaHTMLElement
@six.add_metaclass(MetaHTMLElement)
class DList(HTMLElement):
def to_dict(self):
keys = [e.text for e in self.dts()]
values = [e.text for e in self.dds()]
return dict(zip(keys, values))
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
0a01821ba0aab521ea8241cf8c72d240e40870cf | 4a90ed83fce4632d47f7eb2997eb742d0230c7e2 | /tests/brython_jinja2/test_context.py | 7ccd7b92e76b4fef6102670685bbc548234ba4ae | [
"BSD-3-Clause"
] | permissive | jonathanverner/brython-jinja2 | 943b0eecc435ee5551ee464e3134164aad7aef27 | cec6e16de1750203a858d0acf590f230fc3bf848 | refs/heads/master | 2022-12-14T15:04:11.668530 | 2017-09-29T02:18:08 | 2017-09-29T02:18:08 | 100,823,068 | 2 | 0 | BSD-3-Clause | 2021-06-01T21:48:13 | 2017-08-19T21:47:17 | Python | UTF-8 | Python | false | false | 875 | py | import asyncio
import pytest
from brython_jinja2.context import Context
def test_extension():
base = Context()
base.a = 10
base.c = 30
child = Context(base=base)
# Child should have access to parent
assert child.a == 10
# The _get method should work for accessing parent
assert child._get('a') == 10
# Child should not be allowed to modify parent
child.a = 20
assert child.a == 20
assert base.a == 10
# Attributes should propagate recursively
second_child = Context(base=child)
assert second_child.c == 30
assert second_child.a == 20
def test_future(event_loop):
asyncio.set_event_loop(event_loop)
ctx = Context()
fut = asyncio.async(asyncio.sleep(0.1, result=3))
ctx.test = fut
assert hasattr(ctx, 'test') is False
event_loop.run_until_complete(fut)
assert ctx.test == 3
| [
"jonathan.verner@matfyz.cz"
] | jonathan.verner@matfyz.cz |
3887e47d266aa37eeacf4a8d0fe2ecb63c05ffc8 | 342a3af41306cf607eb49bde49348926c6dcd73b | /Packages/Dead/demo/Lib/geoparse.py | d9d4d2a17606539c34e5ed8e9a9790b3b39a6a6c | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | CDAT/cdat | 17e5de69a0d9d3bb5b79aaaecb4198ae9d92ed47 | 5133560c0c049b5c93ee321ba0af494253b44f91 | refs/heads/master | 2022-06-05T02:41:12.155720 | 2022-05-18T22:31:18 | 2022-05-18T22:31:18 | 6,660,536 | 72 | 17 | NOASSERTION | 2022-05-18T22:31:19 | 2012-11-12T20:58:18 | Fortran | UTF-8 | Python | false | false | 1,158 | py | #############################################################################
# File: geoparse.py #
# Author: Velimir Mlaker, mlaker1@llnl.gov #
# Date: 05-Aug-2005 #
# Desc: Parsers for geometry string. #
# KNOWN BUG: Only works on form wxh+x+y, i.e. with '+' for x and #
# y coords. It will fail if using '-' for x and y. #
#############################################################################
# Extract width from the geometry string.
def get_w (geo):
return geo [:geo.find('x')]
# Extract height from the geometry string.
def get_h (geo):
return geo [geo.find('x')+1 : geo.find('+')]
# Extract X and Y from the geometry string.
def get_xy (geo):
return geo [geo.find('+')+1 : len(geo)]
# Extract X from the geometry string.
def get_x (geo):
xy = get_xy(geo)
return xy [:xy.find('+')]
# Extract Y from the geometry string.
def get_y (geo):
xy = get_xy(geo)
return xy [xy.find('+')+1 : len(xy)]
| [
"doutriaux1@meryem.llnl.gov"
] | doutriaux1@meryem.llnl.gov |
aeaf592f72b9d0bb705e1c4cdd9ed1b97ee88ac1 | 20810657fed82d4fce65487a83e8b057da6dd794 | /python1/validate_input.py | 130007da5b8ee27bb6ceb9d662bc17c7f8aff8f4 | [] | no_license | jmwoloso/Python_1 | a2ddc7e2a3e9caeefe21c90c618c0c91871239b7 | ce49a142f96bca889684261f997c6ac667bd15ce | refs/heads/master | 2016-09-05T11:13:24.296676 | 2015-06-08T17:35:51 | 2015-06-08T17:35:51 | 37,082,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #!/usr/local/bin/python3
"""Validate user input."""
while True:
s = input("Type 'yes' or 'no': ")
if s == 'yes':
break
if s == 'no':
break
print("Wrong! Try again.")
print(s) | [
"jmwoloso@asu.edu"
] | jmwoloso@asu.edu |
de53fed39a76f4a71654451bf454b8401756b843 | 5a0f6aeb6147115a8318d5f517cc62f01cfd3f1c | /python/example_publish.py | c621fbd7d9fe24deb9f6f8ab9f14f6b5e2950d00 | [
"MIT"
] | permissive | magictimelapse/mqtt-iota | 2ec61b31281af91dd9f18b9f764d4c5b8789c0a5 | d5c06c4193ca1519c110856c1967dfea01ed9050 | refs/heads/master | 2020-04-14T11:50:53.276082 | 2019-01-02T18:48:42 | 2019-01-02T18:48:42 | 163,824,361 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python
import paho.mqtt.client as paho
import json
broker = 'localhost'
port = 1883
client = paho.Client()
client.connect(broker,port)
data = {'temperature':22, 'humidity': 15}
# stringify the json data:
stringified_data = json.dumps(data, separators=(',',':'))
ret = client.publish('sensors/data',stringified_data)
| [
"michael.rissi@gmail.com"
] | michael.rissi@gmail.com |
2c118bb2cfb4390ce34d88766238155d957b2817 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_8/kpprya001/question2.py | 35d0a565502a114e8a6ac7400df28b01ba4b664a | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | word = input("Enter a message:\n")
string1 = word
pairs = 0
def count_characters(string1,pairs):
if(string1[1:2]!=""):
if(string1[0:1]==string1[1:2]):
pairs += 1
return count_characters(string1[2:],pairs)
else:
return count_characters(string1[2:],pairs)
return pairs
print("Number of pairs:",count_characters(string1,pairs))
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
870d8432b7bd7ef532bd4460e8619c7b327af57e | ff441ab720623b522ba522d0fbd6232215a1dc85 | /src/CreateFakeDataForTesting.py | 266512ed534858b229c871fdc88ad1b49d1f56fa | [] | no_license | LeeHuangChen/2017_12_21_ADDA_ExecutionWrapper_versionControlled | 0872dbb76598b4fb0695fcd8a27a12566fd7c96f | 04e6117de3164b12815d195831d6e591d321038f | refs/heads/master | 2021-09-06T22:53:25.693378 | 2018-02-12T23:52:42 | 2018-02-12T23:52:42 | 115,046,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import os, random
def random_AA_seq(length):
seq = ''.join(random.choice('ACDEFGHIKLMNPQRSTVWY') for i in range(length))
# return "TaxonA "+seq
return seq
def appendFile(filepath, message):
with open(filepath, "a") as f:
f.write(message)
def toFastaSeq(name, seq, taxa):
header = ">" + name + " [" + taxa + "]\n"
return header + seq + "\n\n"
CreateFilename = "Test30AA.fasta"
CreateFolder = "Sequences/"
filepath = os.path.join(CreateFolder, CreateFilename)
proteinLength = 30
ABFuseCount = 1
BAFuseCount = 1
ACount = 20
BCount = 20
A = random_AA_seq(proteinLength)
B = random_AA_seq(proteinLength)
open(filepath, "w")
length = len(A)
mid = length / 2
for i in range(ABFuseCount):
appendFile(filepath, toFastaSeq("AB" + str(i + 1), A[0:mid] + B[mid:length], "test taxa"))
for i in range(BAFuseCount):
appendFile(filepath, toFastaSeq("BA" + str(i + 1), B[0:mid] + A[mid:length], "test taxa"))
for i in range(ACount):
appendFile(filepath, toFastaSeq("A" + str(i + 1), A, "test taxa"))
for i in range(BCount):
appendFile(filepath, toFastaSeq("B" + str(i + 1), B, "test taxa"))
| [
"lhc1@rice.edu"
] | lhc1@rice.edu |
d6fc4824750ae5886e59f819e7cd36f2ad6e2073 | 714f0c4a37771b98de3cb817c1950fd08b18a8eb | /WebFrame/WebFrame.py | 9eb045e30a633f1ff641dfabdb4ea0efbb120a55 | [] | no_license | haoen110/http-server | 8543fd6e3cebc63f1390468e44b032c3b0f493fd | 809dcbeed4d2cd10c0f91a5c9b247984e1d28625 | refs/heads/master | 2020-09-14T14:58:17.213936 | 2019-11-21T11:49:37 | 2019-11-21T11:49:37 | 223,162,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | # coding = utf - 8
from socket import *
import time
from setting import *
from urls import *
from views import *
class Application(object):
def __init__(self):
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sockfd.bind(frame_addr)
def start(self):
self.sockfd.listen(5)
while True:
connfd, addr = self.sockfd.accept()
# 接收请求方法
method = connfd.recv(128).decode()
# 接收请求内容
path = connfd.recv(128).decode()
if method == "GET":
if path == '/' or path[-5:] == '.html':
status, response_body = self.get_html(path)
else:
status, response_body = self.get_data(path)
# 将结果给HttpServer
connfd.send(status.encode())
time.sleep(0.05)
connfd.send(response_body.encode())
elif method == "POST":
pass
def get_html(self, path):
if path == '/': # 主页
get_file = STATIC_DIR + '/index.html'
else:
get_file = STATIC_DIR + path
try:
f = open(get_file)
except IOError:
response = ('404', '===Sorry not found the page===')
else:
response = ('200', f.read())
finally:
return response
def get_data(self, path):
for url, handler in urls:
if path == url:
response_body = handler()
return "200", response_body
return "404","Not Found data"
if __name__ == "__main__":
app = Application()
app.start() # 启动框架等待request | [
"haoenwei@outlook.com"
] | haoenwei@outlook.com |
c7ce27bd37e52a5d67cf6f140e15d898a4b4a87f | cadfcc0879aa94cc0a5b4b4993bf9bcbddbf424d | /is_livraison_16831/urls.py | 40ce640e380c9119a0a6eeed1b119514dd4a30de | [] | no_license | crowdbotics-apps/is-livraison-16831 | 72ab3f68a4cd0a10c7ad2bf4d3e22f07168dfd90 | 5105eda2fcd9ae1ab27cccbc3aa4b547dc9cb839 | refs/heads/master | 2023-05-29T11:31:43.194218 | 2020-05-10T08:23:05 | 2020-05-10T08:23:05 | 262,741,914 | 0 | 0 | null | 2021-06-12T03:02:34 | 2020-05-10T08:19:28 | Python | UTF-8 | Python | false | false | 2,430 | py | """is_livraison_16831 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("delivery_order.api.v1.urls")),
path("delivery_order/", include("delivery_order.urls")),
path("api/v1/", include("driver.api.v1.urls")),
path("driver/", include("driver.urls")),
path("home/", include("home.urls")),
path("api/v1/", include("menu.api.v1.urls")),
path("menu/", include("menu.urls")),
path("api/v1/", include("delivery_user_profile.api.v1.urls")),
path("delivery_user_profile/", include("delivery_user_profile.urls")),
]
admin.site.site_header = "IS Livraison"
admin.site.site_title = "IS Livraison Admin Portal"
admin.site.index_title = "IS Livraison Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="IS Livraison API",
default_version="v1",
description="API documentation for IS Livraison App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a309fd2997f541b938f725a4a3034e26bdaf4f12 | 5bc589ae06cd11da557bb84553fcb10330abf980 | /warunek.py | 6506a250c40625aa10f344b3f7863c5ab714fda7 | [] | no_license | keinam53/Zrozumiec_Programowanie | b06d8260a4f227f521d0b762b9cbfb0ad8a17989 | 5da9fc9eaaedd6962b225517c4c02297ae18c800 | refs/heads/master | 2023-06-04T17:31:10.114679 | 2021-07-03T19:23:39 | 2021-07-03T19:23:39 | 358,726,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | # jablka = 3
# banany = 4.5
# gruszki = 3
# print(f"Czy jabłka są droższe od bananów?\t\t\t\t {jablka>banany}")
# print(f"czy gruszki mają taką samą cenę jak banany?\t\t{gruszki==banany}")
#
# result = jablka==banany
# print(type(result))
# name = "Mariusz"
# result = name =="Mariusz"
# print(result)
# name = input("Jak masz na imię? ")
# porownanie = name == "Mariusz"
# print(f"Twoje imię to Mariusz?\t\t\t {porownanie}")
# shopping_list = ["Mąka","Jogurt"]
# my_list = ["Czekolada","Marchewka","chleb"]
# print(f"{shopping_list} > {my_list} -> {shopping_list>my_list}")
# zad1
# ceny = []
# price = float(input("Podaj cenę pierwszego produktu "))
# ceny.append(price)
# price = float(input("Podaj cenę drugiego produktu "))
# ceny.append(price)
# price = float(input("Podaj cenę trzeciego produktu "))
# ceny.append(price)
#
# print("Porównanie cen")
# print(f"Produkt 1 jest droższy do 2? {ceny[0]>ceny[1]}")
# print(f"Produkt 3 jest droższy do 1? {ceny[2]>ceny[0]}")
# print(f"Produkt 2 jest droższy do 3? {ceny[1]>ceny[2]}")
# zad2
# shopping_elements = input("Podaj listę zakupów rozdzielając elementy przecinkami ")
# shopping_list = shopping_elements.split(",")
# long = len(shopping_list) > 4
# print(f"Czy uważam, że lista zakupów jest długa?\t {long}")
#zad3
print("Kalkulator oprocentowania")
wartosc_pocz = float(input("Jaką wartość wpłaciłeś? "))
oprocentowanie = float(input("Jakie jest oprocentowanie? "))
czas = float(input("Ile lat trwa lokata? "))
wartosc_koncowa = wartosc_pocz * (1+oprocentowanie/100)**czas
zysk = wartosc_koncowa - wartosc_pocz
zysl_w_proc = (wartosc_koncowa/wartosc_pocz)*100
print(f"Zysk na lokacie wyniesie {zysk} zł")
print(f"Czy zysk na lokacie będzie większy niż 10%\t{zysl_w_proc >= 10}")
| [
"mariusz.baran536@gmail.com"
] | mariusz.baran536@gmail.com |
f07a987ce8d79b004f67e63ac870971f4bc7b1b7 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part001088.py | d44bfe80ca43817a7cad73759d1a8d2507d6d110 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher87108(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
]),
2: (2, Multiset({2: 1}), [
(VariableWithCount('i3.1.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher87108._instance is None:
CommutativeMatcher87108._instance = CommutativeMatcher87108()
return CommutativeMatcher87108._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 87107
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i3.1.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 87109
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp2 = subjects.popleft()
subjects3 = deque(tmp2._args)
# State 87110
if len(subjects3) >= 1:
tmp4 = subjects3.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i3.1.2.1.1', tmp4)
except ValueError:
pass
else:
pass
# State 87111
if len(subjects3) >= 1:
tmp6 = subjects3.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i3.1.2.1.2', tmp6)
except ValueError:
pass
else:
pass
# State 87112
if len(subjects3) == 0:
pass
# State 87113
if len(subjects) == 0:
pass
# 0: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f5) and (cons_f1575)
yield 0, subst3
# 1: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f4) and (cons_f5) and (cons_f1497)
yield 1, subst3
# 2: d*x**n /; (cons_f29) and (cons_f4) and (cons_f70) and (cons_f71)
yield 2, subst3
subjects3.appendleft(tmp6)
subjects3.appendleft(tmp4)
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp8 = subjects.popleft()
associative1 = tmp8
associative_type1 = type(tmp8)
subjects9 = deque(tmp8._args)
matcher = CommutativeMatcher87115.get()
tmp10 = subjects9
subjects9 = []
for s in tmp10:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp10, subst0):
pass
if pattern_index == 0:
pass
# State 87120
if len(subjects) == 0:
pass
# 0: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f5) and (cons_f1575)
yield 0, subst1
# 1: d*x**n /; (cons_f2) and (cons_f3) and (cons_f8) and (cons_f29) and (cons_f4) and (cons_f5) and (cons_f1497)
yield 1, subst1
# 2: d*x**n /; (cons_f29) and (cons_f4) and (cons_f70) and (cons_f71)
yield 2, subst1
subjects.appendleft(tmp8)
return
yield
from .generated_part001089 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
8eadc0f79a7b8575a34f645f947de6a744f43919 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/distributions/chi2.py | ff789c17370fb76cf21ee26e22e1025d3ff63a63 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 759 | py | from torch.distributions import constraints
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
    r"""Chi-squared distribution with shape parameter `df`.

    Implemented as the special case ``Gamma(alpha=0.5 * df, beta=0.5)``.

    Example::

        >>> m = Chi2(torch.tensor([1.0]))
        >>> m.sample()  # Chi2 distributed with shape df=1
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        df (float or Tensor): shape parameter of the distribution
    """
    arg_constraints = {'df': constraints.positive}

    def __init__(self, df, validate_args=None):
        # Chi2(df) is exactly Gamma(concentration=df / 2, rate=1 / 2).
        half = 0.5
        super(Chi2, self).__init__(half * df, half, validate_args=validate_args)

    @property
    def df(self):
        # Invert the Gamma parameterisation: df = 2 * concentration.
        return 2 * self.concentration
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw |
7bfed9146727f6bf33c206145d54f8f7e691afc7 | cf457dacc75ade598d52a4cfd58c2120192da84c | /Python1808/第一阶段/打飞机/game01/04-动画效果.py | ff0c9a113d67985f54c3802c8f4eb93e0598e79f | [] | no_license | LYblogs/python | b62608d73eb0a5a19306cabd4fd5706806fd557b | 1ee0bcecc3a78c6d2b191600872a1177e9c8df60 | refs/heads/master | 2020-04-12T10:12:01.478382 | 2018-12-19T10:59:47 | 2018-12-19T10:59:47 | 162,422,214 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | """__author__ = 余婷"""
"""
动画原理:不断的刷新界面上的内容(一帧一帧的画)
"""
import pygame
from random import randint
def static_page(screen):
    """Draw the static (unchanging) content of the page."""
    # Static text rendered once in black.
    label_font = pygame.font.SysFont('Times', 40)
    surface = label_font.render('Welcome', True, (0, 0, 0))
    screen.blit(surface, (200, 200))
def animation_title(screen):
    """Render the title in a fresh random colour (flashing-text effect)."""
    label_font = pygame.font.SysFont('Times', 40)
    # Three randint calls, in r/g/b order, exactly as before.
    colour = (randint(0, 255), randint(0, 255), randint(0, 255))
    screen.blit(label_font.render('Python', True, colour), (100, 100))
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((600, 400))
    screen.fill((255, 255, 255))
    static_page(screen)
    pygame.display.flip()
    while True:
        # The body of this `for` only runs when an event has occurred.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
        # Everything below draws the content of one animation frame.
        """程序执行到这个位置,cup休息一段时间再执行后面的代码(线程在这儿阻塞指定的时间)
        单位:毫秒 (1000ms == 1s)
        """
        # (Above, kept verbatim: the thread sleeps here for the given time; unit is ms.)
        pygame.time.delay(60)
        # Clear the previous frame before drawing the next one.
        screen.fill((255, 255, 255))
        static_page(screen)
        animation_title(screen)
        # After drawing, push the finished frame to the display.
        pygame.display.update()
| [
"2271032145@qq.com"
] | 2271032145@qq.com |
8a19f905ec49cb35ed33920e86912dccf5e9c127 | f2a2f41641eb56a17009294ff100dc9b39cb774b | /old_session/session_1/_188/_188_best_time_to_buy_and_sell_stock_4.py | e9c2d60469b92cb5e4467a4d0984402cb459fd56 | [] | no_license | YJL33/LeetCode | 0e837a419d11d44239d1a692140a1468f6a7d9bf | b4da922c4e8406c486760639b71e3ec50283ca43 | refs/heads/master | 2022-08-13T01:46:14.976758 | 2022-07-24T03:59:52 | 2022-07-24T04:11:32 | 52,939,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | """
188. Best Time to Buy and Sell Stock IV
Total Accepted: 35261
Total Submissions: 150017
Difficulty: Hard
Contributors: Admin
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time
(ie, you must sell the stock before you buy again).
"""
class Solution(object):
    def maxProfit(self, k, prices):
        """Return the maximum profit from at most `k` buy/sell transactions.

        You must sell before buying again.

        :type k: int
        :type prices: List[int]
        :rtype: int
        """
        # dp[i][j]: max profit until day j, using at most i transactions
        # dp[i][j] = max(dp[i][j-1], price[j] - price[jj] + dp[i-1][jj]) over jj < j
        # dp[0][j] = dp[i][0] = 0
        n = len(prices)
        if n <= 1:
            return 0
        # With n prices there can be at most n // 2 disjoint profitable
        # transactions, so for k >= n // 2 the limit never binds and the
        # greedy "collect every rise" sum is optimal.
        # (Fixed: original used `xrange` and float `n/2`, both Python-2-only.)
        if k >= n // 2:
            return sum(max(0, prices[i] - prices[i - 1]) for i in range(1, n))
        dp = [[0] * n for _ in range(k + 1)]
        for i in range(1, k + 1):
            # local_max tracks max(dp[i-1][jj] - prices[jj]) over buy days jj < j.
            local_max = dp[i - 1][0] - prices[0]
            for j in range(1, n):
                dp[i][j] = max(dp[i][j - 1], prices[j] + local_max)
                local_max = max(local_max, dp[i - 1][j] - prices[j])
        return dp[k][n - 1]
| [
"yunjun.l33@gmail.com"
] | yunjun.l33@gmail.com |
39f7c7acf75f6b9a74c34de56a6e06a69f0ccd96 | 93e9bbcdd981a6ec08644e76ee914e42709579af | /backtracking/526_Beautiful_Arrangement.py | 44a4d02c074bc1b7a25249856c55e2ac84376c9c | [] | no_license | vsdrun/lc_public | 57aa418a8349629494782f1a009c1a8751ffe81d | 6350568d16b0f8c49a020f055bb6d72e2705ea56 | refs/heads/master | 2020-05-31T11:23:28.448602 | 2019-10-02T21:00:57 | 2019-10-02T21:00:57 | 190,259,739 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/beautiful-arrangement/description/
Suppose you have N integers from 1 to N.
We define a beautiful arrangement as an array that is constructed by
these N numbers successfully if one of the following is true for
the ith position (1 <= i <= N) in this array:
The number at the ith position is divisible by i.
OR
i is divisible by the number at the ith position.
Now given N, how many beautiful arrangements can you construct?
Example 1:
Input: 2
Output: 2
Explanation:
The first beautiful arrangement is [1, 2]:
Number at the 1st position (i=1) is 1, and 1 is divisible by i (i=1).
Number at the 2nd position (i=2) is 2, and 2 is divisible by i (i=2).
The second beautiful arrangement is [2, 1]:
Number at the 1st position (i=1) is 2, and 2 is divisible by i (i=1).
Number at the 2nd position (i=2) is 1, and i (i=2) is divisible by 1.
Note:
N is a positive integer and will not exceed 15.
"""
cache = {}
class Solution(object):
    def countArrangement(self, N):
        """Count beautiful arrangements of the integers 1..N.

        An arrangement is beautiful when, for every 1-based position i,
        either the number at i is divisible by i or i is divisible by it.

        Replaces the old module-level ``cache`` dict (which grew without
        bound across calls) with a per-call ``lru_cache``.

        :type N: int
        :rtype: int
        """
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def helper(i, remaining):
            # Fill positions from i down to 1; `remaining` is the tuple of
            # numbers not yet placed.
            if i == 1:
                # Position 1 accepts any number, since x % 1 == 0.
                return 1
            total = 0
            for j, x in enumerate(remaining):
                if x % i == 0 or i % x == 0:
                    total += helper(i - 1, remaining[:j] + remaining[j + 1:])
            return total

        # Positions are processed from the back (i = N) towards the front.
        return helper(N, tuple(range(1, N + 1)))
def build_input():
    """Return the sample input (N = 2) used by the demo entry point."""
    return 2
if __name__ == "__main__":
    b = build_input()
    s = Solution()
    result = s.countArrangement(b)
    # For N == 2 this prints 2 (the arrangements [1, 2] and [2, 1]).
    print(result)
| [
"vsdmars@gmail.com"
] | vsdmars@gmail.com |
a9ba0366d74eb0694176c79d13f83932130643ee | 3a1fea0fdd27baa6b63941f71b29eb04061678c6 | /src/ch10/rtda/heap/ConstantPool.py | a3c1c48aa318b84890d2ffb6c8daf0408f4390cf | [] | no_license | sumerzhang/JVMByPython | 56a7a896e43b7a5020559c0740ebe61d608a9f2a | 1554cf62f47a2c6eb10fe09c7216518416bb65bc | refs/heads/master | 2022-12-02T17:21:11.020486 | 2020-08-18T06:57:10 | 2020-08-18T06:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: ConstantPool.py
@time: 2019/9/16 17:25
@desc: 运行时常量池,主要存放两类信息:字面量和符号引用,字面量包括整数、浮点数和字符串字面量;
符号引用包括类符号引用、字段符号引用、方法符号引用和接口符号引用。
"""
class ConstantPool:
    """Run-time constant pool built from a class file's constant pool."""

    def __init__(self, clazz, consts):
        # The class owning this pool, and the resolved constant slots.
        self._class = clazz
        self.consts = consts

    # Class-body import: inside this class body the name `ConstantPool`
    # refers to the *class-file* pool (used by the annotation below), while
    # inside method bodies at run time it still resolves to this run-time
    # class via the module globals.
    from ch10.classfile.ConstantPool import ConstantPool

    # Convert the class file's constant pool into a run-time constant pool.
    @staticmethod
    def new_constant_pool(clazz, cfConstantPool: ConstantPool):
        from ch10.classfile.CpNumeric import ConstantDoubleInfo, ConstantLongInfo, ConstantFloatInfo, ConstantIntegerInfo
        from ch10.classfile.ConstantStringInfo import ConstantStringInfo
        from ch10.classfile.ConstantClassInfo import ConstantClassInfo
        from ch10.classfile.ConstantMemberRefInfo import ConstantFieldRefInfo, ConstantMethodRefInfo, \
            ConstantInterfaceMethodRefInfo
        from ch10.rtda.heap.CpClassRef import ClassRef
        from ch10.rtda.heap.CpFieldRef import FieldRef
        from ch10.rtda.heap.CpMethodRef import MethodRef
        from ch10.rtda.heap.CpInterfaceMethodRef import InterfaceMethodRef

        cp_count = len(cfConstantPool.cp)
        consts = [None for _ in range(cp_count)]
        # `consts` is shared: entries filled below are visible through the pool.
        rt_constant_pool = ConstantPool(clazz, consts)
        # Slot 0 is unused by the class-file format, hence range starts at 1.
        for i in range(1, cp_count):
            cp_info = cfConstantPool.cp[i]
            # Literals are stored directly; symbolic references are wrapped
            # in Ref objects that resolve lazily against this pool.
            if isinstance(cp_info, ConstantIntegerInfo):
                consts[i] = cp_info.val
            elif isinstance(cp_info, ConstantFloatInfo):
                consts[i] = cp_info.val
            elif isinstance(cp_info, ConstantLongInfo):
                consts[i] = cp_info.val
            elif isinstance(cp_info, ConstantDoubleInfo):
                consts[i] = cp_info.val
            elif isinstance(cp_info, ConstantStringInfo):
                consts[i] = str(cp_info)
            elif isinstance(cp_info, ConstantClassInfo):
                consts[i] = ClassRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantFieldRefInfo):
                consts[i] = FieldRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantMethodRefInfo):
                consts[i] = MethodRef(rt_constant_pool, cp_info)
            elif isinstance(cp_info, ConstantInterfaceMethodRefInfo):
                consts[i] = InterfaceMethodRef(rt_constant_pool, cp_info)
        # rt_constant_pool.consts = consts
        return rt_constant_pool

    def get_class(self):
        return self._class

    # Return the constant stored at `index`; raise if the slot is empty.
    def get_constant(self, index):
        c = self.consts[index]
        if c is not None:
            return c
        else:
            raise RuntimeError("No constants at index {0}".format(index))
| [
"huruifeng1202@163.com"
] | huruifeng1202@163.com |
6d13df9f1490f790caa07de014986fd9c92569f8 | d0e953d791920b508104d5f3ca298eab2f6e7bea | /面向对象编程/test5.py | 4ec9b113ff3b5cd1eaafc7fdb07d9173c3233d98 | [] | no_license | JKFjkf/Practise | 97ebabc376e0929f50fd542d0ede77739e3f9088 | 3371d5cc878bdb64f645311f2eb097f59c492c3c | refs/heads/master | 2023-07-03T21:28:06.873370 | 2021-08-11T12:19:39 | 2021-08-11T12:19:39 | 394,978,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | class Father():
def __init__(self):
self.money = 0
def action(self):
print("调用了父类方法")
class Son(Father):
    """Subclass that overrides both ``__init__`` and ``action``."""

    def __init__(self):
        # NOTE: deliberately does not call super().__init__(); it rebinds money.
        self.money = 1000

    def action(self):
        # Prints "the subclass overrides the parent-class method".
        print("子类重写父类方法")
son = Son()  # Son inherits all of Father's attributes and methods
son.action()  # resolves to Son.action (the override), not Father.action
print(son.money)  # 1000 -- Son's own value, not Father's 0
"1920578919@qq.com"
] | 1920578919@qq.com |
db76ae5a9d42054ec2eb762f17657fd4835fe398 | b019cb48889c67b1818605154e757dfeba626cf5 | /Lecon_un/06_02_1.py | d4a889965401a77a1e6a8055206be5a82ad17a3c | [] | no_license | citroen8897/Python_2 | 0ee5d46501bb8b42fe2ed686fbffc98843c8f046 | 035a6384e857221eca0f9d88fb3758313998d5f9 | refs/heads/main | 2023-03-02T20:47:42.150668 | 2021-02-14T23:03:48 | 2021-02-14T23:03:48 | 336,619,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | class A:
def __init__(self, name, years):
self.name = name
self.years = years
print('Hello class A!')
def test(self):
print(f'Hello {self.name}\nYou are {self.years} years...')
def test_2(self):
self.name = input('Введите имя: ')
self.years = input('Ваш возраст: ')
self.test()
def test_3(self):
print(self.surname)
t_1 = A('Ivan', 25)
t_1.test()
t_1.test_2()  # re-reads name/age from stdin, then greets again
print(t_1.name)
t_1.name = 'Vasya'  # attributes can be rebound from outside the class
print(t_1.name)
t_1.surname = 'Jackson'  # new attributes can be attached to the instance
print(t_1.surname)
t_1.test_3()  # works only because `surname` was just assigned above
| [
"citroen8897@gmail.com"
] | citroen8897@gmail.com |
98dead09577ada0f018d2403a2bcaa0dac33096a | c0c3f303ff6407f055bb24b4d13e3a4a3e796988 | /peachpy/encoder.py | 5353c1f42a4182be6dee5a674f49bd91de441ec4 | [
"BSD-2-Clause"
] | permissive | lemire/PeachPy | d610700476c9ff805fa5dd0d3554b21ecbfef012 | 650a4d866bd67d007371effdc7c096788f0acf20 | refs/heads/master | 2023-08-18T23:50:35.026362 | 2015-09-17T05:24:18 | 2015-09-17T05:24:18 | 42,653,254 | 1 | 1 | null | 2015-09-17T11:49:26 | 2015-09-17T11:49:26 | null | UTF-8 | Python | false | false | 7,810 | py | # This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.abi import Endianness
class Encoder:
    """Encodes integers and strings into ``bytearray`` form.

    The ``*le``/``*be`` static methods encode with an explicit endianness.
    An instance binds ``int16``/``uint16``/... to the endianness chosen at
    construction time and, when ``bitness`` is given, also provides
    pointer-width ``signed_offset``/``unsigned_offset`` encoders.
    """

    def __init__(self, endianness, bitness=None):
        """Create an encoder for the given endianness.

        :param endianness: ``Endianness.Little`` or ``Endianness.Big``.
        :param bitness: pointer width (32 or 64) for the offset encoders,
            or ``None`` if offsets are not needed.
        """
        assert endianness in {Endianness.Little, Endianness.Big}
        # Rebind the placeholder instance methods to the matching static
        # encoders so callers can use self.int16(...) etc. uniformly.
        if endianness == Endianness.Little:
            self.int16 = Encoder.int16le
            self.uint16 = Encoder.uint16le
            self.int32 = Encoder.int32le
            self.uint32 = Encoder.uint32le
            self.int64 = Encoder.int64le
            self.uint64 = Encoder.uint64le
        else:
            self.int16 = Encoder.int16be
            self.uint16 = Encoder.uint16be
            self.int32 = Encoder.int32be
            self.uint32 = Encoder.uint32be
            self.int64 = Encoder.int64be
            self.uint64 = Encoder.uint64be
        self.bitness = bitness
        if bitness is not None:
            assert bitness in {32, 64}, "Only 32-bit and 64-bit encoders are supported"
            if bitness == 32:
                self.signed_offset = self.int32
                self.unsigned_offset = self.uint32
            else:
                self.signed_offset = self.int64
                self.unsigned_offset = self.uint64

    @staticmethod
    def int8(n):
        """Converts signed 8-bit integer to bytearray representation"""
        assert -128 <= n <= 127, "%u can not be represented as an 8-bit signed integer" % n
        return bytearray([n & 0xFF])

    @staticmethod
    def uint8(n):
        """Converts unsigned 8-bit integer to bytearray representation"""
        assert 0 <= n <= 255, "%u can not be represented as an 8-bit unsigned integer" % n
        return bytearray([n])

    @staticmethod
    def int16le(n):
        """Converts signed 16-bit integer to little-endian bytearray representation"""
        assert -32768 <= n <= 32767, "%u can not be represented as a 16-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF])

    @staticmethod
    def int16be(n):
        """Converts signed 16-bit integer to big-endian bytearray representation"""
        assert -32768 <= n <= 32767, "%u can not be represented as a 16-bit signed integer" % n
        # Fixed: the high byte must be masked too -- for negative n, Python's
        # right shift is arithmetic, so `n >> 8` is negative and bytearray()
        # would raise ValueError.
        return bytearray([(n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint16le(n):
        """Converts unsigned 16-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 65535, "%u can not be represented as a 16-bit unsigned integer" % n
        return bytearray([n & 0xFF, n >> 8])

    @staticmethod
    def uint16be(n):
        """Converts unsigned 16-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 65535, "%u can not be represented as a 16-bit unsigned integer" % n
        return bytearray([n >> 8, n & 0xFF])

    @staticmethod
    def int32le(n):
        """Converts signed 32-bit integer to little-endian bytearray representation"""
        assert -2147483648 <= n <= 2147483647, "%u can not be represented as a 32-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF])

    @staticmethod
    def int32be(n):
        """Converts signed 32-bit integer to big-endian bytearray representation"""
        assert -2147483648 <= n <= 2147483647, "%u can not be represented as a 32-bit signed integer" % n
        return bytearray([(n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint32le(n):
        """Converts unsigned 32-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 4294967295, "%u can not be represented as a 32-bit unsigned integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, n >> 24])

    @staticmethod
    def uint32be(n):
        """Converts unsigned 32-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 4294967295, "%u can not be represented as a 32-bit unsigned integer" % n
        return bytearray([n >> 24, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def int64le(n):
        """Converts signed 64-bit integer to little-endian bytearray representation"""
        assert -9223372036854775808 <= n <= 9223372036854775807, \
            "%u can not be represented as a 64-bit signed integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF,
                          (n >> 32) & 0xFF, (n >> 40) & 0xFF, (n >> 48) & 0xFF, (n >> 56) & 0xFF])

    @staticmethod
    def int64be(n):
        """Converts signed 64-bit integer to big-endian bytearray representation"""
        assert -9223372036854775808 <= n <= 9223372036854775807, \
            "%u can not be represented as a 64-bit signed integer" % n
        return bytearray([(n >> 56) & 0xFF, (n >> 48) & 0xFF, (n >> 40) & 0xFF, (n >> 32) & 0xFF,
                          (n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    @staticmethod
    def uint64le(n):
        """Converts unsigned 64-bit integer to little-endian bytearray representation"""
        assert 0 <= n <= 18446744073709551615, "%u can not be represented as a 64-bit unsigned integer" % n
        return bytearray([n & 0xFF, (n >> 8) & 0xFF, (n >> 16) & 0xFF, (n >> 24) & 0xFF,
                          (n >> 32) & 0xFF, (n >> 40) & 0xFF, (n >> 48) & 0xFF, (n >> 56) & 0xFF])

    @staticmethod
    def uint64be(n):
        """Converts unsigned 64-bit integer to big-endian bytearray representation"""
        assert 0 <= n <= 18446744073709551615, "%u can not be represented as a 64-bit unsigned integer" % n
        return bytearray([(n >> 56) & 0xFF, (n >> 48) & 0xFF, (n >> 40) & 0xFF, (n >> 32) & 0xFF,
                          (n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF])

    def int16(self, n):
        """Converts signed 16-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint16(self, n):
        """Converts unsigned 16-bit integer to bytearray representation according to encoder endianness"""
        pass

    def int32(self, n):
        """Converts signed 32-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint32(self, n):
        """Converts unsigned 32-bit integer to bytearray representation according to encoder endianness"""
        pass

    def int64(self, n):
        """Converts signed 64-bit integer to bytearray representation according to encoder endianness"""
        pass

    def uint64(self, n):
        """Converts unsigned 64-bit integer to bytearray representation according to encoder endianness"""
        pass

    @staticmethod
    def fixed_string(string, size):
        """Converts string to fixed-length bytearray representation.

        A ``None`` string yields ``size`` zero bytes; shorter strings are
        zero-padded; longer strings raise ``ValueError``.
        """
        # Fixed: the original checked isinstance(size, (int, long)), which
        # raises NameError on Python 3 where `long` no longer exists.
        assert isinstance(size, int) and size > 0, "size %u is not a positive integer" % size
        if string is None:
            return bytearray(size)
        import codecs
        byte_string = codecs.encode(string, "utf8")
        if len(byte_string) > size:
            raise ValueError("The length of %s exceeds the target %d" % (string, size))
        elif len(byte_string) == size:
            return byte_string
        else:
            return byte_string + bytearray(size - len(byte_string))

    def signed_offset(self, n):
        """Converts signed integer offset to bytearray representation according to encoder bitness and endianness"""
        raise ValueError("Can not encode signed offset: encoder bitness not specified")

    def unsigned_offset(self, n):
        """Converts unsigned integer offset to bytearray representation according to encoder bitness and endianness"""
        raise ValueError("Can not encode unsigned offset: encoder bitness not specified")
| [
"maratek@gmail.com"
] | maratek@gmail.com |
57108dea039fca66dfd1781469adfe35ca38ca46 | 72612d94e07649586dda53c94a058a26af5ed3e6 | /amr_maldi_ml/deprecated/mean_intensities.py | e2e3b8e8f74586a0b063ad466cd78fa16eb5bbf1 | [
"BSD-3-Clause"
] | permissive | SanmiAndreSofa/maldi_amr | 91e88d0a23d2cb1e5007f73a8ba04be6828d6b6e | cc084d73a2d14c5936878e609f6d44fad0b524c7 | refs/heads/master | 2023-08-06T10:26:58.989597 | 2021-10-04T09:12:05 | 2021-10-04T09:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,091 | py | """Calculate mean intensity values of a given scenario for both classes."""
import argparse
import dotenv
import json
import logging
import pathlib
import os
import sys
import numpy as np
from maldi_learn.driams import DRIAMSDatasetExplorer
from maldi_learn.driams import DRIAMSLabelEncoder
from maldi_learn.driams import load_driams_dataset
from maldi_learn.utilities import stratify_by_species_and_label
from models import load_pipeline
from utilities import generate_output_filename
dotenv.load_dotenv()
# Root of the DRIAMS data collection; taken from the environment (.env)
# rather than hard-coded.
DRIAMS_ROOT = os.getenv('DRIAMS_ROOT')

# These parameters should remain fixed for this particular
# experiment. We always train on the same data set, using
# *all* available years.
site = 'DRIAMS-A'
years = ['2015', '2016', '2017', '2018']
if __name__ == '__main__':
    # Basic log configuration to ensure that we see where the process
    # spends most of its time.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s'
    )

    parser = argparse.ArgumentParser()

    parser.add_argument(
        'INPUT',
        type=str,
        help='Input file',
        nargs='+',
    )

    name = 'mean_intensities'

    parser.add_argument(
        '-o', '--output',
        default=pathlib.Path(__file__).resolve().parent.parent / 'results'
        / name,
        type=str,
        help='Output path for storing the results.'
    )

    parser.add_argument(
        '-f', '--force',
        action='store_true',
        help='If set, overwrites all files. Else, skips existing files.'
    )

    args = parser.parse_args()

    # Create the output directory for storing all results of the
    # individual combinations.
    os.makedirs(args.output, exist_ok=True)

    # Keeps track of the parameters used for all scenarios. This ensures
    # that the user does not call this script for incomparable scenarios
    # that would lead to inconsistent results.
    all_antibiotics = []
    all_sites = []
    all_years = []
    all_seeds = []
    all_models = []
    all_species = []
    all_metadata_versions = []

    # Will contain the mean of all intensities over all scenarios of
    # this run.
    all_mean_intensities = {}

    for f in args.INPUT:
        pipeline, data = load_pipeline(f)

        # Extract the required parameters to build the exact scenario
        # used in the input file.
        antibiotic = data['antibiotic']
        site = data['site']
        years = data['years']
        model = data['model']
        seed = data['seed']
        species = data['species']

        logging.info(f'Site: {site}')
        logging.info(f'Years: {years}')
        logging.info(f'Seed: {seed}')

        explorer = DRIAMSDatasetExplorer(DRIAMS_ROOT)
        metadata_fingerprints = explorer.metadata_fingerprints(site)

        driams_dataset = load_driams_dataset(
            DRIAMS_ROOT,
            site,
            years,
            species=species,
            antibiotics=antibiotic,  # Only a single one for this run
            encoder=DRIAMSLabelEncoder(),
            handle_missing_resistance_measurements='remove_if_all_missing',
            spectra_type='binned_6000',
        )

        logging.info(f'Loaded data set for {species} and {antibiotic}')

        # Create feature matrix from the binned spectra. We only need to
        # consider the second column of each spectrum for this.
        X = np.asarray([spectrum.intensities for spectrum in driams_dataset.X])

        logging.info('Finished vectorisation')

        # Stratified train--test split
        train_index, test_index = stratify_by_species_and_label(
            driams_dataset.y,
            antibiotic=antibiotic,
            random_state=seed,
        )

        logging.info('Finished stratification')

        y = driams_dataset.to_numpy(antibiotic)
        X_train, y_train = X[train_index], y[train_index]

        mean_intensities = {}

        # Pretend that we do not know the labels; there should be only
        # two, but this script actually does not care.
        for l in np.unique(y_train):
            spectra = X_train[y_train == l]
            mean_intensities[str(l)] = np.mean(spectra, axis=0)

            # We do *not* yet convert the resulting array because it is
            # to keep `np.array` around for sums etc.
            if str(l) in all_mean_intensities:
                all_mean_intensities[str(l)] += mean_intensities[str(l)]
            else:
                all_mean_intensities[str(l)] = mean_intensities[str(l)]

            # Convert to list in order to ensure proper serialisation
            # later on. This is not the most elegant thing.
            mean_intensities[str(l)] = mean_intensities[str(l)].tolist()

        if years not in all_years:
            all_years.append(years)

        all_antibiotics.append(antibiotic)
        all_sites.append(site)
        all_seeds.append(seed)
        all_species.append(species)
        all_models.append(model)
        all_metadata_versions.append(metadata_fingerprints)

        # Reduce the output and only report the relevant parts. We do
        # not need information about the model, for example, because
        # no model was involved in the training. It is purely needed
        # for the output name generation, though.
        output = {
            'site': site,
            'years': years,
            'seed': seed,
            'antibiotic': antibiotic,
            'species': species,
            'model': model,
            'metadata_versions': metadata_fingerprints,
            'mean_intensities': mean_intensities,
        }

        output_filename = generate_output_filename(
            args.output,
            output
        )

        if not os.path.exists(output_filename) or args.force:
            logging.info(f'Saving {os.path.basename(output_filename)}')

            # NOTE: `f` here shadows the loop variable of the outer
            # `for f in args.INPUT` loop; harmless but easy to misread.
            with open(output_filename, 'w') as f:
                json.dump(output, f, indent=4)
        else:
            logging.warning(
                f'Skipping {output_filename} because it already exists.'
            )

    # With more than one input file, additionally write the per-class
    # average over all scenarios.
    if len(args.INPUT) > 1:
        mean_intensities = {}
        for l in all_mean_intensities:
            # Average of the per-scenario means accumulated above.
            mean_intensities[l] = all_mean_intensities[l] / len(args.INPUT)
            print(len(mean_intensities[l]))
            print(sum(mean_intensities[l]))
            mean_intensities[l] = mean_intensities[l].ravel().tolist()

        sites = list(set(all_sites))
        antibiotics = list(set(all_antibiotics))
        species = list(set(all_species))
        models = list(set(all_models))

        # Stop if files from more than one antibiotics-species-model scenario
        # were given as input.
        if any([len(l) > 1 for l in [all_years,
                                     sites,
                                     antibiotics,
                                     species,
                                     models]]):
            logging.warning(
                'Cannot include more than one scenario in average '
                'intensity calculation.')
            sys.exit(0)

        output = {
            'site': sites[0],
            'years': all_years[0],
            'seed': all_seeds,
            'antibiotic': antibiotics[0],
            'species': species[0],
            'model': models[0],
            'mean_intensities': mean_intensities,
        }

        # The seed list is flattened to a string only for the file name.
        output_print = output.copy()
        output_print['seed'] = '-'.join([str(seed) for seed in all_seeds])

        output_filename = generate_output_filename(
            args.output,
            output_print,
            suffix='mean_intensities',
        )

        if not os.path.exists(output_filename) or args.force:
            logging.info(f'Saving {os.path.basename(output_filename)}')

            with open(output_filename, 'w') as f:
                json.dump(output, f, indent=4)
        else:
            logging.warning(
                f'Skipping {output_filename} because it already exists.'
            )
| [
"bastian.rieck@bsse.ethz.ch"
] | bastian.rieck@bsse.ethz.ch |
6e90f2dd477e7c47bc2b9d1496d59b963db72248 | 97f88c3382903ea93391e67523744e4c8aba5214 | /2018_cfg.py | ce7bddea6f14a0730474802697bbe677b48d661a | [] | no_license | diemort/pps-quick-test | 60194090be5de3ec3ae4e9a164051b1f8628d8ca | 0b599608689bb2bfde13b41581cf269d29f7a685 | refs/heads/master | 2023-07-15T19:41:22.552022 | 2019-12-12T16:19:46 | 2019-12-12T16:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('TEST', eras.Run2_2018)
from base import *
SetDefaults(process)
#process.source.fileNames = cms.untracked.vstring("/store/data/Run2018D/ZeroBias/RAW/v1/000/320/688/00000/601A721D-AD95-E811-B21A-FA163E28A50A.root")
process.source.fileNames = cms.untracked.vstring("root://eoscms.cern.ch//eos/cms/store/group/phys_pps/sw_test_input/601A721D-AD95-E811-B21A-FA163E28A50A.root")
process.ctppsProtonReconstructionPlotter.rpId_45_F = 23
process.ctppsProtonReconstructionPlotter.rpId_45_N = 3
process.ctppsProtonReconstructionPlotter.rpId_56_N = 103
process.ctppsProtonReconstructionPlotter.rpId_56_F = 123
process.ctppsProtonReconstructionPlotter.outputFile = "2018_reco_plots.root"
| [
"jan.kaspar@cern.ch"
] | jan.kaspar@cern.ch |
546ebf18503cfe45d623bc097095c2ce40c8e910 | 905da4dc6a829845dba931c18517a4d8b38cc163 | /docs/conf.py | 7743f60c6077222d7cbbde52915a7d7598bbb39f | [
"BSD-2-Clause"
] | permissive | interrogator/drum | 3995cefa9b0e7751b149355a4e19c7a1863549cd | 7a25c574941f9da8b89b0ae162b205f0e3fd5eba | refs/heads/master | 2020-04-29T12:14:24.490049 | 2019-03-20T23:46:35 | 2019-03-20T23:46:35 | 176,129,523 | 0 | 0 | BSD-2-Clause | 2019-03-17T16:34:44 | 2019-03-17T16:34:43 | null | UTF-8 | Python | false | false | 149 | py | from __future__ import unicode_literals
# This file is automatically generated via sphinx-me
from sphinx_me import setup_conf; setup_conf(globals())
| [
"steve@jupo.org"
] | steve@jupo.org |
5f873cf2ba2a4f40f7e64fce82de01710715ef70 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/ListDataServiceApisRequest.py | db47ea5b60789ea1e5ddd70c65b2e677d34e8dad | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 2,519 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class ListDataServiceApisRequest(RpcRequest):
	"""RPC request for the DataWorks ``ListDataServiceApis`` API (2020-05-18).

	Auto-generated SDK boilerplate: each get/set pair mirrors one request
	body parameter.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'ListDataServiceApis','dide')
		self.set_method('POST')
		# Attach endpoint data only when the core supports per-request
		# endpoints -- presumably a compatibility guard for older SDK
		# cores; verify against aliyunsdkcore.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	# Filter: keyword match on the API name.
	def get_ApiNameKeyword(self):
		return self.get_body_params().get('ApiNameKeyword')

	def set_ApiNameKeyword(self,ApiNameKeyword):
		self.add_body_params('ApiNameKeyword', ApiNameKeyword)

	# Filter: keyword match on the API path.
	def get_ApiPathKeyword(self):
		return self.get_body_params().get('ApiPathKeyword')

	def set_ApiPathKeyword(self,ApiPathKeyword):
		self.add_body_params('ApiPathKeyword', ApiPathKeyword)

	# Filter: id of the user who created the API.
	def get_CreatorId(self):
		return self.get_body_params().get('CreatorId')

	def set_CreatorId(self,CreatorId):
		self.add_body_params('CreatorId', CreatorId)

	# Pagination: 1-based page number.
	def get_PageNumber(self):
		return self.get_body_params().get('PageNumber')

	def set_PageNumber(self,PageNumber):
		self.add_body_params('PageNumber', PageNumber)

	# Pagination: number of entries per page.
	def get_PageSize(self):
		return self.get_body_params().get('PageSize')

	def set_PageSize(self,PageSize):
		self.add_body_params('PageSize', PageSize)

	# Tenant that owns the APIs.
	def get_TenantId(self):
		return self.get_body_params().get('TenantId')

	def set_TenantId(self,TenantId):
		self.add_body_params('TenantId', TenantId)

	# DataWorks project to list APIs from.
	def get_ProjectId(self):
		return self.get_body_params().get('ProjectId')

	def set_ProjectId(self,ProjectId):
		self.add_body_params('ProjectId', ProjectId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
2672a1e1230d39d2ec8eb0b54f8fc7cc59b208b4 | 5726f72427fa9f9881c4610749427c33dba84714 | /ginga/misc/plugins/Operations.py | 2ecb4a0b6c5e85c40d2218c28935e527e530d044 | [
"BSD-3-Clause"
] | permissive | saimn/ginga | ff73829b540dfb53c06c38b482e09d877e36887f | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | refs/heads/master | 2020-12-24T23:10:25.305394 | 2016-08-29T20:34:17 | 2016-08-29T20:34:17 | 12,879,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,681 | py | #
# Operations.py -- Operations management plugin for Ginga viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.gw import Widgets
class Operations(GingaPlugin.GlobalPlugin):
"""
This plugin defines the GUI for managing local plugins, AKA "operations".
By replacing or subclassing this plugin you can customize the way
the reference viewer starts and manages operations.
"""
    def __init__(self, fv):
        """Set up settings, register viewer callbacks, and cache operations.

        ``fv`` is the reference-viewer shell this global plugin belongs to.
        """
        # superclass defines some variables for us, like logger
        super(Operations, self).__init__(fv)

        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Operations')
        self.settings.addDefaults(show_channel_control=True,
                                  use_popup_menu=True,
                                  focuscolor="lightgreen")
        self.settings.load(onError='silent')

        # Keep our channel selector and operation tray in sync with the shell.
        fv.add_callback('add-channel', self.add_channel_cb)
        fv.add_callback('delete-channel', self.delete_channel_cb)
        fv.add_callback('channel-change', self.change_channel_cb)
        fv.add_callback('add-operation', self.add_operation_cb)

        self.operations = list(fv.get_operations())
        self.focuscolor = self.settings.get('focuscolor', "lightgreen")
        # May be overridden from settings in build_gui().
        self.use_popup = True
def build_gui(self, container):
hbox = Widgets.HBox()
hbox.set_border_width(0)
hbox.set_spacing(2)
cbox1 = Widgets.ComboBox()
self.w.channel = cbox1
cbox1.set_tooltip("Select a channel")
cbox1.add_callback('activated', self.channel_select_cb)
if self.settings.get('show_channel_control', True):
hbox.add_widget(cbox1, stretch=0)
self.use_popup = self.settings.get('use_popup_menu', True)
if self.use_popup:
opmenu = Widgets.Menu()
btn = Widgets.Button("Operation")
else:
opmenu = Widgets.ComboBox()
opmenu.set_tooltip("Select an operation")
hbox.add_widget(opmenu, stretch=0)
btn = Widgets.Button("Go")
self.w.operation = opmenu
btn.add_callback('activated', self.invoke_op_cb)
btn.set_tooltip("Invoke operation")
self.w.opbtn = btn
hbox.add_widget(btn, stretch=0)
self.w.optray = Widgets.HBox()
self.w.optray.set_border_width(0)
self.w.optray.set_spacing(2)
hbox.add_widget(self.w.optray, stretch=1)
container.add_widget(hbox, stretch=0)
def add_channel_cb(self, viewer, channel):
chname = channel.name
self.w.channel.insert_alpha(chname)
pl_mgr = channel.opmon
pl_mgr.add_callback('activate-plugin', self.activate_plugin_cb)
pl_mgr.add_callback('deactivate-plugin', self.deactivate_plugin_cb)
pl_mgr.add_callback('focus-plugin', self.focus_plugin_cb)
pl_mgr.add_callback('unfocus-plugin', self.unfocus_plugin_cb)
self.logger.debug("added channel %s" % (chname))
def delete_channel_cb(self, viewer, channel):
chname = channel.name
self.w.channel.delete_alpha(chname)
self.logger.debug("deleted channel %s" % (chname))
def start(self):
# get the list of channels and populate our channel control
names = self.fv.get_channelNames()
for name in names:
channel = self.fv.get_channelInfo(name)
self.add_channel_cb(self.fv, channel)
# get the list of local plugins and populate our operation control
operations = self.fv.get_operations()
for opname in operations:
self.add_operation_cb(self.fv, opname)
def add_operation_cb(self, viewer, opname):
opmenu = self.w.operation
if self.use_popup:
item = opmenu.add_name(opname)
item.add_callback('activated',
lambda *args: self.start_operation_cb(opname))
else:
opmenu.insert_alpha(opname)
def start_operation_cb(self, name):
self.logger.debug("invoking operation menu")
idx = self.w.channel.get_index()
chname = str(self.w.channel.get_alpha(idx))
self.fv.error_wrap(self.fv.start_local_plugin, chname, name, None)
def channel_select_cb(self, widget, index):
if index >= 0:
chname = self.fv.get_channelNames()[index]
self.logger.debug("Channel changed, index=%d chname=%s" % (
index, chname))
self.fv.change_channel(chname)
def change_channel_cb(self, viewer, channel):
# Update the channel control
self.w.channel.show_text(channel.name)
def invoke_op_cb(self, btn_w):
self.logger.debug("invoking operation menu")
menu = self.w.operation
if self.use_popup:
menu.popup(btn_w)
else:
idx = menu.get_index()
opname = str(menu.get_alpha(idx))
self.start_operation_cb(opname)
def activate_plugin_cb(self, pl_mgr, bnch):
lname = bnch.pInfo.name.lower()
menu = Widgets.Menu()
item = menu.add_name("Focus")
item.add_callback('activated', lambda *args: pl_mgr.set_focus(lname))
item = menu.add_name("Unfocus")
item.add_callback('activated', lambda *args: pl_mgr.clear_focus(lname))
item = menu.add_name("Stop")
item.add_callback('activated', lambda *args: pl_mgr.deactivate(lname))
lblname = bnch.lblname
lbl = Widgets.Label(lblname, halign='center', style='clickable',
menu=menu)
lbl.set_tooltip("Right click for menu")
self.w.optray.add_widget(lbl, stretch=0)
lbl.add_callback('activated', lambda w: pl_mgr.set_focus(lname))
bnch.setvals(widget=lbl, label=lbl, menu=menu)
def deactivate_plugin_cb(self, pl_mgr, bnch):
if bnch.widget is not None:
self.logger.debug("removing widget from taskbar")
self.w.optray.remove(bnch.widget)
bnch.widget = None
bnch.label = None
def focus_plugin_cb(self, pl_mgr, bnch):
self.logger.debug("highlighting widget")
if bnch.label is not None:
bnch.label.set_color(bg=self.focuscolor)
def unfocus_plugin_cb(self, pl_mgr, bnch):
self.logger.debug("unhighlighting widget")
if bnch.label is not None:
bnch.label.set_color(bg='grey')
def __str__(self):
return 'operations'
#END
| [
"eric@naoj.org"
] | eric@naoj.org |
b62bc2db0a6ee6b5f3cb7e989a85c62fde7672cf | eebafeddcdbb520ab2afcac4e9d7dd75c58318af | /APO/prep_data/featurize_dihedrals.py | affde20d60e2e428fb455bda72382b599b473c49 | [
"MIT",
"CC-BY-4.0"
] | permissive | choderalab/SETD8-materials | 0e91f1c7c0348d4aa100df6bc33b16ab3ab96555 | 60a03632c8667ca91514f41a48cb27a255a47821 | refs/heads/master | 2021-09-21T04:12:22.596465 | 2018-08-20T00:36:45 | 2018-08-20T00:36:45 | 145,294,223 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import pyemma
import mdtraj as md
import glob
import numpy as np

# Featurize MD trajectories with backbone + chi1 torsions (cos/sin encoded)
# and save one .npy file per trajectory plus the combined array.
fnames = glob.glob('data_cut_start_noH_stride10/*/*.h5')

traj = md.load(fnames[0])
top = traj.top

feat = pyemma.coordinates.featurizer(top)
feat.add_backbone_torsions(cossin=True)
feat.add_chi1_torsions(cossin=True)

source = pyemma.coordinates.source(fnames, features=feat)
X = source.get_output()

# BUG FIX: the filename was formatted with the feature array `x`
# ('%d.npy' % x) instead of the trajectory index `i`, which raises a
# TypeError / produces garbage filenames. Use the index.
for i, x in enumerate(X):
    np.save('data_cut_start_noH_stride10_featurized/dih/%d.npy' % i, x)
np.save('data_cut_start_noH_stride10_featurized/dih_comb/X.npy', X)
| [
"rafwiewiora@gmail.com"
] | rafwiewiora@gmail.com |
cee870482805f37c8d778e6d1300cb40e5facb92 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2587/60693/238740.py | ee8c7a54d56f3a4755d187c9812d54cb532e7ef9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | pnum=int(input())
# Read the remaining points, then sum the Chebyshev (king-move) distance
# between consecutive points: one diagonal step advances both axes at once,
# so each hop costs max(|dx|, |dy|) moves.
points = [tuple(map(int, input().split(','))) for _ in range(pnum)]
steps = 0
for (ax, ay), (bx, by) in zip(points, points[1:]):
    steps += max(abs(bx - ax), abs(by - ay))
print(steps)
"1069583789@qq.com"
] | 1069583789@qq.com |
e913cf4c9f8ca280ae64674e4b5b530734accc8b | c61c27a778f0d11502acbd76ec69e77745c920ee | /go/apps/rapidsms/vumi_app.py | 85c7b8b0548cce8db5dd3a694e30fb48ea960d8e | [
"BSD-2-Clause"
] | permissive | ChrisNolan1992/vumi-go | baf852b2b8a85aa5f3d43b1362409cddc407d4b1 | be8d358a0a6efc0799c758644b6c8759a22db180 | refs/heads/master | 2020-12-31T03:56:45.262961 | 2014-08-19T13:56:08 | 2014-08-19T13:56:08 | 23,417,739 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | # -*- test-case-name: go.apps.rapidsms.tests.test_vumi_app -*-
# -*- coding: utf-8 -*-
"""Vumi Go application worker for RapidSMS."""
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.application.rapidsms_relay import RapidSMSRelay
from vumi import log
from go.vumitools.app_worker import (
GoApplicationMixin, GoApplicationConfigMixin, GoWorkerConfigData)
class RapidSMSConfig(RapidSMSRelay.CONFIG_CLASS, GoApplicationConfigMixin):
    # Combines the RapidSMS relay's config fields with the Go application
    # config mixin; adds no options of its own.
    pass
class RapidSMSApplication(GoApplicationMixin, RapidSMSRelay):
    """Vumi Go application worker that relays messages to/from RapidSMS,
    resolving per-conversation configuration from the HTTP basic-auth
    username (``<account key>@<conversation key>``)."""

    CONFIG_CLASS = RapidSMSConfig
    worker_name = 'rapidsms_application'
    # Basic AUTH uses colon to combine the username and password so don't use
    # colon as the separator.
    AUTH_SEP = "@"

    @inlineCallbacks
    def setup_application(self):
        # Run the relay's setup first, then the Go worker plumbing.
        yield super(RapidSMSApplication, self).setup_application()
        yield self._go_setup_worker()

    @inlineCallbacks
    def teardown_application(self):
        yield super(RapidSMSApplication, self).teardown_application()
        yield self._go_teardown_worker()

    @classmethod
    def vumi_username_for_conversation(cls, conversation):
        """Build the basic-auth username identifying a conversation."""
        return cls.AUTH_SEP.join(
            [conversation.user_account.key, conversation.key])

    def get_config_data_for_conversation(self, conversation):
        """Assemble the dynamic RapidSMS config for one conversation
        (auth credentials plus the conversation object itself)."""
        dynamic_config = conversation.config.get('rapidsms', {}).copy()
        dynamic_config["vumi_auth_method"] = "basic"
        dynamic_config["vumi_username"] = self.vumi_username_for_conversation(
            conversation)
        auth_config = conversation.config.get('auth_tokens', {})
        api_tokens = auth_config.get("api_tokens", [])
        # First API token doubles as the basic-auth password (None if absent).
        dynamic_config["vumi_password"] = api_tokens[0] if api_tokens else None
        dynamic_config["conversation"] = conversation
        return GoWorkerConfigData(self.config, dynamic_config)

    @inlineCallbacks
    def get_ctxt_config(self, ctxt):
        """Resolve config from an HTTP request context's auth username.

        Raises ValueError for a missing/malformed username or an unknown
        conversation.
        """
        username = getattr(ctxt, 'username', None)
        if username is None:
            raise ValueError("No username provided for retrieving"
                             " RapidSMS conversation.")
        user_account_key, _, conversation_key = username.partition(
            self.AUTH_SEP)
        if not user_account_key or not conversation_key:
            raise ValueError("Invalid username for RapidSMS conversation.")
        conv = yield self.get_conversation(user_account_key, conversation_key)
        if conv is None:
            log.warning("Cannot find conversation '%s' for user '%s'." % (
                conversation_key, user_account_key))
            # NOTE(review): "retrieiving" typo is in the user-visible message;
            # left untouched here since it is runtime behavior.
            raise ValueError("No conversation found for retrieiving"
                             " RapidSMS configuration.")
        config = yield self.get_config_for_conversation(conv)
        returnValue(config)

    def get_config(self, msg, ctxt=None):
        """Dispatch config lookup: by message if given, else by context."""
        if msg is not None:
            return self.get_message_config(msg)
        elif ctxt is not None:
            return self.get_ctxt_config(ctxt)
        else:
            raise ValueError("No msg or context provided for"
                             " retrieving a RapidSMS config.")

    def send_rapidsms_nonreply(self, to_addr, content, config, endpoint):
        """Call .send_to() for a message from RapidSMS that is not a reply.

        This overrides the base method and adds conversation metadata.
        """
        helper_metadata = {}
        config.conversation.set_go_helper_metadata(helper_metadata)
        return self.send_to(to_addr, content, endpoint=endpoint,
                            helper_metadata=helper_metadata)

    def process_command_start(self, user_account_key, conversation_key):
        """Log and delegate the 'start conversation' command."""
        log.info("Starting RapidSMS conversation (key: %r)." %
                 (conversation_key,))
        return super(RapidSMSApplication, self).process_command_start(
            user_account_key, conversation_key)
| [
"hodgestar@gmail.com"
] | hodgestar@gmail.com |
91eac5619a238e229992da106d3038013dd9373e | caf8cbcafd448a301997770165b323438d119f5e | /.history/mercari/mercari_search_20201124184532.py | 60067c2157e28c296d99956f5b4b379b1aeb19a2 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time
"""[Initial Settings]
初期設定
"""
options = webdriver.ChromeOptions()
options.add_argument('--headeless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
"""[CSS Selector Settings]
CSSセレクターの設定
"""
PAGER = "li.pager-next a"
word = input("検索したいキーワードを入力してください:")
while True:
if PAGER:
res = browser.get("https://www.mercari.com/jp/search/?page="+str(n)+"&keyword="+word)
df_main = pd.DataFrame(columns=['在庫有無','タイトル','値段','URL'])
df_graf = pd.DataFrame(columns=['SOLD','PRICE'])
browser.get(res)
item_boxlist = browser.find_elements_by_css_selector(".items-box")
for item_box in item_boxlist:
try:
if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
sold = "SOLD"
else:
sold = "NOT SOLD"
sub_title = item_box.find_element_by_class_name("items-box-body")
title = sub_title.find_element_by_tag_name("h3").text
item_price = item_box.find_element_by_css_selector(".items-box-price")
price_text = item_price.text
price_text = re.sub(r",", "", price_text).lstrip("¥ ")
price_text_int = int(price_text)
print(price_text_int)
url = item_box.find_element_by_tag_name("a").get_attribute("href")
data = pd.Series( [ sold,title,price_text_int,url ], index=df_main.columns )
grdata = pd.Series( [ sold,price_text_int ], index=df_graf.columns )
df_main = df_main.append( data, ignore_index=True )
df_graf = df_graf.append( grdata, ignore_index=True )
except Exception as e:
print(e)
btn = browser.find_element_by_css_selector(PAGER).get_attribute('href')
n += 1
print('next url:{}'.format(btn))
time.sleep(3)
browser.get(btn)
print('Moving to next page...')
else:
print('No items anymore...')
break
print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf,hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done") | [
"kustomape@gmail.com"
] | kustomape@gmail.com |
787612f8fc43ee97d1b22bd8b708397309dc26ec | c3a61c9420c941722bad57a8cbcb7a58e3072012 | /sb3.py | 4375afbe9250308a448a01ec8cb8320797ed787d | [] | no_license | vwxyzjn/microrts-sb3 | e18db9427fd7cd3622c1356437c8fdbcbf796e19 | 72f4382f2926e3de61671d943b625391c8cc98f6 | refs/heads/master | 2023-08-22T14:54:27.869007 | 2021-09-20T01:59:28 | 2021-09-20T01:59:28 | 408,281,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import wandb
from sb3_contrib import MaskablePPO
from sb3_contrib.common.wrappers import ActionMasker
from stable_baselines3.common.vec_env import VecVideoRecorder, VecMonitor
from wandb.integration.sb3 import WandbCallback
from gym_microrts import microrts_ai
from gym_microrts.envs.new_vec_env import MicroRTSGridModeVecEnv
import numpy as np
import gym
def mask_fn(env: gym.Env) -> np.ndarray:
# Uncomment to make masking a no-op
# return np.ones_like(env.action_mask)
return env.get_action_mask()
def get_wrapper(env: gym.Env) -> gym.Env:
return ActionMasker(env, mask_fn)
config = {
"total_timesteps": int(100e6),
"num_envs": 8,
"env_name": "BreakoutNoFrameskip-v4",
}
run = wandb.init(
project="sb3",
config=config,
sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
monitor_gym=True, # auto-upload the videos of agents playing the game
save_code=True, # optional
)
num_envs = 24
envs = MicroRTSGridModeVecEnv(
num_selfplay_envs=0,
num_bot_envs=num_envs,
partial_obs=False,
max_steps=2000,
render_theme=2,
ai2s=[microrts_ai.coacAI for _ in range(num_envs - 6)]
+ [microrts_ai.randomBiasedAI for _ in range(min(num_envs, 2))]
+ [microrts_ai.lightRushAI for _ in range(min(num_envs, 2))]
+ [microrts_ai.workerRushAI for _ in range(min(num_envs, 2))],
map_path="maps/16x16/basesWorkers16x16.xml",
reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0]),
)
envs = VecMonitor(envs)
envs = VecVideoRecorder(envs, "videos", record_video_trigger=lambda x: x % 100000 == 0, video_length=2000) # record videos
model = MaskablePPO(
"CnnPolicy",
envs,
n_steps=128,
n_epochs=4,
learning_rate=lambda progression: 2.5e-4 * progression,
ent_coef=0.01,
clip_range=0.1,
batch_size=256,
verbose=1,
tensorboard_log=f"runs",
)
model.learn(
total_timesteps=config["total_timesteps"],
callback=WandbCallback(
gradient_save_freq=1000,
model_save_path=f"models/{run.id}",
),
) | [
"costa.huang@outlook.com"
] | costa.huang@outlook.com |
1a0155105047bf78d9a8c8e733b0206d8aa10225 | 7813f41039c4fc96c56849792d4a411688696fd9 | /12.py | 9b2f1b5f7c2df2c6fc7d7f250b907cba32047d6a | [] | no_license | vkurup/project-euler | 497e00d4b7e22cfc27feb06837f87fa7ba0d3158 | fb357b9c4a8ba681fa1b638d4e82223502be5687 | refs/heads/master | 2021-01-02T22:58:49.737821 | 2012-04-29T10:31:20 | 2012-04-29T10:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/env python2
# triangle numbers: 1, 3, 6, 10, 15, 21, 28, 36, 45
# What is the value of the first triangle number to have over 500
# divisors?
import math
def divisors(n):
    "Return all factors of n"
    # Walk candidates up to sqrt(n); each hit contributes the candidate and
    # its cofactor. A set removes the duplicate when n is a perfect square.
    found = set()
    limit = int(math.sqrt(n))
    for candidate in range(1, limit + 1):
        if n % candidate == 0:
            found.add(candidate)
            found.add(n // candidate)
    return sorted(found)
def odd(n):
    # True when n is not evenly divisible by two.
    return n % 2 == 1
def first_triangle_with_n_divisors(n):
    """Return the first triangle number with more than n divisors.

    Fixes two defects in the original:
    * odd triangle numbers were skipped entirely, which gives wrong
      answers whenever the first qualifying triangle is odd (e.g. for
      n=1 it returned 6 instead of 3);
    * the first triangle number (1) was never examined.
    """
    i = 1
    triangle = 1
    while _count_divisors(triangle) <= n:
        i += 1
        triangle += i
    return triangle

def _count_divisors(m):
    # Count divisors by pairing each divisor d <= sqrt(m) with m // d;
    # a perfect square double-counts its root, so subtract one.
    count = 0
    root = int(math.sqrt(m))
    for d in range(1, root + 1):
        if m % d == 0:
            count += 2
    if root * root == m:
        count -= 1
    return count
print "answer = ", first_triangle_with_n_divisors(500)
| [
"vinod@kurup.com"
] | vinod@kurup.com |
1be71155adf6f95d31377bd9bbad76fdcef9006f | d93159d0784fc489a5066d3ee592e6c9563b228b | /CondCore/RunInfoPlugins/test/inspect.py | 7bcf713503af8db60a91ef3b1e1c9d820f80d829 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 752 | py | import os,sys, DLFCN
# Allow lazily-resolved, globally-visible symbols when loading the CMS
# conditions-DB extension modules (required by the CORAL/ROOT bindings).
sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)
from pluginCondDBPyInterface import *
a = FWIncantation()
# Point CORAL at the shared authentication credentials on AFS.
os.putenv("CORAL_AUTH_PATH","/afs/cern.ch/cms/DB/conddb")
rdbms = RDBMS()
dbName = "oracle://cms_orcoff_prod/CMS_COND_21X_RUN_INFO"
logName = "oracle://cms_orcoff_prod/CMS_COND_21X_POPCONLOG"
rdbms.setLogger(logName)
from CondCore.Utilities import iovInspector as inspect
db = rdbms.getDB(dbName)
tags = db.allTags()
tag = 'l1triggerscaler_test_v2'
try :
    # Dump the last PopCon log state, the IOV list and per-IOV summaries
    # for the chosen tag. Python 2 print statements.
    log = db.lastLogEntry(tag)
    print log.getState()
    iov = inspect.Iov(db,tag)
    print iov.list()
    for x in iov.summaries():
        print x[1],x[3]
#    print iov.trend("",[0,2,12])
except RuntimeError :
    # Raised when the tag has no IOV data.
    print " no iov? in", tag
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
fc6f84346460d776b8a6cbd2b89fb45637a259b6 | 321e58ab3e6b2385bb3549aaaefd56a58c2a51e7 | /python/tests/tokyoperf_test.py | 04b1583da6c773994ccef0548f9749143f7f2646 | [] | no_license | alexmadon/atpic_photosharing | 7829118d032344bd9a67818cd50e2c27a228d028 | 9fdddeb78548dadf946b1951aea0d0632e979156 | refs/heads/master | 2020-06-02T15:00:29.282979 | 2017-06-12T17:09:52 | 2017-06-12T17:09:52 | 94,095,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # -*- coding: utf-8 -*-
"""
Unit tests for Tokyo Cabinet
Compare the speed of the different drivers
Conclusion:
google tc is 5X faster than atpic ctypes implementation
"""
import unittest
import os
import time
import atpic.tokyoctypes as tc
import tc as tc2
import tokyo.cabinet as tc3
from pyrant import Tyrant, Q
"""
Compares speed of atpic.tokyo and tc
"""
import psyco # package python-psyco
psyco.full()
class testTdbOpen(unittest.TestCase):
    """Benchmark: insert ~1M rows through the atpic ctypes TDB driver."""

    def setUp(self):
        # Start from a fresh table database file.
        print "setUp"
        self.dbfile="casketperf.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii"
#        tdb=tc.Tdb()
        tdb=tc.TDB(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
        print tdb
        print tdb.tdb
        print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
#        tdb.tranbegin()
        for i in range(1,1000000):
            # NOTE(review): "age" is an int here but a string in the other
            # drivers' benchmarks — confirm the ctypes driver accepts ints.
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":34})
#        tdb.trancommit()
        time2=time.time()
        dt=time2-time1
        print "Atpic tc: %s" % dt
class testTdbOpen2(unittest.TestCase):
    """Benchmark: same 1M-row insert through Google's `tc` driver."""

    def setUp(self):
        # Start from a fresh table database file.
        print "setUp"
        self.dbfile="casketperf.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii"
#        tdb=tc.Tdb()
        tdb=tc2.TDB(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
#        print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
        for i in range(1,1000000):
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":"34"})
        time2=time.time()
        dt=time2-time1
        print "Google tc: %s" % dt
class testTdbOpen3(unittest.TestCase):
    """Uses http://bitbucket.org/lasizoillo/tokyocabinet/"""

    def setUp(self):
        # Separate db file so this run does not clash with the others.
        print "setUp"
        self.dbfile="casketperf3.tct"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def tearDown(self):
        print "tearDown"
        if os.path.exists(self.dbfile):
            os.remove(self.dbfile)
    def testall(self):
        print "Hiiiii3"
#        tdb=tc.Tdb()
        tdb = tc3.TableDB()
        tdb.open(self.dbfile, tc.TDBOWRITER | tc.TDBOCREAT)
#        print "number of records in db: %s" % tdb.rnum()
        time1=time.time()
        for i in range(1,1000000):
            tdb.put("key%s" %i ,{"firstname":"Alex","lastname":"Madon","age":"34"})
        time2=time.time()
        dt=time2-time1
        print "bitbucket tc: %s" % dt
class testTdbTyrant(unittest.TestCase):
    """ You need to start tyrant:
    ttserver test.tct
    """
    # Disabled (name does not start with "test"): benchmarks inserts
    # through a running Tokyo Tyrant server instead of a local file.
    def NOtestall(self):
        t = Tyrant(host='127.0.0.1', port=1978)
        time1=time.time()
        for i in range(1,10000):
            key="key%s" %i
            t[key]={"firstname":"Alex","lastname":"Madon","age":"34"}
        time2=time.time()
        dt=time2-time1
        print "Tyran tc: %s" % dt
if __name__=="__main__":
unittest.main()
| [
"alex.madon@gmail.com"
] | alex.madon@gmail.com |
a4f63492557f24270930521e041d540a6598d393 | 0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded | /Sungjin/Bruteforce/1436.py | f234d2c7ee02122bc2abe3d99724475881e3efb1 | [] | no_license | comojin1994/Algorithm_Study | 0379d513abf30e3f55d6a013e90329bfdfa5adcc | 965c97a9b858565c68ac029f852a1c2218369e0b | refs/heads/master | 2021-08-08T14:55:15.220412 | 2021-07-06T11:54:33 | 2021-07-06T11:54:33 | 206,978,984 | 0 | 1 | null | 2020-05-14T14:06:46 | 2019-09-07T14:23:31 | Python | UTF-8 | Python | false | false | 205 | py | import sys
# BOJ 1436: find the Nth number (title) whose decimal form contains "666".
input = sys.stdin.readline

N = int(input())

found = 0
num = 665
while found < N:
    num += 1
    if '666' in str(num):
        found += 1
print(num)
"comojin1994@gmail.com"
] | comojin1994@gmail.com |
d7cae89b1fe581e2ffda58f40388a10988806fa0 | 5da023dcc3ea1a4ad5d92c610de4aed981d6acf6 | /day05/migrations/0002_auto_20200917_0101.py | 8140298f7df4d9be525d5f37994e1ef4e7e77bae | [] | no_license | zwy-888/drf03 | a2bf9deea7badc3f070bd2515b1d273b71df0909 | 53e512376e2a52fea6978cbe30376fddb950cfbe | refs/heads/master | 2022-12-18T14:55:50.728061 | 2020-09-17T06:02:51 | 2020-09-17T06:02:51 | 295,579,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # Generated by Django 3.0 on 2020-09-16 17:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('day05', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='employee2',
options={'verbose_name': '员工', 'verbose_name_plural': '员工'},
),
]
| [
"l"
] | l |
ef6a5849becb23b9f3c407f12ac2a9e8c0e7e182 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-oms/huaweicloudsdkoms/v2/model/task_group_dst_node_resp.py | e8c3ba6f581dcca705484bddfff656f83caf8ce8 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,971 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TaskGroupDstNodeResp:

    """
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []

    openapi_types = {
        'bucket': 'str',
        'region': 'str',
        'save_prefix': 'str'
    }

    attribute_map = {
        'bucket': 'bucket',
        'region': 'region',
        'save_prefix': 'save_prefix'
    }

    def __init__(self, bucket=None, region=None, save_prefix=None):
        """TaskGroupDstNodeResp

        The model defined in huaweicloud sdk

        :param bucket: Name of the destination bucket.
        :type bucket: str
        :param region: Region where the destination bucket resides.
        :type region: str
        :param save_prefix: Path prefix inside the destination bucket
            (prepended to the object key to form the new key; the combined
            key must not exceed 1024 characters).
        :type save_prefix: str
        """

        self._bucket = None
        self._region = None
        self._save_prefix = None
        self.discriminator = None

        if bucket is not None:
            self.bucket = bucket
        if region is not None:
            self.region = region
        if save_prefix is not None:
            self.save_prefix = save_prefix

    @property
    def bucket(self):
        """Gets the bucket of this TaskGroupDstNodeResp.

        Name of the destination bucket.

        :return: The bucket of this TaskGroupDstNodeResp.
        :rtype: str
        """
        return self._bucket

    @bucket.setter
    def bucket(self, bucket):
        """Sets the bucket of this TaskGroupDstNodeResp.

        Name of the destination bucket.

        :param bucket: The bucket of this TaskGroupDstNodeResp.
        :type bucket: str
        """
        self._bucket = bucket

    @property
    def region(self):
        """Gets the region of this TaskGroupDstNodeResp.

        Region where the destination bucket resides.

        :return: The region of this TaskGroupDstNodeResp.
        :rtype: str
        """
        return self._region

    @region.setter
    def region(self, region):
        """Sets the region of this TaskGroupDstNodeResp.

        Region where the destination bucket resides.

        :param region: The region of this TaskGroupDstNodeResp.
        :type region: str
        """
        self._region = region

    @property
    def save_prefix(self):
        """Gets the save_prefix of this TaskGroupDstNodeResp.

        Path prefix inside the destination bucket (prepended to the object
        key to form the new key; the combined key must not exceed 1024
        characters).

        :return: The save_prefix of this TaskGroupDstNodeResp.
        :rtype: str
        """
        return self._save_prefix

    @save_prefix.setter
    def save_prefix(self, save_prefix):
        """Sets the save_prefix of this TaskGroupDstNodeResp.

        Path prefix inside the destination bucket (prepended to the object
        key to form the new key; the combined key must not exceed 1024
        characters).

        :param save_prefix: The save_prefix of this TaskGroupDstNodeResp.
        :type save_prefix: str
        """
        self._save_prefix = save_prefix

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the dict output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TaskGroupDstNodeResp):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6010bd42f0854269a3b919f84f88b5380165c751 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /string_process/30_decorator_test_1.py | 3722bcbbd44c4c78e85834f45eaf86b6b10019b7 | [] | no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | UTF-8 | Python | false | false | 1,748 | py | # http://c.biancheng.net/view/2270.html refer to this link
# 装饰器本身就是一个函数,不会修改被修饰函数里面的代码,不能修改函数的调用方式。这是原则
# 可以理解为在一个函数外面加另外一个函数,来实现某些功能
# 应用场景,例如, 不能修改函数体
# 装饰器 = 高阶函数 + 函数嵌套 + 闭包
# 带固定参数的装饰器
# import time
#
# def deco(f): # 以函数名作为参数,就是高阶函数 high level function
# def wrapper(a,b): # 函数嵌套 function nesting
# start_time = time.time()
# f(a,b)
# end_time = time.time()
# execution_time = (end_time - start_time)*1000
# print("time is %d ms" % execution_time)
# return wrapper # 返回值是嵌套函数的函数名
#
# @deco
# def f(a,b):
# print("be on")
# time.sleep(1)
# print("result is %d" %(a+b)) # 执行完这一步,跳转到def wrapper(a,b)里面的f(a,b)
#
# if __name__ == '__main__':
# f(3,4) # 此处设置断点,查看函数如何执行,顺序是先执行deco()函数,在执行f(a,b)之前跳转到def f(a,b)函数里面
# 无固定参数的装饰器
import time
def deco(f):
    """Timing decorator: reports the wrapped call's duration in ms.

    Fixes two defects in the original:
    * the wrapper discarded f's return value (every decorated function
      silently returned None);
    * the wrapper did not preserve f's metadata (functools.wraps).
    """
    from functools import wraps  # local import: module has no functools import

    @wraps(f)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = f(*args, **kwargs)
        end_time = time.time()
        execution_time = (end_time - start_time) * 1000
        print("time is %d ms" % execution_time)
        return result
    return wrapper
@deco
def f(a, b):
    """Demo target: announces itself, sleeps one second, prints the sum."""
    print("be on")
    time.sleep(1)
    total = a + b
    print("result is %d" % total)
@deco
def f2(a, b, c):
    """Demo target with three arguments: sleeps one second, prints the sum."""
    print("be on")
    time.sleep(1)
    total = a + b + c
    print("result is %d" % total)
#
# Demo: run both decorated functions; set a breakpoint here to watch the
# decorator's call order (deco runs at definition, wrapper at call time).
if __name__ == '__main__':
    f2(3,4,5)
    f(3,4)
"lxz_20081025@163.com"
] | lxz_20081025@163.com |
dcb848e9f8e9bdc7e8cfdd036e3b5d1bb2d2f373 | 077e5ab67f2936b0aa531b8ee177ecf83a0a2e18 | /实例/4、sqlite3/Alchemy.py | 7a31d9808ded210b4e777112dda0dde8c0ee12c4 | [] | no_license | MKDevil/Python | 43ef6ebcc6a800b09b4fb570ef1401add410c51a | 17b8c0bdd2e5a534b89cdec0eb51bfcc17c91839 | refs/heads/master | 2022-02-02T08:34:06.622336 | 2022-01-28T08:52:25 | 2022-01-28T08:52:25 | 163,807,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | '''
Author: MK_Devil
Date: 2022-01-13 11:13:09
LastEditTime: 2022-01-14 14:13:31
LastEditors: MK_Devil
'''
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sqlite3

# Establish the database connection (creates the file if it does not exist).
conn = sqlite3.connect(r'.\实例\4、sqlite3\Alchemy.db')
# Create a cursor for executing SQL statements.
cur = conn.cursor()
# Query and print every row (example, left disabled):
# cur.execute(r'select * from material')
# print(cur.fetchall())
| [
"MK_Devil@163.com"
] | MK_Devil@163.com |
b34d92c61e668e83b5eee607b7d5a8f484f82506 | 571a89f94f3ebd9ec8e6b618cddb7d05811e0d62 | /abc141/e/main.py | 45d936346360e80af662a6a165b30c1659e9bbe2 | [] | no_license | ryu19-1/atcoder_python | 57de9e1db8ff13a107b5861f8f6a231e40366313 | cc24b3c2895aad71d40cefbb8e2893dc397b8f4f | refs/heads/master | 2023-05-10T05:32:16.507207 | 2021-05-19T17:48:10 | 2021-05-19T17:48:10 | 368,954,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # Z-algorithmで解いてみる
# ABC141-E: longest substring that occurs at least twice without overlap.
# For every suffix start h, compute the Z-array of T = S[h:]; Z[i] is the
# length of the longest common prefix of T and T[i:]. A match of length
# Z[i] at offset i that satisfies Z[i] <= i is a non-overlapping repeat.
# Overall O(N^2).
N = int(input())
S = input()
ans = 0
for h in range(N):
    T = S[h:]
    M = len(T)
    Z = [0] * M
    c = 0  # rightmost-reaching Z-box center computed so far
    for i in range(1, M):
        l = i - c  # offset of the current position inside the Z-box at c
        if i + Z[l] < c + Z[c]:
            # Entirely inside the known box: copy the mirrored value.
            Z[i] = Z[l]
        else:
            # Extend the match past the box boundary by direct comparison.
            j = max(0, c + Z[c] - i)
            while i + j < M and T[j] == T[i + j]:
                j += 1
            Z[i] = j
            c = i
        # Z[0] = M
        # print(i, Z[i])
        if i >= Z[i]:
            # Non-overlapping repeat of length Z[i] found.
            ans = max(ans, Z[i])
print(ans)
| [
"ryu1007kami@gmail.com"
] | ryu1007kami@gmail.com |
e85a3f6d0b811cbb9545632cd020fd96c7bd4d2f | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /programming/language/python3/python3-qt5/actions.py | a166a816f0eb75c9660606d54552c2b738686a23 | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 1,558 | py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir="PyQt5_gpl-%s" % get.srcVERSION()
def setup():
    """Configure PyQt5: run configure.py with the GPL license confirmed,
    system sip/qmake, and the Python 3.6 site-packages destination."""
    pythonmodules.run("configure.py --confirm-license \
                                    --qsci-api \
                                    --sip /usr/bin/sip \
                                    --qmake='/usr/bin/qmake' \
                                    --destdir='/usr/lib/python3.6/site-packages' \
                                    --sip-incdir='/usr/include/python3.6m' \
                                    CFLAGS='%s' CXXFLAGS='%s'" % (get.CFLAGS(), get.CXXFLAGS()), pyVer = "3")
    # Strip hard-coded rpath entries from the generated Makefiles.
    shelltools.system("find -name 'Makefile' | xargs sed -i 's|-Wl,-rpath,/usr/lib||g;s|-Wl,-rpath,.* ||g'")
def build():
    """Compile PyQt5 with the generated Makefiles."""
    autotools.make()
def install():
    """Install the built PyQt5 tree plus docs into the package image."""
    shelltools.cd("%s/PyQt5_gpl-%s" % (get.workDIR(),get.srcVERSION()))
    # pyrcc and pylupdate have their own sub-Makefiles; install them first.
    autotools.rawInstall("-C pyrcc DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
    autotools.rawInstall("-C pylupdate DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
    autotools.rawInstall("DESTDIR=%(DESTDIR)s INSTALL_ROOT=%(DESTDIR)s" % {'DESTDIR':get.installDIR()})
    #pisitools.dohtml("doc/html/*")
    pisitools.dodoc("NEWS", "README","LICENSE*")
| [
"ergunsalman@hotmail.com"
] | ergunsalman@hotmail.com |
d86de11a83b6047dbc1dfc7aaddb0246e1930762 | 430791dcde1596a554984e38c554a367b85c9951 | /classes_and_instances/exercises/account.py | 66b666b637c2f594a8d2e92bc5a0a9e2e627fe1c | [] | no_license | mialskywalker/PythonOOP | 5fa8606cfe7c9ceb72ada8e62ff89513bac10d32 | e7f2d5f46983e9c8c50d9356497fcc9ed9f6d4dc | refs/heads/master | 2023-04-06T05:43:13.408741 | 2021-04-03T13:22:13 | 2021-04-03T13:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Account:
    def __init__(self, id, name, balance=0):
        """Create an account.

        id: account number (note: parameter name shadows the builtin ``id``)
        name: account holder's name
        balance: opening balance, defaults to 0
        """
        self.id = id
        self.name = name
        self.balance = balance
def credit(self, amount):
self.balance += amount
return self.balance
def debit(self, amount):
if amount > self.balance:
return "Amount exceeded balance"
else:
self.balance -= amount
return self.balance
def info(self):
return f"User {self.name} with account {self.id} has {self.balance} balance"
# Demo: exercise the Account API (expected console output in comments).
account = Account(1234, "George", 1000)
print(account.credit(500))   # 1500
print(account.debit(1500))   # 0 (1500 is not > 1500, so the debit succeeds)
print(account.info())        # User George with account 1234 has 0 balance
account = Account(5411256, "Peter")
print(account.debit(500))    # Amount exceeded balance (opening balance is 0)
print(account.credit(1000))  # 1000
print(account.debit(500))    # 500
print(account.info())        # User Peter with account 5411256 has 500 balance
| [
"kalqga123@gmail.com"
] | kalqga123@gmail.com |
2e4b626d7a5009c01affd1651a2db5942cd936bd | d1ef84d05beedc811161314800193ded398bff07 | /tests/views/test_user_login.py | b65e0155aebd90b9a9ed87c86cd81702f49cffb2 | [
"MIT"
] | permissive | spookey/observatory | 8f4a98aeb214182124bc6a4ab6d1ddac697cd0bc | be5cc92f53f12e6341e7e3040f26360e54cfdf7d | refs/heads/master | 2023-04-22T03:31:34.879735 | 2021-01-16T17:50:07 | 2021-01-16T17:50:07 | 224,500,136 | 0 | 0 | MIT | 2021-05-12T03:53:02 | 2019-11-27T19:11:24 | Python | UTF-8 | Python | false | false | 3,122 | py | from flask import url_for
from flask_login import current_user
from pytest import mark
from tests.conftest import USER_PASS
ENDPOINT = 'user.login'  # flask endpoint under test, resolved via url_for()
@mark.usefixtures('session')
class TestUserLogin:
    """Integration tests for the login view (URL, form markup, auth flow)."""

    @staticmethod
    @mark.usefixtures('ctx_app')
    def test_url():
        """The endpoint resolves to the expected path."""
        assert url_for(ENDPOINT) == '/user/login'

    @staticmethod
    def test_basic_form(visitor):
        """The page contains a POST form targeting the login endpoint."""
        res = visitor(ENDPOINT)
        form = res.soup.select('form')[-1]
        assert form.attrs['method'].lower() == 'post'
        assert form.attrs['action'] == url_for(ENDPOINT, _external=True)

    @staticmethod
    def test_form_fields(visitor):
        """The form exposes exactly the expected fields, in order."""
        res = visitor(ENDPOINT)
        form = res.soup.select('form')[-1]
        fields = [
            (inpt.attrs.get('name'), inpt.attrs.get('type'))
            for inpt in form.select('input,button')
        ]
        assert fields == [
            ('username', 'text'),
            ('password', 'password'),
            ('remember', 'checkbox'),
            ('submit', 'submit'),
        ]

    @staticmethod
    def test_form_wrong(visitor, gen_user):
        """A wrong password re-renders the form (password cleared) and
        leaves the visitor unauthenticated."""
        assert current_user.is_authenticated is False
        user = gen_user()
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                # pw_hash is deliberately wrong as a plaintext password
                'username': user.username,
                'password': user.pw_hash,
                'remember': True,
                'submit': True,
            },
        )
        form = res.soup.select('form')[-1]
        for sel, exp in [
            ('#username', user.username),
            ('#password', ''),
            ('#remember', 'True'),
        ]:
            assert form.select(sel)[-1].attrs['value'] == exp
        assert current_user.is_authenticated is False

    @staticmethod
    def test_form_login(visitor, gen_user):
        """Valid credentials log the user in and redirect to the home page."""
        assert current_user.is_authenticated is False
        user = gen_user(password=USER_PASS)
        home_url = url_for('user.home', _external=True)
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                'username': user.username,
                'password': USER_PASS,
                'remember': True,
                'submit': True,
            },
            code=302,
        )
        assert res.request.headers['Location'] == home_url
        assert current_user == user
        assert current_user.is_authenticated is True
        assert current_user.username == user.username

    @staticmethod
    def test_form_login_next(visitor, gen_user):
        """A ``next`` query parameter overrides the post-login redirect."""
        assert current_user.is_authenticated is False
        user = gen_user(password=USER_PASS)
        next_url = url_for('side.favicon', _external=True)
        res = visitor(
            ENDPOINT,
            method='post',
            data={
                'username': user.username,
                'password': USER_PASS,
                'remember': True,
                'submit': True,
            },
            query_string={'next': next_url},
            code=302,
        )
        assert res.request.headers['Location'] == next_url
        assert current_user.is_authenticated is True
| [
"frieder.griesshammer@der-beweis.de"
] | frieder.griesshammer@der-beweis.de |
cc39d1f7c55512474c929007c0a06a66c8e2a5d6 | 378eea7cbb49d52c13c3bd0bb86bc93fc93d3d56 | /100Days/Day13/process_ex3.py | 26f733e1574e399722e49c9603b1001315678062 | [] | no_license | Zpadger/Python | b9e54524841e14d05e8f52b829c8c99c91e308b8 | f13da6d074afac50396621c9df780bf5ca30ce6b | refs/heads/master | 2020-08-16T01:10:00.534615 | 2020-04-12T15:15:53 | 2020-04-12T15:15:53 | 172,426,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # 进程间通信
from multiprocessing import Process
from time import sleep
# NOTE(review): each child process gets its OWN copy of this module state,
# so the two workers below do not actually share ``counter`` — this demo
# shows the *absence* of implicit inter-process state sharing.
counter = 0
def sub_task(string):
    """Print ``string`` repeatedly until this process's counter reaches 10."""
    global counter
    while counter<10:
        print(string,end='',flush=True)
        counter += 1
        sleep(0.01)
def main():
    """Start two writer processes; each runs sub_task on its own copy of
    the module globals (the 'Ping' process is started first)."""
    for tag in ('Ping', 'Pong'):
        Process(target=sub_task, args=(tag,)).start()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | Zpadger.noreply@github.com |
b5e17615285ca3c14fc8f5b3719570fdd384c7b7 | 795c2d7e2188f2ecb3e72bbb4053726856009c0d | /ctrl/cmorph/old/cmorph_extract_asia_ctrl.py | 2963d584be19522945de2a631d07ec2836a70891 | [
"Apache-2.0"
] | permissive | markmuetz/cosmic | 3a4ef310cb9cb92b81ff57b74bb1511841f790a5 | f215c499bfc8f1d717dea6aa78a58632a4e89113 | refs/heads/master | 2023-08-01T10:55:52.596575 | 2021-09-20T19:26:33 | 2021-09-20T19:26:33 | 217,045,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from pathlib import Path
# Batch-submission config: one job per year of the CMORPH archive.
SCRIPT_PATH = 'cmorph_extract_asia.py'

BASEDIR = Path('/gws/nopw/j04/cosmic/mmuetz/data/cmorph_data')

years = range(1998, 2020)
CONFIG_KEYS = years

BSUB_KWARGS = {
    'job_name': 'cmorph_cv',
    'queue': 'new_users',
    'max_runtime': '00:30',
}

# Each job receives its year, keyed by the year rendered as a string.
SCRIPT_ARGS = {str(year): year for year in CONFIG_KEYS}
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
5b6406cdc05d879e8f344ee3f33b1933286a85aa | d3e51b088f77ccd7ad21393136731667c4c91282 | /doc/source/usage_np.py | 5271c67d9107890a5a8b6d2ee4b918af7cc62567 | [
"MIT"
] | permissive | ForeverWintr/function-pipe | 2283e99902cdfd30b5ebdb26c928a545cdc36968 | 36f8653dbc6916c6e60d4a405a547a5a3ddf395f | refs/heads/master | 2021-07-04T03:32:27.723479 | 2017-01-24T17:13:50 | 2017-01-24T17:13:50 | 80,170,802 | 0 | 0 | null | 2017-01-27T00:56:12 | 2017-01-27T00:56:12 | null | UTF-8 | Python | false | false | 5,604 | py |
import sys
import argparse
from functools import reduce
import numpy as np
import function_pipe as fpn
class PixelFontInput(fpn.PipeNodeInput):
    """Shared pipeline state: glyph grid size, display character and scale."""
    # base glyph grid in cells: (rows, cols)
    SHAPE = (5,5)
    def __init__(self, pixel='*', scale=1):
        # pixel: character printed for a True cell; scale: cells per glyph cell
        super().__init__()
        self.scale = scale
        self.pixel = pixel
@fpn.pipe_node
def frame(**kwargs):
    """Return an all-False boolean canvas of SHAPE scaled by the input's scale."""
    pfi = kwargs[fpn.PN_INPUT]
    shape = tuple(s * pfi.scale for s in pfi.SHAPE)
    return np.zeros(shape=shape, dtype=bool)
@fpn.pipe_node
def v_line(**kwargs):
    """Draw a full-height vertical line (one scaled cell wide) on the left edge."""
    pfi = kwargs[fpn.PN_INPUT]
    # copy so the predecessor's matrix is never mutated in place
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    m[:, slice(0, pfi.scale)] = True
    return m
@fpn.pipe_node
def h_line(**kwargs):
    """Draw a full-width horizontal line (one scaled cell tall) on the top edge."""
    pfi = kwargs[fpn.PN_INPUT]
    # copy so the predecessor's matrix is never mutated in place
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    m[slice(0, pfi.scale), :] = True
    return m
@fpn.pipe_node_factory
def v_shift(steps, **kwargs):
    """Roll the predecessor matrix vertically by ``steps`` scaled rows.

    Raises:
        ValueError: if the predecessor is ``v_line`` — vertically shifting a
            full-height column is a no-op and almost certainly a mistake.
    """
    if kwargs[fpn.PREDECESSOR_PN].unwrap == v_line.unwrap:
        # ValueError is more precise than a bare Exception and is still
        # caught by any existing ``except Exception`` handler.
        raise ValueError('cannot v_shift a v_line')
    pfi = kwargs[fpn.PN_INPUT]
    return np.roll(kwargs[fpn.PREDECESSOR_RETURN], pfi.scale * steps, axis=0)
@fpn.pipe_node_factory
def h_shift(steps, **kwargs):
    """Roll the predecessor matrix horizontally by ``steps`` scaled columns.

    Raises:
        ValueError: if the predecessor is ``h_line`` — horizontally shifting a
            full-width row is a no-op and almost certainly a mistake.
    """
    if kwargs[fpn.PREDECESSOR_PN].unwrap == h_line.unwrap:
        # ValueError is more precise than a bare Exception and is still
        # caught by any existing ``except Exception`` handler.
        raise ValueError('cannot h_shift an h_line')
    pfi = kwargs[fpn.PN_INPUT]
    return np.roll(kwargs[fpn.PREDECESSOR_RETURN], pfi.scale * steps, axis=1)
@fpn.pipe_node_factory
def flip(*coords, **kwargs):
    """Invert individual cells of the predecessor matrix.

    Each coordinate is an (x, y) pair in unscaled glyph cells; the whole
    scale-by-scale pixel block for that cell is negated.
    """
    pfi = kwargs[fpn.PN_INPUT]
    m = kwargs[fpn.PREDECESSOR_RETURN].copy()
    for coord in coords: # x, y pairs
        start = [i * pfi.scale for i in coord]
        end = [i + pfi.scale for i in start]
        # note: numpy indexing is (row, col) = (y, x), hence the swap
        iloc = slice(start[1], end[1]), slice(start[0], end[0])
        m[iloc] = ~m[iloc]
    return m
@fpn.pipe_node_factory
def union(*args, **kwargs):
    """Element-wise OR of all argument matrices."""
    return reduce(np.logical_or, args)
@fpn.pipe_node_factory
def intersect(*args, **kwargs):
    """Element-wise AND of all argument matrices."""
    return reduce(np.logical_and, args)
@fpn.pipe_node_factory
def concat(*args, **kwargs):
    """Concatenate glyph matrices horizontally, one blank scaled column apart."""
    pfi = kwargs[fpn.PN_INPUT]
    gap = np.zeros(shape=(pfi.SHAPE[0] * pfi.scale, 1 * pfi.scale),
            dtype=bool)
    def join(left, right):
        # insert the blank gap column between consecutive glyphs
        return np.concatenate((left, gap, right), axis=1)
    return reduce(join, args)
@fpn.pipe_node
def display(**kwargs):
    """Print the boolean matrix as ASCII art and pass it through unchanged."""
    pfi = kwargs[fpn.PN_INPUT]
    matrix = kwargs[fpn.PREDECESSOR_RETURN]
    for row in matrix:
        for cell in row:
            # True cells print the configured pixel character, False a space
            print(pfi.pixel if cell else ' ', end='')
        print()
    return matrix
# font based on http://www.dafont.com/visitor.font
# Each glyph is a pipeline expression producing a scaled boolean matrix:
# lines/unions build strokes, flip() toggles individual cells.
chars = {
    '_' : frame,
    '.' : frame | flip((2,4)),
    'p' : union(
        frame | v_line,
        frame | h_line,
        frame | h_line | v_shift(2),
        ) | flip((4,0), (4,1)),
    'y' : (frame | h_line | v_shift(2) |
        flip((0,0), (0,1), (2,3), (2,4), (4,0), (4,1))),
    '0' : union(
        frame | v_line,
        frame | v_line | h_shift(-1),
        frame | h_line,
        frame | h_line | v_shift(-1),
        ),
    '1' : frame | v_line | h_shift(2) | flip((1,0)),
    '2' : union(
        frame | h_line,
        frame | h_line | v_shift(2),
        frame | h_line | v_shift(4),
        ) | flip((4, 1), (0, 3)),
    '3' : union(
        frame | h_line,
        frame | h_line | v_shift(-1),
        frame | v_line | h_shift(4),
        ) | flip((2, 2), (3, 2)),
    '4' : union(
        frame | h_line | v_shift(2),
        frame | v_line | h_shift(-1),
        ) | flip((0, 0), (0, 1)),
    '5' : union(
        frame | h_line,
        frame | h_line | v_shift(2),
        frame | h_line | v_shift(-1),
        ) | flip((0, 1), (4, 3)),
    '6' : union(
        frame | h_line,
        frame | h_line | v_shift(2),
        frame | h_line | v_shift(-1),
        frame | v_line,
        ) | flip((4, 3)),
    #---------------------------------------------------------------------------
    # letters (only those needed for the "py<major>.<minor>.<micro>" banner)
    'a' : union(
        frame | v_line,
        frame | v_line | h_shift(-1),
        frame | h_line,
        frame | h_line | v_shift(2),
        ),
    'b' : union(
        frame | v_line,
        frame | v_line | h_shift(-1),
        frame | h_line,
        frame | h_line | v_shift(-1),
        frame | h_line | v_shift(2),
        ) | flip((4,0), (4,4)),
    'h' : union(
        frame | v_line,
        frame | v_line | h_shift(-1),
        frame | h_line | v_shift(-3),
        ),
    'i' : union(
        frame | h_line,
        frame | h_line | v_shift(-1),
        frame | v_line | h_shift(2),
        ),
    'o' : union(
        frame | v_line,
        frame | v_line | h_shift(-1),
        frame | h_line,
        frame | h_line | v_shift(-1),
        ),
    }
def msg_display_pipeline(msg):
    """Build a pipeline that renders ``msg`` glyph-by-glyph via ``display``.

    Unknown characters fall back to the blank '_' glyph.
    """
    glyphs = [chars.get(letter.lower(), chars['_']) for letter in msg]
    return concat(*glyphs) | display
def version_banner(args):
    """Parse CLI ``args`` and print the running Python version as a banner.

    Args:
        args: argument list, e.g. ``sys.argv[1:]``.
    """
    p = argparse.ArgumentParser(
        description='Display the Python version in a banner',
    )
    p.add_argument('--pixel', default='*',
        help=('Set the character used for each pixel of the banner.')
    )
    p.add_argument('--scale', default=1, type=int,
        help=('Set the pixel scale for the banner.')
    )
    ns = p.parse_args(args)
    # Validate via the parser: unlike ``assert`` this survives ``python -O``
    # and reports through argparse's usage/exit machinery.
    if len(ns.pixel) != 1:
        p.error('--pixel must be a single character')
    if ns.scale < 1:
        p.error('--scale must be a positive integer')

    # build the pipeline for "py<major>.<minor>.<micro>" and execute it
    msg = 'py%s.%s.%s' % sys.version_info[:3]
    f = msg_display_pipeline(msg)
    pfi = PixelFontInput(pixel=ns.pixel, scale=ns.scale)
    f[pfi]


if __name__ == '__main__':
    version_banner(sys.argv[1:])
| [
"ariza@flexatone.com"
] | ariza@flexatone.com |
239da3b98f1e473cb137fac0bdb1d421e1cd0590 | b394bb6bd3e8848688b525f55e82962f152c1bb3 | /demos/upload/linear_systems/Elimination Matrices II.py | c988e68a06b987615742f555122112da1b5d72cc | [] | no_license | lukeolson/cs450-f20-demos | 02c2431d7696348cf9ca1ab67bdd5c44a97ac38b | 040e7dfa15c68f7f426cf69655cb600926f9f626 | refs/heads/master | 2023-01-22T19:12:33.394521 | 2020-12-03T19:48:18 | 2020-12-03T19:48:18 | 288,542,898 | 5 | 10 | null | 2020-10-05T19:39:07 | 2020-08-18T19:13:52 | null | UTF-8 | Python | false | false | 502 | py | #!/usr/bin/env python
# coding: utf-8
# # Behavior of Elimination Matrices
# In[3]:
import numpy as np
# In[30]:
n = 4  # order of the square matrices below
# ----------------
# Let's create some elimination matrices:
# In[40]:
M1 = np.eye(n)
M1[1,0] = 0.5
M1  # bare expression: leftover notebook cell output, no effect in a script
# In[41]:
M2 = np.eye(n)
M2[3,0] = 4
M2
# In[42]:
M3 = np.eye(n)
M3[2,1] = 1.3
M3
# -------------------
# Now play around with them:
# In[43]:
M1.dot(M2)  # results are discarded outside the notebook; order matters below
# In[44]:
M2.dot(M1)
# In[45]:
M1.dot(M2).dot(M3)
# BUT:
# In[47]:
M3.dot(M1).dot(M2)
| [
"luke.olson@gmail.com"
] | luke.olson@gmail.com |
1ab2b623e06bba1dcbe4c824cacaa054bbb4d5a7 | 288952acfb81b217ac9cdc920c65d00aad1146c4 | /vendor/views.py | 6af0d4958fc56c1d2f724cd0268e0317aebb1e4c | [] | no_license | turamant/CompShop | 054e16cf929976b7246897cdfdcd6bc5dc984bbe | b30e17e6eabfa1f4b4a5ccbb74da81b53a926f82 | refs/heads/main | 2023-09-03T12:03:02.833691 | 2021-10-28T15:49:22 | 2021-10-28T15:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import redirect, render
from vendor.models import Vendor
def become_vendor(request):
    """Sign up a user, log them in, and create their Vendor profile.

    GET renders an empty sign-up form.  A valid POST creates the user and
    vendor, logs the user in, and redirects to the front page.  An invalid
    POST falls through to re-render the bound form with its errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            # only the side effect is needed; no local binding required
            Vendor.objects.create(name=user.username, created_by=user)
            return redirect('frontpage')
    else:
        form = UserCreationForm()
    return render(request, 'vendor/become_vendor.html', {'form': form})
| [
"tur1amant@gmail.com"
] | tur1amant@gmail.com |
01e9ab4a6e000a580c29b7ed2c47633aa2770d19 | 5d34003423b4bcf641cb31b3d58c062d2011c7b7 | /venv/lib/python3.6/site-packages/panflute/utils.py | 73d5795605bcf6ed4e139db515ca7db7d58a0bc2 | [] | no_license | feiwl/Coding | a9f84cb867f7a84f0924b848a412dc1bedbb7d84 | 85973fe3d839b65f4f0b73c35ca0d0134588a76d | refs/heads/main | 2023-02-17T21:54:04.441162 | 2021-01-07T04:25:19 | 2021-01-07T04:25:19 | 327,518,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,463 | py | """
Auxiliary functions that have no dependencies
"""
# ---------------------------
# Imports
# ---------------------------
from collections import OrderedDict
import sys
import os.path as p
from importlib import import_module
# ---------------------------
# Functions
# ---------------------------
def get_caller_name():
    '''Return the class name of the nearest ``__init__`` frame on the stack.

    Walks the call stack upwards, starting two frames above this function,
    until it finds a frame named ``__init__`` whose ``self`` class name does
    not contain 'Container'.  Falls back to 'Panflute' when the stack is
    exhausted.
    '''
    depth = 2
    while True:
        try:
            frame = sys._getframe(depth)
        except ValueError:
            # walked past the top of the stack without a match
            return 'Panflute'
        if frame.f_code.co_name == '__init__':
            class_name = frame.f_locals['self'].__class__.__name__
            if 'Container' not in class_name:
                return class_name
        depth += 1
def check_type(value, oktypes):
    """Return ``value`` if it is an instance of ``oktypes``, else TypeError.

    A callable value (e.g. the class ``Space`` instead of ``Space()``) is
    instantiated first.
    """
    if callable(value):
        value = value()
    if not isinstance(value, oktypes):
        caller = get_caller_name()
        received = type(value).__name__
        raise TypeError(
            '\n\nElement "{}" received "{}" but expected {}\n'.format(
                caller, received, oktypes))
    return value
def check_group(value, group):
    """Return ``value`` if it belongs to ``group``, else raise TypeError."""
    if value in group:
        return value
    raise TypeError(
        'element {} not in group {}'.format(type(value).__name__, repr(group)))
def encode_dict(tag, content):
    """Build the pandoc-JSON pair {"t": tag, "c": content}, order preserved."""
    encoded = OrderedDict()
    encoded["t"] = tag
    encoded["c"] = content
    return encoded
# ---------------------------
# Classes
# ---------------------------
class ContextImport:
    """
    Import module context manager.

    Temporarily prepends an extra directory to ``sys.path`` and imports
    the module.

    Example:
        >>> # /path/dir/fi.py
        >>> with ContextImport('/path/dir/fi.py') as module:
        >>>     # prepends '/path/dir' to sys.path
        >>>     # module = import_module('fi')
        >>>     module.main()

        >>> with ContextImport('dir.fi', '/path') as module:
        >>>     # prepends '/path' to sys.path
        >>>     # module = import_module('dir.fi')
        >>>     module.main()
    """
    def __init__(self, module, extra_dir=None):
        """
        :param module: str
            module spec for import, or a file path from which only the
            basename (without ``.py``) is used
        :param extra_dir: str or None
            extra dir to prepend to sys.path; when None, the file's
            directory is used for path-like specs and sys.path is left
            untouched for plain module specs
        """
        base = p.basename(module)
        self.module = base[:-3] if base.endswith('.py') else base
        if extra_dir is None and module != base:
            extra_dir = p.dirname(module)
        self.extra_dir = extra_dir

    def __enter__(self):
        if self.extra_dir is not None:
            sys.path.insert(0, self.extra_dir)
        return import_module(self.module)

    def __exit__(self, exc_type, exc_value, traceback):
        # drop the directory we prepended, if any
        if self.extra_dir is not None:
            sys.path.pop(0)
| [
"feiwl8378@163.com"
] | feiwl8378@163.com |
3858c9e67177860c510d2efa26a438da7c05c325 | 1986f044d6476fab476a9b5eb9a95cc30d6a8eac | /Chapter07/pygal_2.py | 276356af1474708c96e1e1f0a2509f75e8be4717 | [
"MIT"
] | permissive | PacktPublishing/Mastering-Python-Networking | 711f47ecff9ca2fec51f948badff22cd8c73ada4 | 52a2827919db1773f66700f3946390f200bd6dab | refs/heads/master | 2023-02-08T01:39:44.670413 | 2023-01-30T09:03:30 | 2023-01-30T09:03:30 | 82,666,812 | 138 | 127 | MIT | 2020-11-05T11:34:15 | 2017-02-21T10:25:34 | Python | UTF-8 | Python | false | false | 263 | py | #!/usr/bin/env python3
import pygal
# Build a pie chart of traffic shares per protocol and write it as SVG.
line_chart = pygal.Pie()
line_chart.title = "Protocol Breakdown"
for protocol, share in (('TCP', 15), ('UDP', 30), ('ICMP', 45), ('Others', 10)):
    line_chart.add(protocol, share)
line_chart.render_to_file('pygal_example_3.svg')
| [
"echou@yahoo.com"
] | echou@yahoo.com |
40842236d29a3fe65789a8875561df71b77fc1bd | 113d0858a2476e5bd1b39ff25e41da33970b5dda | /blendernc/nodes/outputs/BlenderNC_NT_preloader.py | 9dddc69f3a5ec5b4c18f41c9326904028d0d8055 | [
"MIT"
] | permissive | peterhob/blendernc | aa52d387b74720f90950b8fb2df23f041ce25588 | 61eef2670fd299637633060ad5597bc3c6b53a02 | refs/heads/master | 2023-06-13T08:33:59.694357 | 2021-07-05T10:57:51 | 2021-07-05T10:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | #!/usr/bin/env python3
# Imports
import bpy
class BlenderNC_NT_preloader(bpy.types.Node):
    # === Basics ===
    # Description string
    """A netcdf node"""
    # Optional identifier string. If not explicitly defined,
    # the python class name is used.
    bl_idname = "netCDFPreloadNode"
    # Label for nice name display
    bl_label = "Load netCDF"
    # Icon identifier
    bl_icon = "SOUND"
    blb_type = "NETCDF"
    # TODO: This node will receive a datacube as
    # input and store all the images in disk for easier import and animation.
    # === Optional Functions ===
    # Initialization function, called when a new node is created.
    # This is the most common place to create the sockets for a node,
    # as shown below.
    def init(self, context):
        """No sockets yet; placeholder until the node is implemented."""
        pass
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        """Currently only shows a work-in-progress banner; the commented-out
        code below is the intended UI, kept for the future implementation."""
        # scene = context.scene
        layout.label(text="INFO: Work in progress", icon="INFO")
        # if scene.nc_dictionary:
        #     layout.prop(self, "file_name")
        # else:
        #     layout.label(text="No netcdf loaded")
        # if self.file_name:
        #     layout.prop(self, "var_name")
        # if self.var_name:
        #     layout.prop(self, "frame_start")
        #     layout.prop(self, "frame_end")
        #     if self.frame_end > self.frame_start:
        #         op = layout.operator("blendernc.preloader",
        #                               icon="FILE_REFRESH",)
        #         op.file_name = self.file_name
        #         op.var_name = self.var_name
        #         op.frame_start = self.frame_start
        #         op.frame_end = self.frame_end
        #     else:
        #         layout.label(text="Cannot preload!")
    # Detail buttons in the sidebar.
    # If this function is not defined,
    # the draw_buttons function is used instead
    def draw_buttons_ext(self, context, layout):
        pass
    # Optional: custom label
    # Explicit user label overrides this,
    # but here we can define a label dynamically
    def draw_label(self):
        return "Load netCDF"
    def update_value(self, context):
        """Property-update callback that delegates to update()."""
        self.update()
    def update(self):
        """No-op: the node has no sockets or cached state to refresh yet."""
        pass
| [
"josue.martinezmoreno@anu.edu.au"
] | josue.martinezmoreno@anu.edu.au |
5aadf807bfd7c7562417684c05b510a369cbab93 | 8d47af9482444b07b52cf44cebcaf4b992df4d09 | /agents/31_StochasticMaxStochasticDeletionPRB/StochasticMaxStochasticDeletionPRB.py | 2e72a6673349262ca21c05cd26f5be8d587ef0ce | [] | no_license | w0lv3r1nix/retro-agents | f4dbce2db558c880b161062796e5397be65bdd10 | c7f93a737dc6c6fc5d8343c099e14bd2bc97aaf1 | refs/heads/master | 2020-08-01T01:19:41.660018 | 2018-06-13T04:28:09 | 2018-06-13T04:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,087 | py | import numpy as np
import random
from math import sqrt
from anyrl.rollouts import PrioritizedReplayBuffer
class StochasticMaxStochasticDeletionPRB(PrioritizedReplayBuffer):
    """
    A prioritized replay buffer with stochastic maximum collection,
    stochastic deletion and loss-proportional sampling.
    """
    def __init__(self, capacity, alpha, beta, first_max=1, epsilon=0):
        # capacity: maximum number of stored transitions
        # alpha/beta/epsilon: PER exponents / IS correction / priority floor
        #   (presumably consumed by the PrioritizedReplayBuffer base and
        #   _process_weight — TODO confirm against anyrl)
        self.capacity = capacity
        self.alpha = alpha
        self.beta = beta
        self.epsilon = epsilon
        self.transitions = []
        self.errors = CustomFloatBuffer(capacity)
        self._max_weight_arg = first_max
    def add_sample(self, sample, init_weight=None):
        """
        Add a sample to the buffer.
        When new samples are added without an explicit initial weight, the
        maximum weight argument ever seen is used. When the buffer is empty,
        first_max is used.
        """
        if init_weight is None:
            # no weight supplied: always accept, at the max weight seen so far
            new_error = self._process_weight(self._max_weight_arg)
            self.transitions.append(sample)
            self.errors.append(new_error)
        else:
            # stochastic collection: accept proportionally to the sample's
            # error relative to the current maximum error
            new_error = self._process_weight(init_weight)
            if random.random() < new_error / self.errors.max():
                self.transitions.append(sample)
                self.errors.append(new_error)
        # stochastic deletion: evict low-error transitions when over capacity.
        # NOTE(review): self.errors is a fixed-size ring that evicts its
        # oldest entry on append, while this deletes an inverse-sampled index
        # from self.transitions — verify the two stay index-aligned.
        while len(self.transitions) > self.capacity:
            del self.transitions[self.errors.inverse_sample(1)[0]]
class CustomFloatBuffer:
    """A ring-buffer of floating point values.

    Values are kept in a fixed-size circular numpy array.  Per-bin partial
    sums (bin size ~ sqrt(capacity)) make proportional sampling roughly
    O(sqrt(n)) per draw, and cached min/max are maintained incrementally.
    Indices exposed by sample()/inverse_sample()/set_value() are logical
    (0 = oldest element), converted internally via ``_start``.
    """
    def __init__(self, capacity, dtype='float64'):
        self._capacity = capacity
        self._start = 0      # physical index of the logically-first element
        self._used = 0       # number of live entries (<= capacity)
        self._buffer = np.zeros((capacity,), dtype=dtype)
        self._bin_size = int(sqrt(capacity))
        num_bins = capacity // self._bin_size
        if num_bins * self._bin_size < capacity:
            num_bins += 1    # last bin is a partial bin
        self._bin_sums = np.zeros((num_bins,), dtype=dtype)
        self._min = 0
        self._min_id = 0
        self._max = 0
    def append(self, value):
        """
        Add a value to the end of the buffer.
        If the buffer is full, the first value is removed.
        """
        idx = (self._start + self._used) % self._capacity
        if self._used < self._capacity:
            self._used += 1
        else:
            # full: overwrite the oldest slot and advance the start pointer
            self._start = (self._start + 1) % self._capacity
        self._set_idx(idx, value)
    def sample(self, num_values):
        """
        Sample indices in proportion to their value.

        Returns:
          A tuple (indices, probs) of logical indices and their draw
          probabilities.
        """
        assert self._used >= num_values
        res = []
        probs = []
        # two-stage draw: pick a bin by its sum, then an element within it
        bin_probs = self._bin_sums / np.sum(self._bin_sums)
        while len(res) < num_values:
            bin_idx = np.random.choice(len(self._bin_sums), p=bin_probs)
            bin_values = self._bin(bin_idx)
            sub_probs = bin_values / np.sum(bin_values)
            sub_idx = np.random.choice(len(bin_values), p=sub_probs)
            idx = bin_idx * self._bin_size + sub_idx
            res.append(idx)
            probs.append(bin_probs[bin_idx] * sub_probs[sub_idx])
        # convert physical indices back to logical (0 = oldest)
        return (np.array(list(res)) - self._start) % self._capacity, np.array(probs)
    def inverse_sample(self, num_values):
        """
        Sample indices in inverse proportion to their value. The sampling used
        is $e^{-x}$.

        Returns:
            List of indices sampled (logical indices).
        """
        assert self._used >= num_values
        res = []
        # same two-stage scheme as sample(), but weighted by exp(-value);
        # note the bin stage uses exp(-bin_sum), an approximation rather
        # than the sum of per-element exp(-value)
        e_neg_bin_sums = np.exp(-1 * self._bin_sums)
        bin_probs = e_neg_bin_sums / np.sum(e_neg_bin_sums)
        while len(res) < num_values:
            bin_idx = np.random.choice(len(self._bin_sums), p=bin_probs)
            bin_values = self._bin(bin_idx)
            e_neg_bin_values = np.exp(-1 * bin_values)
            sub_probs = e_neg_bin_values / np.sum(e_neg_bin_values)
            sub_idx = np.random.choice(len(bin_values), p=sub_probs)
            idx = bin_idx * self._bin_size + sub_idx
            res.append(idx)
        return (np.array(list(res)) - self._start) % self._capacity
    def set_value(self, idx, value):
        """Set the value at the given index."""
        # translate the logical index to a physical slot
        idx = (idx + self._start) % self._capacity
        self._set_idx(idx, value)
    def min(self):
        """Get the minimum value in the buffer."""
        return self._min
    def max(self):
        """Get the maximum value in the buffer."""
        return self._max
    def min_id(self):
        """Get the index of minimum value in the buffer."""
        # NOTE(review): this is the raw physical index into the backing
        # array, not a logical index — confirm that is what callers expect.
        return self._min_id
    def sum(self):
        """Get the sum of the values in the buffer."""
        return np.sum(self._bin_sums)
    def _set_idx(self, idx, value):
        # Write a (strictly positive, non-NaN) value at physical index idx,
        # keeping bin sums and cached min/max consistent.
        assert not np.isnan(value)
        assert value > 0
        needs_recompute_min = False
        if self._min == self._buffer[idx]:
            # we may be overwriting the current minimum: rescan afterwards
            needs_recompute_min = True
        elif value < self._min:
            self._min = value
        needs_recompute_max = False
        if self._max == self._buffer[idx]:
            # we may be overwriting the current maximum: rescan afterwards
            needs_recompute_max = True
        elif value > self._max:
            self._max = value
        bin_idx = idx // self._bin_size
        self._buffer[idx] = value
        self._bin_sums[bin_idx] = np.sum(self._bin(bin_idx))
        if needs_recompute_min:
            self._recompute_min()
        if needs_recompute_max:
            self._recompute_max()
    def _bin(self, bin_idx):
        # View of the buffer slice backing bin bin_idx (last bin may be short).
        if bin_idx == len(self._bin_sums) - 1:
            return self._buffer[self._bin_size * bin_idx:]
        return self._buffer[self._bin_size * bin_idx: self._bin_size * (bin_idx + 1)]
    def _recompute_min(self):
        # Full rescan over the live region (only used slots when not full).
        if self._used < self._capacity:
            self._min_id = np.argmin(self._buffer[:self._used])
        else:
            self._min_id = np.argmin(self._buffer)
        self._min = self._buffer[self._min_id]
    def _recompute_max(self):
        # Full rescan over the live region (only used slots when not full).
        if self._used < self._capacity:
            self._max = np.max(self._buffer[:self._used])
        else:
            self._max = np.max(self._buffer)
| [
"seungjaeryanlee@gmail.com"
] | seungjaeryanlee@gmail.com |
a76830600e020b9c16b8bc42c056f49e358bb5a0 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/functiongraph/mvc/LazySaveableXML.pyi | 0ac3ad1a85c90eb8dc01e18e730a2294cdc70003 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | pyi | from typing import List
import ghidra.app.plugin.core.functiongraph.mvc
import ghidra.util
import java.lang
import org.jdom
class LazySaveableXML(ghidra.app.plugin.core.functiongraph.mvc.SaveableXML):
    """Auto-generated Jython type stub for Ghidra's LazySaveableXML.

    Signatures only (all bodies are ``...``).  NOTE(review): uses
    Python-2/Jython-era names (``unicode``, ``long``) and ``@overload``
    without an import, consistent with the stub generator's output.
    """

    def __init__(self): ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getElement(self) -> org.jdom.Element: ...

    def getObjectStorageFields(self) -> List[java.lang.Class]: ...

    def getSchemaVersion(self) -> int: ...

    def hashCode(self) -> int: ...

    def isEmpty(self) -> bool: ...

    def isPrivate(self) -> bool: ...

    def isUpgradeable(self, __a0: int) -> bool: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def restore(self, __a0: ghidra.util.ObjectStorage) -> None: ...

    def save(self, __a0: ghidra.util.ObjectStorage) -> None: ...

    def toString(self) -> unicode: ...

    def upgrade(self, __a0: ghidra.util.ObjectStorage, __a1: int, __a2: ghidra.util.ObjectStorage) -> bool: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    @property
    def empty(self) -> bool: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
de252d7d6be728353eb46d94dd4648cc6fa950b6 | 046c1141399890afa13fd243e55da3dbf31085c5 | /corl/wc_test/test1.py | dcc721a40aa8d1c33ced4c39281c74deb2bacee8 | [] | no_license | carusyte/tflab | 1d0edf87282352aeb5a38b83c58ab9c0189bbb1a | 2324c3b0ad22d28c50a4fd8db56e36a2836735c3 | refs/heads/master | 2021-05-12T06:58:26.270868 | 2019-03-24T14:57:44 | 2019-03-24T14:57:44 | 117,232,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,248 | py | from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import tensorflow as tf
# pylint: disable-msg=E0401
from model import base as model0
from wc_data import base as data0
from time import strftime
import os
import numpy as np
import math
N_TEST = 100          # rows requested for the held-out test set
TEST_INTERVAL = 50    # training batches between test-set evaluations
LAYER_WIDTH = 256     # RNN layer width passed to the model
MAX_STEP = 30         # maximum sequence length fed to the RNN
TIME_SHIFT = 2        # forwarded to DataLoader — presumably a time offset; confirm
LEARNING_RATE = 1e-3
USE_PEEPHOLES = True  # forwarded to the model's LSTM cell configuration
TIED = False          # weight-tying flag forwarded to the model
LOG_DIR = 'logdir'    # root directory for summaries and checkpoints
# pylint: disable-msg=E0601
def collect_summary(sess, model, base_dir):
    """Create train/test summary writers and the merged summary op.

    Returns:
        (summary, train_writer, test_writer): the merged TF summary tensor
        plus FileWriters logging under base_dir/train and base_dir/test.
    """
    train_writer = tf.summary.FileWriter(base_dir + "/train", sess.graph)
    test_writer = tf.summary.FileWriter(base_dir + "/test", sess.graph)
    with tf.name_scope("Basic"):
        # RMSE derived from the model's (squared-error) cost
        tf.summary.scalar("Mean_Diff", tf.sqrt(model.cost))
    summary = tf.summary.merge_all()
    return summary, train_writer, test_writer
def run():
    """Train SRnnRegressorV2 on loader batches, evaluating on the test set
    every TEST_INTERVAL batches and checkpointing after each evaluation."""
    tf.logging.set_verbosity(tf.logging.INFO)
    loader = data0.DataLoader(TIME_SHIFT)
    print('{} loading test data...'.format(strftime("%H:%M:%S")))
    tuuids, tdata, tvals, tseqlen = loader.loadTestSet(MAX_STEP, N_TEST)
    print('input shape: {}'.format(tdata.shape))
    print('target shape: {}'.format(tvals.shape))
    featSize = tdata.shape[2]
    data = tf.placeholder(tf.float32, [None, MAX_STEP, featSize], "input")
    target = tf.placeholder(tf.float32, [None], "target")
    seqlen = tf.placeholder(tf.int32, [None], "seqlen")
    with tf.Session() as sess:
        model = model0.SRnnRegressorV2(
            data=data,
            target=target,
            seqlen=seqlen,
            cell='grid3lstm',
            use_peepholes=USE_PEEPHOLES,
            tied=TIED,
            layer_width=LAYER_WIDTH,
            learning_rate=LEARNING_RATE)
        model_name = model.getName()
        # log under logdir/<script>_<model>/<timestamp>
        f = __file__
        fbase = f[f.rfind('/')+1:f.rindex('.py')]
        base_dir = '{}/{}_{}/{}'.format(LOG_DIR, fbase,
                                        model_name, strftime("%Y%m%d_%H%M%S"))
        print('{} using model: {}'.format(strftime("%H:%M:%S"), model_name))
        if tf.gfile.Exists(base_dir):
            tf.gfile.DeleteRecursively(base_dir)
        tf.gfile.MakeDirs(base_dir)
        sess.run(tf.global_variables_initializer())
        summary, train_writer, test_writer = collect_summary(
            sess, model, base_dir)
        saver = tf.train.Saver()
        bno = 0
        epoch = 0
        while True:
            bno = epoch*TEST_INTERVAL
            # evaluate on the held-out set before each training interval
            print('{} running on test set...'.format(strftime("%H:%M:%S")))
            feeds = {data: tdata, target: tvals, seqlen: tseqlen}
            mse, worst, test_summary_str = sess.run(
                [model.cost, model.worst, summary], feeds)
            bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
            print('{} Epoch {} diff {:3.5f} max_diff {:3.4f} predict {} actual {} uuid {}'.format(
                strftime("%H:%M:%S"), epoch, math.sqrt(mse), max_diff, predict, actual, tuuids[bidx]))
            summary_str = None
            fin = False
            for _ in range(TEST_INTERVAL):
                bno = bno+1
                print('{} loading training data for batch {}...'.format(
                    strftime("%H:%M:%S"), bno))
                _, trdata, trvals, trseqlen = loader.loadTrainingData(
                    bno, MAX_STEP)
                if len(trdata) > 0:
                    print('{} training...'.format(strftime("%H:%M:%S")))
                else:
                    # empty batch signals the end of the training data
                    print('{} end of training data, finish training.'.format(
                        strftime("%H:%M:%S")))
                    fin = True
                    break
                feeds = {data: trdata, target: trvals, seqlen: trseqlen}
                summary_str, worst = sess.run(
                    [summary, model.worst, model.optimize], feeds)[:-1]
                bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
                print('{} bno {} max_diff {:3.4f} predict {} actual {}'.format(
                    strftime("%H:%M:%S"), bno, max_diff, predict, actual))
            # NOTE(review): if the very first training batch of an epoch is
            # empty, summary_str is still None here and add_summary would
            # receive None — confirm whether that case can occur in practice.
            train_writer.add_summary(summary_str, bno)
            test_writer.add_summary(test_summary_str, bno)
            train_writer.flush()
            test_writer.flush()
            checkpoint_file = os.path.join(base_dir, 'model.ckpt')
            saver.save(sess, checkpoint_file, global_step=bno)
            epoch += 1
            if fin:
                break
        # test last epoch
        print('{} running on test set...'.format(strftime("%H:%M:%S")))
        feeds = {data: tdata, target: tvals, seqlen: tseqlen}
        mse, worst, test_summary_str = sess.run(
            [model.cost, model.worst, summary], feeds)
        bidx, max_diff, predict, actual = worst[0], worst[1], worst[2], worst[3]
        print('{} Epoch {} diff {:3.5f} max_diff {:3.4f} predict {} actual {} uuid {}'.format(
            strftime("%H:%M:%S"), epoch, math.sqrt(mse), max_diff, predict, actual, tuuids[bidx]))
        # note: summary_str here is the last value from the training loop
        train_writer.add_summary(summary_str, bno)
        test_writer.add_summary(test_summary_str, bno)
        train_writer.flush()
        test_writer.flush()
if __name__ == '__main__':
    run()
| [
"carusyte@163.com"
] | carusyte@163.com |
d547ea2ba4d64c65dab0c2bb26a6c4c03f3992af | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storage/v20190601/list_storage_account_sas.py | d80c546cf9140068777a371bf7a7064bb5ddf67a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'ListStorageAccountSASResult',
'AwaitableListStorageAccountSASResult',
'list_storage_account_sas',
]
@pulumi.output_type
class ListStorageAccountSASResult:
    """
    The List SAS credentials operation response.
    """
    # NOTE: this class is emitted by the Pulumi SDK Generator; prefer fixing
    # the generator template over hand-editing it here.
    def __init__(__self__, account_sas_token=None):
        # Defensive check: the provider response must deserialize to a str.
        if account_sas_token and not isinstance(account_sas_token, str):
            raise TypeError("Expected argument 'account_sas_token' to be a str")
        pulumi.set(__self__, "account_sas_token", account_sas_token)
    @property
    @pulumi.getter(name="accountSasToken")
    def account_sas_token(self) -> str:
        """
        List SAS credentials of storage account.
        """
        return pulumi.get(self, "account_sas_token")
class AwaitableListStorageAccountSASResult(ListStorageAccountSASResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator-based awaitable: never actually yields (the `if False`
        # only marks this function as a generator) and resolves immediately
        # to a plain, non-awaitable copy of the result.
        if False:
            yield self
        return ListStorageAccountSASResult(
            account_sas_token=self.account_sas_token)
def list_storage_account_sas(account_name: Optional[str] = None,
                             i_p_address_or_range: Optional[str] = None,
                             key_to_sign: Optional[str] = None,
                             permissions: Optional[Union[str, 'Permissions']] = None,
                             protocols: Optional['HttpProtocol'] = None,
                             resource_group_name: Optional[str] = None,
                             resource_types: Optional[Union[str, 'SignedResourceTypes']] = None,
                             services: Optional[Union[str, 'Services']] = None,
                             shared_access_expiry_time: Optional[str] = None,
                             shared_access_start_time: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStorageAccountSASResult:
    """
    List the account-level SAS credentials of a storage account (generated
    wrapper around the `listStorageAccountSAS` provider invoke).

    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str i_p_address_or_range: An IP address or a range of IP addresses from which to accept requests.
    :param str key_to_sign: The key to sign the account SAS token with.
    :param Union[str, 'Permissions'] permissions: The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p).
    :param 'HttpProtocol' protocols: The protocol permitted for a request made with the account SAS.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param Union[str, 'SignedResourceTypes'] resource_types: The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files.
    :param Union[str, 'Services'] services: The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f).
    :param str shared_access_expiry_time: The time at which the shared access signature becomes invalid.
    :param str shared_access_start_time: The time at which the SAS becomes valid.
    """
    # Marshal the snake_case Python arguments into the camelCase payload the
    # Azure Native provider expects.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['iPAddressOrRange'] = i_p_address_or_range
    __args__['keyToSign'] = key_to_sign
    __args__['permissions'] = permissions
    __args__['protocols'] = protocols
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceTypes'] = resource_types
    __args__['services'] = services
    __args__['sharedAccessExpiryTime'] = shared_access_expiry_time
    __args__['sharedAccessStartTime'] = shared_access_start_time
    # Fill in default invoke options / SDK version before calling the engine.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:storage/v20190601:listStorageAccountSAS', __args__, opts=opts, typ=ListStorageAccountSASResult).value
    return AwaitableListStorageAccountSASResult(
        account_sas_token=__ret__.account_sas_token)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
1e1a343dc5ae02796835273443f4f1bc06e98ac6 | 04f4f051ebbbcf5fdd4ffe4a8d24697c2dc55735 | /virtual/bin/alembic | 3694c1a6b7c9f67c9c6ab9abf81897de9d5ed23d | [
"MIT"
] | permissive | Pnshimiye/blog-app | 854a9ddea8387989ca487d308badc000d261fbd4 | ae6d7e7118d798c3f90cea660e13ba58ae649b63 | refs/heads/master | 2020-04-26T09:03:21.936209 | 2019-03-06T07:22:24 | 2019-03-06T07:22:24 | 173,442,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/pauline/Documents/core-projects/Self_Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from alembic.config import main
# Console-script shim: strip setuptools' "-script.py(w)"/".exe" suffix from
# argv[0] so alembic reports a clean program name, then run its CLI and exit
# with its status code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"pnshimiye@gmail.com"
] | pnshimiye@gmail.com | |
c2ad586c7ddc7ca370f5fc12f0ae2ffcbbefdb8b | cf1d45f536a5922f1a15e0340b6ccb6f2124b7f8 | /main.py | 8f91e2f4ea22e418d3c5123d8ad21225c1f6ab77 | [] | no_license | purpleyoung/A-star-maze | 4a658c633dcf5cab0cd28cf9f16647c16e35d78c | 60a1250da044366e332e19abf2bc80490d71fbe1 | refs/heads/main | 2023-02-01T00:09:19.225081 | 2020-12-08T09:02:43 | 2020-12-08T09:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import time
from opt import Node, FQueue, h, g, valid_neighbors, add_close
from maze import maze
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--width", help="window width", default=7, type=int)
parser.add_argument("-he", "--height", help="window height", default=5, type=int)
parser.add_argument("-u", "--unit", help="window unit", default=50, type=int)
parser.add_argument("-hw", "--h_weight", help="weight for h function", default=2, type=float)
args = parser.parse_args()
def loop(q: FQueue) -> Node:
    """Expand A* nodes until the maze's end node is pulled from the queue.

    Repeatedly pops the lowest-f node, closes it, and relaxes every open
    neighbor, re-queueing any neighbor whose g-cost improves.  Returns the
    end node once it is dequeued; the caller walks `.pre` links back from it.
    """
    while True:
        time.sleep(0.1)  # throttle so each expansion is visible in the UI
        current = q.get()
        add_close(current)
        if maze.is_end_node(current):
            return current
        for neighbor in valid_neighbors(current, q, maze):
            if neighbor.pre == current:
                continue
            candidate_g = g(current, neighbor)
            if candidate_g < neighbor.g:
                # Found a cheaper path into this neighbor: re-parent it and
                # push it with its new f = h + g priority.
                neighbor.pre = current
                neighbor.g = candidate_g
                score = h(neighbor, maze.e_node, weight=args.h_weight) + neighbor.g
                q.put(score, neighbor)
                maze.add_f(score, neighbor)
def main(event=None):
    """Tk callback: run one full A* search from the start node, then paint
    the resulting path back through the maze."""
    frontier = FQueue()
    frontier.put(0, maze.s_node)
    goal = loop(frontier)
    maze.backward(goal)
# Build the Tk maze window from the CLI flags, wire <space> to re-run the
# search, and block in the Tk event loop.
maze.build(args.width, args.height, args.unit)
maze.bind('<space>', main)
maze.mainloop()
| [
"morvanzhou@hotmail.com"
] | morvanzhou@hotmail.com |
aed31ff15357a0315ac03bf1a02ac63151ca3b76 | ebb0bd236009cb203035ab31ce136df45584ad35 | /resources/admin.py | 8283c744d589b640f060e0139d45490978ec158b | [] | no_license | the-akira/Django-GraphQL-Tutorial | 7a798b0fdfc4c1d8c7cb47ac31a71ec1ccae29b6 | 74c543467fd0e93b75411f34215513fa3d498dfd | refs/heads/master | 2023-04-28T22:28:11.051719 | 2021-09-25T03:16:14 | 2021-09-25T03:16:14 | 248,372,724 | 2 | 0 | null | 2023-04-21T20:53:09 | 2020-03-19T00:21:32 | Python | UTF-8 | Python | false | false | 150 | py | from django.contrib import admin
from resources.models import Artista, Disco
# Expose every resources model in the Django admin site.
classes = [Artista, Disco]
for c in classes:
admin.site.register(c) | [
"gabrielfelippe90@gmail.com"
] | gabrielfelippe90@gmail.com |
ced4d8baa28a26a118b0acb4155d12e6cdef3d5f | 8c39ba92cc71ff78242477d3256f6ee3daa872c7 | /conans/test/unittests/util/test_encrypt.py | 749a81bd3de44ecdc928a477a0923d413788d743 | [
"MIT"
] | permissive | conan-io/conan | eb4427e534a0edbb1fb06c753d5d9587faaef93c | bac455d1329b6744cdc41747354a727c9233179f | refs/heads/release/2.0 | 2023-09-03T18:51:54.345761 | 2023-09-03T17:30:43 | 2023-09-03T17:30:43 | 47,190,624 | 7,754 | 1,182 | MIT | 2023-09-14T15:16:09 | 2015-12-01T13:17:02 | Python | UTF-8 | Python | false | false | 1,463 | py | import uuid
import pytest
from conans.util import encrypt
def test_encryp_basic():
    """Round-trip an ASCII message: encode() must transform it and decode()
    must restore it exactly, with the str type preserved throughout."""
    # NOTE(review): the function name has a typo ("encryp"); kept so the
    # pytest test id stays stable for CI history.
    key = str(uuid.uuid4())
    message = 'simple data ascii string'
    data = encrypt.encode(message, key)
    assert type(message) == type(data)
    # Fixed: the original asserted `message != data` twice in a row.
    assert message != data
    decoded = encrypt.decode(data, key)
    assert type(message) == type(data)
    assert message == decoded
def test_encrypt_unicode():
    """Encoding and decoding must survive non-ASCII message content."""
    key = str(uuid.uuid4())
    # Conan codebase allows only ASCII source files, so the unicode text is
    # reconstructed from its UTF-8 byte representation.
    message = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')
    data = encrypt.encode(message, key)
    assert type(message) == type(data)
    assert message != data
    decoded = encrypt.decode(data, key)
    assert type(message) == type(data)
    assert message == decoded
def test_key_unicode():
    """A key containing non-ASCII characters must still round-trip a message."""
    # codebase allows only ASCII files, hence the bytes -> str decode
    unicode_key = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')
    message = 'the message'
    data = encrypt.encode(message, unicode_key)
    assert type(message) == type(data)
    assert message != data
    decoded = encrypt.decode(data, unicode_key)
    assert type(message) == type(data)
    assert message == decoded
def test_key_empty():
    # Empty keys, or keys with only non-ascii chars, must be rejected.
    for bad_key in ('', b'\xc3\xb1\xe2\x82\xac'.decode('utf-8')):
        with pytest.raises(AssertionError):
            encrypt.encode('message', bad_key)
| [
"noreply@github.com"
] | conan-io.noreply@github.com |
049931e552b123be245a259edb76d7fa1ba55f9b | 6733716dcdcacfcc739ae5c4af976db81ead852b | /ROOT/Project/functions/rootHist_TXT/func/D1H_rootHist_TXT_conversion_largeBin.py | 846d3c15ee2fe360a962709ea5b6f410983abfb4 | [] | no_license | StudyGroupPKU/fruit_team | 45202a058d59057081670db97b9229ee720fa77e | 9f9f673f5ce22ce6d25736871f3d7a5bd232c29d | refs/heads/master | 2021-01-24T08:15:37.909327 | 2018-05-11T08:53:06 | 2018-05-11T08:53:06 | 122,975,404 | 0 | 5 | null | 2018-04-05T02:37:14 | 2018-02-26T13:41:24 | Python | UTF-8 | Python | false | false | 5,645 | py | #Author : Junho LEE
#input/output txt format :: Nth_bin Start_of_bin End_of_bin Entry
#filename :: D1H_rootHist_TXT_conversion.py
def D1H_roothist_to_txt_largeBin(filename, outputpath = ''):
    """Dump every TH1 histogram found in a ROOT file to per-histogram text files.

    Each output line has the format "Nth_bin Start_of_bin End_of_bin Entry".
    Files are named "<histname>_hist_largeBin.txt" and written to the current
    working directory.

    :param filename: input .root file path (absolute, ~-relative, or CWD-relative)
    :param outputpath: kept for interface compatibility; it only affects the
        informational path printed below, not where the files are written
    :return: list of generated text file names
    """
    from ROOT import TFile, TCanvas, TPad
    import os
    # Normalise `filename` to an absolute path.
    if(filename[0]=="/"):
        filename = filename
    elif(filename[0] == '~'):
        filename = filename.replace("~",os.environ['HOME'])
    else:
        filename = os.getcwd() + "/" + filename # get the path included filename
    # `loca` = number of characters after the last "/" (bare file-name length).
    loca=len(filename)
    for i in range (1,len(filename)+1): # find the "/" location
        if(filename[-i] == "/"):
            loca = i-1
            break
    FILENAME = filename.replace(filename[:-loca],"") # shortened filename, path excluded
    # Derive the (printed, informational) output stem.
    filetxt = filename.replace(".root","")
    filetxt = filetxt.replace("//","/")
    if(outputpath != ''):
        if(outputpath[0] == "/"):
            filetxt = outputpath + "/" + FILENAME.replace(".root","")
        elif(outputpath[0] == "~"):
            filetxt = outputpath.replace("~",os.environ['HOME']) + "/" + FILENAME.replace(".root","")
        else:
            filetxt = os.getcwd() + "/" + outputpath + "/" + FILENAME.replace(".root","")
        filetxt = filetxt.replace("//","/")
    print(filetxt)
    f = TFile(filename,"READ") # open the input ROOT file
    # Collect the names of all TH1-derived histograms stored in the file.
    dirlist = f.GetListOfKeys()
    ITER = dirlist.MakeIterator()
    key = ITER.Next()
    LIST = []
    while key: # iterate over the objects contained in the file
        # BUGFIX: the original used str.index(), which raises ValueError for
        # any non-TH1 object in the file; membership test is safe.
        if "TH1" in key.GetClassName():
            LIST.append(key.ReadObj().GetName())
        key = ITER.Next()
    OutputList = []
    for ijk in range(0,len(LIST)):
        hist = f.Get(LIST[ijk])
        Nbin = hist.GetNbinsX()
        Filetxt = LIST[ijk] + "_hist_largeBin.txt"
        OutputList.append(Filetxt)
        print(Filetxt, "is generated")
        # BUGFIX: the original closed only the *last* output file (leaking the
        # others) and crashed with NameError when the ROOT file held no
        # histograms; the context manager closes each file it opens.
        with open(Filetxt,"w+") as wf:
            for ii in range(1,Nbin+1):
                bin_l = hist.GetBinLowEdge(ii)
                bin_h = bin_l + hist.GetBinWidth(ii)
                binCont = hist.GetBinContent(ii)
                wf.write("%i %f %f %f\n" %(ii,bin_l,bin_h,binCont))
    f.Close()
    return OutputList
def D1H_txt_to_roothist(filename, outputpath=''):
    """Rebuild a TH1D from a text file of "Nth_bin low_edge high_edge entry" lines.

    The histogram is drawn on a canvas together with its total entry count and
    written to "<input stem>_F.root" (placed under *outputpath* when given).

    :param filename: input .txt file path (absolute or CWD-relative)
    :param outputpath: optional output directory (absolute, ~-relative, or CWD-relative)
    :return: path of the .root file that was written
    """
    from ROOT import TFile, TCanvas, TPad, TH1D, TLatex, TStyle, gStyle, TText, gPad, TPaveText
    import os
    can = TCanvas("can","can",200,10,500,500)
    # Normalise `filename` to an absolute path.
    if(filename[0] != "/"):
        filename = os.getcwd() + "/" + filename # get the path included filename
    # `loca` = number of characters after the last "/" (bare file-name length).
    loca = len(filename)
    for i in range(1, len(filename)+1): # find the "/" location
        if(filename[-i] == "/"):
            loca = i-1
            break
    FILENAME = filename.replace(filename[:-loca],"") # shortened filename, path excluded
    fileroot = filename.replace(".txt","_F.root")
    fileroot = fileroot.replace("//","/")
    # BUGFIX: the original never closed the input text file; the context
    # manager guarantees release even if parsing fails below.
    with open(filename,"r") as f:
        lineList = f.readlines()
    Nbin = len(lineList) # one line per bin
    _, bin_init, _, _ = lineList[0].split()
    bin_init = float(bin_init) # lower edge of the first bin
    _, _, bin_final, _ = lineList[-1].split()
    bin_final = float(bin_final) # upper edge of the last bin
    hist = TH1D("hist","hist",Nbin,bin_init,bin_final)
    total_e = 0
    for i in range(1, Nbin+1):
        _, _, _, bin_c = lineList[i-1].split()
        bin_c = float(bin_c)
        hist.SetBinContent(i, bin_c)
        total_e = total_e + bin_c
    total_e = int(total_e)
    hist.Draw()
    text = TText(hist.GetXaxis().GetBinCenter(2), hist.GetYaxis().GetBinCenter(1), "Recycled. Total Entry : %i" %total_e)
    text.SetTextFont(10)
    text.Draw()
    gPad.Update()
    can.Update()
    # Decide where the output .root file goes, then write the histogram.
    if(outputpath != ''):
        if(outputpath[0] == "/"):
            fileroot = outputpath + "/" + FILENAME.replace(".txt","_F.root")
        elif(outputpath[0] == "~"):
            fileroot = outputpath.replace("~",os.environ['HOME']) + "/" + FILENAME.replace(".txt","_F.root")
        else:
            fileroot = os.getcwd() + "/" + outputpath + "/" + FILENAME.replace(".txt","_F.root")
        fileroot = fileroot.replace("//","/")
    wf = TFile(fileroot,"RECREATE")
    print(fileroot, " root file is generated !!!")
    hist.Write()
    wf.Close()
    fileroot = fileroot.replace("//","/")
    return fileroot
def main():
    """Smoke-test: convert one sample ROOT file to per-histogram text files."""
    # BUGFIX: the original called D1H_roothist_to_txt(), which is not defined
    # in this module (NameError); the conversion function defined above is the
    # *_largeBin variant.
    D1H_roothist_to_txt_largeBin("/Users/leejunho/Desktop/git/python3Env/group_study/fruit_team/ROOT/Project/root_generator/root3_sin.root")
# Run the smoke test only when executed directly, not on import.
if __name__=="__main__":
    main()
| [
"skyblue1293@naver.com"
] | skyblue1293@naver.com |
4270325a4df263a86fc3fec300870a7c3a26fb92 | 81cf04ed71fb1e141d7531e9cc9a82138adf1903 | /tensorflow_federated/python/research/gans/experiments/emnist/emnist_data_utils_test.py | 6ef867732cc5aefe49d2935761b1b59c50b2ba85 | [
"Apache-2.0"
] | permissive | tf-encrypted/federated | 1671fcae7c939dbe142f78c97ac43c1329db870c | 7797df103bf965a9d0cd70e20ae61066650382d9 | refs/heads/master | 2021-03-09T17:08:47.491876 | 2020-05-06T18:06:59 | 2020-05-27T20:37:50 | 246,360,226 | 1 | 2 | Apache-2.0 | 2020-05-08T02:43:41 | 2020-03-10T17:06:06 | Python | UTF-8 | Python | false | false | 3,770 | py | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Federated EMNIST dataset utilities."""
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.research.gans.experiments.emnist import emnist_data_utils
BATCH_SIZE = 7
def _summarize_model(model):
model.summary()
print('\n\n\n')
def _get_example_client_dataset():
  """Return the tf.data.Dataset of the first synthetic EMNIST client."""
  synthetic = tff.simulation.datasets.emnist.get_synthetic(num_clients=1)
  first_client = synthetic.client_ids[0]
  return synthetic.create_tf_dataset_for_client(first_client)
def _get_example_client_dataset_containing_lowercase():
  """Return the first test client's dataset from full (non-digit) EMNIST."""
  _, test_data = tff.simulation.datasets.emnist.load_data(only_digits=False)
  first_client = test_data.client_ids[0]
  return test_data.create_tf_dataset_for_client(first_client)
class EmnistTest(tf.test.TestCase):
  """End-to-end checks of emnist_data_utils.preprocess_img_dataset."""
  def test_preprocessed_img_inversion(self):
    """Mean pixel value flips from dark to light when inversion is enabled."""
    raw_images_ds = _get_example_client_dataset()
    # Inversion turned off, average pixel is dark.
    standard_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=False, batch_size=BATCH_SIZE)
    for batch in iter(standard_images_ds):
      for image in batch:
        self.assertLessEqual(np.average(image), -0.7)
    # Inversion turned on, average pixel is light.
    inverted_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=True, batch_size=BATCH_SIZE)
    for batch in iter(inverted_images_ds):
      for image in batch:
        self.assertGreaterEqual(np.average(image), 0.7)
  def test_preprocessed_img_labels_are_case_agnostic(self):
    """Lowercase labels should collapse onto their uppercase counterparts."""
    raw_images_ds = _get_example_client_dataset_containing_lowercase()
    raw_ds_iterator = iter(raw_images_ds)
    # The first element in the raw dataset is an uppercase 'I' (label is 18).
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 18)
    # The second element in the raw dataset is an uppercase 'C' (label is 12).
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 12)
    # The third element in the raw dataset is a lowercase 'd' (label is 39).
    # NOTE(review): the assertion below checks 47, which disagrees with the
    # 'd'/39 claim above -- confirm which character this element really is.
    self.assertEqual(next(raw_ds_iterator)['label'].numpy(), 47)
    processed_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, include_label=True, batch_size=BATCH_SIZE, shuffle=False)
    _, label_batch = next(iter(processed_ds))
    processed_label_iterator = iter(label_batch)
    # The first element (in first batch) in the processed dataset has a case
    # agnostic label of 18 (i.e., assert that value remains unchanged).
    self.assertEqual(next(processed_label_iterator).numpy(), 18)
    # The second element (in first batch) in the processed dataset has a case
    # agnostic label of 12 (i.e., assert that value remains unchanged).
    self.assertEqual(next(processed_label_iterator).numpy(), 12)
    # The third element (in first batch) in the processed dataset should now
    # have a case agnostic label of 47 - 26 = 21.
    self.assertEqual(next(processed_label_iterator).numpy(), 47 - 26)
    # Every processed label must land in the case-agnostic range [0, 36].
    for _, label_batch in iter(processed_ds):
      for label in label_batch:
        self.assertGreaterEqual(label, 0)
        self.assertLessEqual(label, 36)
# Running the file directly executes the test suite with TF2 behavior on.
if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
aa32e29c4b9131c0574be5bfb4f0959e114502b6 | 2d21730e5625cde259703bf6df8f74aef5cc6a58 | /tests/test_augments.py | 8f5896508facc2bdacf51cab344e9aadfc96375e | [
"MIT"
] | permissive | pattonw/augment | b0ea35bd774c760320a74e35d59c5ba54ccf3266 | 3643873d7b818da417c3d4fbf662bc36164ca10c | refs/heads/master | 2023-04-07T14:19:32.229258 | 2023-03-22T16:07:10 | 2023-03-22T16:07:10 | 202,191,635 | 0 | 0 | MIT | 2019-08-13T17:20:10 | 2019-08-13T17:20:09 | null | UTF-8 | Python | false | false | 856 | py | from augment import (
create_elastic_transformation,
create_identity_transformation,
create_rotation_transformation,
)
import numpy as np
def test_basics():
    """Smoke-check the three transformation factories on a 2x2 grid."""
    rotation = create_rotation_transformation((2, 2), 90)
    identity = create_identity_transformation((2, 2))
    # Built only to prove the call succeeds; its values are not pinned here.
    elastic = create_elastic_transformation((2, 2), (1, 1), 5)
    want_rotation = np.array(
        [
            [[0.27703846, 1.171035], [-1.171035, -0.27703846]],
            [[1.171035, -0.27703846], [0.27703846, -1.171035]],
        ],
        dtype=np.float32,
    )
    want_identity = np.array(
        [[[0.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
    )
    assert np.isclose(rotation, want_rotation).all()
    assert np.isclose(identity, want_identity).all()
| [
"pattonw@hhmi.org"
] | pattonw@hhmi.org |
e5914b3dbc97d8b3fbbab18f94c22d54dbd77dd8 | 03bbb27095d2afc03d0bd1a62caa91356292e5d5 | /MapMyPlaylist/MapMyPlaylist/urls.py | a86722dce18423a3169e51caedf2e8e7a5614f81 | [] | no_license | LindaAlblas/MapMyPlaylist | d624728daa6be2bed04018cdf1ab4dea609e9605 | 2e5b7a9c10231369d2c7722a4d8ae35dc8ac2f8f | refs/heads/master | 2020-12-24T22:39:34.805944 | 2013-02-20T15:21:27 | 2013-02-20T15:21:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()  # populate the admin registry from each app's admin.py
# URL routes, in legacy Django 1.x patterns() syntax with string view paths.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'MapMyPlaylist.views.home', name='home'),
    # url(r'^MapMyPlaylist/', include('MapMyPlaylist.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^findartist/(?P<artistName>[\w ]+)/$', 'findartist.views.artistQuery'),
)
| [
"finlaymccourt@gmail.com"
] | finlaymccourt@gmail.com |
9a5502d7f1c3da7c41263037451b3f748e01a83c | 5cf5750bb8fb1cba865d25628aef91df3f28da87 | /pyhnko/twilio/20-valid_parentheses.py | 2ee473f6d84df0cd06dcc0b9668b0414ec9f15de | [] | no_license | nextsde/daily-coding-prep | a3d8d4f8920ed06dc7077fa4968d22ad88af62fa | 09d6a768ccadbff6ebc8bb36816ca3795e6d0f66 | refs/heads/master | 2021-05-20T10:36:18.290954 | 2020-07-12T16:54:31 | 2020-07-12T16:54:31 | 252,252,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | class Solution:
def isValid(self, s: str) -> bool:
parenthesis = {'(': ')', '{':'}', '[':']'}
stack = []
for char in s:
if char in '([{':
stack.append(char)
elif not stack or parenthesis[stack.pop()] != char: return False
return not stack
| [
"nicolas.wolyniec@estudiante.uam.es"
] | nicolas.wolyniec@estudiante.uam.es |
e5672144935aa5bf4c908a1be53ac0fbfde893a1 | eb1148f75215739a7ca5ba35027fe46b9dcc5e30 | /practice/0416_practice/SWEA_4366_정식이의 은행업무_이정민.py | d09dcc26aafb4b5bf91d31679301927909e68846 | [] | no_license | ljm9748/Algorithm_ProblemSolving | 8ee594c0d1226ebe3670e772a7fc1c08ddf62e43 | d1ebc34019ae2d795417ef47f74f1407a7f3cb9e | refs/heads/master | 2023-05-02T20:00:01.784017 | 2021-04-26T13:57:29 | 2021-04-26T13:57:29 | 339,901,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | def bintonum():
    # Interpret the module-level `bininput` digit list as a big-endian binary
    # number: walk from the least-significant (last) digit, doubling the
    # place value each step.
    two=1
    tmpsum=0
    for i in range(len(bininput)-1,-1,-1):
        tmpsum+=bininput[i]*two
        two*=2
    return tmpsum
def tritonum(digits=None):
    """Interpret a big-endian list of ternary digits (0-2) as an integer.

    Generalized: the digit list may now be passed explicitly, which also makes
    the function independently testable.

    :param digits: digit list to convert; when None (the original
        zero-argument call style) the module-level ``triinput`` is used.
    :return: the base-3 value of the digit sequence (0 for an empty list).
    """
    if digits is None:
        digits = triinput  # module-level input parsed in the main loop
    value = 0
    place = 1  # 3**k for the k-th digit from the right
    for idx in range(len(digits) - 1, -1, -1):
        value += digits[idx] * place
        place *= 3
    return value
# For each test case: read a binary digit string and a ternary digit string,
# enumerate every value reachable by flipping exactly one binary digit, then
# try each one-digit change of the ternary number until its value appears in
# that candidate set, and print the match.
for tc in range(int(input())):
    bin=[]  # NOTE: shadows the builtin bin(); kept as-is
    bininput=list(map(int, list(input())))
    triinput=list(map(int,list(input())))
    # candidate values reachable by flipping exactly one binary digit
    for i in range(len(bininput)):
        if bininput[i]==0:
            bininput[i]=1
            bin.append(bintonum())
            bininput[i]=0
        else:
            bininput[i]=0
            bin.append(bintonum())
            bininput[i]=1
    flag=False
    # try both possible replacements of each ternary digit until a match
    for i in range(len(triinput)):
        if flag:
            break
        for j in range(2):
            triinput[i]=(triinput[i]+1)%3
            tmpanswer=tritonum()
            if tmpanswer in bin:
                print('#{} {}'.format(tc+1,tmpanswer))
                flag=True
                break
            triinput[i] = (triinput[i] + 1) % 3
| [
"ljm9748@naver.com"
] | ljm9748@naver.com |
b718e0a18a4216579a39362df3c537ddbf64dd79 | 423bbc654e6ebe426a27ae9daa1b48232b0e9047 | /rpn_092.py | 49a01607ecb344766f03fe5e68ee211426034da1 | [] | no_license | mcclosr5/Python-code | fb0824b0e6c250b44c85705db740e7ddb3efae41 | 794e43351327abf3a83ace5dd7f51e2ef011fb19 | refs/heads/main | 2023-07-11T06:08:45.292511 | 2021-08-18T13:13:02 | 2021-08-18T13:13:02 | 397,591,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from stack_092 import Stack
def cal(sym, p1, p2):
    """Apply binary RPN operator *sym*; p1 was popped first (the stack top).

    Returns None for an unrecognised operator, matching the original chain.
    """
    operations = {
        "+": lambda top, under: under + top,
        "-": lambda top, under: under - top,
        "*": lambda top, under: under * top,
        "/": lambda top, under: under / top,
    }
    handler = operations.get(sym)
    if handler is not None:
        return handler(p1, p2)
def cal2(sym, p):
    """Apply unary RPN operator: 'n' negates, 'r' takes the square root.

    Returns None for an unrecognised operator.
    """
    if sym == "r":
        return p ** 0.5
    if sym == "n":
        return -p
def calculator(line):
    """Evaluate a whitespace-separated RPN expression and return its value.

    Operands are pushed as-is; binary operators (+ - * /) pop two values,
    unary operators (r = sqrt, n = negate) pop one; the final stack top is
    returned as a float.
    """
    stack = Stack()
    binary_ops = ("+", "-", "*", "/")
    unary_ops = ("r", "n")
    for token in line.split():
        if token in binary_ops:
            top = float(stack.pop())
            under = float(stack.pop())
            stack.push(cal(token, top, under))
        elif token in unary_ops:
            stack.push(cal2(token, float(stack.pop())))
        else:
            stack.push(token)
    return float(stack.top())
| [
"noreply@github.com"
] | mcclosr5.noreply@github.com |
641b32b9ba936495238128b5b25732b687e2479d | 39fa403d46a4456a07c761e1aaa8af2d418c5f87 | /kid_readout/analysis/resources/experiments.py | 728292c1d96bb19e30bd33d9770f53f432eda98c | [
"BSD-2-Clause"
] | permissive | vapor36/kid_readout | 72d94d96e964d6a2eef3aa57ed6fc814946cfe46 | 07202090d468669200cab78297122880c1c03e87 | refs/heads/master | 2020-12-12T13:32:47.267337 | 2018-11-11T15:36:40 | 2018-11-11T15:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | import bisect
import socket
import time
# Pick the default cryostat from the host name: the machine called
# 'detectors' drives the HPD cryostat; anywhere else defaults to StarCryo.
if socket.gethostname() == 'detectors':
    default_cryostat = 'HPD'
else:
    default_cryostat = 'StarCryo'
import starcryo_experiments
import hpd_experiments
def get_experiment_info_at(unix_time, cryostat=None):
    """Return the experiment-info row that was active at *unix_time*.

    Selects the HPD or StarCryo experiment table (defaulting to the
    host-derived ``default_cryostat``), picks the last entry whose start time
    is <= *unix_time*, and adds a boolean 'is_dark' key derived from the
    row's 'optical_state'.
    """
    if cryostat is None:
        cryostat = default_cryostat
    # Both cryostat modules expose the same pair of parallel tables.
    if cryostat.lower() == 'hpd':
        source = hpd_experiments
    else:
        source = starcryo_experiments
    position = bisect.bisect(source._unix_time_index, unix_time) - 1
    if position < 0:
        raise Exception("No experiment found for timestamp %s" % time.ctime(unix_time))
    info = source.by_unix_time_table[position]
    info['is_dark'] = (info['optical_state'] == 'dark')
    return info
"glenn.caltech@gmail.com"
] | glenn.caltech@gmail.com |
50fdea7cb51a62a4e0e88fd8f44cd9fa5d56b2ea | ba2b40a762feb52f6a0a1dc43da96b41112a3bbb | /code/experiment/dcase_simpleNetCNN/ex2/0.0001/testNetSimple.py | 93ded45503a2c3bb4a12707a4d4f0c3baf51239c | [] | no_license | YuanGongND/DeepVis2 | e03a3c8b4231e61c6442314935490d9131b41046 | 6514f403c0df9dab4fa1c66b3a0a95ea0aea2ec3 | refs/heads/master | 2021-08-22T23:35:13.949128 | 2017-12-01T17:26:29 | 2017-12-01T17:26:29 | 112,768,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 11:25:16 2017
Keras model of
@author: Kyle
"""
import tensorflow as tf
import numpy as np
import keras
from keras.models import Model
from keras import regularizers
import math
import matplotlib.pyplot as plt
#%%
def genSineFilter( frequency, points = 64, sampleRate = 16000 ):
    """Sample a sine wave at *frequency* Hz over a *points*-long window.

    The window is centred on t=0 at the given sample rate; the curve is also
    drawn with pyplot for visual inspection.  Returns the samples as a list.
    """
    sample_period = 1 / sampleRate
    half_window = points / 2 * sample_period
    time_axis = np.linspace(-half_window, half_window, num=points)
    waveform = [math.sin(2 * math.pi * frequency * t) for t in time_axis]
    plt.plot(waveform)
    return waveform
#%%
def sineInit( shape, dtype=None ):
    """Keras kernel initializer: filter k is a sine at 150*k Hz, scaled by
    1/filter_length; filter 0 is a unit impulse (identity) filter.

    Expects a 4-D kernel shape of the form (1, filter_len, 1, n_filters).
    """
    print( shape )
    kernel = np.zeros(shape)
    filter_length = shape[1]
    n_filters = shape[3]
    # Fill every filter except the first with a sine kernel.
    for k in range(1, n_filters):
        kernel[0, :, 0, k] = genSineFilter(150 * k, points=filter_length)
    kernel = kernel / filter_length
    # Filter 0 becomes a discrete delta filter.
    kernel[0, :, 0, 0] = np.zeros(filter_length)
    kernel[0, 0, 0, 0] = 1
    return kernel
#%% loadInit
def loadInit( shape, dtype = None, upbound = 95 ):
    """Keras kernel initializer that loads precomputed band-pass filters.

    Reads 'bandPassFilters_256_64_<upbound>.csv' and copies row k of the CSV
    into filter k of the 4-D kernel.  The relative path depends on whether
    the module runs as a script or is imported from the experiment tree.
    """
    print( shape )
    InitKernal = np.zeros( shape )
    # relative CSV path differs between script and package execution
    if __name__ == '__main__':
        exterFilterFile = np.loadtxt( '../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
    else:
        exterFilterFile = np.loadtxt( '../../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
    for filterIndex in range( 0, shape[ 3 ] ):
        InitKernal[ 0, :, 0, filterIndex ] = exterFilterFile[ filterIndex, : ]
    return InitKernal
#%% loadInit
def loadInitScene( shape, dtype = None, upbound = 50 ):
    """Scene-task variant of loadInit: same CSV-backed band-pass initializer,
    but with upbound defaulting to 50 and a confirmation printout."""
    print( shape )
    InitKernal = np.zeros( shape )
    # relative CSV path differs between script and package execution
    if __name__ == '__main__':
        exterFilterFile = np.loadtxt( '../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
    else:
        exterFilterFile = np.loadtxt( '../../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
    for filterIndex in range( 0, shape[ 3 ] ):
        InitKernal[ 0, :, 0, filterIndex ] = exterFilterFile[ filterIndex, : ]
    print( 'Initializer' + str( upbound ) + 'is used' )
    return InitKernal
#%%
def testNet( input, timeStep_num = 150, convLayer_num_front = 1, filter_num = 64, numClass = 4, init = 'glorot_uniform',\
            activationUnit = 'relu', conv_filter_size_front = 256, pooling_size = 2, convLayer_num_back = 4, conv_filter_size_back = 40, l2_reg = 0.01,\
            denseUnitNum = 64, task = 'emotion' ):
    """Build the raw-waveform classifier graph and return its softmax output.

    Each waveform is split into ``timeStep_num`` sub-sequences, passed through
    a band-pass-initialized 1-D conv front end, pooled to one value per
    (sub-sequence, filter), reshaped into a time-frequency map, and classified
    by a stack of small 2-D conv layers plus a dense softmax head.

    NOTE(review): the parameter `input` shadows the builtin; the batch size
    must be statically known; `pooling_size` and `conv_filter_size_back`
    appear unused in the current graph -- confirm before relying on them.
    """
    # the input shape is [ example_num, whole_audio_length ], e.g., [ 200 samples, 96000 points ]
    # convert it to tensor
    input = tf.convert_to_tensor( input )
    # parameters of the network
    example_num = input.get_shape().as_list()[ 0 ]
    # length of each sub-sequence, e.g., 96000/timeStep(150)
    subSequence_length = int( input.get_shape().as_list()[ 1 ] / timeStep_num )
    # reshape into [ example_num * sequence, subsequence_length ]
    input = tf.reshape( input, [ example_num *timeStep_num, 1, subSequence_length, 1 ] )
    print( input.shape )
    # first conduct average pooling
    #input = tf.layers.batch_normalization( input )
    input = keras.layers.pooling.AveragePooling2D( pool_size=( 1, 1 ), strides=None, padding='same' )( input )
    print( input.shape )
    # convLayer_num *( conv + maxpooling )
    for i in range( convLayer_num_front ):
        input = tf.layers.batch_normalization( input )
        with tf.name_scope( 'conv' + str( i + 1 ) ):
            if i == 0:
                # first conv layer uses the CSV band-pass initializer,
                # selected by task
                if task == 'scene':
                    input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= 'relu', kernel_initializer = loadInitScene )( input )
                else:
                    input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= 'relu', kernel_initializer = loadInit )( input )
            else:
                pass
                #input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= activationUnit, kernel_regularizer=regularizers.l2( l2_reg ), kernel_initializer = init )( input )
            print( input.shape )
        print( input.shape )
        print( i )
    # rectify, then pool each sub-sequence down to one value per filter
    input = tf.abs( input )
    #input = keras.layers.pooling.AveragePooling2D( ( 1, pooling_size **(convLayer_num_front + 8 ) ), padding='valid' )( input )
    input = keras.layers.pooling.AveragePooling2D( ( 1, subSequence_length ), padding='valid' )( input )
    #input = tf.scalar_mul( pooling_size **(convLayer_num_front - 2), input )
    # reshape for preparision of LSTM layers
    print( input.shape )
    input = tf.transpose( input, [ 3, 0, 1, 2 ] ) # change the column order
    print( input.shape )
    restPoint = input.get_shape().as_list()[ -1 ]
    print( input.shape )
    input = tf.reshape( input, [ filter_num, 1, example_num, timeStep_num*restPoint ] )
    print( input.shape )
    input = tf.transpose( input, [ 2, 3, 0, 1 ] )
    print( input.shape )
    # back-end 2-D conv stack over the (time, filter) map
    for i in range( convLayer_num_back ):
        input = tf.layers.batch_normalization( input )
        input = keras.layers.convolutional.Conv2D( 32, ( 3, 3 ), padding='same', activation= activationUnit, kernel_regularizer=regularizers.l2( l2_reg ), kernel_initializer = init )( input )
        print( input.shape )
        input = keras.layers.pooling.MaxPooling2D( ( 2, 2 ), padding='same' )( input )
        print( input.shape )
        print( i )
    # flatten for the dense layers
    newSubSequence_length = input.get_shape().as_list()[ -3 ] *input.get_shape().as_list()[ -2 ] *input.get_shape().as_list()[ -1 ]
    input = tf.reshape( input, [ example_num, newSubSequence_length ] )
    print( input.shape )
    # start the LSTM layers
    input = keras.layers.core.Dense( denseUnitNum, activation = activationUnit, kernel_initializer = init )( input )
    input = tf.nn.dropout( input, keep_prob = 0.5 )
    print( input.shape )
    output = keras.layers.core.Dense( numClass, activation = 'softmax' )( input )
    print( output.shape )
    return output
#%%
# Quick shape-check when run directly: build inputs for a single zero
# waveform and push it through the graph (the call itself sits on the
# following, metadata-fused line).
if __name__ == '__main__':
    time_seq = list( range( 1, 16 ) )
    testInput = np.zeros( [ 1, 441000 ] )
testNet( input = testInput ) | [
"ygong1@nd.edu"
] | ygong1@nd.edu |
d2aedbae6a5abb925a4d445183833edf30996453 | ebc111217351cda19e1a64a8fe67c956db1ddef2 | /urls_and_templates/settings.py | 7d99534bbd58af5af255a625ffa81ee03389fee3 | [] | no_license | PeterM358/Python-web-2021 | cf08beaa3330495afc53e640f4a2aaf0429049e9 | 96dc40fa433329ea3deaa39532934b2fab83489f | refs/heads/main | 2023-07-09T15:09:08.868548 | 2021-07-02T12:55:58 | 2021-07-02T12:55:58 | 382,328,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,403 | py | """
Django settings for urls_and_templates project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os.path
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-(sozs*28h@re8^*mne-_a-b-(zmng!v8lr*x@wt54liv)t=+8p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main_app',
'secondary_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls_and_templates.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'urls_and_templates.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"estestveno"
] | estestveno |
c67c21b30aa610dc55042a6e27204e50e29b4146 | 084e35c598426b1137f9cd502e1b5e7f09cdf034 | /并查集/problem1202_交换字符串中的元素_DFS.py | 5671b255c350dad26c0c50b2b8ea6bfec3f777be | [] | no_license | sakurasakura1996/Leetcode | 3a941dadd198ee2f54b69057ae3bbed99941974c | 78f239959af98dd3bd987fb17a3544010e54ae34 | refs/heads/master | 2021-09-11T05:07:44.987616 | 2021-09-07T05:39:34 | 2021-09-07T05:39:34 | 240,848,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | """
1202. 交换字符串中的元素
给你一个字符串 s,以及该字符串中的一些「索引对」数组 pairs,其中 pairs[i] = [a, b] 表示字符串中的两个索引(编号从 0 开始)。
你可以 任意多次交换 在 pairs 中任意一对索引处的字符。
返回在经过若干次交换后,s 可以变成的按字典序最小的字符串。
示例 1:
输入:s = "dcab", pairs = [[0,3],[1,2]]
输出:"bacd"
解释:
交换 s[0] 和 s[3], s = "bcad"
交换 s[1] 和 s[2], s = "bacd"
示例 2:
输入:s = "dcab", pairs = [[0,3],[1,2],[0,2]]
输出:"abcd"
解释:
交换 s[0] 和 s[3], s = "bcad"
交换 s[0] 和 s[2], s = "acbd"
交换 s[1] 和 s[2], s = "abcd"
示例 3:
输入:s = "cba", pairs = [[0,1],[1,2]]
输出:"abc"
解释:
交换 s[0] 和 s[1], s = "bca"
交换 s[1] 和 s[2], s = "bac"
交换 s[0] 和 s[1], s = "abc"
提示:
1 <= s.length <= 10^5
0 <= pairs.length <= 10^5
0 <= pairs[i][0], pairs[i][1] < s.length
s 中只含有小写英文字母
"""
# date: 2021/1/11 好像昨天周赛也是这样一道题,思路很相近,应该用并查集来做,好好做一做这道题
# 这种题目,发现用并查集的思路倒不是很难,因为题目很容易发现是一个关系转换的过程,比如位置0的字符可以和位置1的字符交换
# 顺序,位置1的字符可以和位置2的字符交换顺序,那么位置0和位置2的字符也是可以交换顺序的,那么他们三个都是在同一个集合中。
# 然后只要在同一个集合中的字符,我们是可以实现任意顺序排列的,那么只要按照字典排序就行了。
# 然后还需要注意的是,可能最后归纳之后,还有好几个集合,这个时候我们要字典排序之后然后再放回到该集合所占的位置上去。
from typing import List
from collections import defaultdict
class Solution:
def dfs(self, res, graph, visited, x):
for neighbor in graph[x]:
if not visited[neighbor]:
if __name__ == '__main__':
solu = Solution()
s = "dcab"
pairs = [[0,3],[1,2],[0,2]]
ans = solu.smallestStringWithSwaps(s, pairs)
print(ans)
| [
"2470375551@qq.com"
] | 2470375551@qq.com |
d7bcc01c5138406388ef33179a20381a87be7be3 | 76a7dccc6aaa5ece69edf586a0493471e9a32103 | /tests/ethpm/test_package_validation_utils.py | 5c0d98c93d715e5ec4d9b2097fd359f342c0dc70 | [
"MIT"
] | permissive | LieutenantRoger/py-ethpm | 5f95d77d47fd7111cd7f51d2fb3fc9fbc814572a | 9e7a9f4d28ad922c9349c1ac9216a328bdb73527 | refs/heads/master | 2020-03-20T01:23:52.062447 | 2018-06-01T21:39:20 | 2018-06-01T21:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import pytest
from ethpm.exceptions import ValidationError
from ethpm.utils.package_validation import (
validate_package_exists,
validate_package_against_schema,
validate_package_deployments,
)
def test_validate_package_exists_validates():
    # A package file that exists on disk passes; the validator returns None on success.
    assert validate_package_exists("./v2-packages/safe-math-lib/1.0.0.json") is None
def test_validate_package_exists_invalidates():
    # A nonexistent path must raise ValidationError.
    with pytest.raises(ValidationError):
        validate_package_exists("DNE")
def test_validate_package_validates(valid_package):
    # `valid_package` fixture conforms to the package schema; success returns None.
    assert validate_package_against_schema(valid_package) is None
def test_validate_package_against_all_packages(all_packages):
    # Every package provided by the `all_packages` fixture must satisfy the schema.
    for pkg in all_packages:
        assert validate_package_against_schema(pkg) is None
def test_validate_package_invalidates(invalid_package):
    # A schema-violating package must raise ValidationError.
    with pytest.raises(ValidationError):
        validate_package_against_schema(invalid_package)
def test_validate_deployed_contracts_present_validates(package_with_conflicting_deployments):
    # NOTE(review): despite the "_validates" suffix, this asserts the error path —
    # conflicting deployments are expected to raise ValidationError.
    with pytest.raises(ValidationError):
        validate_package_deployments(package_with_conflicting_deployments)
def test_validate_deployments(package_with_matching_deployment):
    # Deployments that match the package's contracts validate cleanly (None).
    validate = validate_package_deployments(package_with_matching_deployment)
    assert validate is None
def test_validate_deployed_contracts_pr(package_with_no_deployments):
    # A package with no deployments at all is also considered valid.
    validate = validate_package_deployments(package_with_no_deployments)
    assert validate is None
| [
"nickgheorghita@gmail.com"
] | nickgheorghita@gmail.com |
232507fa6d1a15e2d78a9cacab939e0e2489a4eb | d2e2fb0ed2405ad5e43c73c1339cfb24d6863bb6 | /src/improveo/wsgi.py | 99d0cde2a9570b15d080c1e735b4c814e363be09 | [] | no_license | AsemAntar/Django-Made-Easy.-Build-an-application-for-companies | c23ce30af93537eb691003e600f9f31dc8e1abce | 3957495c83e2abd8a011dcc33c70f3c83a1730b5 | refs/heads/master | 2022-12-11T20:55:03.800665 | 2020-09-01T17:09:34 | 2020-09-01T17:09:34 | 267,568,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for improveo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project settings module unless the environment already set one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'improveo.settings')
# Module-level WSGI callable that servers look up (see module docstring above).
application = get_wsgi_application()
| [
"asemantar@gmail.com"
] | asemantar@gmail.com |
d52040f5592facb0fcc3a45fc1defe8bedbed45d | 4443b7ee1cdfd4dd21663230a7f5995aa0e3e079 | /Word_Break.py | 5c33fab731876e5683e67f9500300c9c24fcd6bf | [] | no_license | pranabsarkar/Algorithm-Pratice-Questions-LeetCode | c56c754781e6afb38352f10e6b4993d8a6876e8d | c135bb322fbda8505f85deaa9cfe3b9ed279a443 | refs/heads/master | 2022-12-12T00:03:04.549840 | 2020-08-21T20:30:12 | 2020-08-21T20:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | # Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
# determine if s can be segmented into a space-separated sequence of one or more dictionary words.
#
# Note:
#
# The same word in the dictionary may be reused multiple times in the segmentation.
# You may assume the dictionary does not contain duplicate words.
# Example 1:
#
# Input: s = "leetcode", wordDict = ["leet", "code"]
# Output: true
# Explanation: Return true because "leetcode" can be segmented as "leet code".
# Example 2:
#
# Input: s = "applepenapple", wordDict = ["apple", "pen"]
# Output: true
# Explanation: Return true because "applepenapple" can be segmented as "apple pen apple".
# Note that you are allowed to reuse a dictionary word.
# Example 3:
#
# Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
# Output: false
class Solution:
    """Word Break: decide whether ``s`` can be segmented into a sequence of
    words taken (with reuse allowed) from ``wordDict``.

    Top-down recursion memoized on the remaining suffix: O(n^2 * m) time for
    n = len(s) and m = len(wordDict), O(n) extra space for the memo.
    """

    def wordBreak(self, s, wordDict):
        """Return True if ``s`` can be fully segmented using ``wordDict``."""
        memo = {}
        return self.helper(s, wordDict, memo)

    def helper(self, s, wordDict, memo):
        """Memoized check on the suffix ``s``; ``memo`` maps suffix -> bool."""
        if not s:
            # Empty suffix: the whole string was consumed successfully.
            return True
        if s in memo:
            return memo[s]
        for word in wordDict:
            # Consume a matching prefix, then recurse on the remainder.
            # str.startswith avoids building the s[0:len(word)] slice.
            if s.startswith(word) and self.helper(s[len(word):], wordDict, memo):
                memo[s] = True
                return True
        memo[s] = False
        return False
| [
"saurabhchris1@gmail.com"
] | saurabhchris1@gmail.com |
cd18161f73be5325aacfb94fa29cdc0da7c8de5e | 04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4 | /Lib/objc/_QuickLookSupport.py | fe4f4ad4cc1e4346f428551f9e388c66c59e649c | [
"MIT"
] | permissive | ColdGrub1384/Pyto | 64e2a593957fd640907f0e4698d430ea7754a73e | 7557485a733dd7e17ba0366b92794931bdb39975 | refs/heads/main | 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 | MIT | 2023-02-26T21:34:04 | 2018-09-15T22:29:07 | C | UTF-8 | Python | false | false | 939 | py | """
Classes from the 'QuickLookSupport' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
    """Resolve an Objective-C class by name, returning None when unavailable."""
    try:
        resolved = ObjCClass(name)
    except NameError:
        # ObjCClass lookup may itself fail when rubicon could not be imported.
        return None
    return resolved
QLExtensionPreview = _Class("QLExtensionPreview")
QLURLExtensionPreview = _Class("QLURLExtensionPreview")
QLCoreSpotlightExtensionPreview = _Class("QLCoreSpotlightExtensionPreview")
QLZipArchive = _Class("QLZipArchive")
QLZipArchiveEntry = _Class("QLZipArchiveEntry")
QLExtension = _Class("QLExtension")
QLGracePeriodTimer = _Class("QLGracePeriodTimer")
QLPlatformImage = _Class("QLPlatformImage")
QLExtensionManager = _Class("QLExtensionManager")
QLUTIManager = _Class("QLUTIManager")
QLThumbnailUTICache = _Class("QLThumbnailUTICache")
QLExtensionThumbnailGenerator = _Class("QLExtensionThumbnailGenerator")
QLExtensionManagerCache = _Class("QLExtensionManagerCache")
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
8e63afaf2bfa0ac779abb55ef2e140ed86259012 | 356740062993a5967717098a7a3ee78ac6c6cf3f | /chapter01/examples/hello.py | ac2bb10551cf24337d234a8dcf96e4904245bf41 | [] | no_license | xerifeazeitona/autbor | 79588302f14c0c09b1f9f57fcb973e656ee1da5c | c37ccbfa87c1ac260e728a3a91a8f2be97978f04 | refs/heads/main | 2023-04-03T18:01:34.588984 | 2021-04-07T17:59:26 | 2021-04-07T17:59:26 | 348,749,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | """
This program says hello and asks for my name.
"""
print('Hello, world!')
print('What is your name') # prompts for name
myName = input() # stores input text into myName
print('It is good to meet you, ' + myName)
# Report how many characters the entered name contains.
print('The length of your name is: ')
print(len(myName))
print('What is your age?')
myAge = input()  # input() returns a str; convert before doing arithmetic
print('You will be ' + str(int(myAge) + 1) + ' in a year.')
| [
"juliano.amaral@gmail.com"
] | juliano.amaral@gmail.com |
6bb20e9b387afb48bc5562cd4cf07a53bf8cb999 | 30d5943710f6b2468b7a844166deb7c2f8ec4d52 | /pal/writer/writer_factory.py | 038bc5e63be3a1a13e43e734eb5e23f0ed888303 | [
"MIT"
] | permissive | qazxsw1597532018/pal | 7c3d1f35fdcfcf3ca3b361ada85c390b3d78c4a1 | f47fa19bdad8898b42479d41ac18f5d88f028d07 | refs/heads/master | 2023-08-10T21:50:51.396604 | 2021-09-28T03:43:36 | 2021-09-29T23:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,963 | py | from pal.writer.abstract_writer import AbstractWriter
from pal.writer.register.c.register_writer import CRegisterWriter
from pal.writer.register.cxx11.register_writer import Cxx11RegisterWriter
from pal.writer.register.rust.register_writer import RustRegisterWriter
from pal.writer.register.yaml import YamlRegisterWriter
from pal.writer.register.none import NoneRegisterWriter
from pal.writer.access_mechanism.gnu_inline_x64 import \
GnuInlineX64AccessMechanismWriter
from pal.writer.access_mechanism.gnu_inline_aarch64 import \
GnuInlineAarch64AccessMechanismWriter
from pal.writer.access_mechanism.gnu_inline_aarch32 import \
GnuInlineAarch32AccessMechanismWriter
from pal.writer.access_mechanism.libpal import \
LibpalAccessMechanismWriter
from pal.writer.access_mechanism.rust_libpal import \
RustLibpalAccessMechanismWriter
from pal.writer.access_mechanism.cxx_test import \
CxxTestAccessMechanismWriter
from pal.writer.access_mechanism.c_test import \
CTestAccessMechanismWriter
from pal.writer.access_mechanism.yaml import \
YamlAccessMechanismWriter
from pal.writer.access_mechanism.none import \
NoneAccessMechanismWriter
from pal.writer.print_mechanism.printf_utf8 import PrintfUtf8PrintMechanismWriter
from pal.writer.print_mechanism.rust_println import RustPrintlnPrintMechanismWriter
from pal.writer.print_mechanism.none import NonePrintMechanismWriter
from pal.writer.file_format.unix import UnixFileFormatWriter
from pal.writer.file_format.windows import WindowsFileFormatWriter
from pal.writer.file_format.yaml import YamlFileFormatWriter
from pal.writer.file_format.none import NoneFileFormatWriter
from pal.writer.comment.c_multiline import CMultilineCommentWriter
from pal.writer.comment.rust import RustCommentWriter
from pal.writer.comment.yaml import YamlCommentWriter
from pal.writer.comment.none import NoneCommentWriter
from pal.writer.instruction.gnu_inline import GnuInlineInstructionWriter
from pal.writer.instruction.libpal_c import LibpalCInstructionWriter
from pal.writer.instruction.libpal_cxx11 import LibpalCxx11InstructionWriter
from pal.writer.instruction.libpal_rust import LibpalRustInstructionWriter
from pal.writer.instruction.none import NoneInstructionWriter
from pal.writer.peripheral.none import NonePeripheralWriter
from pal.writer.peripheral.c import CPeripheralWriter
from pal.writer.peripheral.cxx11 import Cxx11PeripheralWriter
language_options = [
"c",
"c++11",
"rust",
"yaml",
"none",
]
access_mechanism_options = [
"gnu_inline",
"libpal",
"test",
"yaml",
"none",
]
print_mechanism_options = {
"printf_utf8": PrintfUtf8PrintMechanismWriter,
"rust_println": RustPrintlnPrintMechanismWriter,
"none": NonePrintMechanismWriter,
}
file_format_options = {
"unix": UnixFileFormatWriter,
"windows": WindowsFileFormatWriter,
"yaml": YamlFileFormatWriter,
"none": NoneFileFormatWriter,
}
def get_access_mechanism_writer(config):
    """Select the access-mechanism writer class for the given config."""
    if config.access_mechanism == "gnu_inline":
        # gnu_inline output is specialized per target execution state.
        by_state = {
            "intel_64bit": GnuInlineX64AccessMechanismWriter,
            "armv8a_aarch64": GnuInlineAarch64AccessMechanismWriter,
            "armv8a_aarch32": GnuInlineAarch32AccessMechanismWriter,
        }
        return by_state.get(config.execution_state, NoneAccessMechanismWriter)
    if config.access_mechanism == "test":
        by_language = {
            "c++11": CxxTestAccessMechanismWriter,
            "c": CTestAccessMechanismWriter,
        }
        return by_language.get(config.language, NoneAccessMechanismWriter)
    if config.access_mechanism == "yaml":
        return YamlAccessMechanismWriter
    if config.access_mechanism == "libpal":
        by_language = {
            "c": LibpalAccessMechanismWriter,
            "c++11": LibpalAccessMechanismWriter,
            "rust": RustLibpalAccessMechanismWriter,
        }
        return by_language.get(config.language, NoneAccessMechanismWriter)
    return NoneAccessMechanismWriter
def get_register_writer(config):
    """Select the register writer class for the configured output language."""
    writers = {
        "c": CRegisterWriter,
        "c++11": Cxx11RegisterWriter,
        "rust": RustRegisterWriter,
        "yaml": YamlRegisterWriter,
    }
    return writers.get(config.language, NoneRegisterWriter)
def get_instruction_writer(config):
    """Select the instruction writer for the (language, access mechanism) pair."""
    if config.language == "rust":
        # Rust output uses the libpal writer regardless of access mechanism.
        return LibpalRustInstructionWriter
    by_pair = {
        ("c", "libpal"): LibpalCInstructionWriter,
        ("c", "gnu_inline"): GnuInlineInstructionWriter,
        ("c++11", "libpal"): LibpalCxx11InstructionWriter,
        ("c++11", "gnu_inline"): GnuInlineInstructionWriter,
    }
    return by_pair.get((config.language, config.access_mechanism),
                       NoneInstructionWriter)
def get_peripheral_writer(config):
    """Select the peripheral writer class for the configured output language."""
    writers = {
        "c": CPeripheralWriter,
        "c++11": Cxx11PeripheralWriter,
    }
    return writers.get(config.language, NonePeripheralWriter)
def get_comment_writer(config):
    """Select the comment writer class for the configured output language."""
    writers = {
        "c": CMultilineCommentWriter,
        "c++11": CMultilineCommentWriter,
        "rust": RustCommentWriter,
        "yaml": YamlCommentWriter,
    }
    return writers.get(config.language, NoneCommentWriter)
def make_writer(config):
    """Build a writer instance composed from the configured strategy classes.

    Validates the option fields on ``config``, then assembles a ``Writer``
    class by multiple inheritance from the per-concern writer mixins and
    returns an instance of it.

    Raises:
        Exception: if language, access_mechanism, print_mechanism, or
            file_format is not a recognized option.
    """
    # BUG FIX: these error branches previously referenced undefined bare names
    # (e.g. `language` instead of `config.language`), so an invalid option
    # raised NameError instead of the intended message.
    if config.language not in language_options:
        raise Exception("invalid language option: " + str(config.language))
    if config.access_mechanism not in access_mechanism_options:
        raise Exception("invalid access mechanism option: " +
                        str(config.access_mechanism))
    if config.print_mechanism not in print_mechanism_options:
        raise Exception("invalid print_mechanism option: " +
                        str(config.print_mechanism))
    if config.file_format not in file_format_options:
        raise Exception("invalid file_format option: " + str(config.file_format))

    access_mechanism_writer = get_access_mechanism_writer(config)
    register_writer = get_register_writer(config)
    instruction_writer = get_instruction_writer(config)
    peripheral_writer = get_peripheral_writer(config)
    comment_writer = get_comment_writer(config)

    # Compose one concrete Writer from the selected strategy mixins.
    class Writer(
        AbstractWriter,
        register_writer,
        instruction_writer,
        peripheral_writer,
        access_mechanism_writer,
        print_mechanism_options[config.print_mechanism],
        file_format_options[config.file_format],
        comment_writer
    ):
        pass

    return Writer()
| [
"jared.wright12@gmail.com"
] | jared.wright12@gmail.com |
80623741abfd8ad0974b4f7aaf1ffa0c5a93a268 | 5ab43d03001ae459fae26e9bd17b659f2e9decb1 | /web/urls/sql.py | fad91f6f121765c49eaa457040e8477436106364 | [] | no_license | bradbann/dbops | c022f0bf15e1af3438c4726a57ede139bfcbfc18 | 1097e142ba03406c99ac7d007001f35a2db39fcd | refs/heads/master | 2023-02-05T18:50:55.351188 | 2020-12-28T00:50:22 | 2020-12-28T00:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/5 12:33
# @Author : 马飞
# @File : logon.py.py
# @Software: PyCharm
from web.services.sql import sqlquery,sql_query,sqlrelease,sql_check,sql_format,sql_check_result,sql_release,sqlaudit,sql_audit,sqlrun,sql_run,sql_audit_query,sql_audit_detail,sql_run_query
from web.services.sql import get_tree_by_sql
# Purpose: URL routes for the database/SQL operations API
# Route table for the SQL-operations module. Paths containing a "_" segment
# appear to be the form/AJAX endpoints backing the corresponding page views —
# confirm against web.services.sql.
sql = [
    (r"/sql/query", sqlquery),
    (r"/sql/_query", sql_query),
    (r"/sql/release", sqlrelease),
    (r"/sql/_release", sql_release),
    (r"/sql/_check", sql_check),
    (r"/sql/_check/result", sql_check_result),
    (r"/sql/audit", sqlaudit),
    (r"/sql/_audit", sql_audit),
    (r"/sql/audit/query", sql_audit_query),
    (r"/sql/audit/detail", sql_audit_detail),
    (r"/sql/_format", sql_format),
    (r"/sql/run", sqlrun),
    (r"/sql/_run", sql_run),
    (r"/sql/run/query", sql_run_query),
    (r"/get_tree", get_tree_by_sql),
]
"zhdn_791005@163.com"
] | zhdn_791005@163.com |
b9cbeabe33fbd53d97f99921215dbbdf94b0ebb5 | 8c39fa8241e1ecefab6c693862bee127fd3e1461 | /proyectoferreteria/apps/gestionadmin/migrations/0073_auto_20200325_1923.py | 10e23128d48a25bd9985d70d6b4e39300a33539f | [] | no_license | ujcv4273/Ferreteriav-0.0.5 | b5433e727b68e318204386f84416208f99470446 | 9dd16363ce9f4a012a177aa3d5414051b79cd3a2 | refs/heads/master | 2022-11-29T16:50:19.066725 | 2020-08-01T18:16:35 | 2020-08-01T18:16:35 | 284,314,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Generated by Django 3.0.4 on 2020-03-26 01:23
from django.db import migrations, models
import proyectoferreteria.apps.gestionadmin.models
class Migration(migrations.Migration):
    # Auto-generated migration: attaches the `validarnegativos` validator to the
    # IHSS and RAP integer fields of the Planilla model (presumably rejecting
    # negative values, judging by the validator's name — confirm in models.py).

    dependencies = [
        ('gestionadmin', '0072_auto_20200325_1913'),
    ]

    operations = [
        migrations.AlterField(
            model_name='planilla',
            name='IHSS',
            field=models.IntegerField(validators=[proyectoferreteria.apps.gestionadmin.models.validarnegativos]),
        ),
        migrations.AlterField(
            model_name='planilla',
            name='RAP',
            field=models.IntegerField(validators=[proyectoferreteria.apps.gestionadmin.models.validarnegativos]),
        ),
    ]
| [
"cristian.godoy0000@gmail.com"
] | cristian.godoy0000@gmail.com |
3883e7c11bd8857f07067e92b0a5ca46c2de6bbf | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-CRF_for_PyTorch/examples/training_trick/task_sentiment_exponential_moving_average_warmup.py | 125a35edeb7a0b9cf9fb56a694fcacd20820b621 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,678 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
#! -*- coding:utf-8 -*-
# 情感分类任务, 指数滑动平均ema+warmup两种策略
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.optimizers import extend_with_exponential_moving_average, get_linear_schedule_with_warmup
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: loads tab-separated "text<TAB>label" files."""

    @staticmethod
    def load_data(filenames):
        """Load the data, splitting texts into pieces of at most ``maxlen``
        characters where possible (``maxlen - 2`` leaves room for the
        tokenizer's special tokens).
        """
        D = []
        # Primary split characters and characters stripped from piece edges.
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        for filename in filenames:
            with open(filename, encoding='utf-8') as f:
                for l in f:
                    text, label = l.strip().split('\t')
                    for t in text_segmentate(text, maxlen - 2, seps, strips):
                        D.append((t, int(label)))
        return D
def collate_fn(batch):
    """Collate (text, label) pairs into padded token-id and label tensors."""
    batch_token_ids, batch_labels = [], []
    for text, label in batch:
        # encode(...) returns token ids as its first element.
        token_ids = tokenizer.encode(text, maxlen=maxlen)[0]
        batch_token_ids.append(token_ids)
        batch_labels.append([label])
    # Right-pad all sequences in the batch to a common length.
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return batch_token_ids, batch_labels.flatten()
# 加载数据集
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
    """BERT encoder with a pooled sentence embedding and a 2-way classification head."""

    def __init__(self, pool_method='cls') -> None:
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # gt(0) marks non-zero token ids as real tokens (assumes id 0 is
        # padding — confirm against the tokenizer's vocabulary).
        pooled_output = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        output = self.dropout(pooled_output)
        output = self.dense(output)
        return output
model = Model().to(device)
optimizer = optim.Adam(model.parameters(), lr=2e-5)
ema_schedule = extend_with_exponential_moving_average(model, decay=0.99)
warmup_scheduler = get_linear_schedule_with_warmup(optimizer, len(train_dataloader), num_training_steps=len(train_dataloader)*10, last_epoch=-1)
# Define the loss and optimizer to use; custom choices are supported here
model.compile(
loss=nn.CrossEntropyLoss(),
optimizer=optimizer,
scheduler=[ema_schedule, warmup_scheduler],
metrics=['accuracy']
)
class Evaluator(Callback):
    """Per-epoch evaluation (and optional best-checkpoint saving) callback."""

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    # Accuracy over a dataloader, computed with the EMA-averaged weights.
    def evaluate(self, data):
        ema_schedule.apply_ema_weights()  # swap in the exponential-moving-average weights
        total, right = 0., 0.
        for x_true, y_true in data:
            y_pred = model.predict(x_true).argmax(axis=1)
            total += len(y_true)
            right += (y_true == y_pred).sum().item()
        ema_schedule.restore_raw_weights()  # restore the raw training weights
        return right / total
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
dc7b2e654c8f78b1e9ffc0af46f43d30baa54e55 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03329/s613563948.py | e52a2eae61fa6172d407b873df69a031908af97c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | n = int(input())
dp = [i for i in range(n+1)]
for i in range(n):
dp[i+1] = min(dp[i+1],dp[i] + 1)
cou = 1
while True:
if i + 6**cou <= n:
dp[i+6**cou] = min(dp[i+6**cou],dp[i]+1)
cou += 1
elif i + 6**cou > n:
break
cou = 1
while True:
if i + 9**cou <= n:
dp[i+9**cou] = min(dp[i+9**cou],dp[i]+1)
cou += 1
elif i + 9**cou > n:
break
print(dp[n]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
15f70fff1dbdd73ae7becb2f3e22c2671385319c | b6c09a1b87074d6e58884211ce24df8ec354da5c | /714. 买卖股票的最佳时机含手续费.py | 9c9467240aa30698f8796de96dccb3b8fbea196d | [] | no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | '''
给定一个整数数组 prices,其中第 i 个元素代表了第 i 天的股票价格 ;非负整数 fee 代表了交易股票的手续费用。
你可以无限次地完成交易,但是你每笔交易都需要付手续费。如果你已经购买了一个股票,在卖出它之前你就不能再继续购买股票了。
返回获得利润的最大值。
注意:这里的一笔交易指买入持有并卖出股票的整个过程,每笔交易你只需要为支付一次手续费。
'''
def maxProfit(prices, fee):
    """Maximum profit with unlimited transactions and a per-transaction fee.

    Two-state DP: ``hold`` is the best cash balance while holding a share,
    ``cash`` the best balance while flat. O(n) time, O(1) space (the original
    kept a full 2 x n table and crashed on empty input).

    Args:
        prices: daily prices; may be empty.
        fee: fee charged once per buy-and-sell round trip.

    Returns:
        The maximum achievable profit (0 when fewer than two days exist).
    """
    if not prices:
        return 0
    hold = -prices[0]  # bought on day 0
    cash = 0
    for price in prices[1:]:
        prev_hold = hold
        hold = max(hold, cash - price)            # buy today, or keep holding
        cash = max(cash, prev_hold + price - fee)  # sell today (pay fee), or stay flat
    return cash
print(maxProfit(prices = [1, 3, 2, 8, 4, 9], fee = 2))
| [
"xlfeng886@163.com"
] | xlfeng886@163.com |
b63e4c9ebaa4ca58ea3ebbd502a5151c669d1102 | 9238c5adf211d66cbe9bea5a89e97ca02c31da9a | /bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/modules/core/cloud/amazon/ec2_tag.py | 409041f906b23179804c4dd7ffd517fea198c8e4 | [
"MIT"
] | permissive | marcusramberg/dotfiles | 803d27fb88da8e46abb283b2e2987e51a83b08aa | 413727089a15e775f532d2da363c03d9fb3fb90a | refs/heads/main | 2023-03-04T17:08:40.123249 | 2023-03-01T07:46:51 | 2023-03-01T07:46:51 | 7,285,450 | 4 | 2 | MIT | 2022-12-22T14:39:35 | 2012-12-22T11:57:42 | Python | UTF-8 | Python | false | false | 4,962 | py | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent', 'list']
aliases: []
region:
description:
- region in which the resource exists.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
author: Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic example of adding tag(s)
tasks:
- name: tag a resource
ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present
args:
tags:
Name: ubervol
env: prod
# Playbook example of adding tag(s) to spawned instances
tasks:
- name: launch some instances
ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
register: ec2
- name: tag my launched instances
ec2_tag: resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
args:
tags:
Name: webserver
env: prod
'''
import sys
import time
try:
    import boto.ec2
except ImportError:
    # Parenthesized print keeps this line valid on both Python 2 and 3;
    # the original bare `print "..."` statement is a SyntaxError on Python 3.
    # The failed=True key/value output is the legacy Ansible module error
    # protocol, so the message text must stay exactly as-is.
    print("failed=True msg='boto required for this module'")
    sys.exit(1)
def main():
    # Entry point of the ec2_tag module: create, remove, or list tags on a
    # single EC2 resource, reporting back "changed" based on the tags that
    # are already present.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            resource = dict(required=True),
            tags = dict(),
            state = dict(default='present', choices=['present', 'absent', 'list']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')
    ec2 = ec2_connect(module)
    # We need a comparison here so that we can accurately report back changed status.
    # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
    filters = {'resource-id' : resource}
    gettags = ec2.get_all_tags(filters=filters)
    dictadd = {}
    dictremove = {}
    baddict = {}
    tagdict = {}
    # Flatten boto's Tag objects into a plain {name: value} dict for comparison.
    for tag in gettags:
        tagdict[tag.name] = tag.value
    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        if set(tags.items()).issubset(set(tagdict.items())):
            # Every requested (key, value) pair is already on the resource.
            # exit_json terminates the module, so nothing below runs.
            module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
        else:
            # Collect only the pairs that are new or whose value differs.
            for (key, value) in set(tags.items()):
                if (key, value) not in set(tagdict.items()):
                    dictadd[key] = value
        tagger = ec2.create_tags(resource, dictadd)
        gettags = ec2.get_all_tags(filters=filters)
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        # baddict collects requested pairs that are NOT on the resource; when
        # that covers every requested key there is nothing to delete.
        for (key, value) in set(tags.items()):
            if (key, value) not in set(tagdict.items()):
                baddict[key] = value
        if set(baddict) == set(tags):
            module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        # dictremove keeps only the pairs that actually match an existing tag.
        for (key, value) in set(tags.items()):
            if (key, value) in set(tagdict.items()):
                dictremove[key] = value
        tagger = ec2.delete_tags(resource, dictremove)
        gettags = ec2.get_all_tags(filters=filters)
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
    if state == 'list':
        # Report the resource's current tags without modifying anything.
        module.exit_json(changed=False, tags=tagdict)
    sys.exit(0)
# import module snippets
# NOTE: legacy Ansible replaces these star imports with shared boilerplate at
# module-build time; by convention they sit at the bottom of the file, just
# before the unconditional main() call.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| [
"marcus.ramberg@usit.uio.no"
] | marcus.ramberg@usit.uio.no |
7a807bb9bd34fd9fcc6a9ff4ba2affba6b6b435c | 6392354e74cce4a303a544c53e13d0a7b87978ee | /m6/MyBlog/venv/Lib/site-packages/django/contrib/gis/admin/widgets.py | c02d31c5101d1b4d47789fc374fb418343dc8180 | [] | no_license | music51555/wxPythonCode | dc35e42e55d11850d7714a413da3dde51ccdd37e | f77b71ed67d926fbafd1cfec89de8987d9832016 | refs/heads/master | 2020-04-11T20:20:38.136446 | 2019-04-01T09:17:34 | 2019-04-01T09:17:34 | 162,067,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,723 | py | import logging
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Textarea
from django.utils import translation
# Creating a templates context that contains Django settings
# values needed by admin map templates.
geo_context = {'LANGUAGE_BIDI': translation.get_language_bidi()}
logger = logging.getLogger('django.contrib.gis')
class OpenLayersWidget(Textarea):
    """
    Render an OpenLayers map using the WKT of the geometry.

    NOTE(review): ``self.params`` and ``self.geom_type`` are read but never
    assigned here -- presumably they are attached by the GeoDjango admin code
    that instantiates this widget; confirm against the admin options class.
    """
    def get_context(self, name, value, attrs):
        # Build the template context dict (stored in self.params) for the
        # OpenLayers map template. `value` may be a geometry object or the
        # raw string a failed form round-trip left behind.
        # Update the templates parameters with any attributes passed in.
        if attrs:
            self.params.update(attrs)
            self.params['editable'] = self.params['modifiable']
        else:
            self.params['editable'] = True
        # Defaulting the WKT value to a blank string -- this
        # will be tested in the JavaScript and the appropriate
        # interface will be constructed.
        self.params['wkt'] = ''
        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if value and isinstance(value, str):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError) as err:
                logger.error("Error creating geometry from value '%s' (%s)", value, err)
                value = None
        # Drop geometries whose type does not match the field's declared
        # type (unless the field accepts any GEOMETRY).
        if (value and value.geom_type.upper() != self.geom_type and
                self.geom_type != 'GEOMETRY'):
            value = None
        # Constructing the dictionary of the map options.
        self.params['map_options'] = self.map_options()
        # Constructing the JavaScript module name using the name of
        # the GeometryField (passed in via the `attrs` keyword).
        # Use the 'name' attr for the field name (rather than 'field')
        self.params['name'] = name
        # note: we must switch out dashes for underscores since js
        # functions are created using the module variable
        js_safe_name = self.params['name'].replace('-', '_')
        self.params['module'] = 'geodjango_%s' % js_safe_name
        if value:
            # Transforming the geometry to the projection used on the
            # OpenLayers map.
            srid = self.params['srid']
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except GDALException as err:
                    # Keep rendering with an empty WKT rather than failing
                    # the whole admin page on a bad transform.
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)",
                        value.srid, srid, err
                    )
                    wkt = ''
            else:
                wkt = value.wkt
            # Setting the parameter WKT with that of the transformed
            # geometry.
            self.params['wkt'] = wkt
        self.params.update(geo_context)
        return self.params
    def map_options(self):
        """Build the map options hash for the OpenLayers templates."""
        # JavaScript construction utilities for the Bounds and Projection.
        def ol_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % extent
        def ol_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid
        # An array of the parameter name, the name of their OpenLayers
        # counterpart, and the type of variable they are.
        map_types = [('srid', 'projection', 'srid'),
                     ('display_srid', 'displayProjection', 'srid'),
                     ('units', 'units', str),
                     ('max_resolution', 'maxResolution', float),
                     ('max_extent', 'maxExtent', 'bounds'),
                     ('num_zoom', 'numZoomLevels', int),
                     ('max_zoom', 'maxZoomLevels', int),
                     ('min_zoom', 'minZoomLevel', int),
                     ]
        # Building the map options hash. Only parameters with a truthy value
        # are emitted; strings are quoted for direct use in JavaScript.
        map_options = {}
        for param_name, js_name, option_type in map_types:
            if self.params.get(param_name, False):
                if option_type == 'srid':
                    value = ol_projection(self.params[param_name])
                elif option_type == 'bounds':
                    value = ol_bounds(self.params[param_name])
                elif option_type in (float, int):
                    value = self.params[param_name]
                elif option_type in (str,):
                    value = '"%s"' % self.params[param_name]
                else:
                    raise TypeError
                map_options[js_name] = value
        return map_options
| [
"music51555@163.com"
] | music51555@163.com |
c9db57b26f26ac2232980fef4c2269f1b22bb554 | 9a486a87e028303a551fbd0d1e1b6b650387ea14 | /deal_xzj_log/guild_battle.py | 512946b2d6e02df97e80609b5411c54e41c4496a | [] | no_license | shanlihou/pythonFunc | 7b8e7064fddd4522e492c915c086cc6c5abc6eec | 646920256551ccd8335446dd4fe11aa4b9916f64 | refs/heads/master | 2022-08-24T20:33:12.287464 | 2022-07-21T12:00:10 | 2022-07-21T12:00:10 | 24,311,639 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | import re
import utils
class GuildBattle(object):
    """Scans a guild-battle log and aggregates participant ids and accounts.

    Lines containing a ``gbId:`` entry contribute to the participant sets;
    all other lines are probed for a ``[YYYY-MM-DD`` timestamp to track the
    day of the battle.
    """

    def __init__(self, filename):
        self.gbid_set = set()     # distinct participant gbids (non-internal)
        self.account_set = set()  # accounts resolved from those gbids
        self.day = 0              # day-of-month parsed from log timestamps
        self.init_data(filename)

    def parse_day(self, line):
        """Update ``self.day`` from a ``[YYYY-MM-DD`` timestamp, if present."""
        match = re.search(r'\[\d+\-\d+\-(\d+)', line)
        if match:
            self.day = int(match.group(1))

    def init_data(self, filename):
        """Read the log file and populate the gbid/account sets."""
        gbid_pattern = re.compile(r'gbId\:([^,]+), kill\:')
        with open(filename, encoding='utf-8') as log:
            for line in log:
                match = gbid_pattern.search(line)
                if not match:
                    # Non-participant lines may still carry the date.
                    self.parse_day(line)
                    continue
                gbid = int(match.group(1))
                # Skip internal/test accounts.
                if utils.is_gbid_inter(gbid):
                    continue
                self.gbid_set.add(gbid)
                self.account_set.add(utils.get_account(gbid))

    def generate_col(self, day_dict):
        """Return [day, #gbids, #accounts, accounts / logins-for-day]."""
        login_account_num = day_dict.get(self.day)
        account_num = len(self.account_set)
        return [
            self.day,
            len(self.gbid_set),
            account_num,
            account_num / login_account_num,
        ]
def deal_guild_battle(filename, day_dict):
    """Parse *filename* and return the summary column for its battle day."""
    battle = GuildBattle(filename)
    return battle.generate_col(day_dict)
if __name__ == '__main__':
    # NOTE(review): deal_guild_battle() requires (filename, day_dict) but is
    # called here with no arguments, so running this module directly raises
    # TypeError. Presumably the real entry point is another script importing
    # deal_guild_battle -- confirm before relying on direct execution.
    deal_guild_battle()
| [
"shanlihou@gmail.com"
] | shanlihou@gmail.com |
f11c0be7aa67af6235a14f8e6198576a7e95013e | d780df6e068ab8a0f8007acb68bc88554a9d5b50 | /python/g1/scripts/g1/scripts/bases.py | e710b0e363fa3d1cf71663dc650af30be31f0d9e | [
"MIT"
] | permissive | clchiou/garage | ed3d314ceea487b46568c14b51e96b990a50ed6f | 1d72863d3a5f5d620b170f4dd36f605e6b72054f | refs/heads/master | 2023-08-27T13:57:14.498182 | 2023-08-15T07:09:57 | 2023-08-15T19:53:52 | 32,647,497 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,980 | py | """Helpers for constructing ``subprocess.run`` calls contextually.
NOTE: For the ease of use, this module implements context with global
variables, and thus is not concurrent safe (not thread safe and not
asynchronous coroutine safe). Although we could redesign the interface
to make it concurrent safe (like requiring passing around context
explicitly or using stdlib's contextvars), for now we think ease of use
is more important then concurrent safe (we might change our mind later).
"""
__all__ = [
'popen',
'run',
# Context manipulations.
'doing_capture_output',
'doing_capture_stderr',
'doing_capture_stdout',
'doing_check',
'doing_dry_run',
'get_cwd',
'get_dry_run',
'merging_env',
'preserving_sudo_env',
'using_cwd',
'using_env',
'using_input',
'using_relative_cwd',
'using_prefix',
'using_stderr',
'using_stdin',
'using_stdout',
'using_sudo',
]
import contextlib
import logging
import os
import subprocess
from pathlib import Path
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# We don't use threading.local here since we don't pretend this module
# is thread safe.
_CONTEXT = {}
# Context entry names and default values.
# Each key below names one knob of the subprocess-call context; _DEFAULTS
# holds the value used when the knob is not overridden in _CONTEXT.
_CAPTURE_OUTPUT = 'capture_output'
_CHECK = 'check'
_CWD = 'cwd'
_DRY_RUN = 'dry_run'
_ENV = 'env'
_INPUT = 'input'
_PREFIX = 'prefix'
_STDIN = 'stdin'
_STDOUT = 'stdout'
_STDERR = 'stderr'
_SUDO = 'sudo'
_SUDO_ENV = 'sudo_env'
_DEFAULTS = {
    _CAPTURE_OUTPUT: False,
    _CHECK: True,
    _CWD: None,
    _DRY_RUN: False,
    _ENV: None,
    _INPUT: None,
    _PREFIX: (),
    _STDIN: None,
    _STDOUT: None,
    _STDERR: None,
    _SUDO: False,
    _SUDO_ENV: (),
}
def _get(name):
    """Return the current value of context entry *name*."""
    value, _ = _get2(name)
    return value


def _get2(name):
    """Return (value, is_default) pair."""
    if name in _CONTEXT:
        return _CONTEXT[name], False
    # Fall back to the module defaults; ASSERT.getitem rejects unknown names.
    return ASSERT.getitem(_DEFAULTS, name), True


@contextlib.contextmanager
def _using(name, new_value):
    """Context of using an entry value."""
    previous, was_default = _get2(name)
    _CONTEXT[name] = new_value
    try:
        yield previous
    finally:
        # Restore the prior state: drop the override entirely when the entry
        # was previously at its default, otherwise put the old value back.
        if was_default:
            del _CONTEXT[name]
        else:
            _CONTEXT[name] = previous
def doing_capture_output(capture_output=True):
    """Context: pass capture_output to subprocess.run."""
    return _using(_CAPTURE_OUTPUT, capture_output)
def doing_capture_stdout(capture_stdout=True):
    """Context: capture (or stop capturing) stdout via a PIPE."""
    return using_stdout(subprocess.PIPE if capture_stdout else None)
def doing_capture_stderr(capture_stderr=True):
    """Context: capture (or stop capturing) stderr via a PIPE."""
    return using_stderr(subprocess.PIPE if capture_stderr else None)
def doing_check(check=True):
    """Context: raise on non-zero exit status (subprocess.run check=...)."""
    return _using(_CHECK, check)
def get_dry_run():
    """Return whether dry-run mode is currently active."""
    return _get(_DRY_RUN)
def doing_dry_run(dry_run=True):
    """Context: log calls without actually executing them."""
    return _using(_DRY_RUN, dry_run)
def get_cwd():
    """Return the effective working directory as a ``Path`` object."""
    configured = _get(_CWD)
    if configured is None:
        return Path.cwd()
    return configured if isinstance(configured, Path) else Path(configured)


def using_cwd(cwd):
    """Context of using an absolute cwd value."""
    return _using(_CWD, cwd)


def using_relative_cwd(relative_cwd):
    """Context of using a cwd value relative to the current effective cwd."""
    new_cwd = None if relative_cwd is None else get_cwd() / relative_cwd
    return _using(_CWD, new_cwd)
def using_env(env):
    """Context of using an environment dict.
    NOTE: This replaces, not merges, the environment dict.
    """
    return _using(_ENV, env)


def merging_env(env):
    """Context of merging an environment dict.
    If the current `env` is None (which is the default), the given
    environment dict will be merged with os.environ.
    """
    current, is_default = _get2(_ENV)
    if not env:
        # Nothing to merge; hand back a no-op context over the current env.
        return contextlib.nullcontext(current)
    base = os.environ if is_default else current
    merged = dict(base)
    merged.update(env)
    return using_env(merged)
def using_input(input): # pylint: disable=redefined-builtin
    """Context: data passed to the child process via subprocess input=."""
    return _using(_INPUT, input)
def using_stdin(stdin):
    """Context: stdin handle/flag for the child process."""
    return _using(_STDIN, stdin)
def using_stdout(stdout):
    """Context: stdout handle/flag for the child process."""
    return _using(_STDOUT, stdout)
def using_stderr(stderr):
    """Context: stderr handle/flag for the child process."""
    return _using(_STDERR, stderr)
def using_prefix(prefix):
    """Context: extra argv elements prepended before the command."""
    return _using(_PREFIX, prefix)
def using_sudo(sudo=True):
    """Context: run the command under sudo --non-interactive."""
    return _using(_SUDO, sudo)
def preserving_sudo_env(sudo_env):
    """Context: environment variable names sudo should preserve."""
    # Typically sudo is configured to reset PATH to a known good value
    # via secure_path option. So we forbid preserving PATH here.
    return _using(_SUDO_ENV, ASSERT.not_contains(sudo_env, 'PATH'))
def popen(args):
    """Start *args* with subprocess.Popen using the current context.

    Dry-run mode is rejected here because there is no sensible fake Popen
    object to hand back (unlike run(), which can fake a CompletedProcess).
    """
    LOG.debug('popen: args=%s, context=%s', args, _CONTEXT)
    # It does not seem like we can return a fake Popen object.
    ASSERT.false(_get(_DRY_RUN))
    return subprocess.Popen(_prepare_args(args), **_prepare_kwargs())
def run(args):
    """Run *args* with subprocess.run using the current context.

    In dry-run mode the command is only logged and a successful fake
    CompletedProcess (exit code 0, empty output) is returned.
    """
    LOG.debug('run: args=%s, context=%s', args, _CONTEXT)
    if _get(_DRY_RUN):
        # It seems better to return a fake value than None.
        return subprocess.CompletedProcess(args, 0, b'', b'')
    return subprocess.run(
        _prepare_args(args),
        capture_output=_get(_CAPTURE_OUTPUT),
        check=_get(_CHECK),
        input=_get(_INPUT),
        **_prepare_kwargs(),
    )
def _prepare_args(args):
    """Stringify *args* and prepend the configured sudo/prefix wrappers."""
    prepared = [str(arg) for arg in args]
    if _get(_SUDO):
        sudo_cmd = ['sudo', '--non-interactive']
        sudo_env = _get(_SUDO_ENV)
        if sudo_env:
            sudo_cmd.append('--preserve-env=%s' % ','.join(sudo_env))
        prepared[:0] = sudo_cmd
    prefix = _get(_PREFIX)
    if prefix:
        # The prefix goes outermost, in front of any sudo wrapper.
        prepared[:0] = prefix
    return prepared


def _prepare_kwargs():
    """Collect the subprocess keyword arguments from the current context."""
    kwargs = {
        'cwd': _get(_CWD),
        'env': _get(_ENV),
    }
    # Work around subprocess.run limitation that it checks presence of
    # stdin, stdout, and stderr in kwargs, not whether their value is
    # not None.
    for stream_key in (_STDIN, _STDOUT, _STDERR):
        stream = _get(stream_key)
        if stream is not None:
            kwargs[stream_key] = stream
    return kwargs
| [
"clchiou@gmail.com"
] | clchiou@gmail.com |
e2ef3dc9a479b8b4e011179eff0820b73109b14a | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/course_wiki/tests/test_middleware.py | b4ce9c0f3de1609b2abfcc018e4c85d9fd6326bf | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 1,544 | py | """
Tests for wiki middleware.
"""
from django.test.client import Client
from wiki.models import URLPath
from common.djangoapps.student.tests.factories import InstructorFactory
from lms.djangoapps.course_wiki.views import get_or_create_root
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestWikiAccessMiddleware(ModuleStoreTestCase):
    """Tests for WikiAccessMiddleware."""
    def setUp(self):
        """Test setup."""
        super().setUp()
        # Root wiki article plus one course-scoped sub-article to exercise
        # the middleware's URL rewriting.
        self.wiki = get_or_create_root()
        self.course_math101 = CourseFactory.create(org='edx', number='math101', display_name='2014', metadata={'use_unique_wiki_id': 'false'})  # lint-amnesty, pylint: disable=line-too-long
        self.course_math101_instructor = InstructorFactory(course_key=self.course_math101.id, username='instructor', password='secret')  # lint-amnesty, pylint: disable=line-too-long
        self.wiki_math101 = URLPath.create_article(self.wiki, 'math101', title='math101')
        # Authenticated client so the wiki page renders the edit/settings links.
        self.client = Client()
        self.client.login(username='instructor', password='secret')
    def test_url_tranform(self):
        """Test that the correct prefix ('/courses/<course_id>') is added to the urls in the wiki."""
        response = self.client.get('/courses/edx/math101/2014/wiki/math101/')
        self.assertContains(response, '/courses/edx/math101/2014/wiki/math101/_edit/')
        self.assertContains(response, '/courses/edx/math101/2014/wiki/math101/_settings/')
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
4160757e1eb7885ffb9361bec4a8b1841ce5c5eb | f2502813aa34cb6262bb2780debb51bb080aecd4 | /toeplitz_decomp_gpu/run_real_new.py | 16ae80f0f1ba99c14b3424a16cb3240a716315f7 | [] | no_license | sufkes/scintillometry | 6fafc7601f12ea32b3dfa142ae5ef6beec7e3585 | a9624eecd307bc0ea5ce2e412feec6909bd762aa | refs/heads/master | 2021-01-22T03:49:27.370870 | 2018-02-23T21:37:06 | 2018-02-23T21:37:06 | 93,168,533 | 2 | 1 | null | 2017-06-02T13:32:59 | 2017-06-02T13:32:59 | null | UTF-8 | Python | false | false | 1,257 | py | import os,sys
from mpi4py import MPI
import numpy as np
from new_factorize_parallel import ToeplitzFactorizor
from time import time
# MPI setup: every rank runs this script; blocks are distributed round-robin
# over the ranks below.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if len(sys.argv) != 8 and len(sys.argv) != 9:
    # Only rank 0 prints the usage message to avoid size-fold duplication.
    # NOTE: Python 2 print statement -- this script is py2-only as written.
    if rank==0:
        print "Please pass in the following arguments: method offsetn offsetm n m p pad"
else:
    method = sys.argv[1]
    offsetn = int(sys.argv[2])
    offsetm = int(sys.argv[3])
    n = int(sys.argv[4])
    m = int(sys.argv[5])
    p = int(sys.argv[6])
    # Flag-style arguments accept "1" or "True"; anything else is False.
    pad = sys.argv[7] == "1" or sys.argv[7] == "True"
    detailedSave = False
    if len(sys.argv) == 9:
        detailedSave = sys.argv[8] == "1" or sys.argv[8] == "True"
    if not os.path.exists("processedData/"):
        os.makedirs("processedData/")
    # Padding doubles the effective block size m; the folder name encodes the
    # chosen geometry. (bool == int comparison is intentional: False==0, True==1.)
    if pad == 0:
        folder = "gate0_numblock_{}_meff_{}_offsetn_{}_offsetm_{}".format(n, m, offsetn, offsetm)
        c = ToeplitzFactorizor(folder, n, m, pad, detailedSave)
    if pad == 1:
        folder = "gate0_numblock_{}_meff_{}_offsetn_{}_offsetm_{}".format(n, m*2, offsetn, offsetm)
        c = ToeplitzFactorizor(folder, n, m*2, pad, detailedSave)
    # Round-robin assignment of the n*(1+pad) blocks across MPI ranks.
    for i in range(0, n*(1 + pad)//size):
        c.addBlock(rank + i*size)
    c.fact(method, p)
| [
"you@example.com"
] | you@example.com |
31a0238162c4142eafff1a78205a6c5ea1531adb | 2aeb619d07ba15ca95607238d41ad33b88cf51c7 | /src/courcelles/urban/dataimport/architects/mappers.py | 36823093a58a185857dbc706e1bf11d0f2409771 | [] | no_license | IMIO/courcelles.urban.dataimport | 65edd044b60cbc148e1345b701e609b4a6446828 | 0d36fbe8c61a5c8da15b05df5452d7970e645509 | refs/heads/master | 2020-05-18T16:43:57.841783 | 2016-01-04T13:44:38 | 2016-01-04T13:44:38 | 39,009,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | # -*- coding: utf-8 -*-
from imio.urban.dataimport.mapper import Mapper
from imio.urban.dataimport.factory import BaseFactory
from Products.CMFPlone.utils import normalizeString
# Factory
class ArchitectFactory(BaseFactory):
    """Factory creating Architect contacts in the urban architects folder."""
    def getCreationPlace(self, factory_args):
        # All architects are created under the site's urban/architects folder.
        return self.site.urban.architects
    def getPortalType(self, container, **kwargs):
        # Portal type is fixed regardless of the container.
        return 'Architect'
class IdMapper(Mapper):
    """Derives a unique, normalized contact id from the architect name."""
    def mapId(self, line):
        raw_name = '%s' % self.getData('nom')
        # Strip spaces and dashes before generating the unique id.
        compact = raw_name.replace(' ', '').replace('-', '')
        unique = self.site.portal_urban.generateUniqueId(compact)
        return normalizeString(unique)
class PhoneMapper(Mapper):
    """Combines landline and mobile numbers into a single phone field."""
    def mapPhone(self, line):
        phone = self.getData('téléphone')
        gsm = self.getData('gsm')
        # A dash in the source data means "no number".
        has_phone = bool(phone) and phone != '-'
        has_gsm = bool(gsm) and gsm != '-'
        if has_phone and has_gsm:
            return '{phone}, {gsm}'.format(phone=phone, gsm=gsm)
        if has_phone:
            return phone
        if has_gsm:
            return gsm
        return ''
| [
"delcourt.simon@gmail.com"
] | delcourt.simon@gmail.com |
a3885d1f776f0b334e529ebd57270412a8f1d539 | 6e57bdc0a6cd18f9f546559875256c4570256c45 | /cts/apps/CameraITS/tests/scene1/test_linearity.py | 1f4aa142984e847efe3d0edd3bdeaca2436c1d99 | [] | no_license | dongdong331/test | 969d6e945f7f21a5819cd1d5f536d12c552e825c | 2ba7bcea4f9d9715cbb1c4e69271f7b185a0786e | refs/heads/master | 2023-03-07T06:56:55.210503 | 2020-12-07T04:15:33 | 2020-12-07T04:15:33 | 134,398,935 | 2 | 1 | null | 2022-11-21T07:53:41 | 2018-05-22T10:26:42 | null | UTF-8 | Python | false | false | 4,201 | py | # Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import its.target
import numpy
import math
from matplotlib import pylab
import os.path
import matplotlib
import matplotlib.pyplot
NAME = os.path.basename(__file__).split('.')[0]
RESIDUAL_THRESHOLD = 0.0003 # approximately each sample is off by 2/255
# The HAL3.2 spec requires that curves up to 64 control points in length
# must be supported.
L = 64
LM1 = float(L-1)
def main():
    """Test that device processing can be inverted to linear pixels.
    Captures a sequence of shots with the device pointed at a uniform
    target. Attempts to invert all the ISP processing to get back to
    linear R,G,B pixel data.
    """
    # Forward (1/2.2) and inverse (2.2) gamma LUTs as flat
    # [x0, y0, x1, y1, ...] control-point arrays of length 2*L.
    # (Python 2 code: xrange.)
    gamma_lut = numpy.array(
        sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
    inv_gamma_lut = numpy.array(
        sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                             its.caps.per_frame_control(props))
        debug = its.caps.debug_mode()
        largest_yuv = its.objects.get_largest_yuv_format(props)
        if debug:
            fmt = largest_yuv
        else:
            match_ar = (largest_yuv['width'], largest_yuv['height'])
            fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
        # Start from the mid-sensitivity auto-exposure combo, halved so the
        # swept sensitivities (1/3 .. 5/3 of s) stay in range.
        e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
        s /= 2
        sens_range = props['android.sensor.info.sensitivityRange']
        sensitivities = [s*1.0/3.0, s*2.0/3.0, s, s*4.0/3.0, s*5.0/3.0]
        sensitivities = [s for s in sensitivities
                         if s > sens_range[0] and s < sens_range[1]]
        # Manual capture with a known gamma tonemap so it can be inverted.
        req = its.objects.manual_capture_request(0, e)
        req['android.blackLevel.lock'] = True
        req['android.tonemap.mode'] = 0
        req['android.tonemap.curve'] = {
            'red': gamma_lut.tolist(),
            'green': gamma_lut.tolist(),
            'blue': gamma_lut.tolist()}
        r_means = []
        g_means = []
        b_means = []
        for sens in sensitivities:
            req["android.sensor.sensitivity"] = sens
            cap = cam.do_capture(req, fmt)
            img = its.image.convert_capture_to_rgb_image(cap)
            its.image.write_image(
                img, '%s_sens=%04d.jpg' % (NAME, sens))
            # Apply the inverse gamma LUT (y-values only) to linearize.
            img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            rgb_means = its.image.compute_image_means(tile)
            r_means.append(rgb_means[0])
            g_means.append(rgb_means[1])
            b_means.append(rgb_means[2])
        pylab.title(NAME)
        pylab.plot(sensitivities, r_means, '-ro')
        pylab.plot(sensitivities, g_means, '-go')
        pylab.plot(sensitivities, b_means, '-bo')
        pylab.xlim([sens_range[0], sens_range[1]/2])
        pylab.ylim([0, 1])
        pylab.xlabel('sensitivity(ISO)')
        pylab.ylabel('RGB avg [0, 1]')
        matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
    # Check that each plot is actually linear.
    for means in [r_means, g_means, b_means]:
        line, residuals, _, _, _ = numpy.polyfit(range(len(sensitivities)),
                                                 means, 1, full=True)
        print 'Line: m=%f, b=%f, resid=%f'%(line[0], line[1], residuals[0])
        assert residuals[0] < RESIDUAL_THRESHOLD
if __name__ == '__main__':
    main()
| [
"dongdong331@163.com"
] | dongdong331@163.com |
f2c806b3579aa78fdc0528554e68e4e95bfd6ba4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03745/s708529071.py | fda6321c5f6a0b6b2235ba3390bb482ce3d143d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from collections import deque
n = int(input())
a = list(map(int,input().split()))
d = deque(a)
tmp = []
cnt = 0
while d:
v = d.popleft()
if len(tmp)<=1:
pass
else:
if not (v >= tmp[-1] >= tmp[-2] >= tmp[0] or v <= tmp[-1] <= tmp[-2] <= tmp[0]):
tmp = []
cnt += 1
tmp.append(v)
# print(d,tmp,cnt)
if tmp:
cnt+=1
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cf0b16781cb06a5c1d1a297c66310a9e41261b13 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004569.py | 3176b40d2fc0c98546282a3dac165244dfbd02f6 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher82715(CommutativeMatcher):
    # Auto-generated matchpy commutative-matcher state machine (part of the
    # generated Rubi integration rules); do not hand-edit the pattern tables.
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.4.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.4.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily created process-wide singleton instance.
        if CommutativeMatcher82715._instance is None:
            CommutativeMatcher82715._instance = CommutativeMatcher82715()
        return CommutativeMatcher82715._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 82714
        # The trailing unreachable `yield` makes this function a generator,
        # so the bare `return` produces an empty iterator (no matches).
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
af83d06559cbd367b6208cb44f53dde62ca2c08b | 212ccad4e9f19fdcdf7d6b15b16eb3724d18c363 | /bioprocs/scripts/cnvkit/pCNVkitSeg.py | a442568d6a62de0baec24d3fa06b2e9a0d88e1e1 | [
"MIT"
] | permissive | LeaveYeah/bioprocs | 997792add2150467f668f42bea57d195ec7db9ff | c5d2ddcc837f5baee00faf100e7e9bd84222cfbf | refs/heads/master | 2020-04-16T16:48:15.924979 | 2019-02-15T23:02:52 | 2019-02-15T23:02:52 | 165,750,151 | 0 | 0 | MIT | 2019-02-15T23:02:53 | 2019-01-14T23:14:24 | HTML | UTF-8 | Python | false | false | 521 | py | from pyppl import Box
from bioprocs.utils import shell
# NOTE: the {{...}} placeholders are pyppl template expressions rendered at
# job-generation time; this file is not valid Python until rendered.
cnvkit = {{args.cnvkit | quote}}
infile = {{i.infile | quote}}
outfile = {{o.outfile | quote}}
nthread = {{args.nthread | repr}}
params = {{args.params}}
shell.TOOLS['cnvkit'] = cnvkit
# Pin the numeric libraries to one thread each; parallelism is handled by
# cnvkit's own -p option below.
envs = dict(
	OPENBLAS_NUM_THREADS = 1,
	OMP_NUM_THREADS = 1,
	NUMEXPR_NUM_THREADS = 1,
	MKL_NUM_THREADS = 1
)
ckshell = shell.Shell(subcmd = True, equal = ' ', envs = envs).cnvkit
params.o = outfile
params.p = nthread
# Run `cnvkit.py segment` on the coverage ratios file.
ckshell.segment(infile, **params).run()
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
f20293425cb4e9ee0276d3820ee193b9e800b864 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_isoc_org_il_status_available.py | 77cdabf8416280afc5c848dd46cffbb481885417 | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.isoc.org.il/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisIsocOrgIlStatusAvailable(object):
    # Autogenerated fixture-driven test (see the header of this file): parses
    # a canned whois.isoc.org.il response for an available domain and checks
    # the derived record attributes.
    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.isoc.org.il/status_available.txt"
        host = "whois.isoc.org.il"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])
    def test_status(self):
        eq_(self.record.status, 'available')
    def test_available(self):
        eq_(self.record.available, True)
    def test_nameservers(self):
        # An available domain has no nameservers, but the attribute is still
        # an (empty) list.
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])
    def test_registered(self):
        eq_(self.record.registered, False)
    def test_created_on(self):
        # This registry's responses never expose creation/expiry dates.
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
    def test_updated_on(self):
        eq_(self.record.updated_on, None)
    def test_expires_on(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
| [
"dachuy@gmail.com"
] | dachuy@gmail.com |
a958e8c86174eda061fc353adafef21d96d696d5 | b98e9e5ec77b65bf307b87ea1129d3d778a915af | /python/15684_ladder.py | 6fc8f06fdc00ef951c1ae26bfe1f2500d03d3802 | [] | no_license | KimYeong-su/Baekjoon | 8ea5e5fab711d05c0f273e68a849750fdcdbae4b | 0e56b2cfdf67c0e6ffbbe3119e2ab944d418f919 | refs/heads/master | 2021-07-08T11:25:00.019106 | 2021-04-21T12:47:49 | 2021-04-21T12:47:49 | 239,902,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | import sys
# Fast line reader (competitive-programming idiom; shadows builtin input).
input = sys.stdin.readline
# N vertical lines, M existing rungs, H horizontal levels (BOJ 15684).
N, M, H = map(int, input().rstrip('\n').split())
if M == 0:
    # No rungs at all: every column already maps to itself.
    print(0)
    exit()
# maps[row][col] is True when a rung connects column col to col+1 at that row.
maps = [[False]*N for _ in range(H)]
for _ in range(M):
    a, b = map(lambda x: int(x)-1, input().rstrip('\n').split())
    maps[a][b] = True
# Sentinel: at most 3 added rungs are allowed, so 4 means "impossible".
answer = 4
def check():
    """Return True iff every ladder column ends at its own starting index."""
    for start in range(N):
        pos = start
        for row in range(H):
            # A rung to the right moves us right; one to the left moves left.
            if maps[row][pos]:
                pos += 1
            elif pos > 0 and maps[row][pos - 1]:
                pos -= 1
        if pos != start:
            return False
    return True
def dfs(cnt, x, y):
    """Try adding rungs, at most 3, starting from position (x, y); track the
    minimum count that makes check() pass in the global `answer`."""
    global answer
    # Branch-and-bound: no point exceeding the best count found so far.
    if cnt >= answer:
        return
    if check():
        if answer > cnt:
            answer = cnt
        return
    # Scan positions in row-major order from (x, y) so each combination of
    # added rungs is tried exactly once.
    for i in range(x,H):
        k = y if i==x else 0
        for j in range(k,N-1):
            # A rung may only be added where neither adjacent slot has one.
            if not maps[i][j] and not maps[i][j+1]:
                maps[i][j] = True
                dfs(cnt+1, i, j+2)  # j+2 skips the slot blocked by this rung
                maps[i][j] = False
dfs(0,0,0)
print(answer) if answer < 4 else print(-1) | [
"suwon0521@naver.com"
] | suwon0521@naver.com |
b5658157564b48b4a45e26c602cb8f7359e1d74e | 94120f2c22fb2ff44e47a6a545daa9ecbb95c3eb | /Analysis/HiggsTauTau/scripts/compareSystShapes.py | 63240e650ec30733eb71fa6567095179270af6ee | [] | no_license | DebabrataBhowmik/ICHiggsTauTau | 6d4ad3807209232f58d7310858c83d0ce316b495 | c9568974a523c41326df069c0efe1ce86ba4166a | refs/heads/master | 2020-03-31T10:20:44.757167 | 2018-10-08T15:46:16 | 2018-10-08T15:46:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | import ROOT
import UserCode.ICHiggsTauTau.plotting as plotting
import argparse
# Compare up/down systematic-shift histograms against the nominal shape for
# every process in every category directory of a datacard ROOT file, draw a
# comparison plot per shape, and print normalisation ratio tuples at the end.
parser = argparse.ArgumentParser()
parser.add_argument('--datacard', '-d', help='Data card')
parser.add_argument('--systematic', '-s', help='Name of systematic to make the comparison plots for')
parser.add_argument('--output', '-o', help='Output directory')
args = parser.parse_args()

infile = ROOT.TFile(args.datacard)

# Deduce the channel label from the datacard file name (default: mt).
chan = 'mt'
if 'htt_tt.' in args.datacard: chan = 'tt'
if 'htt_et.' in args.datacard: chan = 'et'
if 'htt_em.' in args.datacard: chan = 'em'

to_print = []
for key in infile.GetListOfKeys():
    # Each analysis category is stored as its own TDirectory in the datacard.
    if not isinstance(infile.Get(key.GetName()), ROOT.TDirectory):
        continue
    dirname = key.GetName()
    directory = infile.Get(dirname)
    for dirkey in directory.GetListOfKeys():
        name = dirkey.GetName()
        # Skip jet-fake normalisation variations explicitly.
        if 'norm' in name and 'jetFakes' in name:
            continue
        # Only the "Up" variation of the requested systematic seeds a plot;
        # the matching nominal and "Down" shapes are looked up from it.
        if args.systematic not in name or 'Up' not in name:
            continue
        histo_up = directory.Get(name)
        if not (isinstance(histo_up, ROOT.TH1D) or isinstance(histo_up, ROOT.TH1F)):
            continue
        histo_nom = directory.Get(name.replace('_' + args.systematic + 'Up', ''))
        # NOTE(review): str.replace swaps the FIRST 'Up' occurrence; a process
        # name containing 'Up' before the systematic suffix would break the
        # 'Down' lookup (and the plot name below) -- confirm naming scheme.
        histo_down = directory.Get(name.replace('Up', 'Down'))
        plot_name = '%s/systs_%s_%s' % (args.output, dirname, name.replace('Up', ''))
        plotting.CompareSysts([histo_nom, histo_up, histo_down],
                              plot_name,
                              dirname + "_" + name.replace('Up', ''))
        proc = name.replace('_' + args.systematic + 'Up', '')
        # Map the category directory name onto the analysis bin number;
        # unknown categories (and a few process classes) are not printed.
        noPrint = False
        if '0jet' in dirname: binnum = 1
        elif 'boosted' in dirname and 'dijet' not in dirname: binnum = 2
        elif 'dijet_loosemjj_lowboost' in dirname: binnum = 3
        elif 'dijet_loosemjj_boosted' in dirname: binnum = 4
        elif 'dijet_tightmjj_lowboost' in dirname: binnum = 5
        elif 'dijet_tightmjj_boosted' in dirname: binnum = 6
        else: noPrint = True
        if ('_jhu_' in proc or '_ph_' in proc or '_total_bkg_' in proc
                or '_ZTT_' in proc or 'plus' in proc or 'minus' in proc):
            noPrint = True
        if histo_nom.Integral() > 0 and not noPrint:
            up = histo_up.Integral() / histo_nom.Integral()
            down = histo_down.Integral() / histo_nom.Integral()
            to_print.append('({\"%s\"}, {%i}, {\"%s\"}, %.3f, %.3f)' % (chan, binnum, proc, down, up))
    directory.Close()
infile.Close()

# Parenthesised single-argument print is valid under both Python 2 and 3.
for i in to_print:
    print(i)
"daniel.winterbottom@cern.ch"
] | daniel.winterbottom@cern.ch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.