blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c925b2637715a9f2f135c947d06035c815739be7
|
434b6556038ad326ffaa8584a8a91edf8ad5c037
|
/DP-1/MinStepsTo1_IterativeDP.py
|
a3f670d079c5c84019726bd05d7717a5a128e3d0
|
[] |
no_license
|
Pranav016/DS-Algo-in-Python
|
60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2
|
5557e371ccdf801d78ba123ca83c0dd47b3bdb3b
|
refs/heads/master
| 2023-01-23T08:29:32.186861
| 2020-11-01T17:14:12
| 2020-11-01T17:14:12
| 284,651,382
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import sys
def minStepsIterative(n):
    """Return the minimum number of steps needed to reduce n to 1.

    Allowed moves per step: n -> n - 1, n -> n // 2 (if divisible by 2),
    n -> n // 3 (if divisible by 3).  Classic bottom-up DP, O(n) time and
    space.

    Args:
        n: target integer; values <= 1 need no steps.

    Returns:
        Minimum step count as an int.
    """
    if n <= 1:
        # The table below needs indices 0 and 1; the original code raised
        # IndexError for n == 0.  No steps are required for n <= 1.
        return 0
    dp = [-1] * (n + 1)
    dp[0] = dp[1] = 0
    for i in range(2, n + 1):
        ans1 = ans2 = ans3 = sys.maxsize
        if i % 3 == 0:
            ans1 = dp[i // 3]
        if i % 2 == 0:
            ans2 = dp[i // 2]
        ans3 = dp[i - 1]  # subtracting 1 is always allowed
        dp[i] = 1 + min(ans1, ans2, ans3)
    return dp[n]
# main
# Script entry: read the target integer from stdin and print the minimum
# number of steps needed to reduce it to 1.
n=int(input())
print(minStepsIterative(n))
|
[
"pranavmendi@gmail.com"
] |
pranavmendi@gmail.com
|
7f6ada10257846c167743e85db32815a21168d88
|
d24cef73100a0c5d5c275fd0f92493f86d113c62
|
/SRC/common/brushstyle.spy
|
e0783b6eccb843c7e84f0f7042b33858e2133c9c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
rlinder1/oof3d
|
813e2a8acfc89e67c3cf8fdb6af6b2b983b8b8ee
|
1fb6764d9d61126bd8ad4025a2ce7487225d736e
|
refs/heads/master
| 2021-01-23T00:40:34.642449
| 2016-09-15T20:51:19
| 2016-09-15T20:51:19
| 92,832,740
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
spy
|
# -*- python -*-
# $RCSfile: brushstyle.spy,v $
# $Revision: 1.2.18.1 $
# $Author: langer $
# $Date: 2014/09/27 22:33:47 $
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.common import cregisteredclass
from ooflib.common.IO import parameter
from ooflib.common.IO import xmlmenudump
# BrushStylePtr, CircleBrush and SquareBrush are not defined in this file;
# presumably they are SWIG-generated wrapper classes injected into this
# module's namespace by the OOF build machinery (.spy files are Python
# post-processing for the C++ bindings) -- TODO confirm against the build.
cregisteredclass.registerCClass(BrushStylePtr)

# User-visible tooltip / docs attached to the wrapped C++ class.
BrushStylePtr.tip = "Brush styles for pixel selection."
BrushStylePtr.discussion = """<para>
Objects of the <classname>BrushStyle</classname> are used as the
<varname>style</varname> parameter in the <xref
linkend='MenuItem:OOF.Graphics_n.Toolbox.Pixel_Select.Brush'/> command
for selecting pixels.
</para>"""

# Register the concrete brush shapes; `ordering` controls menu position.
circleReg = cregisteredclass.Registration(
    "Circle",
    BrushStylePtr,
    CircleBrush,
    ordering=0,
    params=[parameter.FloatParameter('radius', 0,
        tip='Radius of the brush in physical units.')],
    tip="Brush with a circular profile.",
    discussion=xmlmenudump.loadFile('DISCUSSIONS/common/reg/circlebrush.xml')
)

squareReg = cregisteredclass.Registration(
    "Square",
    BrushStylePtr,
    SquareBrush,
    ordering=1,
    params=[parameter.FloatParameter('size', 0,
        tip='Half the side of the brush in physical units.')],
    tip="Brush with a square profile.",
    discussion=xmlmenudump.loadFile('DISCUSSIONS/common/reg/squarebrush.xml')
)
|
[
"faical.congo@nist.gov"
] |
faical.congo@nist.gov
|
381da1385d47377eeaa12cedb3eee290fa920879
|
3517bef3bb174fef1f2f6a5edd221d23af2a4a99
|
/backend/emma_phillips_3684/urls.py
|
4c3f152a16e4c4be9cdae8fe722eacab23a51993
|
[] |
no_license
|
crowdbotics-apps/emma-phillips-3684
|
e8fe2c0c7e68abda4f199c4cc4396bb2268afffc
|
7ee256d6a167d5236800751a478023c48056b162
|
refs/heads/master
| 2020-05-25T23:38:41.448295
| 2019-05-22T12:55:07
| 2019-05-22T12:55:07
| 188,038,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
"""emma_phillips_3684 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'Emma Phillips'
admin.site.site_title = 'Emma Phillips Admin Portal'
admin.site.index_title = 'Emma Phillips Admin'
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
55fc69c3daeea8d2a118a7e02188c932abb24f83
|
8d2a785ffc06ec46a546cdf50af41054a382f05a
|
/classes/day11/practice/数据库博库/db_con_insert.py
|
201679238f0035ffd314aad79db425ae24484c53
|
[] |
no_license
|
Pigcanflysohigh/Py27
|
4be0d9ad93f5d695c48fd89157952230ec4d111a
|
2f6568fce2a6f09c73cdc08342a8b05645c87736
|
refs/heads/master
| 2020-06-18T08:09:08.217036
| 2019-11-20T16:14:05
| 2019-11-20T16:14:05
| 196,225,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
import pymysql
# Connect to the MySQL server and insert one row into table t8.
# NOTE(review): host/user/password are hard-coded; move to config before
# any real use.
conn = pymysql.connect(host='10.211.55.5',user='root',password='root',database='mlg')
cur = conn.cursor()
cur.execute("insert into t8 values('女','rap');")
# Operations that modify/write database content (insert/update/delete)
# generally require an explicit commit to take effect.
# (translated from Chinese)
conn.commit()
cur.close()
conn.close()
|
[
"729167925@qq.com"
] |
729167925@qq.com
|
3be2fbf38cc96ef463cecd4366ea19e030cca99a
|
3c8aaef535328f7c4d812cf086a637b27d891752
|
/interview/google/hard/LC327.py
|
6e8c0bbf06d684ef7b05a58684d8b05985f7d649
|
[] |
no_license
|
zhangshv123/superjump
|
9339cd7f5e75d8a94be60d44c752267cc38183d3
|
7de5f69e6e44ca4e74d75fed2af390b3d2cbd2b9
|
refs/heads/master
| 2020-03-20T20:36:34.378950
| 2019-03-08T04:37:22
| 2019-03-08T04:37:22
| 137,696,605
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
#!/usr/bin/python
"""
First compute the prefix sums: first[m] is the sum of the first m numbers.
Then the sum of any subarray nums[i:k] is simply first[k] - first[i].
So we just need to count those where first[k] - first[i] is in [lower,upper].
To find those pairs, I use mergesort with embedded counting. The pairs in the left half and the pairs in the right half get counted in the recursive calls. We just need to also count the pairs that use both halves.
For each left in first[lo:mid] I find all right in first[mid:hi] so that right - left lies in [lower, upper]. Because the halves are sorted, these fitting right values are a subarray first[i:j]. With increasing left we must also increase right, meaning must we leave out first[i] if it's too small and and we must include first[j] if it's small enough.
Besides the counting, I also need to actually merge the halves for the sorting. I let sorted do that, which uses Timsort and takes linear time to recognize and merge the already sorted halves.
"""
def countRangeSum(self, nums, lower, upper):
    """Return the number of range sums of `nums` lying in [lower, upper].

    Builds prefix sums, then counts qualifying (i, k) pairs with a
    merge-sort that merges and counts simultaneously -- O(n log n).
    `self` is unused (method copied from a LeetCode class solution).

    Args:
        nums: sequence of integers.
        lower: inclusive lower bound of the target range.
        upper: inclusive upper bound of the target range.

    Returns:
        Count of subarrays nums[i:k] whose sum is within [lower, upper].
    """
    first = [0]
    for num in nums:
        first.append(first[-1] + num)

    def sort(lo, hi):
        # BUGFIX: use floor division.  The original `(lo + hi) / 2` is a
        # float on Python 3, which then fails as a slice bound/index.
        mid = (lo + hi) // 2
        if mid == lo:
            return 0
        count = sort(lo, mid) + sort(mid, hi)
        i = j = mid
        # For each prefix in the sorted left half, the fitting right-half
        # prefixes form a window [i, j) that only moves forward.
        for left in first[lo:mid]:
            while i < hi and first[i] - left < lower:
                i += 1
            while j < hi and first[j] - left <= upper:
                j += 1
            count += j - i
        # Timsort recognizes the two sorted runs and merges in linear time.
        first[lo:hi] = sorted(first[lo:hi])
        return count

    return sort(0, len(first))
|
[
"sz1266@nyu.edu"
] |
sz1266@nyu.edu
|
76da49d71b796760d616591e77762c095f4a80b5
|
d2f893a95f74b59ec7f073a008d9502c22afb04a
|
/cwcnn/extend/cuda_functions/round_cuda.py
|
8066ff17b0f52d0d7e12a1c8cf7951624e09e703
|
[] |
no_license
|
liu-yangyang/CNN-based-Image-Compression-Guided-by-YOLOv2
|
bbf03ce26b51e1247b1655c6d8aa909530702339
|
850391525908ca751b832ed882eca3f5eccd819c
|
refs/heads/master
| 2020-03-19T03:07:26.152652
| 2018-05-27T08:23:09
| 2018-05-27T08:23:09
| 135,697,677
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
import torch as th
from torch.autograd import Variable, Function
#include_path = '/home/snk/Desktop/workspace/pytorch_implement/extend/'
# NOTE(review): machine-specific hard-coded path to the compiled CUDA
# extension; must be edited (or made configurable) on other machines.
include_path = "/home/zhangwenqiang/jobs/pytorch_implement/extend"
import sys
if include_path not in sys.path:
    sys.path.append(include_path)
from round import round_forward_wrapper, round_backward_wrapper


class RoundCudaFunction(Function):
    '''
    Pytorch Function wrapper of cuda implementation of round layer.

    NOTE(review): uses the legacy autograd API (forward/backward as
    instance methods, invoked via RoundCudaFunction()(x)) -- presumably
    matching the installed torch version; confirm before upgrading torch.
    '''
    def forward(self, x):
        # Output buffer starts zeroed; the CUDA kernel fills in the
        # rounded values element-wise.
        y = th.zeros_like(x)
        round_forward_wrapper(x, y, x.numel())
        return y

    def backward(self, grad_y):
        grad_x = th.zeros_like(grad_y)
        round_backward_wrapper(grad_x, grad_y, grad_y.numel())
        return grad_x


class RoundCuda(th.nn.Module):
    '''Thin nn.Module wrapper so the rounding op composes with networks.'''
    def forward(self, x):
        return RoundCudaFunction()(x)


def test():
    # Smoke test: requires a CUDA device and the compiled `round` extension.
    inp_tensor = th.Tensor([[1,2],[-1,0]]).cuda()
    inp = th.sigmoid(inp_tensor)
    x = Variable(inp, requires_grad = True)
    round_= RoundCuda()
    y = round_(x)
    print (x)
    print (y)
    # Backprop an arbitrary upstream gradient and inspect x.grad.
    y.backward(th.cuda.FloatTensor([[1,2],[3,4]]))
    print (x.grad)


if __name__ == '__main__':
    test()
|
[
"1243535683@qq.com"
] |
1243535683@qq.com
|
9acd73bb18291d9817ce151961855002e16b1075
|
fdca7a2818602fa40b0848a15f630afb68a2ec13
|
/page/search.py
|
163526ad1ac64c5ba863746b6dc9ed85152a1222
|
[] |
no_license
|
sisul1204/appium_zhuangshiqi
|
0bded9307c92f57749ad6eb514431b0348deaebc
|
c457d267fee86ee0f516e3cbab25afd514a7c7fc
|
refs/heads/main
| 2023-01-07T07:05:31.420987
| 2020-11-13T03:47:48
| 2020-11-13T03:47:48
| 312,163,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# * coding:utf-8 *
# Author:sisul
#创建时间:2020/11/11 17:11
import yaml
from selenium.webdriver.common.by import By
from page.base_page import BasePage
class Search(BasePage):
    """Page object for the search screen.

    Every action stores the ``name`` parameter and replays the step
    sequence defined in ``../page/search.yaml``.
    """

    _STEPS_FILE = '../page/search.yaml'

    def _run(self, name):
        # Shared driver: record the parameter, then execute the YAML steps.
        self._params['name'] = name
        return self.steps(self._STEPS_FILE)

    def search(self, name):
        """Perform a search for *name*."""
        self._run(name)

    def add(self, name):
        """Add *name* via the search flow."""
        self._run(name)

    def is_choose(self, name):
        """Return the step result indicating whether *name* is selected."""
        return self._run(name)

    def reset(self, name):
        """Reset the search state for *name*."""
        self._run(name)
|
[
"lizhipeng1021@163.com"
] |
lizhipeng1021@163.com
|
d22d9284208ebd8f92edf3f7139fd34bf723d63a
|
6bf1b595a7f4d3cbf0995455869d438a7d0e0624
|
/lingvo/core/scatter_update.py
|
4be545fde721c2587c20f8f3dff3ccfb2a8f9048
|
[
"Apache-2.0"
] |
permissive
|
huaxz1986/lingvo
|
889abc82b1bab6f37ba861c41eb480b7e89362c0
|
b83984577610423e3b1c6b04ca248cd23f2842f7
|
refs/heads/master
| 2022-05-15T03:29:56.903688
| 2022-04-02T01:41:25
| 2022-04-02T01:41:25
| 173,536,461
| 1
| 0
|
Apache-2.0
| 2019-03-03T05:52:01
| 2019-03-03T05:52:01
| null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for scatter updates."""
import contextlib
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core import thread_local_utils
# Thread-local stack of inplace-update preferences: SetInplaceUpdate
# pushes/pops entries, UseInplaceUpdate reads the innermost one.
_global_inplace_update_stack = thread_local_utils.ThreadLocalStack()
@contextlib.contextmanager
def SetInplaceUpdate(inplace_update):
  """Scopes the inplace-update preference for the current thread.

  While the context is active, UseInplaceUpdate() returns
  `inplace_update`; the previous setting is restored on exit.
  """
  stack = _global_inplace_update_stack.stack
  stack.append(inplace_update)
  try:
    yield
  finally:
    # Always unwind, even if the body raised.
    stack.pop()
def UseInplaceUpdate():
  """Returns the innermost SetInplaceUpdate value, or True if none is set."""
  stack = _global_inplace_update_stack.stack
  if stack:
    return stack[-1]
  # TODO(rpang): set the default value to False in a follow-up CL.
  return True
def Update(x, i, v, *, inplace_update=None):
  """Performs scatter update: x[i] = v.

  A drop-in replacement for inplace_ops.alias_inplace_update (
  aka tf.InplaceUpdate).

  Args:
    x: the source tensor.
    i: the index tensor. If None, do x = v. If a scalar, do x[i, ...] = v. If a
      vector, do x[j, ...] = v[j, ...] for j in i.
    v: the update value tensor.
    inplace_update: whether to perform inplace updates. If None, follows the
      current context set by SetInplaceUpdate.

  Returns:
    The updated tensor.
  """
  if inplace_update is None:
    inplace_update = UseInplaceUpdate()
  if inplace_update:
    return tf.InplaceUpdate(x, i, v)
  if i is None:
    # Full replacement: return v after asserting it matches x's shape.
    return py_utils.HasShape(v, tf.shape(x))
  i = tf.convert_to_tensor(i)
  # Only rank-0 (scalar) or rank-1 (vector) indices are supported.
  assert i.shape, i
  assert i.shape.rank in (0, 1), i
  if i.shape.rank == 0:
    # Scalar index: rebuild the tensor as [rows before i, v, rows after i].
    y = tf.concat([x[:i, ...], v[None, ...], x[i + 1:, ...]], axis=0)
    # concat loses the static shape; restore it from x.
    y.set_shape(x.shape)
    return y
  # Vector of row indices: scatter rows of v into x out-of-place.
  return tf.tensor_scatter_nd_update(x, i[:, None], v)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
b832f61d96cc32d7408a37057ca8f6beeaa6d209
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/dalvik/dex/inject/InjectPayloadDexRange.pyi
|
6efab5017e40b6f8e42699d88edaa8c65a0cbbb8
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
pyi
|
from typing import List
import ghidra.app.plugin.processors.sleigh
import ghidra.program.model.lang
import ghidra.program.model.listing
import ghidra.program.model.pcode
import java.lang
# Auto-generated type stub for Ghidra's InjectPayloadDexRange (Dalvik/DEX
# p-code injection).  NOTE(review): `unicode`, `long` and the undecorated
# `@overload` follow the generator's Jython-2.7 stub conventions and are not
# valid Python 3 names -- presumably resolved by the stub consumer; confirm
# before running a Python 3 type checker over these files.
class InjectPayloadDexRange(object, ghidra.program.model.lang.InjectPayload):
    # Payload-type discriminator constants mirrored from the Java interface.
    CALLFIXUP_TYPE: int = 1
    CALLMECHANISM_TYPE: int = 3
    CALLOTHERFIXUP_TYPE: int = 2
    EXECUTABLEPCODE_TYPE: int = 4

    def __init__(self): ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def getInput(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
    def getName(self) -> unicode: ...
    def getOutput(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
    def getParamShift(self) -> int: ...
    def getPcode(self, __a0: ghidra.program.model.listing.Program, __a1: ghidra.program.model.lang.InjectContext) -> List[ghidra.program.model.pcode.PcodeOp]: ...
    def getSource(self) -> unicode: ...
    def getType(self) -> int: ...
    def hashCode(self) -> int: ...
    def inject(self, __a0: ghidra.program.model.lang.InjectContext, __a1: ghidra.app.plugin.processors.sleigh.PcodeEmit) -> None: ...
    def isFallThru(self) -> bool: ...
    # Object monitor methods inherited from java.lang.Object.
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    # JavaBean-style properties mapping onto the getters above.
    @property
    def fallThru(self) -> bool: ...
    @property
    def input(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
    @property
    def name(self) -> unicode: ...
    @property
    def output(self) -> List[ghidra.program.model.lang.InjectPayload.InjectParameter]: ...
    @property
    def paramShift(self) -> int: ...
    @property
    def source(self) -> unicode: ...
    @property
    def type(self) -> int: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
741d55862503d5f145872c689ccfd3f4780a57c2
|
645b5211c50b1a07a5d576b96624b22055802dc4
|
/pvw-dependencies/pv-flow/flow/plugins/__init__.py
|
1b8e3c325a97cccaf1c8c5f4654e6b07c027e9f1
|
[
"Apache-2.0"
] |
permissive
|
dealenx/hpccloud-kemsu
|
7c3a33e5ce01560d6fc7abcb9524e4526b9f4848
|
42fc44b06385c6eb25a979477dcea53fe66cfbfa
|
refs/heads/master
| 2023-02-05T21:13:07.328928
| 2021-06-25T04:56:39
| 2021-06-25T04:56:39
| 252,550,259
| 3
| 0
|
Apache-2.0
| 2023-01-24T23:21:39
| 2020-04-02T19:41:31
|
Python
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
import os
from paraview import simple
# -----------------------------------------------------------------------------
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))

# Plugin scripts shipped next to this module (resolved against MODULE_PATH).
PLUGINS = [
    'parflow.py'
]

# Binary plugins referenced by absolute path.
# NOTE(review): macOS-specific hard-coded path; must be adapted per install.
FULL_PATHS = [
    '/Applications/ParaView-5.6.0-1626-g52acf2f741.app/Contents/Plugins/ParFlow.so',
]

# -----------------------------------------------------------------------------
# Load the plugins
# -----------------------------------------------------------------------------
# Loading happens at import time as a deliberate module side effect.
for plugin in PLUGINS:
    simple.LoadPlugin(os.path.join(MODULE_PATH, plugin))

for plugin in FULL_PATHS:
    simple.LoadPlugin(plugin)
|
[
"sebastien.jourdain@kitware.com"
] |
sebastien.jourdain@kitware.com
|
6cd6eee44b489002c6e6e5258534b94e5f4f6c30
|
6be956588b6bfdb5004d812872ef23973de9e07c
|
/l_006_docker/ubuntu/load_all.py
|
e24265db010f75fe97e70320bfbd6f2c4639320b
|
[] |
no_license
|
Serg-sh/teleBots_aiogram
|
45c8ee60501cff9a6035dbbab820975aade897e8
|
4bc9d452b6b6098fb25d4c9704a025737d59d4c8
|
refs/heads/master
| 2023-06-10T14:28:10.696277
| 2021-07-05T14:30:46
| 2021-07-05T14:30:46
| 375,966,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
import asyncio
import logging
from aiogram import Bot, types
from aiogram import Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from config import TOKEN
from sql import create_pool, create_db
# from aiogram.contrib.fsm_storage.redis import RedisStorage2
# Configure root logging once for the whole bot process.
logging.basicConfig(format=u'%(filename)s [LINE:%(lineno)d] #%(levelname)-8s [%(asctime)s] %(message)s',
                    level=logging.INFO)
loop = asyncio.get_event_loop()
# We do not need a separate loop/thread here, since one is created inside
# the dispatcher anyway.  (translated from Russian)
# Set up storage (either in Redis or Memory)
storage = MemoryStorage()
# storage = RedisStorage2()
bot = Bot(token=TOKEN, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot, storage=storage)
# Create the DB connection pool synchronously at import time; this blocks
# until the pool is ready.
db = loop.run_until_complete(create_pool())
|
[
"serg.shp@gmail.com"
] |
serg.shp@gmail.com
|
f5efd08c910b830ef549e690e939ca4a01a2f950
|
9615178b79a69519883c092b20cfdd060b426a69
|
/sublemon/version.py
|
2e863c21379228f97e1ba1ac4cb053db461ad577
|
[
"MIT"
] |
permissive
|
emuhedo/sublemon
|
3635799567a8b18f863d582e7b23d6840069ce37
|
198da2ec96d4d50c7017f4ebfa8e69e5aa0681b0
|
refs/heads/master
| 2020-04-02T09:22:31.272188
| 2018-10-20T19:10:18
| 2018-10-20T19:10:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
"""Version info for the `sublemon` library."""
__version_info__ = (0, 0, 2)
__version__ = '.'.join(str(i) for i in __version_info__)
|
[
"welch18@vt.edu"
] |
welch18@vt.edu
|
8a70a022a1bff29b68d30aa56e8fbe8aadb30ed0
|
14f1af759b594b4fab570fd98fc8dceae668d62f
|
/file_IO_exercise/bonus_crashTest.py
|
b5c7ff5ef2a8d870dfe9eba6a717490f50abbd02
|
[] |
no_license
|
ziqingW/python-exercise-flex-Mar08
|
cb10bf8c6f376808ff5bfadc86066a7c31b48120
|
070be19fb63b1ec6312c477bb656c19339448d67
|
refs/heads/master
| 2021-04-03T01:45:53.533366
| 2018-03-09T16:51:43
| 2018-03-09T16:51:43
| 124,464,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
import io
file_handle = io.StringIO()
# Deliberate crash experiment: append 1 MiB of "B" per iteration forever to
# an in-memory buffer and report its size, to see when MemoryError hits.
# Do not "fix" the infinite loop -- running out of memory is the point.
while True:
    file_handle.write("B" * 1024 * 1024)
    # getvalue() copies the whole buffer, so this also grows the working set.
    size_contents = len(file_handle.getvalue())
    print("Characters count: {}".format(size_contents))
# crash happens at counting of 208666624
# MemoryError
# I use 1mb as one time write, so totally it reached to 200mb
# it's later than I thought, I never expect cloud9 has memory as much as 200mb
|
[
"redtaq@hotmail.com"
] |
redtaq@hotmail.com
|
2e18e65d3098282a56dc5d1a6a480e964b0032af
|
176bda9771b0ec07a3206112eb4dbd34f5dc293a
|
/seznamy/seznamy/06_zmena_polozky.py
|
2f5345e2776005aa4f10f4fe61bfd83a219421a2
|
[] |
no_license
|
HelenaJanda/pyladies-7
|
8b4254f85295fb5695c60b1b5d8f70e29d1a999f
|
00613e2ff1bea2b8b8f60d3e4ce7a83345f2300d
|
refs/heads/master
| 2022-12-24T03:12:35.338933
| 2020-10-14T15:32:46
| 2020-10-14T15:32:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
# Let's try to change a letter in a string.  (translated from Czech)
retezec = "ahoj"
# Teaching demo: strings are immutable, so the next line raises TypeError
# on purpose -- nothing after it runs.
retezec[0] = "A"
print(retezec)
# And now in a list -- lists are mutable, so item assignment works.
# (translated from Czech)
cisla = [1, 0, 2, 3, -6, 8, 13]
cisla[0] = 42
print(cisla)
|
[
"vojtech.ruz@gmail.com"
] |
vojtech.ruz@gmail.com
|
676193468f407b65515fcd72b955175e02eb7f4c
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ggH_SF/Full2017_v6/cuts_loose.py
|
5b5523ca9223e6190987132c5c63712a4cce2ab3
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,984
|
py
|
# Baseline event selection applied on top of every category below.
# The cut strings are ROOT/TTree-style boolean expressions evaluated by the
# plotting framework; keep their exact text intact.
supercut = ' mll>12 \
&& Lepton_pt[0]>25 \
&& Lepton_pt[1]>10 \
&& (abs(Lepton_pdgId[1])==13 || Lepton_pt[1]>13) \
&& (nLepton>=2 && Alt$(Lepton_pt[2],0)<10) \
&& abs(Lepton_eta[0])<2.5 && abs(Lepton_eta[1])<2.5 \
&& ptll>30 \
&& PuppiMET_pt > 20 \
'

# Some cuts
# DY-MVA window (0.8, 0.95) per jet-bin/category, used by the "loose" regions.
dymva0jet = 'dymva_alt_dnn_0j > 0.8 && dymva_alt_dnn_0j < 0.95'
dymva1jet = 'dymva_alt_dnn_1j > 0.8 && dymva_alt_dnn_1j < 0.95'
dymva2jet = 'dymva_alt_dnn_2j > 0.8 && dymva_alt_dnn_2j < 0.95'
dymvaVBF = 'dymva_alt_dnn_VBF > 0.8 && dymva_alt_dnn_VBF < 0.95'
dymvaVH = 'dymva_alt_dnn_VH > 0.8 && dymva_alt_dnn_VH < 0.95'

# NOTE(review): the `cuts` dict is injected by the mkShapes/plotting
# framework that exec's this configuration file -- it is not defined here.

# Higgs Signal Regions: ee/uu * 0/1/2 jet
cuts['hww2l2v_13TeV'] = {
    'expr': 'sr && (Lepton_pdgId[0]==-Lepton_pdgId[1])' ,
    'categories' : {
        '0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs0jet && '+dymva0jet,
        '0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs0jet && '+dymva0jet,
        '1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs1jet && '+dymva1jet,
        '1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs1jet && '+dymva1jet,
        '2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs2jet && '+dymva2jet,
        '2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs2jet && '+dymva2jet,
        '2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvh && '+dymvaVH,
        '2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvh && '+dymvaVH,
        '2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvbf && '+dymvaVBF,
        '2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvbf && '+dymvaVBF,
    }
}

## DY Background IN with DYMVA>0.9X : Split ee/mm , No H cut !
cuts['hww2l2v_13TeV_DYin'] = {
    'expr' : 'Zpeak && bVeto',
    'categories' : {
        '0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva0jet,
        '0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva0jet,
        '0j_df' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva0jet,
        '1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva1jet,
        '1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva1jet,
        '1j_df' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva1jet,
        '2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymva2jet,
        '2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymva2jet,
        '2j_df' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymva2jet,
        '2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymvaVH,
        '2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymvaVH,
        '2j_vh_df' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymvaVH,
        '2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && '+dymvaVBF,
        '2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && '+dymvaVBF,
        '2j_vbf_df' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) && '+dymvaVBF,
    }
}

# Numerator for DY acceptance in Signal region
# Same as the signal region but with only the lower DY-MVA bound (> 0.8).
cuts['hww2l2v_13TeV_HAccNum'] = {
    'expr': 'sr && (Lepton_pdgId[0]==-Lepton_pdgId[1])' ,
    'categories' : {
        '0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs0jet && dymva_alt_dnn_0j > 0.8',
        '0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs0jet && dymva_alt_dnn_0j > 0.8',
        '1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs1jet && dymva_alt_dnn_1j > 0.8',
        '1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs1jet && dymva_alt_dnn_1j > 0.8',
        '2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgs2jet && dymva_alt_dnn_2j > 0.8',
        '2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgs2jet && dymva_alt_dnn_2j > 0.8',
        '2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvh && dymva_alt_dnn_VH > 0.8',
        '2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvh && dymva_alt_dnn_VH > 0.8',
        '2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && Higgsvbf && dymva_alt_dnn_VBF > 0.8',
        '2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && Higgsvbf && dymva_alt_dnn_VBF > 0.8',
    }
}

## Acc Denominator
# NOTE(review): this 'expr' uses `*` where the other regions use `&&`; for
# 0/1-valued TTree boolean expressions the product behaves like AND, so this
# is presumably intentional -- confirm with the analysis authors.
cuts['hww2l2v_13TeV_AccDen'] = {
    'expr' : 'sr * (Lepton_pdgId[0]==-Lepton_pdgId[1])',
    'categories' : {
        '0j_ee' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_0j > 0.8',
        '0j_mm' : 'zeroJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_0j > 0.8',
        '1j_ee' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_1j > 0.8',
        '1j_mm' : ' oneJet && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_1j > 0.8',
        '2j_ee' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_2j > 0.8',
        '2j_mm' : ' 2jggH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_2j > 0.8',
        '2j_vh_ee' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_VH > 0.8',
        '2j_vh_mm' : ' 2jVH && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_VH > 0.8',
        '2j_vbf_ee' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) && dymva_alt_dnn_VBF > 0.8',
        '2j_vbf_mm' : ' 2jVBF && (Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) && dymva_alt_dnn_VBF > 0.8',
    }
}
|
[
"nicolo.trevisani@cern.ch"
] |
nicolo.trevisani@cern.ch
|
81eaca22372f4565c7fec498ad3d996d96707f81
|
2ecfe0e10d10513917e4f2770e0a56075404c5d8
|
/oldnumba/exttypes/jitclass.py
|
2e89053cda7b823f46c4d499735c02ce96b0d72c
|
[
"BSD-2-Clause"
] |
permissive
|
laserson/numba
|
84ab7615ea0177b496a63e2a86319f0b12992cd2
|
35546517b27764a9120f6dfcd82eba7f4dd858cb
|
refs/heads/master
| 2020-05-20T23:13:23.011971
| 2014-12-08T20:16:20
| 2014-12-08T20:16:20
| 16,754,385
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,662
|
py
|
"""
Compiling @jit extension classes works as follows:
* Create an extension Numba type holding a symtab
* Capture attribute types in the symtab ...
* ... from the class attributes:
@jit
class Foo(object):
attr = double
* ... from __init__
@jit
class Foo(object):
def __init__(self, attr):
self.attr = double(attr)
* Type infer all methods
* Compile all extension methods
* Process signatures such as @void(double)
* Infer native attributes through type inference on __init__
* Path the extension type with a native attributes struct
* Infer types for all other methods
* Update the ext_type with a vtab type
* Compile all methods
* Create descriptors that wrap the native attributes
* Create an extension type:
{
PyObject_HEAD
...
virtual function table (func **)
native attributes
}
The virtual function table (vtab) is a ctypes structure set as
attribute of the extension types. Objects have a direct pointer
for efficiency.
"""
from numba import typesystem
from numba.exttypes import virtual
from numba.exttypes import signatures
from numba.exttypes import validators
from numba.exttypes import compileclass
from numba.exttypes import ordering
from numba.exttypes import types as etypes
#------------------------------------------------------------------------
# Jit Extension Class Compiler
#------------------------------------------------------------------------
class JitExtensionCompiler(compileclass.ExtensionCompiler):
    """
    Compile @jit extension classes.

    Only overrides the validator sets; all compilation machinery comes
    from compileclass.ExtensionCompiler.
    """
    # Validators run over methods / the extension type before compiling.
    method_validators = validators.jit_validators
    exttype_validators = validators.jit_type_validators
#------------------------------------------------------------------------
# Build Attributes Struct
#------------------------------------------------------------------------
class JitAttributeBuilder(compileclass.AttributeBuilder):
    """Builds native-attribute descriptors for @jit extension types."""

    def finalize(self, ext_type):
        # Lay attributes out in 'extending' order (subclass attributes
        # appended after base-class ones).
        ext_type.attribute_table.create_attribute_ordering(ordering.extending)

    def create_descr(self, attr_name):
        """
        Create a descriptor that accesses the attribute on the ctypes struct.
        This is set by the extension type constructor __new__.
        """
        # NOTE: `self` inside _get/_set is the *extension instance* the
        # property ends up on, not this builder; the value lives in the
        # instance's native struct (self._numba_attrs).
        def _get(self):
            return getattr(self._numba_attrs, attr_name)
        def _set(self, value):
            return setattr(self._numba_attrs, attr_name, value)
        return property(_get, _set)
#------------------------------------------------------------------------
# Build Extension Type
#------------------------------------------------------------------------
def create_extension(env, py_class, flags):
    """
    Compile an extension class given the NumbaEnvironment and the Python
    class that contains the functions that are to be compiled.

    Returns the compiled extension type produced by the compiler pipeline.
    """
    # Drop a caller-supplied llvm_module; the compiler manages its own.
    flags.pop('llvm_module', None)
    # ext_type = etypes.jit_exttype(py_class)
    ext_type = typesystem.jit_exttype(py_class)
    extension_compiler = JitExtensionCompiler(
        env, py_class, dict(vars(py_class)), ext_type, flags,
        signatures.JitMethodMaker(),
        compileclass.AttributesInheriter(),
        compileclass.Filterer(),
        JitAttributeBuilder(),
        virtual.StaticVTabBuilder(),
        compileclass.MethodWrapperBuilder())
    # Pipeline order matters: init -> type inference -> table finalization
    # -> validation -> final compilation.
    extension_compiler.init()
    extension_compiler.infer()
    extension_compiler.finalize_tables()
    extension_compiler.validate()
    extension_type = extension_compiler.compile()
    return extension_type
|
[
"markflorisson88@gmail.com"
] |
markflorisson88@gmail.com
|
e54bf79baf0acedc05e53d70a7ae27e37fdab96a
|
59254f1c203bd7ebd3a5d85d5ec31959c1e90182
|
/rdis/formalisms/RDIS/primitive2connection.py
|
a7f32d38fdb66e9533143417be4bde7ef91ff1b1
|
[] |
no_license
|
monicadelaine/preop_create
|
c9c687012a23d99d200d4396237ba69862a285fc
|
34dbe0bb8d96d6adcb2c79ac33474007044b65dd
|
refs/heads/master
| 2020-04-11T04:23:20.150444
| 2013-02-22T22:02:51
| 2013-02-22T22:02:51
| 68,144,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
"""
__primitive2connection.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: pkilgo
Modified: Sun Apr 15 13:18:31 2012
______________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from graph_primitive2connection import *
class primitive2connection(ASGNode, ATOM3Type):
    """AToM3-generated syntactic link type (see file header: DO NOT MODIFY).

    Combines the abstract-syntax-graph node behaviour (ASGNode) with the
    ATOM3 type protocol; rendered via graph_primitive2connection.
    """
    def __init__(self, parent = None):
        ASGNode.__init__(self)
        ATOM3Type.__init__(self)
        # Visual class used by AToM3 to draw instances of this link.
        self.graphClass_ = graph_primitive2connection
        self.isGraphObjectVisual = True
        # Hierarchy flags are optional on the base classes, hence hasattr.
        if(hasattr(self, '_setHierarchicalLink')):
            self._setHierarchicalLink(False)
        if(hasattr(self, '_setHierarchicalNode')):
            self._setHierarchicalNode(False)
        self.parent = parent
        self.generatedAttributes = { }
        self.realOrder = []
        self.directEditing = []
    def clone(self):
        # Deep copy: each attribute value is itself cloned.
        cloneObject = primitive2connection( self.parent )
        for atr in self.realOrder:
            cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
        ASGNode.cloneActions(self, cloneObject)
        return cloneObject
    def copy(self, other):
        # Shallow copy: attribute values are shared with `other`.
        ATOM3Type.copy(self, other)
        for atr in self.realOrder:
            self.setAttrValue(atr, other.getAttrValue(atr) )
        ASGNode.copy(self, other)
    # The four hooks below delegate to the attached graphical object when
    # one exists; otherwise they report "no condition/action" via None.
    def preCondition (self, actionID, * params):
        if self.graphObject_:
            return self.graphObject_.preCondition(actionID, params)
        else: return None
    def postCondition (self, actionID, * params):
        if self.graphObject_:
            return self.graphObject_.postCondition(actionID, params)
        else: return None
    def preAction (self, actionID, * params):
        if self.graphObject_:
            return self.graphObject_.preAction(actionID, params)
        else: return None
    def postAction (self, actionID, * params):
        if self.graphObject_:
            return self.graphObject_.postAction(actionID, params)
        else: return None
    def QOCA(self, params):
        """
        QOCA Constraint Template
        NOTE: DO NOT select a POST/PRE action trigger
        Constraints will be added/removed in a logical manner by other mechanisms.
        """
        return # <--- Remove this if you want to use QOCA
        # Get the high level constraint helper and solver
        from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
        oc = OffsetConstraints(self.parent.qocaSolver)
        # Constraint only makes sense if there exists 2 objects connected to this link
        if(not (self.in_connections_ and self.out_connections_)): return
        # Get the graphical objects (subclass of graphEntity/graphLink)
        graphicalObjectLink = self.graphObject_
        graphicalObjectSource = self.in_connections_[0].graphObject_
        graphicalObjectTarget = self.out_connections_[0].graphObject_
        objTuple = (graphicalObjectSource, graphicalObjectTarget, graphicalObjectLink)
        """
        Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
        For more types of constraints
        """
        oc.LeftExactDistance(objTuple, 20)
        oc.resolve() # Resolve immediately after creating entity & constraint
|
[
"mahodnett@crimson.ua.edu"
] |
mahodnett@crimson.ua.edu
|
39643dc2ed9ecf04dec6ff9dde56590ba88e04a0
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_relevel.py
|
cd2911c2ef668d093498efb7a014986253e94b6f
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986
| 2020-09-23T08:36:59
| 2020-09-23T08:36:59
| 297,947,978
| 2
| 0
|
Apache-2.0
| 2020-09-23T11:28:54
| 2020-09-23T11:28:54
| null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
def h2o_H2OFrame_relevel():
    """
    Python API test: h2o.frame.H2OFrame.relevel(y)

    Builds a random integer frame, converts it to factors, promotes the
    last level of column 0 to the base (zero) level, and verifies that
    the level ordering actually changed.
    """
    raw = np.random.randint(-5,5, (100, 2))
    frame = h2o.H2OFrame(python_obj=raw).asfactor()
    levels_before = frame.levels()
    last_idx = len(levels_before[0])-1
    target = levels_before[0][last_idx]
    # Promote the last level of the first column to position zero.
    frame[0] = frame[0].relevel(target)
    levels_after = frame.levels()
    assert levels_before != levels_after, "h2o.H2OFrame.relevel() command is not working."
    assert levels_after[0][0]==levels_before[0][last_idx], "h2o.H2OFrame.relevel() command is not working."
if __name__ == "__main__":
    # Bug fix: pass the test function itself.  The original wrote
    # `standalone_test(h2o_H2OFrame_relevel())`, which ran the test
    # immediately and handed the harness its None return value, so
    # standalone_test() had no callable to run under its h2o-cluster
    # setup/teardown.
    pyunit_utils.standalone_test(h2o_H2OFrame_relevel)
else:
    h2o_H2OFrame_relevel()
|
[
"noreply@github.com"
] |
Winfredemalx54.noreply@github.com
|
20441db34cdbb7bf136e9eb5365a49f9a7aa8058
|
65fce73a1e6a36718238cdef09a17493b19532a0
|
/16/test/test_document_frequency_response_entity.py
|
c5787167fde15302326b94889bfb8d06f4cdb9e8
|
[
"Apache-2.0"
] |
permissive
|
apitore/apitore-sdk-python
|
eb419589609efb86bd279cd1733c2a03cdc03680
|
c0814c5635ddd09e9a20fcb155b62122bee41d33
|
refs/heads/master
| 2020-03-21T10:06:34.557781
| 2018-06-23T21:26:27
| 2018-06-23T21:26:27
| 138,434,217
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
# coding: utf-8
"""
Document frequency APIs
Document frequency of Wikipedia.<BR />[Endpoint] https://api.apitore.com/api/16 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.document_frequency_response_entity import DocumentFrequencyResponseEntity # noqa: E501
from swagger_client.rest import ApiException
class TestDocumentFrequencyResponseEntity(unittest.TestCase):
    """DocumentFrequencyResponseEntity unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures needed yet.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testDocumentFrequencyResponseEntity(self):
        """Test DocumentFrequencyResponseEntity"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.document_frequency_response_entity.DocumentFrequencyResponseEntity() # noqa: E501
        pass
# Run the stubs with unittest's CLI runner when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"keigoht@gmail.com"
] |
keigoht@gmail.com
|
cd5c3d30b568aaa4322afa96b7a059e9f2d2c049
|
68d9fffda9c1ee0f4819371067adfd4985332319
|
/python/108.将有序数组转换为二叉搜索树.py
|
987eb6ba870034bcba75c49281ce7e0aac793c31
|
[
"MIT"
] |
permissive
|
Wanger-SJTU/leetcode-solutions
|
ade9486cef05ede6fa44cbbb5d726037518fac15
|
eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4
|
refs/heads/master
| 2023-04-11T19:56:13.561234
| 2021-05-10T12:00:28
| 2021-05-10T12:00:28
| 129,606,869
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sortedArrayToBST(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode
        """
        return self.buildBST(nums)

    def buildBST(self, nums):
        # The middle element becomes the root, so the two halves differ
        # in size by at most one — the tree stays height-balanced.
        if not nums:
            return None
        centre = len(nums) // 2
        root = TreeNode(nums[centre])
        root.left = self.buildBST(nums[:centre])
        root.right = self.buildBST(nums[centre + 1:])
        return root
|
[
"howiechen@sjtu.edu.cn"
] |
howiechen@sjtu.edu.cn
|
00b5fbd1b10e3a326da7fa05e534df9b62574feb
|
5fd32c6c52fda2739f3423be9d11fb932611bea5
|
/Python/bosch/bbs_mop_crawl.py
|
c13f3b74571579a28adc094bd5c4d05c085f4bd4
|
[] |
no_license
|
luogangyi/Spider
|
446765886c328602cd4224bfe6b7b5e51633a7d7
|
e703f69399854331a141a2f2270d4e9b9e2c63e3
|
refs/heads/master
| 2021-01-18T18:13:03.048432
| 2014-06-10T17:21:46
| 2014-06-10T17:21:46
| 11,663,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
#!/usr/bin/python
#-*-coding:utf-8-*-
from config import *
from bbs_utils import *
from utils import *
from baidu import Baidu
from goole_search import Google
MOP_INFO_SOURCE_ID = 20


def main():
    """Crawl dzh.mop.com BBS pages via the Baidu and Google site-search
    spiders; record and log any failure against this source id."""
    # Bug fix: the original passed the *builtin* ``id`` function to the
    # spiders and to store_error() instead of the module's source
    # identifier MOP_INFO_SOURCE_ID defined above.
    # (``except Exception as e`` replaces the Python-2-only comma form.)
    try:
        obj = Baidu(MOP_INFO_SOURCE_ID, 'dzh.mop.com', 'bbs')
        obj.main()
    except Exception as e:
        store_error(MOP_INFO_SOURCE_ID)
        bbs_logger.exception(e)
    try:
        obj = Google(MOP_INFO_SOURCE_ID, 'dzh.mop.com', 'bbs')
        obj.main()
    except Exception as e:
        store_error(MOP_INFO_SOURCE_ID)
        bbs_logger.exception(e)


if __name__ == '__main__':
    main()
|
[
"lgy181@qq.com"
] |
lgy181@qq.com
|
38bbe3bd03f55ab1849071c514c13807a7883541
|
488de2f3cadeb866ccbe4064411f7db5d3dc3a57
|
/ttt.py
|
252be389ef5f8e434dbf58c94879cffea666b1a6
|
[] |
no_license
|
yp000925/Holo_synthetic
|
8a23d587462e79efe5ba27f8c0a6ad8d9fc028b9
|
711b449bd6295a95d2a2a6e73fcea47c8058dad4
|
refs/heads/master
| 2023-08-30T04:39:16.820671
| 2021-10-31T04:14:08
| 2021-10-31T04:14:08
| 317,855,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,162
|
py
|
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
import pandas as pd
from PIL import Image
class myDataloader(Dataset):
"""
Dataset for 3D particle detection using capsule net
"""
def __init__(self, root_dir, file_name = 'train_data.csv', transform = None, size=1024):
'''
:param holo_dir: directory for holograms
:param depthmap_dir: directory for depthmap
:param xycentre_dir: directory for xycentre
:param file_name: file_name
:param transform:
'''
# self.holo_dir = 'holo_dir'
# self.depthmap_dir = 'depthmap_dir'
# self.xycentre_dir = xycentre_dir
self.root_dir = root_dir
self.file_name = file_name
self.transform = transform
self.file = pd.read_csv(os.path.join(root_dir,file_name))
self.N =size
def __getitem__(self, idx):
data = self.file.iloc[idx]
holo_path = os.path.join(self.root_dir, 'hologram', data['hologram'])
param_path = os.path.join(self.root_dir, 'param', data['param'])
img = self.read_img(holo_path)
param = self.load_param(param_path)
size_projection,xycentre,xy_mask = self.get_maps(param)
return img,size_projection,xycentre,xy_mask
def __len__(self):
return len(self.file)
def get_maps(self,param):
size_projection, xy_mask = self.get_xy_projection(param)
xycentre = self.get_xycentre(param)
return (size_projection,xycentre,xy_mask)
def get_xy_projection(self,param):
"""
:param param: px,py,pz,psize stored in dataframe
:return: map: the xy_projection map, the pixel value is the corresponding depth, range from 0-1
mask: the indication map for overlapping 0: the overlap exists -> ignored when calculate the loss
"""
arr = np.zeros((256,self.N,self.N))
particle_field = np.zeros(arr.shape) # one stands for the exist of particle
for _,particle in param.iterrows():
px,py,pz,psize = particle.x,particle.y,particle.z,particle.size
Y, X = np.mgrid[:self.N, :self.N]
Y = Y - py
X = X - px
dist_sq = Y ** 2 + X ** 2
z_slice = np.zeros((self.N,self.N))
particle_field_slice = np.zeros((self.N,self.N))
z_slice[dist_sq <= psize ** 2] = pz
particle_field_slice[dist_sq <= psize ** 2] = 1
arr[pz,:,:] += z_slice # 可能某个depth上面有多个particles
particle_field[pz,:,:] += particle_field_slice
map = arr.sum(axis=0)/255.0
# check whether there are overlapping
particle_field_proj = particle_field.sum(axis=0)
mask_map = np.ones((self.N,self.N))
mask_map[particle_field_proj>1] = 0 #在后面计算loss的时候,只计算没有overlap的pixel,即mask里面为0的情况忽略
return map, mask_map
def get_xycentre(self,param):
arr = np.zeros((self.N, self.N))
idx_x = np.array(param['x'].values)
idx_y = np.array(param['y'].values)
arr[(idx_y,idx_x)] = 1.0
return arr
def load_param(self,param_path):
param = pd.read_csv(param_path)
x = param['x'].values
y = param['y'].values
z = param['z'].values
size = param['size']
frame = 10 * 1e-3
N=1024
xyres = frame/N
px = (x / frame * N + N / 2).astype(np.int)
py = (N / 2 + y / frame * N).astype(np.int)
pz = ((z - 1 * 1e-2)/ (3 * 1e-2 - 1 * 1e-2)*255).astype(np.int)
psize = (size/xyres).astype(np.int)
param_pixel = pd.DataFrame()
param_pixel['x'] = px
param_pixel['y'] = py
param_pixel['z'] = pz
param_pixel['size'] = psize
return param_pixel
def read_img(self,img_name):
img = Image.open(img_name)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype(np.float32)
return img/255.0
if __name__ == "__main__":
    # Smoke test: build the dataset from a hard-coded local copy and pull
    # one batch through a torch DataLoader.
    root_dir ='/Users/zhangyunping/PycharmProjects/Holo_synthetic/data_holo'
    file_path = 'check.csv'
    dataset = myDataloader(root_dir,file_path)
    # img = Image.open("/Users/zhangyunping/PycharmProjects/Holo_synthetic/data_holo/hologram/0.jpg")
    # param = dataloader.load_param(root_dir + '/param/0.csv')
    # img = dataloader.read_img(root_dir + '/hologram/0.jpg')
    # size_projection, xycentre, xy_mask = dataloader.get_maps(param)
    # size_p = Image.fromarray(size_projection*255.0)
    # size_p.show()
    # xyc = Image.fromarray(xycentre*255.0)
    # xyc.show()
    # xy_m = Image.fromarray(xy_mask*255.0)
    # xy_m.show()
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                             num_workers=1)
    # Fetch a single sample and stop — just checks the pipeline runs.
    for data in dataloader:
        img, size_projection, xycentre, xy_mask = data
        break
|
[
"yp000925@outlook.com"
] |
yp000925@outlook.com
|
82908ae8ac24e79217bf0b66161e59606ee3b4f4
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/dataexchange_read_2/revision_get.py
|
aa8e9858a1a6eee389251c26211af7da94e53e10
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/get-revision.html
# Entry point: forwards the two required identifiers to the shared
# two-parameter AWS CLI wrapper for `aws dataexchange get-revision`.
if __name__ == '__main__':
    """
    create-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/create-revision.html
    delete-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/delete-revision.html
    update-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/update-revision.html
    """
    # Help text shown to the user describing the two required parameters.
    parameter_display_string = """
# data-set-id : The unique identifier for a data set.
# revision-id : The unique identifier for a revision.
    """
    execute_two_parameter("dataexchange", "get-revision", "data-set-id", "revision-id", parameter_display_string)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
ed2dee8a9a297a14b1e6a0827a7ecca5e8a197c7
|
f3553f36a248d5e2a30713af68dd714df90953d7
|
/kuaishou/1.py
|
51178e065731b5fe3e9606a854b3219244ac41fe
|
[] |
no_license
|
Mrzhouqifei/offfer
|
8a699653850cf6cc91ed5a622ad166fd61b8e294
|
4c73e7a591e79348471e00272dcb8e1b5cc6d7cb
|
refs/heads/master
| 2023-04-09T05:58:49.858037
| 2020-12-30T06:13:52
| 2020-12-30T06:13:52
| 298,285,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Count matched parenthesis pairs plus the unmatched '(' and ')' left over.
# Bug fix: the original read `s = str(input().split())`, i.e. iterated over
# the *repr of a token list* ("['(', ...]").  It only worked because the
# bracket/quote/comma noise was filtered out below; reading the raw line
# directly is equivalent for the paren counts and far clearer.
s = input()
match, left, right = 0, 0, 0
stack = []
for x in s:
    if x == '(' or x == ')':
        stack.append(x)
# Scan right-to-left: `right` counts ')' seen so far (to the right of the
# current character), so each popped '(' can greedily close one of them.
while len(stack) > 0:
    t = stack.pop()
    if t == '(':
        if right > 0:
            match += 1      # this '(' pairs with a ')' to its right
            right -= 1
        else:
            left += 1       # unmatched '('
    elif t == ')':
        right += 1
# Output: matched pairs, unmatched '(', unmatched ')'.
print(match, end=' ')
print(left, end=' ')
print(right)
|
[
"18401620071@163.com"
] |
18401620071@163.com
|
c2b218be5ab2b6c61f063656e3d0cc3fad868684
|
0fd49b4779351c68bbe51ee978939f39c8e57d7c
|
/400-1000/412-Fizz Buzz.py
|
d10f822657cd03fff2f4df88968aae90c1ba0e31
|
[] |
no_license
|
jia0713/leetcode
|
8f632b96d0bc900cf4357ab1b8affd6068964dec
|
13565941f16c74d32124020285ce887a4cb31b27
|
refs/heads/master
| 2023-06-17T20:41:47.185832
| 2021-07-16T09:42:39
| 2021-07-16T09:42:39
| 247,866,418
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
class Solution(object):
    def fizzBuzz(self, n):
        """
        :type n: int
        :rtype: List[str]
        """
        # Start from the numeric strings for 0..n, then overwrite the
        # multiples of 3 and 5 in two strided passes.  Index 0 is only a
        # placeholder and is dropped from the returned slice.
        out = [str(v) for v in range(n + 1)]
        for m in range(0, n + 1, 3):
            out[m] = "Fizz"
        for m in range(0, n + 1, 5):
            out[m] = "FizzBuzz" if out[m] == "Fizz" else "Buzz"
        return out[1:]
|
[
"pkuljn@gmail.com"
] |
pkuljn@gmail.com
|
e719ea9ed023608f7635c6fd8bf85b0b352cde9c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03797/s857928689.py
|
5a492d61ed9095cbcb8fca6a600a3c13717f356e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
n, m = map(int, input().split())
# Same arithmetic as before, written as a single branch: when 2*n fits in
# m, the answer is n plus the leftover (m - 2*n) grouped by fours;
# otherwise only m // 2 can be counted.
if 2 * n <= m:
    print(n + (m - 2 * n) // 4)
else:
    print(m // 2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3f800f7039c5fc6489b128bf37624959ce17273a
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/eve/client/script/ui/shared/planet/pinContainers/LaunchpadContainer.py
|
614755276a054e01a0a618a178a4f59d06d3a490
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 2,664
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\planet\pinContainers\LaunchpadContainer.py
import carbonui.const as uiconst
from eve.client.script.ui.control.eveLabel import Label
import uiprimitives
import util
import localization
from .BasePinContainer import BasePinContainer
from .StorageFacilityContainer import StorageFacilityContainer
from .. import planetCommon
class LaunchpadContainer(StorageFacilityContainer):
    """Planet-UI pin container for launchpad pins: adds a Launch panel on
    top of the inherited storage-facility behaviour."""
    __guid__ = 'planet.ui.LaunchpadContainer'
    default_name = 'LaunchpadContainer'

    def ApplyAttributes(self, attributes):
        BasePinContainer.ApplyAttributes(self, attributes)

    def _GetActionButtons(self):
        # Prepend Launch and Storage panels to the base container's buttons.
        btns = [util.KeyVal(id=planetCommon.PANEL_LAUNCH, panelCallback=self.PanelLaunch), util.KeyVal(id=planetCommon.PANEL_STORAGE, panelCallback=self.PanelShowStorage)]
        btns.extend(BasePinContainer._GetActionButtons(self))
        return btns

    def PanelLaunch(self):
        # Try to open the customs-office import window for this pin; if
        # that is not possible, fall through and show an explanatory label.
        bp = sm.GetService('michelle').GetBallpark()
        text = None
        if bp is not None and not self.pin.IsInEditMode():
            customsOfficeIDs = sm.GetService('planetInfo').GetOrbitalsForPlanet(sm.GetService('planetUI').planetID, const.groupPlanetaryCustomsOffices)
            if len(customsOfficeIDs) > 0:
                try:
                    # Pick the first customs office orbiting this planet.
                    customsOfficeID = None
                    for ID in customsOfficeIDs:
                        customsOfficeID = ID
                        break
                    sm.GetService('planetUI').OpenPlanetCustomsOfficeImportWindow(customsOfficeID, self.pin.id)
                    self.CloseByUser()
                    return
                except UserError as e:
                    # Translate the failure into user-facing text.
                    if e.msg == 'ShipCloaked':
                        text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadWhileCloaked')
                    else:
                        message = cfg.GetMessage(e.msg)
                        text = message.text
        if text is None:
            # No customs office / edit mode / wrong location explanations.
            if self.pin.IsInEditMode():
                text = localization.GetByLabel('UI/PI/Common/CustomsOfficeNotBuilt')
            else:
                solarSystemID = sm.GetService('planetUI').GetCurrentPlanet().solarSystemID
                if solarSystemID == session.locationid:
                    text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadNotThere')
                else:
                    text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadLocation')
        return Label(parent=self.actionCont, text=text, align=uiconst.TOTOP)
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
4e16ccc77fd56253143c198ecaa008a328bcd0b8
|
f0fa96d39a66c3ddaae4266442a13ec3feb7a462
|
/dynaminc_programing/perfectSquare.py
|
7b3a7b89619c22b047ff08c46b1d7e59fa335c19
|
[] |
no_license
|
ashishgupta2014/problem_solving_practices
|
14d587e98d9996a95efe822335ca4baccb39b1a1
|
bc4f4b07e1e33273010e34428e0c31d2d6656c14
|
refs/heads/master
| 2023-04-26T03:47:40.766508
| 2021-06-07T04:55:52
| 2021-06-07T04:55:52
| 298,063,915
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
import math
def perfectSquare(n):
    """Return the least number of perfect squares that sum to n (LeetCode 279).

    dp[i] is the minimum count for i; each i is improved by every square
    not exceeding it.  For n < 3 the answer is n itself (0->0, 1->1, 2->1+1).
    """
    if (n < 3): return n
    # Candidate squares 1..floor(sqrt(n))**2.  The original started the
    # range at 0, adding a useless 0**2 candidate that could never shrink
    # a remainder — dropped here.
    square_nums = [i ** 2 for i in range(1, int(math.sqrt(n)) + 1)]
    dp = [float('inf')] * (n + 1)
    dp[0] = 0
    for i in range(1, n + 1):
        for square in square_nums:
            if (i < square): break
            dp[i] = min(dp[i], dp[i - square] + 1)  # +1 for the square used
    return dp[-1]
# Demo: 12 = 4 + 4 + 4, so the expected output is 3.
print(perfectSquare(12))
|
[
"ashish.2007g@gmail.com"
] |
ashish.2007g@gmail.com
|
5c9e8206af3d623bc4bcb23dcb9e1c079e59e878
|
bf7959048edc0005e04431a0864c719adc5ea9ea
|
/python版本/6038-MinimizeResult.py
|
e33698a2a56562956fcdb3f35ab04e87657c7df2
|
[] |
no_license
|
Yohager/Leetcode
|
7c24f490cfa5fd8e3cdb09e5a2305a134a064a93
|
585af82ff2c2d534053f6886714406019ed0c7d1
|
refs/heads/master
| 2022-12-07T23:51:16.347174
| 2022-11-28T02:30:53
| 2022-11-28T02:30:53
| 178,201,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
class Solution:
    def minimizeResult(self, e: str) -> str:
        """Insert one '(' into the left operand and one ')' into the right
        operand of "<num>+<num>" so the arithmetic value is minimized, and
        return the expression with the parentheses inserted (LeetCode 2232).

        Fix: ``eval(cur)`` was recomputed up to twice per candidate; it is
        now evaluated once and stored.  (eval is safe here only because the
        input is constrained to digits and a single '+'.)
        """
        n = len(e)
        arr = e.split('+')
        l1, l2 = len(arr[0]), len(arr[1])
        init = eval(e)
        res = float('inf')
        p1, p2 = -1, -1
        for i in range(l1):                    # '(' goes just before e[i]
            for j in range(n, n - l2, -1):     # ')' goes just after e[j-1]
                # Digits left outside the parentheses multiply the bracketed
                # sum, so the '*' must be made explicit for eval().
                if i == 0 and j == n:
                    cur = e[:i] + '(' + e[i:j] + ')' + e[j:]
                elif i == 0 and j != n:
                    cur = e[:i] + '(' + e[i:j] + ')*' + e[j:]
                elif j == n and i != 0:
                    cur = e[:i] + '*(' + e[i:j] + ')' + e[j:]
                else:
                    cur = e[:i] + '*(' + e[i:j] + ')*' + e[j:]
                val = eval(cur)                # evaluated exactly once
                if val < res:
                    p1, p2, res = i, j, val
        if init < res:
            return '(' + e + ')'
        else:
            return e[:p1] + '(' + e[p1:p2] + ')' + e[p2:]
|
[
"guoyuhang0921@gmail.com"
] |
guoyuhang0921@gmail.com
|
de40442e18ca727417a8eb58201487d77ae1f7eb
|
23107f38f7c28da5e2e5e51f6eda3ba6b5b9a2ff
|
/kitchen_project/settings.py
|
b1c6eb6dddc924b8e595337e856f15b714f1cb08
|
[] |
no_license
|
sarigu/kitchen_app
|
fe818aca3fb0605c185fe9ab0b496ea4e0bca0c7
|
f2eacf907eb75afd4cecd1cdce19900230b8fb33
|
refs/heads/master
| 2023-02-22T19:34:41.094263
| 2021-01-25T09:24:01
| 2021-01-25T09:24:01
| 315,796,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
"""
Django settings for kitchen_project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to version control —
# rotate it and load it from an environment variable before deploying.
SECRET_KEY = 'g^w2u@*@88^s-*v%u&2z^th@ug*!_md54943ppa7swu09+fz!3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_rq',
    'login_app',
    'kitchen_app',
    'channels',
    'chat',
    'api',
    'rest_framework',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'loginrequired_middleware.middleware.LoginRequiredMiddleware'
]
ROOT_URLCONF = 'kitchen_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'kitchen_project.wsgi.application'
# Channels (websocket) stack: ASGI entry point plus a local Redis layer.
ASGI_APPLICATION = "kitchen_project.asgi.application"
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            "hosts": [('127.0.0.1', 6379)],
        },
    },
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# URL prefixes exempt from the login requirement — presumably consumed by
# LoginRequiredMiddleware above; verify against that middleware's code.
EXCLUDED_URLS = (
    'api/images/',
    'api/id/<int:id>/',
    'admin/',
    'admin/login/',
    'accounts/login/',
    'accounts/logout/',
    'accounts/sign_up/',
    'accounts/request_password_reset/',
    'accounts/set_new_password/',
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# django-rq background-job queue, backed by the same local Redis instance.
RQ_QUEUES = {
    'default': {
        'HOST': 'localhost',
        'PORT': '6379',
        'DB': 0,
        'DEFAULT_TIMEOUT': 360,
    }
}
# EMAIL SETTINGS
# NOTE(review): SMTP credentials are hard-coded and committed to version
# control — revoke this password and load credentials from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp-relay.sendinblue.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'sarigucki@gmail.com'
EMAIL_HOST_PASSWORD = 'MqFtvLHkhNJXw2c6'
|
[
"sariguci@Saris-MacBook-Pro.local"
] |
sariguci@Saris-MacBook-Pro.local
|
8e1eaca2c534ab590ef058f10c521bcab1b4c678
|
6443a587e16658a58b884a2e5c6dbbab1be50674
|
/Leetcode/Unique Binary Search Trees.py
|
c83974a73a1bea84808319b93ca6f42ec0b06328
|
[] |
no_license
|
xiaochenchen-PITT/CC150_Python
|
a6cbe213946851639a827068961934920b6c3e57
|
e96394265d8a41a1b4558d5d2b34aa34af99662f
|
refs/heads/master
| 2020-12-24T17:18:14.606804
| 2014-11-08T21:48:20
| 2014-11-08T21:48:20
| 25,654,100
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
# Unique Binary Search Trees
class Solution:
    # @return an integer
    def numTrees(self, n):
        """Count structurally unique BSTs over values 1..n (Catalan number).

        Fixes two defects in the original:
        * the method was missing ``self`` (the commented-out recursive
          variant it shipped with called ``self.numTrees``);
        * the inner loop did ``sm += ...; res += sm``, accumulating the
          *running sum* instead of the products — e.g. numTrees(2)
          returned 3 rather than 2.

        DP: trees(i) = sum over j of trees(j) * trees(i-1-j), where j nodes
        go left and i-1-j go right; the sides are independent, so multiply.
        (The recursive formulation is the same recurrence without the memo.)
        """
        mp = {0: 1, 1: 1}  # key: n, value: number of different structures
        if n in mp:
            return mp[n]
        for i in range(2, n + 1):  # i nodes
            mp[i] = sum(mp[j] * mp[i - 1 - j] for j in range(i))
        return mp[n]
|
[
"cxc0520@hotmail.com"
] |
cxc0520@hotmail.com
|
50ab018c1be8d2a4d8012cffc93a214ded31a1c8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03339/s461187106.py
|
117afdfcdef0aa4d725b5db1581f92a01b0ab81a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
n = int(input())
s = input()
# Simultaneous scan from both ends:
#   wl[i] = number of 'W' in s[:i]        (built from the forward char f)
#   el[i] = number of 'E' in the last i characters of s (from reversed r)
ec = 0
wc = 0
el = []
wl = []
for f,r in zip(s,reversed(s)):
    el.append(ec)
    wl.append(wc)
    if f == 'W':
        wc += 1
    if r == 'E':
        ec += 1
# For split position i: cost = W's strictly before i plus E's strictly
# after i; reversed(el)[i] == el[n-1-i] is exactly that 'E' count.
ans = n
for e,w in zip(wl, reversed(el)):
    ans = min(ans, e+w)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
60d9374afa434145e400c9430c0c5b40ef4a1df4
|
0b529ba1efe44c47b540dd22a7fd9cc6a73f907f
|
/src/1300-1400/_1344_angle-between-hands-of-a-clock.py
|
f352f9345e2663c99a674b740103ff27b7269469
|
[] |
no_license
|
alexparunov/leetcode_solutions
|
b9445a02182bc61f490257328a1960c2a627d7bc
|
bc19dbcc903782f91846d5b9d73a7ffb9b2f002d
|
refs/heads/master
| 2022-11-28T21:10:24.875260
| 2020-08-15T12:42:40
| 2020-08-15T12:42:40
| 261,517,109
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
"""
https://leetcode.com/problems/angle-between-hands-of-a-clock/
"""
class Solution:
    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle (degrees) between the two clock hands."""
        # The hour hand sweeps 0.5 degrees per minute (360/720); the
        # minute hand sweeps 6 degrees per minute (360/60).
        hour_hand = (hour % 12) * 30 + minutes * 0.5
        minute_hand = minutes * 6
        gap = abs(hour_hand - minute_hand)
        # The two hands split the dial into gap and 360 - gap.
        return min(gap, 360 - gap)
|
[
"alexander.parunov@heyjobs.de"
] |
alexander.parunov@heyjobs.de
|
bd8669f2afe46f47983bf9b249cef07baa413cf6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03000/s283498849.py
|
4b5184d2bd4bb37f346be633557e0c253010dab9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
def main():
    """Read N and X, then count how many prefix sums of the N lengths
    (including the starting position 0) do not exceed X."""
    _, limit = (int(tok) for tok in input().split())
    lengths = [int(tok) for tok in input().split()]
    position = 0
    reached = 0
    for step in [0] + lengths:
        position += step
        if limit < position:
            break
        reached += 1
    print(reached)


if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7e75de29e7392a2689f4241b3e42ee1e2d5a54a7
|
0c8214d0d7827a42225b629b7ebcb5d2b57904b0
|
/examples/matplotlib/E001_Basics/main.py
|
85b515aeab4272d66c0f3674054cc913aa4f050a
|
[] |
no_license
|
mertturkmenoglu/python-examples
|
831b54314410762c73fe2b9e77aee76fe32e24da
|
394072e1ca3e62b882d0d793394c135e9eb7a56e
|
refs/heads/master
| 2020-05-04T15:42:03.816771
| 2020-01-06T19:37:05
| 2020-01-06T19:37:05
| 179,252,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import matplotlib.pyplot as mpl
def fib(n: int) -> list:
    """Return the first *n* Fibonacci numbers: [1, 1, 2, 3, 5, ...].

    Fixes two edge defects in the original, which returned n-1 terms for
    n >= 3 (the loop ran range(3, n) after seeding two terms) and a
    hard-coded [1, 1] for every n < 3 — including n = 0 and n = 1.
    """
    if n <= 0:
        return []
    result = [1]
    a, b = 1, 1
    # Each iteration appends the next term and advances the (a, b) window.
    for _ in range(n - 1):
        result.append(b)
        a, b = b, a + b
    return result
if __name__ == '__main__':
    # Plot the Fibonacci terms against their 1-based index.
    y_values = fib(10)
    x_values = [i + 1 for i in range(len(y_values))]
    mpl.plot(x_values, y_values)
    mpl.xlabel('No')
    mpl.ylabel('Values')
    mpl.show()
|
[
"mertturkmenoglu99@gmail.com"
] |
mertturkmenoglu99@gmail.com
|
a4f3ed80aaf08dd5a18b2c21b6803d9b7bd49b9b
|
ddd4edc45481e6a7c7141b93e47b974634506d2d
|
/tradgram/chatrooms/serializers.py
|
b371a510b3309043cb8b9ef1ab0734ad2bea6c3c
|
[
"MIT"
] |
permissive
|
didils/tradgram
|
407de9d05d01bc840c5c165155d370f092d82f0d
|
4868ca082ab78a1b5b96f25ee9f958567bd1bb1e
|
refs/heads/master
| 2021-11-19T02:47:02.224088
| 2019-04-05T08:19:14
| 2019-04-05T08:19:14
| 148,162,588
| 0
| 0
|
MIT
| 2021-09-08T00:57:43
| 2018-09-10T13:49:57
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
from rest_framework import serializers
from . import models
from tradgram.users import models as user_models
class ChatRoomSerializer(serializers.ModelSerializer):
    # Serializes a ChatRoom: the two participants plus the latest-message
    # bookkeeping fields defined on models.ChatRoom.
    class Meta:
        model = models.ChatRoom
        fields = (
            'user1',
            'user2',
            'last_message',
            'new_message'
        )
|
[
"didils1982@gmail.com"
] |
didils1982@gmail.com
|
3fc82e87b1bddde9014a48c4e580873adf678bc4
|
a367a015dbc36287ca933955ded1ee58b5a2a61a
|
/swagger_client/models/disease_group.py
|
776059fb3fa87b5485cc3d698aca7fb81e4dba90
|
[] |
no_license
|
kerniee/inno_intership_1_test_task
|
70211e153450011c427df595a02e3574dfe7ed9f
|
fc0619ef54b00806a3b59f3c07c1c1684682d65b
|
refs/heads/master
| 2023-05-23T02:24:40.083723
| 2021-06-21T16:15:04
| 2021-06-21T16:15:04
| 365,855,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,717
|
py
|
# coding: utf-8
"""
Teleagronom
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiseaseGroup(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Generated model for the Teleagronom API: a disease group with a
    # required integer id and a required string name.
    swagger_types = {
        'id': 'int',
        'name': 'str'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name'
    }

    def __init__(self, id=None, name=None):  # noqa: E501
        """DiseaseGroup - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._name = None
        self.discriminator = None
        # Assignment goes through the property setters below, which reject None.
        self.id = id
        self.name = name

    @property
    def id(self):
        """Gets the id of this DiseaseGroup.  # noqa: E501

        :return: The id of this DiseaseGroup.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this DiseaseGroup.

        :param id: The id of this DiseaseGroup.  # noqa: E501
        :type: int
        """
        # id is a required field in the swagger spec, hence the None check.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id

    @property
    def name(self):
        """Gets the name of this DiseaseGroup.  # noqa: E501

        :return: The name of this DiseaseGroup.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this DiseaseGroup.

        :param name: The name of this DiseaseGroup.  # noqa: E501
        :type: str
        """
        # name is a required field in the swagger spec, hence the None check.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared swagger attributes and recursively serialize
        # nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(DiseaseGroup, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DiseaseGroup):
            return False
        # Attribute-wise comparison of the two instances.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"karapys.mor@gmail.com"
] |
karapys.mor@gmail.com
|
c565643c4f2c79d599b5eb9b424e914fcb11f621
|
88906fbe13de27413a51da917ebe46b473bec1b9
|
/Part-II/Project-2-Data-Visualisation/Chapter 15 - Generating Data/random_walk_2.py
|
a41d63019aa7797ec13e4ca91ffd50709a3776ab
|
[] |
no_license
|
lonewolfcub/Python-Crash-Course
|
0b127e40f5029d84ad036263fd9153f6c88c2420
|
322388dfb81f3335eeffabcdfb8f9c5a1db737a4
|
refs/heads/master
| 2021-01-01T16:45:50.617189
| 2017-10-27T14:23:58
| 2017-10-27T14:23:58
| 97,911,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
from random import choice
class RandomWalk():
    """Generate a two-dimensional random walk, point by point."""

    def __init__(self, num_points=5000):
        """Prepare a walk of num_points steps starting at the origin."""
        self.num_points = num_points
        # Every walk begins at (0, 0).
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Append points until the walk holds num_points coordinates."""
        while len(self.x_values) < self.num_points:
            # Draw direction then magnitude for each axis.  The call order
            # (x-direction, x-distance, y-direction, y-distance) matches the
            # original so seeded runs reproduce the same walk.
            dx = choice([1, -1]) * choice([0, 1, 2, 3, 4])
            dy = choice([1, -1]) * choice([0, 1, 2, 3, 4])
            # A zero step goes nowhere; redraw instead of recording it.
            if dx == 0 and dy == 0:
                continue
            # Extend the walk from its current endpoint.
            self.x_values.append(self.x_values[-1] + dx)
            self.y_values.append(self.y_values[-1] + dy)
|
[
"lonewolfcub020@gmail.com"
] |
lonewolfcub020@gmail.com
|
f9200b25f79758ec7d91ceee76d4b01687175579
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/twentyPercent/rank_3uzv_J.py
|
675fb8afa38dcf61c4544fa34ba93dc97dac281a
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
# Score one decoy set (3uzv) with a pre-trained classifier and rank its
# members; more native-like complexes receive higher scores / lower ranks.
# NOTE(review): all paths are hard-coded cluster paths — this script only
# runs on the original HPC environment.
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '3uzv.csv'
identifier = 'J'
coefFrac = 0.2  # fraction of coefficients (by magnitude) to keep
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
# Drop index artifacts and the reference column before transposing.
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
# Transpose so each row is one decoy and each column one descriptor.
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
# 14028 is the total descriptor count; +0.5 rounds to nearest integer.
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize inut DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
e78096450e3762e13172fbb51ef0a06a34d1680c
|
957e5aef8b48cf21804d51447ed93a026aab35ff
|
/script/chk_dup.py
|
1fdcc86fc2f8ac8af76c1ce69c5e116ae660a27d
|
[
"Apache-2.0"
] |
permissive
|
dannysauer/oidctest
|
045a438ee934b5c9e27aae9876765e08dac16a37
|
e7593e02af7caa71f92220ad0f5b67bb40e30f97
|
refs/heads/master
| 2021-07-08T07:36:30.362597
| 2020-05-14T07:21:25
| 2020-05-14T07:21:25
| 152,679,266
| 0
| 0
|
NOASSERTION
| 2018-10-12T01:54:49
| 2018-10-12T01:54:49
| null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
#!/usr/bin/env python3
import json
# Load the issuer -> port assignment table.  Using a context manager fixes
# the original's leaked file handle, and json.load avoids the intermediate
# read()/loads round-trip.
with open('assigned_ports.json') as f:
    ap = json.load(f)

# Invert the mapping: port -> list of issuers assigned to that port.
inv = {}
for iss, port in ap.items():
    inv.setdefault(port, []).append(iss)

# Report every port claimed by more than one issuer (duplicates).
for port, iss in inv.items():
    if len(iss) != 1:
        print(port, iss)
|
[
"roland@catalogix.se"
] |
roland@catalogix.se
|
c8e318873904d5e634587d89ee920d2feffa58ee
|
6cc37dfc44880f57823bb9523ea5f8206d5e3f22
|
/python_OOP/labs_and_homeworks/07_solid_exercise/05_emails.py
|
adeddfd64439568ad2e5a90b04ba83bc9cc780b0
|
[] |
no_license
|
dimitar-daskalov/SoftUni-Courses
|
70d265936fd86712a7bfe0586ec6ebd1c7384f77
|
2054bc58ffb5f41ed86f5d7c98729b101c3b1368
|
refs/heads/main
| 2023-05-31T06:44:35.498399
| 2021-07-11T10:16:08
| 2021-07-11T10:16:08
| 322,896,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
# SRP (Single Responsibility Principle)
from abc import ABC, abstractmethod
class IEmail(ABC):
    """Abstract interface for an e-mail message.

    Implementations must accept a sender, a receiver and a content
    object (anything providing IContent.format_text).
    """
    @abstractmethod
    def set_sender(self, sender):
        pass

    @abstractmethod
    def set_receiver(self, receiver):
        pass

    @abstractmethod
    def set_content(self, content):
        pass
class Email(IEmail):
    """Concrete e-mail message; the 'IM' protocol introduces each party
    with an "I'm " prefix on its name."""

    def __init__(self, protocol):
        self.protocol = protocol
        self.__sender = None
        self.__receiver = None
        self.__content = None

    def __decorate(self, name):
        # Under the IM protocol, names are announced as "I'm <name>".
        return "I'm " + name if self.protocol == 'IM' else name

    def set_sender(self, sender):
        self.__sender = self.__decorate(sender)

    def set_receiver(self, receiver):
        self.__receiver = self.__decorate(receiver)

    def set_content(self, content):
        # Formatting is delegated to the content object itself.
        self.__content = content.format_text()

    def __repr__(self):
        return (f"Sender: {self.__sender}\n"
                f"Receiver: {self.__receiver}\n"
                f"Content:\n{self.__content}")
class IContent(ABC):
    """Abstract interface for message bodies that can render themselves
    as formatted text."""
    @abstractmethod
    def format_text(self):
        pass
class MyContent(IContent):
    """Message body rendered inside <myML> markup tags."""

    def __init__(self, text):
        self.text = text

    def format_text(self):
        # Opening tag, body and closing tag each on their own line.
        return '<myML>\n' + self.text + '\n</myML>'
class HTMLContent(IContent):
    """Message body rendered inside an HTML <div> element."""

    def __init__(self, text):
        self.text = text

    def format_text(self):
        # Opening tag, body and closing tag each on their own line.
        return '<div>\n' + self.text + '\n</div>'
# Demo: build an IM-protocol email with myML-formatted content and print it.
email = Email('IM')
email.set_sender('qmal')
email.set_receiver('james')
content = MyContent('Hello, there!')
email.set_content(content)
print(email)
|
[
"dimitar.daskalov22@gmail.com"
] |
dimitar.daskalov22@gmail.com
|
03fc2be0614708dcfbee8c1d6b82759f19bcf7fc
|
59f4e4f57c4590b9fe969274960c49e7218ed275
|
/.venv/bin/ptw
|
6cb2d4a3f8ba91373047a8d4474bb0d5b0042e9d
|
[] |
no_license
|
MohamadSheikhAlshabab/math-series
|
be82710d0cb0e8784543ee097c569964dfb8a376
|
6fe5772e2b67beadebbf6d27676bbe5aa91bd367
|
refs/heads/master
| 2022-12-06T12:56:18.678827
| 2020-08-17T16:49:47
| 2020-08-17T16:49:47
| 288,155,962
| 0
| 0
| null | 2020-08-20T22:58:04
| 2020-08-17T10:56:20
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
#!/home/mohamad/401/math-series/.venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'pytest-watch==4.2.0','console_scripts','ptw'
__requires__ = 'pytest-watch==4.2.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any "-script.py(w)"/".exe" suffix so argv[0] matches the bare
    # console-script name (mirrors what setuptools-generated launchers do).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve and invoke the 'ptw' console entry point from pytest-watch.
        load_entry_point('pytest-watch==4.2.0', 'console_scripts', 'ptw')()
    )
|
[
"alshabab.moh@gmail.com"
] |
alshabab.moh@gmail.com
|
|
815a53ab6d3b0f60494ac49b3988449512470445
|
38da8edb2102ad29eda8784cbb845cac0b96bbca
|
/151_lambda_expression.py
|
f4ae995bf65597a88920a1d5cd79443c18b826fd
|
[] |
no_license
|
Prateek2201/Python_codes
|
1a655a3e6820e7ecb1fb8a8abd266a8ae0508cb5
|
436a36544edac80cbe420c7b9ddb718df46b68da
|
refs/heads/main
| 2023-08-01T03:10:51.864186
| 2021-09-17T18:08:40
| 2021-09-17T18:08:40
| 407,635,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
##def is_even(a):
## return a%2==0
##print(is_even(5))
##
##is_even2=lambda a:a%2==0
##print(is_even2(6))
##def last_char(s):
## return s[-1]
##print(last_char('Prateek'))
##
##last_char2=lambda s: s[-1]
##print(last_char2('Prateek'))
def f(s):
    """Return True when the string s is longer than five characters.

    The comparison itself already yields a bool, so the original's
    explicit if/return True/return False branch is unnecessary.
    """
    return len(s) > 5
print(f('Prateek'))
# Same predicate as f, written as a conditional-expression lambda.
func=lambda s:True if len(s)>5 else False
print(func('Prateek'))
# Shortest form: the comparison already evaluates to a bool.
func2=lambda s: len(s)>5
print(func2('harsh'))
|
[
"noreply@github.com"
] |
Prateek2201.noreply@github.com
|
57b36a522a4a39bda75590c6ed08055b2fd1ba63
|
f3d8e1351e52526959e2d44d72fd716924f1751d
|
/problems/56_merge_intervals.py
|
5b845275a27f0c956cd1a1031bf770ef73b34f38
|
[] |
no_license
|
xueyuanl/leetcode-py
|
c27a4faff5b9040d57cf864d3a11f1683d8182e3
|
03d3e34522c8c819388634ab4b63077da864a4e1
|
refs/heads/master
| 2021-07-14T23:40:32.913822
| 2021-07-14T13:43:19
| 2021-07-14T13:43:19
| 206,973,737
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
class Solution(object):
    def merge(self, intervals):
        """Merge all overlapping intervals (LeetCode 56).

        :type intervals: List[List[int]]
        :rtype: List[List[int]] -- newly allocated pairs; unlike the
            original implementation, the caller's inner lists are never
            mutated.
        """
        if not intervals:
            return []
        merged = []
        # Sorting by start guarantees each interval can only overlap the
        # most recently merged one.
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                # Overlaps the previous merged interval: extend it.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                # Copy the pair so we never alias the input lists.
                merged.append([start, end])
        return merged
|
[
"15186846+xueyuanl@users.noreply.github.com"
] |
15186846+xueyuanl@users.noreply.github.com
|
d2c204a4d44b2ff1d4ff5c3b10a7ccc2a91de537
|
1c904e7b4ab661c9f90536c9bfcde970540271d8
|
/setup.py
|
918cc1176dfec9a809df9ea34f452fb6de684980
|
[] |
no_license
|
koslab/pydatamall.webui
|
a7803a652441acb74adc75d2d09d9dced7cc9520
|
b483e8ca1aeef73a2c2c430cabf74e8fd0d0daf2
|
refs/heads/master
| 2021-01-10T07:56:48.159898
| 2015-11-20T15:55:09
| 2015-11-20T15:55:09
| 45,684,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
from setuptools import setup, find_packages
import os
version = '1.0'
# Assemble the PyPI long description from the project's text files.
# NOTE(review): these files are read at import time; packaging fails if
# any of them is missing from the sdist.
long_description = (
    open('README.txt').read()
    + '\n' +
    'Contributors\n'
    '============\n'
    + '\n' +
    open('CONTRIBUTORS.txt').read()
    + '\n' +
    open('CHANGES.txt').read()
    + '\n')

setup(name='pydatamall.webui',
      version=version,
      description="",
      long_description=long_description,
      # Get more strings from
      # http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
        "Programming Language :: Python",
        ],
      keywords='',
      author='',
      author_email='',
      url='http://github.com/koslab/pydatamall.webui/',
      license='agplv3',
      # Sources live under src/ in the pydatamall namespace package.
      packages=find_packages('src'),
      package_dir = {'': 'src'},
      namespace_packages=['pydatamall'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
          'pyramid',
          'pyramid_layout',
          'pyramid_bowerstatic',
          'pyramid_chameleon',
          'python-social-auth',
          'requests'
          # -*- Extra requirements: -*-
      ],
      entry_points={
          'console_scripts': [
              # `webui` launches the Pyramid application runner.
              'webui=pydatamall.webui.runner:main'
          ]
      }
      )
|
[
"kagesenshi.87@gmail.com"
] |
kagesenshi.87@gmail.com
|
6e87c83ff642eaea9ea8bc5eccfac1ca58e50696
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_142/863.py
|
a5243b5969ac2f11082e3e3b90863e0c03738b35
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,195
|
py
|
class GameInstance:
    """One Code Jam test case (Python 2).

    Holds a list of strings and computes the minimum number of
    insert/delete-of-repeated-character actions needed to make them all
    equal, or reports that it is impossible.
    """

    def __init__(self, init_str):
        # init_str: list of the case's input strings.
        self.strs = init_str
        self.total_action = 0
        self.tab_strs = []
        self.tabularize()

    def tabularize(self):
        """Run-length encode every string into [[char, count], ...]."""
        #set_trace()
        N = len(self.strs)
        for i in range(0, N):
            self.tab_strs.append([[self.strs[i][0],1]])
            for j in range(1, len(self.strs[i])):
                # Extend the current run or start a new one.
                if self.tab_strs[i][-1][0] == self.strs[i][j]:
                    self.tab_strs[i][-1][1] +=1
                else:
                    self.tab_strs[i].append([self.strs[i][j],1])

    def del_rep(self, si):
        """Collapse consecutive repeats in string si, counting one action
        per repeated run (leftover helper; not used by solve)."""
        clean_ptr = 0  # NOTE(review): unused leftover variable
        clean_str = self.strs[si][0]
        #set_trace()
        del_start = False
        for i in xrange(1, len(self.strs[si])):
            if clean_str[-1] == self.strs[si][i]:
                #i+= 1
                if not del_start:
                    self.total_action += 1
                    del_start = True
            else:
                del_start = False
                clean_str += self.strs[si][i]
                #i += 1
        return clean_str

    def solve(self):
        """Return the minimum action count as a string, or "Fegla Won"."""
        #the point is that as long as there is no repetition we can't do anything.
        #if there is a character in one of the string that is not in the other one
        #then we are done impossible.
        #also the order of repetition doesn't matter
        #so we move the pointer for all of them if we can repair we repair if not game
        #over
        N = len(self.strs)
        ref_len = len(self.tab_strs[0])
        # mod_str = self.del_rep(0)
        # poss = True
        # for i in range(1,N):
        #     if (mod_str != self.del_rep(i)):
        #         return "Fegla Won"
        # Impossible unless every string has the same sequence of runs
        # (same run count and same characters in the same order).
        for i in range(1,N):
            if ref_len != len(self.tab_strs[i]):
                return "Fegla Won"
            for j in range(0, ref_len):
                if (self.tab_strs[0][j][0] != self.tab_strs[i][j][0]):
                    return "Fegla Won"
        #set_trace()
        # all_mins = [self.tab_strs[0][i][1] for i in range(0, ref_len)]
        # for i in range(1, N):
        #     for j in range(0, ref_len):
        #         if all_mins[j] > self.tab_strs[i][j][1]:
        #             all_mins[j] = self.tab_strs[i][j][1]
        # For each run position, pick a common target length near the mean
        # (floor or ceil, whichever minimizes total |target - length|).
        for j in range(0, ref_len):
            sum_cl = 0
            for i in range(0, N):
                sum_cl += self.tab_strs[i][j][1]
            average = float(sum_cl)/float(N)
            av = [0,0]
            no_action = [0,0]
            av[0] = int(average)
            av[1] = int(average)+1
            for side in range(0,2):
                for i in range(0, N):
                    no_action[side] += abs(av[side] - self.tab_strs[i][j][1])
            if no_action[0] < no_action[1]:
                self.total_action += no_action[0]
            else:
                self.total_action += no_action[1]
        return str(self.total_action)
# Code Jam driver (Python 2): N test cases, each with T input strings.
N = input()
for i in range(1,N+1):
    T = input()
    cur_case = []
    from pdb import set_trace  # NOTE(review): leftover debug import inside the loop
    for j in range(0,T):
        cur_case.append(raw_input())
    #set_trace()
    cur_game = GameInstance(cur_case)
    print "Case #%i: %s"%(i,cur_game.solve())
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
e5f2b67f813053e0c4f7d0204c27f0484fd58db9
|
89e3f694021f261b95e494d2b479367bacde8251
|
/followthemoney/cli/ocds.py
|
536ec92f0f70666a97530d0dcc850e5d8f6e74e3
|
[
"MIT"
] |
permissive
|
dchaplinsky/followthemoney
|
6f9c05f430f8bfb04f7841378fd2ee5cf9b33235
|
a2a150f558acb5a1c985b9dc891c98c0fdf2f17e
|
refs/heads/master
| 2020-09-10T08:16:14.617602
| 2019-11-14T09:15:52
| 2019-11-14T09:15:52
| 221,699,199
| 1
| 0
|
MIT
| 2019-11-14T13:03:41
| 2019-11-14T13:03:41
| null |
UTF-8
|
Python
| false
| false
| 5,146
|
py
|
import json
import click
import logging
from pprint import pprint # noqa
from followthemoney import model
from followthemoney.cli.cli import cli
from followthemoney.cli.util import write_object
log = logging.getLogger(__name__)
IDENTIFIERS = {
'TRADE_REGISTER': 'registrationNumber',
'TAX_ID': 'vatCode',
'ORGANIZATION_ID': 'classification',
'STATISTICAL': 'classification',
}
@cli.command('import-ocds', help="Import open contracting data")
@click.option('-i', '--infile', type=click.File('r'), default='-')  # noqa
@click.option('-o', '--outfile', type=click.File('w'), default='-')  # noqa
def import_ocds(infile, outfile):
    """Read newline-delimited OCDS JSON records from infile and stream the
    converted followthemoney entities to outfile."""
    try:
        while True:
            line = infile.readline()
            if not line:
                # End of input: exit normally.
                return
            record = json.loads(line)
            for entity in convert_record(record):
                # Entities without a computed id cannot be written out.
                if entity.id is not None:
                    write_object(outfile, entity)
    except BrokenPipeError:
        # Downstream consumer went away; abort the CLI cleanly.
        raise click.Abort()
def clean_date(date):
    """Trim an ISO timestamp down to its date part; pass None through."""
    if date is None:
        return None
    # Everything before the first 'T' is the calendar date; strings
    # without a 'T' are returned unchanged.
    return date.partition('T')[0]
def make_address(*parts):
    """Join the non-None address fragments with single spaces."""
    fragments = [part for part in parts if part is not None]
    return ' '.join(fragments)
def convert_party(party):
    """Convert one OCDS party dict into a followthemoney LegalEntity.

    Consumes keys from *party* via pop() as it goes; unknown address
    parts and identifier schemes are logged rather than raised.
    """
    entity = model.make_entity('LegalEntity')
    entity.make_id(party.pop('id', None))
    entity.add('name', party.pop('name', None))
    address = party.pop('address', {})
    entity.add('country', address.pop('countryName', None))
    address_text = make_address(address.pop('streetAddress', None),
                                address.pop('postalCode', None),
                                address.pop('region', None))
    entity.add('address', address_text)
    # Anything left in the address dict is a field we do not map.
    if len(address):
        log.info("Unknown address part: %r", address.keys())
    contact = party.pop('contactPoint', {})
    entity.add('website', contact.pop('url', None))
    entity.add('phone', contact.pop('telephone', None))
    entity.add('email', contact.pop('email', None))
    for identifier in party.pop('additionalIdentifiers', []):
        scheme = identifier.pop('scheme', None)
        # IDENTIFIERS maps OCDS schemes to followthemoney property names.
        prop = IDENTIFIERS.get(scheme, None)
        if prop is None:
            log.info("Unknown identifier scheme: %s", scheme)
            continue
        entity.add(prop, identifier.pop('id', None))
    # pprint(party)
    return entity
def convert_release(release):
    """Convert one OCDS release into followthemoney entities.

    Yields, in order: every party as a LegalEntity, the buying authority,
    the Contract built from the tender, and for each award a supplier
    LegalEntity followed by its ContractAward.
    """
    for party in release.pop('parties', []):
        yield convert_party(party)
    buyer = release.pop('buyer', {})
    authority = model.make_entity('LegalEntity')
    authority.make_id(buyer.pop('id', None))
    authority.add('name', buyer.pop('name', None))
    yield authority
    tender = release.pop('tender', {})
    contract = model.make_entity('Contract')
    contract.make_id(release.pop('id', None))
    contract.add('authority', authority)
    contract.add('name', tender.pop('title', None))
    # Fall back to the tender id when the tender has no title.
    if not contract.has('name'):
        contract.add('name', tender.get('id', None))
    contract.add('description', tender.pop('description', None))
    contract.add('procedureNumber', tender.pop('id', None))
    contract.add('type', tender.pop('mainProcurementCategory', None))
    value = tender.pop('value', {})
    contract.add('amount', value.pop('amount', None))
    contract.add('currency', value.pop('currency', None))
    # pprint(tender)
    yield contract
    # contract.add('modifiedAt', published_date)
    lots = tender.pop('lots', [])
    for award in release.pop('awards', []):
        ca = model.make_entity('ContractAward')
        # Award ids are only unique within a contract, so namespace by it.
        ca.make_id(contract.id, award.pop('id', None))
        ca.add('contract', contract)
        ca.add('date', clean_date(award.pop('date', None)))
        value = award.pop('value', {})
        ca.add('amount', value.pop('amount', None))
        ca.add('currency', value.pop('currency', None))
        reason = tender.get('procurementMethodDetails', None)
        ca.add('decisionReason', reason)
        for document in award.pop('documents', []):
            ca.add('sourceUrl', document.get('url'))
        for item in award.pop('items', []):
            classification = item.pop('classification', {})
            ca.add('cpvCode', classification.get('url'))
        # Copy title/description from the tender lots this award refers to.
        related_lots = award.pop('relatedLots', [])
        for lot in lots:
            if lot.get('id') in related_lots:
                ca.add('role', lot.get('title'))
                ca.add('summary', lot.get('description'))
        for supplier in award.pop('suppliers', []):
            entity = model.make_entity('LegalEntity')
            entity.make_id(supplier.pop('id', None))
            entity.add('name', supplier.pop('name', None))
            ca.add('supplier', entity)
            yield entity
        # pprint(award)
        yield ca
def convert_record(record):
    """Convert an OCDS record package: yield every entity from every
    release, stamped with the package's publisher and publication date."""
    published_date = clean_date(record.pop('publishedDate', None))
    publisher = record.pop('publisher', {}).get('name')
    for release in record.get('releases', []):
        for entity in convert_release(release):
            # quiet=True: skip silently when a schema lacks these properties.
            entity.add('publisher', publisher, quiet=True)
            entity.add('modifiedAt', published_date, quiet=True)
            yield entity
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
43d460ce6a3a415277321f9a4f8658f6d7c4dbec
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_parody.py
|
947ea401561ad67bf848e6ab6ddc4814d3613dd2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
#calss header
class _PARODY():
def __init__(self,):
self.name = "PARODY"
self.definitions = [u'to copy the style of someone or something in a humorous way: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c515faf8793eb07a829146f36ac33429993b55ef
|
8ff6c3e513e17be6c51b484bed81d03150bdd175
|
/2013-04-analytic/part2/ex52b.py
|
be5ca1d980f5e1a94ea1ffb0ae488dd765182979
|
[] |
no_license
|
ricbit/Oldies
|
f1a2ac520b64e43d11c250cc372d526e9febeedd
|
2d884c61ac777605f7260cd4d36a13ed5a2c6a58
|
refs/heads/master
| 2023-04-27T20:35:19.485763
| 2023-04-26T04:45:44
| 2023-04-26T04:45:44
| 2,050,140
| 40
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
import itertools, sys
def surjection(seq):
    """Return True when every value 1..max(seq) occurs at least 3 times
    in seq (Python 2: uses iterkeys/xrange)."""
    hist = {}
    for i in seq:
        hist[i] = 1 + hist.get(i, 0)
    # Largest value present; every value up to it must be covered.
    m = max(hist.iterkeys())
    for i in xrange(1, 1 + m):
        if hist.get(i, 0) < 3:
            return False
    return True
def triple_surjections(n):
    """Yield all length-n tuples over 1..n/3 (Python 2 integer division)
    in which every value up to the tuple's maximum appears >= 3 times."""
    for seq in itertools.product(xrange(1, 1 + n / 3), repeat=n):
        if surjection(seq):
            yield seq
def tabular(seq):
    """Print seq as a LaTeX tabular, `size` entries per row (Python 2)."""
    size = 7
    print "\\begin{tabular}{ %s }" % " ".join(["r"]*size)
    # Ceiling division: number of rows needed for len(seq) entries.
    for i in xrange((len(seq)+size-1)/size):
        # NOTE(review): the inner generator reuses the name `i`, shadowing
        # the row index after the slice is taken.
        print "%s \\\\" % "&".join("".join(map(str,i))
                                   for i in seq[i*size:i*size+size])
    print "\\end{tabular}"
# Entry point: n comes from the command line; print all triple surjections.
tabular(list(triple_surjections(int(sys.argv[1]))))
|
[
"bluepenguin@gmail.com"
] |
bluepenguin@gmail.com
|
cf53263187c3025a04b1d121a9c4f9bfaa1f2106
|
3d69b7fe8fa95fcd6dbab25885f2e3e42bc891d6
|
/src/nlp/classification/tf1/xlnet/prepro_utils.py
|
fc945d6d64a46f483a18389895831414c5f33e17
|
[
"Apache-2.0"
] |
permissive
|
wu-uw/OpenCompetition
|
ac652d066f667dc2b3061947af5ea0425643a1b5
|
9aa9d7a50ada1deb653d295dd8a7fe46321b9094
|
refs/heads/master
| 2021-01-03T04:59:28.987099
| 2020-03-02T07:49:11
| 2020-03-02T07:49:11
| 239,932,371
| 0
| 0
|
Apache-2.0
| 2020-03-02T07:49:12
| 2020-02-12T05:12:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,013
|
py
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unicodedata
import six
from functools import partial
SPIECE_UNDERLINE = '▁'
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            # Decode bytes, dropping undecodable sequences.
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, unicode):
            # Python 2: `str` means bytes, so encode unicode to UTF-8.
            return text.encode("utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def print_(*args):
    """print() wrapper: normalizes each argument with printable_text,
    joining list arguments into a single space-separated string."""
    new_args = []
    for arg in args:
        if isinstance(arg, list):
            s = [printable_text(i) for i in arg]
            s = ' '.join(s)
            new_args.append(s)
        else:
            new_args.append(printable_text(arg))
    print(*new_args)
def preprocess_text(
        inputs,
        lower=False,
        remove_space=True,
        keep_accents=False):
    """Normalize text before tokenization: collapse whitespace, convert
    LaTeX-style quotes, optionally strip accents and lowercase."""
    if remove_space:
        outputs = ' '.join(inputs.strip().split())
    else:
        outputs = inputs
    # Normalize ``...'' quoting to plain double quotes.
    outputs = outputs.replace("``", '"').replace("''", '"')
    if six.PY2 and isinstance(outputs, str):
        outputs = outputs.decode('utf-8')
    if not keep_accents:
        # NFKD splits accented characters into base + combining marks,
        # which are then dropped.
        outputs = unicodedata.normalize('NFKD', outputs)
        outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
    if lower:
        outputs = outputs.lower()
    return outputs
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
    """Tokenize *text* with a SentencePiece model, post-processing pieces
    that end in a digit followed by a comma (e.g. "1,") so the comma
    becomes its own piece."""
    # return_unicode is used only for py2
    # note(zhiliny): in some systems, sentencepiece only accepts str for py2
    if six.PY2 and isinstance(text, unicode):
        text = text.encode('utf-8')
    if not sample:
        pieces = sp_model.EncodeAsPieces(text)
    else:
        # Sampled segmentation (nbest=64, alpha=0.1) for subword regularization.
        pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
    new_pieces = []
    for piece in pieces:
        if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
            # Re-tokenize the piece without its trailing comma, then fix up
            # the word-boundary marker (SPIECE_UNDERLINE) if re-encoding
            # introduced one that the original piece did not have.
            cur_pieces = sp_model.EncodeAsPieces(
                piece[:-1].replace(SPIECE_UNDERLINE, ''))
            if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                if len(cur_pieces[0]) == 1:
                    cur_pieces = cur_pieces[1:]
                else:
                    cur_pieces[0] = cur_pieces[0][1:]
            cur_pieces.append(piece[-1])
            new_pieces.extend(cur_pieces)
        else:
            new_pieces.append(piece)
    # note(zhiliny): convert back to unicode for py2
    if six.PY2 and return_unicode:
        ret_pieces = []
        for piece in new_pieces:
            if isinstance(piece, str):
                piece = piece.decode('utf-8')
            ret_pieces.append(piece)
        new_pieces = ret_pieces
    return new_pieces
def encode_ids(sp_model, text, sample=False):
    """Tokenize *text* with *sp_model* and map each piece to its vocab id."""
    return [
        sp_model.PieceToId(piece)
        for piece in encode_pieces(
            sp_model, text, return_unicode=False, sample=sample)
    ]
if __name__ == '__main__':
    # Smoke test comparing the raw SentencePiece segmentation against this
    # module's post-processed pieces/ids on a sample sentence, with and
    # without preprocess_text normalization.
    import sentencepiece as spm
    sp = spm.SentencePieceProcessor()
    sp.load('sp10m.uncased.v3.model')
    print_(u'I was born in 2000, and this is falsé.')
    print_(u'ORIGINAL', sp.EncodeAsPieces(
        u'I was born in 2000, and this is falsé.'))
    print_(
        u'OURS',
        encode_pieces(
            sp,
            u'I was born in 2000, and this is falsé.'))
    print(encode_ids(sp, u'I was born in 2000, and this is falsé.'))
    print_('')
    # Same comparison after lowercasing/normalizing the input first.
    prepro_func = partial(preprocess_text, lower=True)
    print_(prepro_func('I was born in 2000, and this is falsé.'))
    print_('ORIGINAL', sp.EncodeAsPieces(
        prepro_func('I was born in 2000, and this is falsé.')))
    print_('OURS', encode_pieces(sp, prepro_func(
        'I was born in 2000, and this is falsé.')))
    print(encode_ids(sp, prepro_func('I was born in 2000, and this is falsé.')))
    print_('')
    print_('I was born in 2000, and this is falsé.')
    print_('ORIGINAL', sp.EncodeAsPieces(
        'I was born in 2000, and this is falsé.'))
    print_('OURS', encode_pieces(sp, 'I was born in 2000, and this is falsé.'))
    print(encode_ids(sp, 'I was born in 2000, and this is falsé.'))
    print_('')
    # "92000" exercises the digit-comma piece splitting in encode_pieces.
    print_('I was born in 92000, and this is falsé.')
    print_('ORIGINAL', sp.EncodeAsPieces(
        'I was born in 92000, and this is falsé.'))
    print_('OURS', encode_pieces(sp, 'I was born in 92000, and this is falsé.'))
    print(encode_ids(sp, 'I was born in 92000, and this is falsé.'))
|
[
"ran.wang.math@gmail.com"
] |
ran.wang.math@gmail.com
|
44cdec8d130987c667d3ddd3a464bad33f309eeb
|
5dd47abf7061201d9378e73e51f08fbb314ba2fd
|
/envdsys/envcontacts/migrations/0050_auto_20210219_2128.py
|
b9ec01c328ffb1adf3231e767f6654bbfec32bcf
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/envDataSystem
|
4d264ae5209015e4faee648f37608d68a4461d0a
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
refs/heads/master
| 2023-02-23T22:33:14.334737
| 2021-07-22T01:09:16
| 2021-07-22T01:09:16
| 191,809,007
| 1
| 0
|
Unlicense
| 2023-02-08T00:45:54
| 2019-06-13T17:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# Generated by Django 3.1.7 on 2021-02-19 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django; edits are normally not needed.

    Narrows the single-character choice sets for the Person contact-type
    fields (email: Home/Work/Other, phone: Home/Work/Mobile/Other).
    """

    dependencies = [
        ('envcontacts', '0049_auto_20210219_2127'),
    ]

    operations = [
        migrations.AlterField(
            model_name='person',
            name='email1_type',
            field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('O', 'Other')], default='W', max_length=1),
        ),
        migrations.AlterField(
            model_name='person',
            name='email2_type',
            field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('O', 'Other')], default='W', max_length=1),
        ),
        migrations.AlterField(
            model_name='person',
            name='phone1_type',
            field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
        ),
        migrations.AlterField(
            model_name='person',
            name='phone2_type',
            field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
        ),
    ]
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
4b50d3a9c44f387818b24514e492f94d5951050f
|
e0527bce5c53a196752d3a16adf50cb60754de5f
|
/03-Workshop/Workshop-Questions/C_fun_with_flags.py
|
8627af965e3f1c5c0e3e439a2dc9c83893f634a1
|
[] |
no_license
|
ARWA-ALraddadi/python-tutorial-for-beginners
|
ddeb657f419fbc176bea273bc9fb6b88d1894191
|
21cedfc47871ca4d25c2382464c60ab0a2121205
|
refs/heads/master
| 2023-06-30T20:24:30.688800
| 2021-08-08T08:22:29
| 2021-08-08T08:22:29
| 193,094,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
#--------------------------------------------------------------------
#
# Fun With Flags
#
# In the lecture demonstration program "stars and stripes" we saw
# how function definitions allowed us to reuse code that drew a
# star and a rectangle (stripe) multiple times to create a copy of
# the United States flag.
#
# As a further example of the way functions allow us to reuse code,
# in this exercise we will import the flag_elements module into
# this program and create a different flag. In the PDF document
# accompanying this file you will find several flags which can be
# constructed easily using the "star" and "stripe" functions already
# defined. Choose one of these and try to draw it.
#
# First we import the two functions we need (make sure a copy of file
# flag_elements.py is in the same folder as this one)
from flag_elements import star, stripe
# Import the turtle graphics functions
from turtle import *
# Set up the drawing environment: a 600x400 pixel turtle window
setup(600, 400)
##### PUT YOUR CODE FOR DRAWING THE FLAG HERE
# (Students replace the placeholder `pass` below with calls to the imported
# star(...) and stripe(...) helpers to draw their chosen flag.)
pass
# Exit gracefully: hide the cursor and hand control to the turtle event loop
hideturtle()
done()
|
[
"noreply@github.com"
] |
ARWA-ALraddadi.noreply@github.com
|
fe416a0e81300a32016388151c240e79727ff3ad
|
e7ec251afc62616525c573c1b1b9e6416454aaaa
|
/bcbio/pipeline/__init__.py
|
298c4775b143c01b611e1483575f90280a9da72a
|
[
"MIT"
] |
permissive
|
YTLogos/bcbio-nextgen
|
157e023341b9085b6c3f36d68c2b68ae31e063f2
|
f964a25ab74a31551273b7e50518f3451c90f473
|
refs/heads/master
| 2022-12-28T15:11:28.127131
| 2017-09-20T18:58:45
| 2017-09-20T18:59:57
| 104,303,076
| 1
| 1
|
MIT
| 2022-12-12T12:18:27
| 2017-09-21T04:52:21
|
Python
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
"""High level code for driving a next-gen analysis pipeline.
This structures processing steps into the following modules:
- lane.py: Analyze a single fastq file.
- fastq.py: Utilities to retrieve fastq files.
- alignment.py: Align to a reference genome.
- sample.py: Analyze a sample, which may consist of multiple lanes or
barcoded samples on a lane.
- merge.py: Merge multiple sample files in one processing run.
- variation.py: Calculate SNP/indel variations for a sample.
- qcsummary.py: Quality control, alignment metrics and summary information.
"""
|
[
"chapmanb@50mail.com"
] |
chapmanb@50mail.com
|
f7db4248308429362c6ea3a4382920078bbd0636
|
465097858def678018ff76865bb09d34735d8eb9
|
/mysite/blog/forms.py
|
0d517f8db2dec175dc7af7cd362d511e1f0ffa33
|
[] |
no_license
|
bunnycast/django_girls
|
f9c3f3eb30955db64d2e643109bd2aa483b0f4b7
|
fc24a8301dd55d98b790c8fb19bd9e68129a7c63
|
refs/heads/master
| 2022-11-13T09:12:30.860813
| 2020-07-02T02:28:51
| 2020-07-02T02:28:51
| 275,992,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django import forms
from blog.models import Post, Comment
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a blog Post; exposes title and text only."""
    class Meta:
        model = Post
        fields = ('title', 'text',)
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; exposes author and text only."""
    class Meta:
        model = Comment
        fields = ('author', 'text',)
|
[
"berzzubunny@gmail.com"
] |
berzzubunny@gmail.com
|
77d9d1e0b97cfe7890c04957f93b007a82d99098
|
d318975fdf4daeccecbf90c24aba5009d51637eb
|
/server/env/bin/symilar
|
2bc1d53bf60868c4ab248a62a2e1b4c6610295af
|
[] |
no_license
|
Jrius4/data-shuffle
|
759702914b052c737b75f8cf5f84170f4e0cae40
|
4a0e7ac500d91903fcf4806d878ad01083068119
|
refs/heads/master
| 2023-01-24T10:38:59.467067
| 2019-10-13T20:01:33
| 2019-10-13T20:01:33
| 214,883,377
| 0
| 0
| null | 2023-01-04T12:23:25
| 2019-10-13T19:47:20
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
#!/home/jrius/Kaxiuz/investment/datastore/v1-UI/server/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # Strip any "-script.pyw" / ".exe" launcher suffix from argv[0] so the
    # tool reports a clean program name, then delegate to pylint's symilar.
    launcher = sys.argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', launcher)
    sys.exit(run_symilar())
|
[
"kazibwejuliusjunior@gmail.com"
] |
kazibwejuliusjunior@gmail.com
|
|
b305b26622db5f2f5eb4e89f70911e77ea7254d5
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/CodingInterviews_2/30_bao-han-minhan-shu-de-zhan-lcof.py
|
e9a86f1b3ff003b07e9bfbf7235100b290513e37
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-05-06 23:17:56
# @Last Modified : 2020-05-06 23:17:56
# @Mail : lostlorder@gamil.com
# @Version : alpha-1.0
# 定义栈的数据结构,请在该类型中实现一个能够得到栈的最小元素的 min 函数在该栈中,调用 min、push 及 pop 的时间复杂度都是 O(1)。
#
#
#
# 示例:
#
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.min(); --> 返回 -3.
# minStack.pop();
# minStack.top(); --> 返回 0.
# minStack.min(); --> 返回 -2.
#
#
#
#
# 提示:
#
#
# 各函数的调用总次数不超过 20000 次
#
#
#
#
# 注意:本题与主站 155 题相同:https://leetcode-cn.com/problems/min-stack/
# Related Topics 栈 设计
# 👍 28 👎 0
import traceback
import pytest
import math, fractions, operator
from typing import List
import collections, bisect, heapq
import functools, itertools
class MinStack:
    """Stack supporting push/pop/top plus O(1) retrieval of the minimum.

    Every entry on the internal stack is a ``(running_min, value)`` pair,
    where ``running_min`` is the smallest value at or below that entry, so
    ``min`` is a constant-time peek.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        # Stack of (running_min, value) tuples; empty at construction.
        self.stack = []

    def push(self, x: int) -> None:
        """Push ``x`` while recording the running minimum alongside it."""
        running_min = x if not self.stack else min(x, self.stack[-1][0])
        self.stack.append((running_min, x))

    def pop(self) -> None:
        """Remove the top entry; returns its value (matches the original)."""
        _, value = self.stack.pop()
        return value

    def top(self) -> int:
        """Return the value on top of the stack without removing it."""
        return self.stack[-1][1]

    def min(self) -> int:
        """Return the smallest value currently on the stack."""
        return self.stack[-1][0]
|
[
"wzy-511@163.com"
] |
wzy-511@163.com
|
36173a6b0f8010fa465e6f58b4576b374a85c962
|
24b1fa231f4e89f1a588c09ebee6fe4da6915c53
|
/Tutorials/Canvas/Fundamental-Theorem-Algebra.py
|
39eb49279e27df600cc9cb59f442cec0a5a30844
|
[] |
no_license
|
cyrt63/demos
|
a429214154cf0e51b58710f67670e1d902bfcac6
|
a4b54b862dba4ad33a707511896324829f4cc7b1
|
refs/heads/master
| 2020-04-08T13:51:40.823058
| 2015-04-21T14:01:41
| 2015-04-21T14:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
from browser import *
from workbench import *
from math import *
from units import *
from easel import *
from eight import *

# NOTE(review): the `browser`/`workbench`/`easel` modules and the `print`
# statements near the bottom indicate a Python-2-in-the-browser runtime
# (Brython-era); this file is not runnable under CPython 3 as-is.

# Open a secondary window and style it as a dark, chrome-less stage.
popup = window.open("","","width=800,height=600")
popup.document.body.style.backgroundColor = "202020"
popup.document.body.style.overflow = "hidden"
popup.document.title = "Visualizing Geometric Algebra with WebGL"

# Full-window 2D canvas that hosts the stage.
canvas2D = popup.document.createElement("canvas")
canvas2D.style.position = "absolute"
canvas2D.style.top = "0px"
canvas2D.style.left = "0px"
workbench2D = Workbench2D(canvas2D, popup)
space2D = Stage(canvas2D)
space2D.autoClear = True

# On-screen caption with exit instructions.
font = "20px Helvetica"
output = Text(popup.document.title + ". Hit Esc key to exit.", font, "white")
output.x = 100
output.y = 60
space2D.addChild(output)

# FPS meter (Stats.js) pinned to the top-left corner of the popup.
stats = window.Stats()
stats.setMode(0)
stats.domElement.style.position = 'absolute'
stats.domElement.style.left = '0px'
stats.domElement.style.top = '0px'
popup.document.body.appendChild(stats.domElement)

def setUp():
    # Called once by the animation runner before the first frame.
    workbench2D.setUp()

def tick(t):
    # Per-frame callback: redraw the stage, bracketed by FPS sampling.
    stats.begin()
    space2D.render()
    stats.end()

def terminate(t):
    # Never request termination; the loop ends when the window is closed.
    return False

def tearDown(e):
    # Final cleanup: close the popup and report how the animation ended.
    popup.close()
    if e:
        print "Error during animation: %s" % (e)
    else:
        print "Goodbye!"
    workbench2D.tearDown()

runner = windowAnimationRunner(tick, terminate, setUp, tearDown, popup)
runner.start()
|
[
"geometryzen@gmail.com"
] |
geometryzen@gmail.com
|
9a9b603dacd11b6877b6f71b4b1dbcf95b157098
|
cd0f3fa5c3b202599812ac8b49e374fe2b2f2e8b
|
/ExerciciosFixacao/Cap08/C08EXFIX01.py
|
2da857eb09708d1fde3beae9840f79314ba2abba
|
[] |
no_license
|
J-AugustoManzano/livro_Python
|
46c14dc4bc5fb361d850fcd361477a952de172c2
|
e42b79ef78c6b1ab936fe9a13d32ddc94deeb2a8
|
refs/heads/main
| 2023-06-25T03:10:30.297226
| 2023-06-08T23:34:54
| 2023-06-08T23:34:54
| 354,116,051
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
a = []
# Read ten names from the user, numbering the prompts 1..10.
for idx in range(10):
    a.append(input("Entre o {0:2}o. nome: ".format(idx + 1)))
print()
# Echo every name back with its original position.
for idx, nome in enumerate(a):
    print("{0:2}o. nome {1}.".format(idx + 1, nome))
enter = input("\nPressione <Enter> para encerrar... ")
|
[
"noreply@github.com"
] |
J-AugustoManzano.noreply@github.com
|
e32226900cf40f40d2d4e42c722d43e09866fa5f
|
b65f31d9d273c3d4bb826ff83a805368570bcd4d
|
/Lesson 13 - Email Search/mailgui.py
|
c5fb8aa0db53048004cfdfd786ea43e8f8f717fb
|
[] |
no_license
|
kobaltkween/python2
|
3fde6cc9ca1413b900c87656d8ceb99cb3f34f42
|
f7e529abd303b65f0b794c8a9ed87dbf085541a8
|
refs/heads/master
| 2020-12-31T05:09:39.297693
| 2016-04-13T23:27:10
| 2016-04-13T23:27:10
| 56,192,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
from tkinter import *
from maildb import msgs
import datetime
import mysql.connector as mysqlc
from database import loginInfo
def getDate(s):
    """
    Assumes a date form of yyyy-mm-dd, returns a corresponding datetime.date.
    """
    # Fixed-position slices: chars 0-3 year, 5-6 month, 8 onwards day.
    return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:]))
class Application(Frame):
    """Tkinter window for searching and displaying a MySQL-backed email archive."""

    def __init__(self, master = None):
        """
        Establish the window structure, leaving some widgets accessible
        as app instance variables. Connect button clicks to searchMail
        method and subject double-clicks to displayMail method.
        """
        Frame.__init__(self, master)
        # Let row/column 0 stretch so the frame resizes with the window.
        self.master.rowconfigure(0, weight = 1)
        self.master.columnconfigure(0, weight = 1)
        self.grid(sticky = W + E + N + S)
        l0 = Label(self, text = "Email Database Search", font = ("Helvetica", 16))
        l0.grid(row = 0, column = 1, columnspan = 2)
        l1 = Label(self, text = "Not Before (yyyy-mm-dd):")
        l1.grid(row = 1, column = 1, sticky = E + N + S)
        self.mindate = Entry(self)
        self.mindate.grid(row = 1, column = 2, sticky = W + N + S)
        l2 = Label(self, text="Not After (yyyy-mm-dd):")
        l2.grid(row = 2, column = 1, sticky = E + N + S)
        self.maxdate = Entry(self)
        self.maxdate.grid(row = 2, column = 2, sticky = W + N + S)
        l3 = Label(self, text= "Sender's E-mail Contains:")
        l3.grid(row = 3, column = 1, sticky = E + N + S)
        self.addsearch = Entry(self)
        self.addsearch.grid(row = 3, column = 2, sticky = W + N + S)
        l4 = Label (self, text = "Sender's Name Contains:")
        l4.grid(row = 4, column = 1, sticky = E + N + S)
        self.namesearch = Entry(self)
        self.namesearch.grid(row = 4, column = 2, sticky = W + N + S)
        button = Button(self, text = "Search", command = self.searchMail)
        button.grid(row = 5, column = 2)
        # Scrolling subject list; double-click shows the full message below.
        self.msgsubs = Listbox(self, height = 10, width = 100)
        self.msgsubs.grid(row = 8, column = 1, columnspan = 2)
        self.msgsubs.bind("<Double-Button-1>", self.displayMail)
        self.message = Text(self, width = 100)
        self.message.grid(row = 9, column = 1, columnspan = 2)

    def searchMail(self):
        """
        Take the database search parameters provided by the user
        (trying to make sense of the dates) and select the appropriate
        messages from the database, displaying the subject lines of the
        messages in a scrolling selection list.
        """
        # Empty entry fields become None so maildb.msgs skips that filter.
        mindate = self.mindate.get()
        if not mindate:
            mindate = None
        else:
            mindate = getDate(mindate)
        maxdate = self.maxdate.get()
        if not maxdate:
            maxdate = None
        else:
            maxdate = getDate(maxdate)
        addsearch = self.addsearch.get()
        if not addsearch:
            addsearch = None
        namesearch = self.namesearch.get()
        if not namesearch:
            namesearch = None
        # NOTE(review): a new connection is opened per search and never
        # explicitly closed; the table name is hard-coded to "testMessage".
        conn = mysqlc.Connect(**loginInfo)
        curs = conn.cursor()
        table = "testMessage"
        self.msglist = msgs(conn, curs, table, mindate = mindate, maxdate = maxdate, addsearch = addsearch, namesearch = namesearch)
        # Replace the previous result list with the new subjects.
        self.msgsubs.delete(0, END)
        for pk, msg in self.msglist:
            self.msgsubs.insert(END, msg['subject'])

    def displayMail(self, event):
        """
        Display the message corresponding to the subject line the
        user just clicked on.
        """
        indexes = self.msgsubs.curselection()
        if len(indexes) != 1:
            return
        self.message.delete(1.0, END)
        pk, msg = self.msglist[int(indexes[0])]
        # Show a few familiar headers before the body.
        for headerName in "Subject", "Date", "From":
            hdr = msg[headerName]
            if hdr:
                self.message.insert(INSERT, "{0}: {1}\n".format(headerName, hdr))
        self.message.insert(END, "\n")
        # Multipart bodies are not decoded; the raw payload is shown as-is.
        if msg.is_multipart():
            self.message.insert(END, "MULTIPART MESSAGE - SORRY!")
        self.message.insert(END, msg.get_payload())
if __name__ == "__main__":
    root = Tk()
    app = Application(master = root)
    # Populate the list with an unfiltered search before entering the loop.
    app.searchMail()
    app.mainloop()
|
[
"kobaltkween@gmail.com"
] |
kobaltkween@gmail.com
|
f3ae9c1a7bf1d55613b290744e12c443dcac932d
|
693568f813603806fbde976a1c69a97b06195708
|
/mods/tests/test_install.py
|
05669735deeb25b553bc8a1df5f2d8a56faf3514
|
[
"MIT"
] |
permissive
|
danlkv/pywebviz
|
c664a584c5a16d66c49aa233b69ef3b29ccaa081
|
5892ef90f28dbd43c33fefbfa5a199d15322a120
|
refs/heads/master
| 2023-02-11T06:06:13.451408
| 2021-01-13T07:22:08
| 2021-01-13T07:23:17
| 172,800,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
from importlib import reload
import libvis.modules.installed as modules
import libvis_mods
from pathlib import Path
mocks = Path(__file__).parent / 'mocks'
def test_install_files():
    """Install the single-file mock module, import it, then clean up."""
    global modules
    backend = mocks / 'module.py'
    frontend = mocks / 'blah.coffee'
    try:
        libvis_mods.install('Test', backend, frontend)
        # Re-import so the freshly installed module becomes visible.
        modules = reload(modules)
        modules.Test()
    finally:
        # Uninstall even on failure so later runs start from a clean slate.
        libvis_mods.uninstall('Test')
def test_install_dirs():
    """Install the directory-based mock module and construct it with a kwarg."""
    global modules
    try:
        backend = mocks / 'BirModule' / 'back'
        frontend = mocks / 'BirModule' / 'front'
        libvis_mods.install('BirModule', backend, frontend)
        # Re-import so the freshly installed module becomes visible.
        modules = reload(modules)
        modules.BirModule(count=5)
    finally:
        # Uninstall even on failure so later runs start from a clean slate.
        libvis_mods.uninstall('BirModule')
# Allow running both scenarios directly (outside pytest) as a script.
if __name__ == '__main__':
    test_install_dirs()
    test_install_files()
|
[
"lkv97dn@gmail.com"
] |
lkv97dn@gmail.com
|
fb0b3cea6186400de9e2106c276c471deea1a9c1
|
e67fd8a02af7c913d5469b86b1fcc02a3497d863
|
/organizing_hub/migrations/0004_auto_20181003_2101.py
|
412f0ae3cf448c84b4866ed408a26659932c1147
|
[
"MIT"
] |
permissive
|
Our-Revolution/site
|
37268727ab4761ca5d3e222b9b11c809327e01c2
|
c8024b805ff5ff0e16f54dce7bf05097fd2f08e0
|
refs/heads/master
| 2023-01-20T18:10:57.479047
| 2019-08-02T17:26:52
| 2019-08-02T17:26:52
| 71,601,229
| 4
| 3
|
MIT
| 2023-01-12T08:22:58
| 2016-10-21T22:19:53
|
Python
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-03 21:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11; only alters field metadata
    # (choices/help_text/default) — no data migration is involved.

    dependencies = [
        ('organizing_hub', '0003_organizinghubloginalert_alert_level'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organizinghubloginalert',
            name='alert_level',
            field=models.IntegerField(choices=[(1, 'Success'), (2, 'Info'), (3, 'Warning'), (4, 'Danger')], default=3, help_text='\n    Set the alert style corresponding to Bootstrap 3 alert levels.\n\n    See: https://getbootstrap.com/docs/3.3/components/#alerts-dismissible\n    '),
        ),
    ]
|
[
"cjmab28@gmail.com"
] |
cjmab28@gmail.com
|
f283b2717969e97a9084442cb738ded2f130471c
|
5896669c7ccf3efe979a4780516fc810844bfbba
|
/conf.py
|
790504a29ba2e1d53b75d3f3ec6fffc60661f7ed
|
[
"MIT"
] |
permissive
|
Hiestaa/miniboard-factorio-manager
|
ea1ff7e6084ef88869db635cb866517601f5b055
|
9ff5f1f063f17c0eaa47f43ac05bce0e74d90d45
|
refs/heads/master
| 2021-01-01T03:47:25.674434
| 2016-04-30T14:45:03
| 2016-04-30T14:45:03
| 57,064,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
    """Return the IPv4 address of the ``adapteridx``-th Windows network adapter.

    Requires the ``wmi`` package (win32 extensions), so this only works on
    Windows. Returns '' when no IP-enabled configuration matches the adapter.
    """
    try:
        import wmi
    except ImportError:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only the missing-module case should be reported.
        logging.error("You must need Win32com (win32 extensions for python)")
        raise

    adapters = wmi.WMI().Win32_NetworkAdapter()
    wlan_int_id = adapters[adapteridx].Index
    adaptername = adapters[adapteridx].NetConnectionID
    ip = ''
    # Match the adapter's configuration by WMI index to read its first address.
    for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
        if nic.Index == wlan_int_id:
            ip = nic.IPAddress[0]
    logging.info("[Windows] Showing IP for adapter %d (%s): %s",
                 adapteridx, adaptername, ip)
    return ip
def filtre(addrInfo):
    """Pick the first "interesting" IPv4 address from a netifaces address map.

    ``addrInfo`` maps address families to lists of address-detail dicts.
    Returns the first dotted-quad address that is neither the loopback,
    a 192.168.* LAN address, nor 0.*; returns None when nothing matches.
    """
    # BUGFIX: `.iteritems()` is Python-2-only; `.values()` works on both
    # Python 2 and 3 (the family key was never used anyway).
    for addrList in addrInfo.values():
        for addrDetails in addrList:
            addr = addrDetails.get('addr', '')
            # Keep only dotted-quad IPv4 strings.
            if len(addr.split('.')) != 4:
                continue
            if (not addr.startswith('192.168') and addr != '127.0.0.1'
                    and not addr.startswith('0')):
                return addr
def getIp(adapteridx):
    """Return the filtered address of the ``adapteridx``-th usable interface."""
    # Gather one candidate address per interface, then keep only the
    # interfaces for which filtre() found an interesting IPv4 address.
    candidates = [filtre(netifaces.ifaddresses(name))
                  for name in netifaces.interfaces()]
    usable = [addr for addr in candidates if addr is not None]
    # Wrap around so any index maps onto a usable interface.
    return usable[adapteridx % len(usable)]
# Global application configuration for the miniboard Factorio manager.
Conf = {
    'state': 'DEBUG',
    'log': {
        'fileLevel': logging.WARNING
    },
    'database': {
        'name': 'db/miniboard-factorio.db'
    },
    'server': {
        'port': 15000,
        'ip': '',
        'assets': {
            # Folders whose minified artefacts are cleaned up before a rebuild.
            'minifiedCleanups': [
                'http/assets/custom/css/',
                'http/assets/custom/js/'
            ],
            'minifyOnDebug': False
        },
    },
    'factorio': {
        # Ports a managed Factorio server instance is allowed to bind to.
        'allowedPorts': sorted(
            [34197, 34190, 34191, 34192, 34193]),
        # NOTE(review): macOS-specific, user-specific paths baked in below.
        'savesFolder': (
            '/Users/romain/Library/Application Support/factorio/saves'),
        'binary': '/Applications/factorio.app',
        'configFolder': (
            '/Users/romain/Library/Application Support/factorio/config'),
        'autosaveInterval': 15  # in minutes
    }
}
|
[
"rom1guyot@gmail.com"
] |
rom1guyot@gmail.com
|
08ad9df9dd16c3d904a326e08dbe5b1848f362ff
|
c157097e9883757f588c6da74d419b964a1c75cc
|
/python_fundamentals/08-user-input/command-line-parameters-01.py
|
25984b506bf0dc30f4e2eb2bba2086dcb995dfb2
|
[] |
no_license
|
sudhansom/python_sda
|
8d888216740c559ab66b700d3bea54c05caa0333
|
25d563854ef9d31ab910f84c973e48e3259de585
|
refs/heads/master
| 2022-04-26T15:26:15.263236
| 2020-04-25T07:32:10
| 2020-04-25T07:32:10
| 257,564,556
| 0
| 0
| null | 2020-04-29T16:41:37
| 2020-04-21T10:49:59
|
Python
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
import sys

# Successive command-line tokens form key/value pairs:
#   script.py France Paris Nepal Kathmandu  ->  {'France': 'Paris', ...}
country_list = sys.argv[1:]
my_dict = {country_list[i]: country_list[i + 1]
           for i in range(0, len(country_list), 2)}

print(f"\nDictionary details : \n\n {my_dict}")
|
[
"bkspoudel@gmail.com"
] |
bkspoudel@gmail.com
|
79669a5b1eccf60216afd0fadf1e13d7389fd0d1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_168/109.py
|
50abe5d4745d2991cd4d5af4fe4809c0886ebe1c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
#!/usr/bin/python
import sys
import numpy as np # http://www.numpy.org/
import scipy # http://scipy.org/
import networkx as nx # https://networkx.github.io/
import sympy # http://www.sympy.org
import itertools
import operator
import string
import fractions
#import visual # vpython.org
#import Levenshtein # https://pypi.python.org/pypi/python-Levenshtein/0.12.0
import cmath
sys.setrecursionlimit(5000)
T = int(sys.stdin.readline())

# NOTE(review): these three lookup tables are never used below — presumably
# left over from an earlier approach to the same problem.
charmap = {'.':0, '^': 1, 'v':-1,'>':2,'<':2}
dirR = {'.':0, '^': 1, 'v':-1,'>':0,'<':0}
dirC = {'.':0, '^': 0, 'v':0,'>':1,'<':-1}

def test(field):
    # Uses the globals R and C set by the case loop below.
    # Feasibility: an arrow that is the only non-'.' cell in both its row
    # and its column has no other arrow to point at -> IMPOSSIBLE.
    bools = field!='.'
    d1 = np.sum(bools,axis=1)
    d2 = np.sum(bools,axis=0)
    for i in range(R):
        for j in range(C):
            if field[i,j]=='.': continue
            if d1[i]==1 and d2[j]==1: return "IMPOSSIBLE"
    # Count arrows that must be rotated: only the first non-'.' cell seen
    # from each edge matters (the scan breaks at the first non-dot cell).
    count = 0
    for i in range(R):
        for j in range(C):
            if field[i,j]=='.': continue
            if field[i,j]=='<':
                count+=1
            break
        for j in range(C):
            if field[i,C-j-1]=='.': continue
            if field[i,C-j-1]=='>':
                count+=1
            break
    for j in range(C):
        for i in range(R):
            if field[i,j]=='.': continue
            if field[i,j]=='^':
                count+=1
            break
        for i in range(R):
            if field[R-i-1,j]=='.': continue
            if field[R-i-1,j]=='v':
                count+=1
            break
    return str(count)

for case in range(0, T):
    # Each case: R C on one line, then R rows of single-character cells.
    R,C = map(int,sys.stdin.readline().strip().split())
    field = np.chararray( (R,C))
    for i in range(R):
        line=sys.stdin.readline().strip()
        for c in range(len(line)):
            field[i,c] = line[c]
    solution = test(field)
    print "Case #%i: %s" % (case + 1, solution)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7b993e2d2391a2c6f2fdf7c9b7dcc0ae0b47bb85
|
509823ea14f04d5791486b56a592d7e7499d7d51
|
/parte05/ex5.05_remover_duplicados_lista.py
|
e2c0ee58c5eaf7c39ca0c9d8479cad0f6d096521
|
[] |
no_license
|
Fhernd/Python-CursoV2
|
7613144cbed0410501b68bedd289a4d7fbefe291
|
1ce30162d4335945227f7cbb875f99bc5f682b98
|
refs/heads/master
| 2023-08-08T05:09:44.167755
| 2023-08-05T19:59:38
| 2023-08-05T19:59:38
| 239,033,656
| 64
| 38
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Ejercicio 5.5: Remover los valores duplicados en una lista.

numeros = [1, 2, 3, 1, 1, 1, 4, 5, 6, 3, 3, 2, 5]

print('Contenido actual de la lista `numeros`:', numeros)
print('Cantidad actual de la lista `numeros`:', len(numeros))
print()

# Solución #1:
print('Solución #1:')

# Recorrido manual: se conserva la primera aparición de cada valor,
# usando un conjunto auxiliar para comprobar pertenencia.
numeros_sin_repetir = []
_vistos = set()
for valor in numeros:
    if valor in _vistos:
        continue
    _vistos.add(valor)
    numeros_sin_repetir.append(valor)

print('Contenido actual de la lista `numeros_sin_repetir`:', numeros_sin_repetir)
print('Cantidad actual de la lista `numeros_sin_repetir`:', len(numeros_sin_repetir))
print()

# Solución #2:
print('Solución #2')

# Un conjunto elimina los duplicados de una sola vez.
conjunto_numeros = list(set(numeros))

print('Contenido actual de `conjunto_numeros`:', conjunto_numeros)
print('Cantidad actual de conjunto_numeros`:', len(conjunto_numeros))
|
[
"johnortizo@outlook.com"
] |
johnortizo@outlook.com
|
ff21461f29ea8d9161ba90e7c5ee44d3fba4e68d
|
f5ee595836adfb75047d2798928ca020533bd597
|
/nanobrok/ext/ssl.py
|
43a2b503b138db55be7ab8dc961127139043fb33
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
santaklouse/Nanobrok
|
efcc836484a799f614c21d50a75e0f5d1088f8bb
|
680b112f76e248f64c021337769bef163527bce0
|
refs/heads/master
| 2023-08-13T03:52:25.137896
| 2021-09-18T18:11:13
| 2021-09-18T18:11:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# TODO disabled features
# from flask_talisman import Talisman
# def init_app(app):
# # need to run with disable debug
# talisman = Talisman(app)
|
[
"mh4root@gmail.com"
] |
mh4root@gmail.com
|
a5f3a1d7bc6d6ea8f54a13b11fa07fa758a81d39
|
00c6ded41b84008489a126a36657a8dc773626a5
|
/.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714191514.py
|
60ba3be53c8d65f3e3bf954ef6f39baf671d731c
|
[] |
no_license
|
12libao/DEA
|
85f5f4274edf72c7f030a356bae9c499e3afc2ed
|
1c6f8109bbc18c4451a50eacad9b4dedd29682bd
|
refs/heads/master
| 2023-06-17T02:10:40.184423
| 2021-07-16T19:05:18
| 2021-07-16T19:05:18
| 346,111,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,787
|
py
|
# author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
    """This is a design point select strategy from constrains analysis.

    Sweeps the hybridisation fraction hp over [0, 1] for each flight
    condition and reports, per condition, the minimum combined
    (turbofan + electric) power-to-weight ratio and the hp achieving it.
    """

    def __init__(self, altitude, velocity, beta, method, p_turbofan_max, p_motorfun_max, n=12):
        """
        :param altitude: m x 1 matrix
        :param velocity: m x 1 matrix
        :param beta: P_motor/P_total m x 1 matrix
        :param method: 1 selects the Mattingly constraint method,
            anything else selects the Gudmundsson method
        :param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
        :param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
        :param n: number of motor

        the first group of condition is for stall speed
        the stall speed condition have to use motor, therefore with PD
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.n_motor = n
        self.p_turbofan_max = p_turbofan_max
        self.p_motorfun_max = p_motorfun_max

        # hp sweep resolution (n) and number of flight conditions (m).
        self.n = 100
        self.m = len(self.h)
        self.hp = np.linspace(0, 1, self.n)
        self.hp_threshold = self.p_motorfun_max / (self.p_motorfun_max + self.p_turbofan_max)

        # method1 = Mattingly_Method, method2 = Gudmundsson_Method
        if method == 1:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
        else:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric

        # BUGFIX: the original called self.method(...), an attribute that was
        # never assigned (AttributeError). The stall-speed condition must use
        # the motor (see docstring above), so the electric variant is used.
        # NOTE(review): confirm method2 (electric) is the intended choice.
        problem = self.method2(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
        self.w_s = problem.allFuncs[0](problem)

    def p_w_compute(self):
        """Return the m x n matrix of combined p/w over the hp sweep."""
        p_w = np.zeros([self.m, self.n])  # m x n matrix
        # Conditions 1..7 only; condition 0 (stall speed) fixed w_s in __init__.
        for i in range(1, 8):
            for j in range(self.n):
                problem1 = self.method1(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                problem2 = self.method2(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                if i >= 5:
                    # Climb-rate conditions: roc = 15, 10, 5 for i = 5, 6, 7.
                    p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w_1 = problem1.allFuncs[i](problem1)
                    p_w_2 = problem2.allFuncs[i](problem2)

                # Penalise infeasible points that exceed a power threshold.
                if p_w_1 > self.p_turbofan_max:
                    p_w_1 = 100000
                elif p_w_2 > self.p_motorfun_max:
                    p_w_2 = 100000

                # BUGFIX: the original wrote to self.p_w, which was never
                # created (AttributeError); the accumulator is the local
                # matrix that this method returns.
                p_w[i, j] = p_w_1 + p_w_2
        return p_w

    def strategy(self):
        """Return (p_w_min, hp_p_w_min).

        p_w_min: per-condition minimum p/w across the hp sweep.
        hp_p_w_min: indices (condition, hp) where those minima occur.
        """
        p_w = self.p_w_compute()

        # find the min p_w for difference hp for each flight condition:
        p_w_min = np.amin(p_w, axis=1)
        # BUGFIX: comparing (m, n) against (m,) does not broadcast row-wise;
        # the per-row minimum must be reshaped to a column vector.
        hp_p_w_min = np.array(np.where(p_w == p_w_min[:, None]))
        return p_w_min, hp_p_w_min
|
[
"libao@gatech.edu"
] |
libao@gatech.edu
|
5cdb06fe2b728a7c56950f0ef7ab873a08acf5b7
|
df7b40e95718ac0f6071a0ba571b42efc81cf6de
|
/mmseg/models/backbones/fightingcv/conv/DepthwiseSeparableConvolution.py
|
8dde19054b1cbfee8452cf320b61bf165bbdeceb
|
[
"Apache-2.0"
] |
permissive
|
shinianzhihou/ChangeDetection
|
87fa2c498248e6124aeefb8f0ee8154bda36deee
|
354e71234bef38b6e142b6ba02f23db958582844
|
refs/heads/master
| 2023-01-23T20:42:31.017006
| 2023-01-09T11:37:24
| 2023-01-09T11:37:24
| 218,001,748
| 162
| 29
|
Apache-2.0
| 2022-11-03T04:11:00
| 2019-10-28T08:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 899
|
py
|
import torch
from torch import nn
class DepthwiseSeparableConvolution(nn.Module):
    """Depthwise-separable 2D convolution.

    Factorises a dense convolution into a per-channel (depthwise) spatial
    convolution followed by a 1x1 (pointwise) convolution that mixes the
    channels up to ``out_ch``.
    """

    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1):
        super().__init__()
        # groups=in_ch gives each input channel its own spatial filter.
        self.depthwise_conv = nn.Conv2d(
            in_channels=in_ch,
            out_channels=in_ch,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=in_ch,
        )
        # 1x1 convolution over all channels (stride 1, no padding, one group
        # — the Conv2d defaults).
        self.pointwise_conv = nn.Conv2d(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
        )

    def forward(self, x):
        """Apply the depthwise stage, then the pointwise stage, to ``x``."""
        return self.pointwise_conv(self.depthwise_conv(x))
if __name__ == '__main__':
    # Smoke test: a 1x3x224x224 input should map to 1x64x224x224.
    input=torch.randn(1,3,224,224)
    dsconv=DepthwiseSeparableConvolution(3,64)
    out=dsconv(input)
    print(out.shape)
|
[
"1178396201@qq.com"
] |
1178396201@qq.com
|
14e16f4f11ae53470f7f9898327d4ed7af13658a
|
ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29
|
/code/practice/abc/abc017/b.py
|
00e2723566008b22396a8f37944c3663d2794fdc
|
[] |
no_license
|
mollinaca/ac
|
e99bb5d5c07159b3ef98cd7067424fa2751c0256
|
2f40dd4333c2b39573b75b45b06ad52cf36d75c3
|
refs/heads/master
| 2020-12-22T11:02:13.269855
| 2020-09-18T01:02:29
| 2020-09-18T01:02:29
| 236,757,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Delete every allowed syllable (longest first, so 'ch' is consumed before
# its 'c' could be orphaned); the word was valid iff nothing remains.
s = input()
for token in ('ch', 'o', 'k', 'u'):
    s = s.replace(token, '')
print("YES" if not s else "NO")
|
[
"github@mail.watarinohibi.tokyo"
] |
github@mail.watarinohibi.tokyo
|
f78fe98818ec4e3c7f3f8938e6c2b1cc0aacfeb5
|
f030c1b724ad3a04dade2463374bd3c03e17b93c
|
/napari/layers/_tests/test_source.py
|
de9e0954979762eb743aedccddb1bc784505ff21
|
[
"BSD-3-Clause"
] |
permissive
|
sandutsar/napari
|
3c8568979c320d57cdb80e2ea2a5db7ea035413b
|
37d476bc0b00252177f17f25e7d1fd52ddc4bb69
|
refs/heads/master
| 2023-07-25T08:31:32.189843
| 2021-09-05T11:01:02
| 2021-09-05T11:01:02
| 390,003,115
| 0
| 0
|
BSD-3-Clause
| 2021-09-05T12:18:14
| 2021-07-27T13:56:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,457
|
py
|
from napari.layers import Points
from napari.layers._source import Source, current_source, layer_source
def test_layer_source():
    """Test basic layer source assignment mechanism"""
    with layer_source(path='some_path', reader_plugin='builtins'):
        points = Points()
    # A layer created inside the context inherits the context's source.
    assert points.source == Source(path='some_path', reader_plugin='builtins')
def test_source_context():
    """Test nested contexts, overrides, and resets."""
    # Outside any context the source is empty.
    assert current_source() == Source()

    # everything created within this context will have this sample source
    with layer_source(sample=('samp', 'name')):
        assert current_source() == Source(sample=('samp', 'name'))

        # nested contexts override previous ones
        with layer_source(path='a', reader_plugin='plug'):
            assert current_source() == Source(
                path='a', reader_plugin='plug', sample=('samp', 'name')
            )

            # note the new path now...
            with layer_source(path='b'):
                assert current_source() == Source(
                    path='b', reader_plugin='plug', sample=('samp', 'name')
                )

            # as we exit the contexts, they should undo their assignments
            assert current_source() == Source(
                path='a', reader_plugin='plug', sample=('samp', 'name')
            )

        assert current_source() == Source(sample=('samp', 'name'))

    assert current_source() == Source()
|
[
"noreply@github.com"
] |
sandutsar.noreply@github.com
|
2c2b4b54559435087b2f62c0c283829e9b7231ac
|
f865fdd970f8e37ea2aa5157374af8c4d6ced987
|
/test/test_vehicle.py
|
bee0fd446e3e911c97a237c630c531bd3edceb95
|
[] |
no_license
|
gkeep-openapi/python-sdk
|
7e809448355bff535b3d64e013f001e9196c5e19
|
7c4f3785b47a110386ef10109619654522c95de5
|
refs/heads/master
| 2022-05-28T16:13:06.643958
| 2022-05-13T14:58:39
| 2022-05-13T14:58:39
| 235,536,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# coding: utf-8
"""
Gkeep API
Gkeep API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.vehicle import Vehicle # noqa: E501
from swagger_client.rest import ApiException
class TestVehicle(unittest.TestCase):
    """Vehicle unit test stubs (swagger-codegen generated; fill in the FIXME)."""

    def setUp(self):
        # No fixtures needed yet for the generated stub.
        pass

    def tearDown(self):
        pass

    def testVehicle(self):
        """Test Vehicle"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.vehicle.Vehicle()  # noqa: E501
        pass
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
|
[
"gkeep-ci-jenkins"
] |
gkeep-ci-jenkins
|
480ee4addf549a8560df46c79e497d97793f9f92
|
6dc9f1753f0e2ccaef6fb385324ba0602a04042a
|
/CUHK_CPM/GPS_Project/RR_Robot/build/pi_six_axis/pi_description/catkin_generated/pkg.develspace.context.pc.py
|
3d97e1ad3fa96f593b07648b7de288c2fbc559fa
|
[] |
no_license
|
SunnyLyz/Deep_Learning
|
c413abe3ef6510b3492f0a73c9a287b4bf56ec2c
|
9fa58688a7daffdded8037b9fa20c571a00f87e0
|
refs/heads/master
| 2021-06-21T12:12:39.450564
| 2017-07-18T12:20:45
| 2017-07-18T12:20:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: machine-generated by the catkin build system — do not edit by hand;
# empty-string placeholders are substituted by CMake at configure time.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pi_description"
PROJECT_SPACE_DIR = "/home/turinglife/GPS_Project/RR_Robot/devel"
PROJECT_VERSION = "0.0.0"
|
[
"hswong1@uci.edu"
] |
hswong1@uci.edu
|
f617d8bc124c5f917b3c4b77f2bbec4e77496e8d
|
55ae369a3ef1593ff31a76847deb2a0d33898895
|
/mango/orderbookside.py
|
68ec8576483ae87a4daf73e9dff659332c7bc063
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Investin-pro/mango-explorer
|
63afb2ad4fb272f5640d18d3df367a6877b3a99a
|
4760bd5f9d7067e24c12941d3d7d113b1a7173ef
|
refs/heads/master
| 2023-07-31T23:23:00.590654
| 2021-10-01T17:13:18
| 2021-10-01T17:13:18
| 402,579,362
| 1
| 3
|
MIT
| 2021-10-02T16:31:43
| 2021-09-02T22:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 6,432
|
py
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import enum
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .context import Context
from .layouts import layouts
from .metadata import Metadata
from .orders import Order, OrderType, Side
from .perpmarketdetails import PerpMarketDetails
from .version import Version
# # 🥭 OrderBookSideType enum
#
# Does the orderbook side represent bids or asks?
#
class OrderBookSideType(enum.Enum):
    """Which side of the orderbook an account represents: bids or asks."""

    # String values so argparse can accept these members directly as
    # command-line parameters.
    BIDS = "BIDS"
    ASKS = "ASKS"

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return str(self)
# # 🥭 PerpOrderBookSide class
#
# `PerpOrderBookSide` holds orders for one side of a market.
#
class PerpOrderBookSide(AddressableAccount):
    """One side (bids or asks) of a perp market's on-chain orderbook.

    The account data is a 'slab': a flat ``nodes`` array forming a binary
    tree (``root_node`` is the index of the root) plus free-list
    bookkeeping used to recycle node slots.
    """

    def __init__(self, account_info: AccountInfo, version: Version,
                 meta_data: Metadata, perp_market_details: PerpMarketDetails, bump_index: Decimal,
                 free_list_len: Decimal, free_list_head: Decimal, root_node: Decimal,
                 leaf_count: Decimal, nodes: typing.Any):
        super().__init__(account_info)
        self.version: Version = version
        self.meta_data: Metadata = meta_data
        self.perp_market_details: PerpMarketDetails = perp_market_details
        # Slab bookkeeping fields, taken verbatim from the on-chain layout.
        self.bump_index: Decimal = bump_index
        self.free_list_len: Decimal = free_list_len
        self.free_list_head: Decimal = free_list_head
        self.root_node: Decimal = root_node
        self.leaf_count: Decimal = leaf_count
        self.nodes: typing.Any = nodes

    @staticmethod
    def from_layout(layout: typing.Any, account_info: AccountInfo, version: Version, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
        """Build an instance from an already-parsed ORDERBOOK_SIDE layout."""
        meta_data = Metadata.from_layout(layout.meta_data)
        bump_index: Decimal = layout.bump_index
        free_list_len: Decimal = layout.free_list_len
        free_list_head: Decimal = layout.free_list_head
        root_node: Decimal = layout.root_node
        leaf_count: Decimal = layout.leaf_count
        nodes: typing.Any = layout.nodes
        return PerpOrderBookSide(account_info, version, meta_data, perp_market_details, bump_index, free_list_len, free_list_head, root_node, leaf_count, nodes)

    @staticmethod
    def parse(context: Context, account_info: AccountInfo, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
        """Parse raw account bytes; raises when the size is not exactly one slab."""
        data = account_info.data
        if len(data) != layouts.ORDERBOOK_SIDE.sizeof():
            raise Exception(
                f"PerpOrderBookSide data length ({len(data)}) does not match expected size ({layouts.ORDERBOOK_SIDE.sizeof()})")
        layout = layouts.ORDERBOOK_SIDE.parse(data)
        return PerpOrderBookSide.from_layout(layout, account_info, Version.V1, perp_market_details)

    @staticmethod
    def load(context: Context, address: PublicKey, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
        """Fetch the account at ``address`` and parse it; raises when missing."""
        account_info = AccountInfo.load(context, address)
        if account_info is None:
            raise Exception(f"PerpOrderBookSide account not found at address '{address}'")
        return PerpOrderBookSide.parse(context, account_info, perp_market_details)

    def orders(self) -> typing.Sequence[Order]:
        """Walk the slab tree and return every resting order on this side."""
        if self.leaf_count == 0:
            return []
        # The account metadata tells us whether these are bids or asks.
        if self.meta_data.data_type == layouts.DATA_TYPE.Bids:
            order_side = Side.BUY
        else:
            order_side = Side.SELL
        # Iterative depth-first traversal starting from the root node.
        stack = [self.root_node]
        orders: typing.List[Order] = []
        while len(stack) > 0:
            index = int(stack.pop())
            node = self.nodes[index]
            if node.type_name == "leaf":
                price = node.key["price"]
                quantity = node.quantity
                # Scale native lot prices/quantities into UI units using the
                # market's lot sizes and the base/quote token decimals.
                decimals_differential = self.perp_market_details.base_token.decimals - self.perp_market_details.quote_token.decimals
                native_to_ui = Decimal(10) ** decimals_differential
                quote_lot_size = self.perp_market_details.quote_lot_size
                base_lot_size = self.perp_market_details.base_lot_size
                actual_price = price * (quote_lot_size / base_lot_size) * native_to_ui
                base_factor = Decimal(10) ** self.perp_market_details.base_token.decimals
                actual_quantity = (quantity * self.perp_market_details.base_lot_size) / base_factor
                # Order type is not stored in the slab node, hence UNKNOWN.
                orders += [Order(int(node.key["order_id"]),
                                 node.client_order_id,
                                 node.owner,
                                 order_side,
                                 actual_price,
                                 actual_quantity,
                                 OrderType.UNKNOWN)]
            elif node.type_name == "inner":
                # Child push order differs per side -- presumably so leaves pop
                # in best-price-first order; TODO confirm against slab docs.
                if order_side == Side.BUY:
                    stack = [*stack, node.children[0], node.children[1]]
                else:
                    stack = [*stack, node.children[1], node.children[0]]
        return orders

    def __str__(self) -> str:
        nodes = "\n ".join([str(node).replace("\n", "\n ") for node in self.orders()])
        return f"""« 𝙿𝚎𝚛𝚙𝙾𝚛𝚍𝚎𝚛𝙱𝚘𝚘𝚔𝚂𝚒𝚍𝚎 {self.version} [{self.address}]
{self.meta_data}
Perp Market: {self.perp_market_details}
Bump Index: {self.bump_index}
Free List: {self.free_list_head} (head) {self.free_list_len} (length)
Root Node: {self.root_node}
Leaf Count: {self.leaf_count}
{nodes}
»"""
|
[
"geoff@knife.opgeek.lan"
] |
geoff@knife.opgeek.lan
|
298d061ade3f8aae2939f4898a724e5ec2c4bd4d
|
432481b47d95ea2ce63f4e1ceb2e27e8a6f155a1
|
/Project/Portfolio_construction/data.py
|
4534e88433e71c81ef7b704e42acdf7eb5e05458
|
[] |
no_license
|
dxcv/Project-2
|
81fe3777fb7ee3db3df84d24b7321c8d40fcbb91
|
8105f996f97b657b5f1644a04f6f678005119b06
|
refs/heads/master
| 2020-09-08T09:58:56.829060
| 2019-11-08T17:51:36
| 2019-11-08T17:51:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,748
|
py
|
"""
Importation of the data.
"""
# Author: John Sibony <john.sibony@hotmail.fr>
from util import *
from password import *
import pandas as pd
from sqlalchemy import create_engine
def extraction_data(link_engine, query):
    """Run *query* against the database reached via *link_engine*.

    :param link_engine: SQLAlchemy connection string (see password.py file).
    :param query: SQL query to execute.
    :return: query result as a pandas DataFrame.
    """
    return pd.read_sql_query(query, create_engine(link_engine))
def import_data(index, contract, start_date='2006-01-01', freq=('EW1', 'EW2', 'EW3', 'EW4', 'EW')):
    """Extraction of specific data.

    :param index: Name of the data index ('SP' or 'VIX' or 'VVIX' or '10Ybond'
        for respectively SP500, Vix, Volatility of Vix or 10 Year TBond index).
    :param contract: Type of Contract ('call' or 'put' or 'future' or 'spot').
    :param start_date: Beginning date of the extracted data, format %YYYY-%mm-%dd.
    :param freq: Only valid for the SP index.  Sequence of option-maturity
        frequencies ('EW1'..'EW4' = nth Friday, 'EW' = end of month, 'ES').
        The default is an immutable tuple (was a mutable-default list).
    :return: pandas DataFrame indexed by 'date'.
    :raises KeyError: when the (index, contract) combination is unsupported.
    """
    # SECURITY NOTE(review): queries are assembled by string concatenation;
    # arguments come from trusted callers today, but parameterised queries
    # would be safer if that ever changes.
    link_engine = get_link_engine()
    query = None
    sort_columns = ['date']   # spot data only needs date ordering
    add_underlying = False    # the 10Y future table lacks an 'underlying' column
    if index == 'SP':
        # Render the maturity-frequency filter as a SQL tuple literal.
        if len(freq) > 1:
            freq = str(tuple(freq))
        else:
            freq = """('""" + str(freq[0]) + """')"""
        if contract == 'call':
            query = '''select option_expiration, date, underlying, strike, delta, value, std_skew, dte, iv from data_option.cme_es_ivol_rp where date >= ''' + """'""" + start_date + """'""" + ''' and "root.symbol" in ''' + freq + ''' and sense = 'c' '''
            sort_columns = ['date', 'option_expiration']
        elif contract == 'put':
            query = '''select option_expiration, date, underlying, strike, delta, value, std_skew, dte, iv from data_option.cme_es_ivol_rp where date >= ''' + """'""" + start_date + """'""" + ''' and "root.symbol" in ''' + str(freq) + ''' and sense = 'p' '''
            sort_columns = ['date', 'option_expiration']
        elif contract == 'future':
            query = '''select date,expiry_date,close from data_future.cme_es where date >= ''' + """'""" + start_date + """'"""
            sort_columns = ['date', 'expiry_date']
        elif contract == 'spot':
            query = '''select date,close from data_ohlc.cboe_spx where date >= ''' + """'""" + start_date + """'"""
    elif index == 'VIX':
        if contract == 'call':
            query = '''select date,option_expiration,strike,underlying,value,iv,delta,std_skew,dte from data_option.cbot_vx_ivol_rp where date >= ''' + """'""" + start_date + """'""" + ''' and "root.symbol" = 'VIX' and sense = 'c' '''
            sort_columns = ['date', 'option_expiration']
        elif contract == 'put':
            query = '''select date,option_expiration,strike,underlying,value,iv,delta,std_skew,dte from data_option.cbot_vx_ivol_rp where date >= ''' + """'""" + start_date + """'""" + ''' and "root.symbol" = 'VIX' and sense = 'p' '''
            sort_columns = ['date', 'option_expiration']
        elif contract == 'future':
            query = '''select date,expiry_date,close from data_future.cbot_vx where date >= ''' + """'""" + start_date + """'"""
            sort_columns = ['date', 'expiry_date']
        elif contract == 'spot':
            query = '''select date,close from data_ohlc.cbot_vix where date >= ''' + """'""" + start_date + """'"""
    elif index == 'VVIX':
        if contract == 'spot':
            query = '''select date,close from data_ohlc.cboe_vvix where date >= ''' + """'""" + start_date + """'"""
    elif index == '10Ybond':
        if contract == 'future':
            query = '''select date,expiry_date,close from data_future.cme_ty where date >= ''' + """'""" + start_date + """'"""
            sort_columns = ['date', 'expiry_date']
            add_underlying = True
        elif contract == 'spot':
            query = '''select * from data_future_cont.ty1 where date >= ''' + """'""" + start_date + """'"""
    if query is None:
        # Replaces the original bare 'except' wrapped around 'return data',
        # which relied on a NameError to detect an unsupported combination.
        raise KeyError('Data not find. Look at the argument allowed in the function import_data in the file data.py')
    data = extraction_data(link_engine, query)
    data.sort_values(sort_columns, inplace=True)
    data = data.set_index("date")
    if add_underlying:
        data['underlying'] = 0
    return data


if __name__ == '__main__':
    import_data('SP', 'spot', '2006-01-01')
|
[
"noreply@github.com"
] |
dxcv.noreply@github.com
|
3f510935494dd7cead655b91bd5e53778d5689d1
|
2d9a706cb899dfc355fe49dc6a37a0dc257b22fd
|
/test/crab_HIMB2_pixel_eff_sysEta_v1.py
|
9d910a1c9daa66e6f3f82a2a1547f4b45f121d17
|
[] |
no_license
|
BetterWang/QWCumuGap
|
b1f4d3169d2019d3d465ea985fed2094279b62b6
|
61beb88799fd3c18398061b64b849ad5a849871d
|
refs/heads/master
| 2020-04-04T22:25:33.686266
| 2018-03-16T19:27:01
| 2018-03-16T19:27:01
| 82,000,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
from CRABAPI.RawCommand import crabCommand
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
# NOTE: Python 2 script (print-statement syntax) driving CRAB3 job submission.
# 'config' below shadows the imported factory function -- standard CRAB idiom.
config = config()

# --- first task: positive-side systematic variation ------------------------
config.General.requestName = 'HIMB2_CumuGap_Pixel_eff_cent_sysPos_v1'
config.General.workArea = 'CrabArea'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'qwcumu_PbPb15_Pix_eff_pos_v1.py'
config.JobType.inputFiles = ['EffCorrectionsPixel_TT_pt_0_10_v2.root']
config.JobType.maxJobRuntimeMin = 2500
config.Data.inputDataset = '/HIMinimumBias2/HIRun2015-25Aug2016-v1/AOD'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 20
config.Data.outLFNDirBase = '/store/group/phys_heavyions/qwang/PbPb2015_cumu/'
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/HI/Cert_262548-263757_PromptReco_HICollisions15_JSON_v2.txt'
config.Data.publication = False
config.Data.useParent = False
config.Site.storageSite = 'T2_CH_CERN'
#config.Data.allowNonValidInputDataset = True
# Submission of the first (sysPos) task is currently disabled.
#try:
#    crabCommand('submit', config = config)
#except HTTPException as hte:
#    print "Failed submitting task: %s" % (hte.headers)
#except ClientException as cle:
#    print "Failed submitting task: %s" % (cle)

# --- second task: negative-side systematic variation (actually submitted) ---
# NOTE(review): this task is _v2 while the disabled one above is _v1 --
# confirm the version mismatch is intentional.
config.General.requestName = 'HIMB2_CumuGap_Pixel_eff_cent_sysNeg_v2'
config.JobType.psetName = 'qwcumu_PbPb15_Pix_eff_neg_v1.py'
try:
    crabCommand('submit', config = config)
except HTTPException as hte:
    print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
    print "Failed submitting task: %s" % (cle)
|
[
"BetterWang@gmail.com"
] |
BetterWang@gmail.com
|
01720e33170d4697953e0ec099bcda60e4576d6c
|
923f707341f7e6a4c86673c52ca796f40638619c
|
/809. Expressive Words.py
|
3080ddd64ba075863b8c0ce379c733da3c6944d6
|
[] |
no_license
|
Huijuan2015/leetcode_Python_2019
|
bb1e54801faa15ee3ef2a7bd7628b6a16033f7c7
|
36c584e8f92a0725bab7a567dfd10b918408627b
|
refs/heads/master
| 2020-04-22T13:31:55.203162
| 2020-03-10T00:00:58
| 2020-03-10T00:00:58
| 170,412,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
class Solution(object):
    def expressiveWords(self, S, words):
        """LeetCode 809: count words that can be "stretched" into S.

        A word is stretchy if, run by run, S has the same characters in the
        same order and each run in S is either the same length as the word's
        run or has length >= 3 (the result of stretching).

        :type S: str
        :type words: List[str]
        :rtype: int
        """
        # FIX: the original contained a bare (non-comment) Chinese text line
        # here, which is a SyntaxError.  Its meaning, as a comment: a
        # hand-rolled run-length encoding is used instead of a plain map/dict
        # because the ORDER of the distinct characters matters as well as the
        # per-run counts.
        def RLE(s):
            """Run-length encode s -> (key string, list of run lengths)."""
            prev = -1
            key = ""
            cnts = []
            for i in range(len(s)):
                # Close the current run at the last char or when the run ends.
                if i == len(s) - 1 or s[i] != s[i + 1]:
                    key += s[i]
                    cnts.append(i - prev)
                    prev = i
            return (key, cnts)

        def isExtended(skey, scnt, wkey, wcnt):
            """True if word (wkey, wcnt) can be stretched into (skey, scnt)."""
            if skey != wkey or len(skey) != len(wkey):
                return False
            for i in range(len(scnt)):
                c1, c2 = scnt[i], wcnt[i]
                if c2 > c1:
                    # The word has more of this char than S does.
                    return False
                if c1 < 3 and c1 != c2:
                    # Runs shorter than 3 in S cannot result from stretching.
                    return False
            return True

        skey, scnt = RLE(S)
        cnt = 0
        for word in words:
            wkey, wcnt = RLE(word)
            if isExtended(skey, scnt, wkey, wcnt):
                cnt += 1
        return cnt
|
[
"huijuan1991@hotmail.com"
] |
huijuan1991@hotmail.com
|
fa07ea6fbca874d31aa899db0aad1b1f300167e5
|
545f817485cbf75e5b791ef39c7ff25f66a8de29
|
/src/brasil/gov/portal/tests/test_externalcontent_content_type.py
|
2c1a81689ccd3e718b59d796319d9a806340079c
|
[] |
no_license
|
Assistevc/brasil.gov.portal
|
b5e85e749b19b3bc5080f1ed0b7ee727ad58bad0
|
54eb24e7e0ee81d74012a2af27bc8c9a8d56ef71
|
refs/heads/master
| 2021-01-15T19:05:01.335974
| 2014-12-17T13:46:55
| 2014-12-17T13:46:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,602
|
py
|
# -*- coding: utf-8 -*-
from brasil.gov.portal.browser.content.external import ExternalContentView
from brasil.gov.portal.content.external import IExternalContent
from brasil.gov.portal.testing import INTEGRATION_TESTING
from plone import api
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.schema import SCHEMA_CACHE
from plone.namedfile.file import NamedBlobImage
from zope.component import createObject
from zope.component import queryUtility
import os
import unittest2 as unittest
class ExternalContentTestCase(unittest.TestCase):
    """Integration tests for the ExternalContent content type."""

    layer = INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        with api.env.adopt_roles(['Manager', ]):
            self.folder = api.content.create(
                type='Folder',
                container=self.portal,
                id='test-folder'
            )
            # Invalidate schema cache
            SCHEMA_CACHE.invalidate('ExternalContent')
            self.content = api.content.create(
                type='ExternalContent',
                container=self.folder,
                id='external'
            )
        self.setup_content_data()

    def setup_content_data(self):
        """Load the fixture image used by the image-related tests."""
        path = os.path.dirname(__file__)
        # FIX: use a context manager so the file handle is closed
        # deterministically (the original open(...).read() leaked it).
        with open(os.path.join(path, 'files', 'image.jpg')) as image_file:
            image = image_file.read()
        self.image = NamedBlobImage(image, 'image/jpeg', u'image.jpg')

    def test_adding(self):
        self.assertTrue(IExternalContent.providedBy(self.content))

    def test_fti(self):
        fti = queryUtility(IDexterityFTI, name='ExternalContent')
        self.assertNotEqual(None, fti)

    def test_factory(self):
        fti = queryUtility(IDexterityFTI, name='ExternalContent')
        factory = fti.factory
        new_object = createObject(factory)
        self.assertTrue(IExternalContent.providedBy(new_object))

    def test_image_tag(self):
        content = self.content
        # Without an image there is no tag
        self.assertEqual(content.tag(), '')
        # Assign the image
        content.image = self.image
        self.assertIn('tileImage', content.tag())

    def test_image_thumb(self):
        content = self.content
        # Without an image there is no thumbnail
        self.assertEqual(content.image_thumb(), None)
        # Assign the image
        content.image = self.image
        self.assertTrue(content.image_thumb())
class ExternalContentViewTestCase(unittest.TestCase):
    """Integration tests for the default view of ExternalContent."""

    layer = INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        # Invalidate schema cache
        SCHEMA_CACHE.invalidate('ExternalContent')
        with api.env.adopt_roles(['Manager', ]):
            self.folder = api.content.create(
                type='Folder',
                container=self.portal,
                id='test-folder'
            )
            self.content = api.content.create(
                type='ExternalContent',
                container=self.folder,
                id='external'
            )

    def test_view(self):
        view = self.content.restrictedTraverse('@@view')
        self.assertTrue(isinstance(view, ExternalContentView))

    def test_view_manager(self):
        # Managers see the informational page rather than being redirected.
        with api.env.adopt_roles(['Manager', ]):
            view = self.content.restrictedTraverse('@@view')
            self.assertIn('The link address is', view())

    def test_view_anonymous(self):
        # A redirect occurs, which is not followed in this test.
        with api.env.adopt_roles(['Anonymous', ]):
            view = self.content.restrictedTraverse('@@view')
            self.assertIsNone(view())
|
[
"erico@simplesconsultoria.com.br"
] |
erico@simplesconsultoria.com.br
|
3a60668b274b8710c9d34d5244a5c0d11c03ec42
|
22712d4a3633c93c6173b826882b01174a4c6928
|
/sign/migrations/0001_initial.py
|
04cd63636e29f7b459fdb68d99865fb8594ccfe3
|
[] |
no_license
|
New2object/guest2
|
e5dcbdcfb6fbbe386a5da51e7b7a18f97de8815d
|
30edbe54261a074fdea10150b52cb59e3bc6d781
|
refs/heads/master
| 2022-12-23T22:27:44.275577
| 2018-03-23T14:03:45
| 2018-03-23T14:03:45
| 124,031,317
| 1
| 1
| null | 2022-12-10T19:20:25
| 2018-03-06T06:24:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-30 09:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``sign`` app: Event and Guest tables.

    Auto-generated by Django 1.10.5; avoid hand-editing once applied.
    A guest may register for a given event at most once per phone number
    (unique_together on (event, phone)).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('limit', models.IntegerField()),  # presumably max attendees -- confirm in app code
                ('status', models.BooleanField()),
                ('address', models.CharField(max_length=200)),
                ('start_time', models.DateTimeField(verbose_name='event_time')),
                ('create_time', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Guest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('realname', models.CharField(max_length=62)),
                ('phone', models.CharField(max_length=16)),
                ('email', models.EmailField(max_length=254)),
                ('sign', models.BooleanField()),  # presumably sign-in status -- confirm in app code
                ('create_time', models.DateTimeField(auto_now=True)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sign.Event')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='guest',
            unique_together=set([('event', 'phone')]),
        ),
    ]
|
[
"a794281961@126.com"
] |
a794281961@126.com
|
a008914d98ae2a6baab427010b3bfc9a8e14ee65
|
1beac95667f9236084dfecdf2550fb6e8a28b0b8
|
/backend/api/decapod_api/exceptions.py
|
c172c2796f82a2f416855d9af5c3ba696ff06535
|
[
"Apache-2.0"
] |
permissive
|
lihaijing/ceph-lcm
|
52b9d2fae24ad8b54a386cda4c528d93288d603d
|
d7c07fbb87dc170d5b8a0a5c8a2cf857f71ae466
|
refs/heads/master
| 2021-01-12T08:17:03.919876
| 2016-12-12T07:58:58
| 2016-12-12T07:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,709
|
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains exceptions specific for API."""
import flask.json
from werkzeug import exceptions
from decapod_common import exceptions as app_exceptions
class DecapodJSONMixin(app_exceptions.DecapodError, exceptions.HTTPException):
    """Basic JSON mixin for the werkzeug exceptions.

    Basic werkzeug exceptions return an HTML. This mixin
    forces them to return correct JSON.

    {
        "code": <numberical HTTP status code>,
        "error": <error ID>,
        "message": <description suitable to show to humans>
    }
    """

    # Overridden by subclasses that want an explicit error identifier;
    # when left as None the class name is used instead.
    error_name = None

    def get_description(self, environ=None):
        return self.description

    def get_body(self, environ=None):
        payload = {
            "code": self.code,
            "error": str(self.error_name or type(self).__name__),
            "message": self.get_description(environ),
        }
        return flask.json.dumps(payload)

    def get_headers(self, environ=None):
        return [("Content-Type", "application/json")]
class BadRequest(DecapodJSONMixin, exceptions.BadRequest):
    pass


class Unauthorized(DecapodJSONMixin, exceptions.Unauthorized):

    def get_headers(self, environ=None):
        # Add the WWW-Authenticate challenge expected by token-based clients.
        headers = super().get_headers(environ=environ)
        headers.append(("WWW-Authenticate", "Token realm=\"Application\""))
        return headers


class Forbidden(DecapodJSONMixin, exceptions.Forbidden):
    pass


class NotFound(DecapodJSONMixin, exceptions.NotFound):
    pass


class MethodNotAllowed(DecapodJSONMixin, exceptions.MethodNotAllowed):

    def get_headers(self, environ=None):
        # Merge the JSON content-type header with werkzeug's Allow header.
        headers = DecapodJSONMixin.get_headers(self, environ)
        headers.extend(exceptions.MethodNotAllowed.get_headers(self, environ))
        return headers


class NotAcceptable(DecapodJSONMixin, exceptions.NotAcceptable):
    pass


class InternalServerError(DecapodJSONMixin, exceptions.InternalServerError):
    pass


class CannotConvertResultToJSONError(InternalServerError):
    pass


class UnknownReturnValueError(InternalServerError):
    pass


class InvalidJSONError(BadRequest):
    # Raised with the list of JSON-schema validation error messages.

    def __init__(self, errors):
        super().__init__("\n".join(errors))


class ImpossibleToCreateSuchModel(BadRequest):
    description = (
        "It is impossible to create such model because it violates "
        "data model contracts."
    )


class CannotUpdateManagedFieldsError(BadRequest):
    description = "It is forbidden to update automanaged fields."


class UnknownUserError(BadRequest):
    description = "Unknown user with ID {0}"

    def __init__(self, user_id):
        super().__init__(self.description.format(user_id))


class CannotUpdateDeletedModel(BadRequest):
    """Exception which is raised if you are trying to update deleted model."""


class CannotDeleteRoleWithActiveUsers(BadRequest):
    """Exception raised on attempt to delete role with active users."""


class CannotUpdateModelWithSuchParameters(ImpossibleToCreateSuchModel):
    """Exception raised on attempt to save data which violates uniqueness."""


class CannotDeleteClusterWithServers(BadRequest):
    description = "Cluster still has servers"


class UnknownPlaybookError(BadRequest):
    description = "Unknown playbook {0}"

    def __init__(self, playbook_name):
        super().__init__(self.description.format(playbook_name))


class ServerListIsRequiredForPlaybookError(BadRequest):
    description = "Explicit server list is required for playbook {0}"

    def __init__(self, playbook_name):
        super().__init__(self.description.format(playbook_name))


class UnknownClusterError(BadRequest):
    description = "There is not cluster with ID {0}"

    def __init__(self, cluster_id):
        super().__init__(self.description.format(cluster_id))


class UnknownPlaybookConfiguration(BadRequest):
    description = (
        "There is no playbook configuration with ID {0} and "
        "version {1}"
    )

    def __init__(self, item_id, version):
        super().__init__(self.description.format(item_id, version))
|
[
"sarkhipov@mirantis.com"
] |
sarkhipov@mirantis.com
|
2c4b39edafd28a4c21b76214cd3c205f4ee1d683
|
d5c578256dc7d8f0bbd5c4b340e804c9d6676b90
|
/combine_A_and_B.py
|
e966b73df86d87a284b567626d54ac819d9b0d81
|
[] |
no_license
|
Bala93/Life_science
|
470728376a5ce37017bf9647d49b8fb2b93fcac6
|
fbd0f16ddde13e356269fe14c679af8e4005eb74
|
refs/heads/master
| 2021-09-17T16:15:21.356685
| 2018-07-03T19:14:49
| 2018-07-03T19:14:49
| 129,958,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,205
|
py
|
import os
import numpy as np
import cv2
import argparse
# CLI: pair up images with identical names (or _A/_B suffixes) from two
# directory trees and write them side by side into a third tree.
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
args = parser.parse_args()

# Echo the effective configuration.
for arg in vars(args):
    print('[%s] = ' % arg, getattr(args, arg))

# Each entry of fold_A is a split (e.g. train/val/test) mirrored in fold_B.
splits = os.listdir(args.fold_A)

for sp in splits:
    img_fold_A = os.path.join(args.fold_A, sp)
    img_fold_B = os.path.join(args.fold_B, sp)
    img_list = os.listdir(img_fold_A)
    if args.use_AB:
        # Only the *_A.* files drive the pairing; *_B.* names are derived.
        img_list = [img_path for img_path in img_list if '_A.' in img_path]
    num_imgs = min(args.num_imgs, len(img_list))
    print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
    img_fold_AB = os.path.join(args.fold_AB, sp)
    if not os.path.isdir(img_fold_AB):
        os.makedirs(img_fold_AB)
    print('split = %s, number of images = %d' % (sp, num_imgs))
    for n in range(num_imgs):
        name_A = img_list[n]
        path_A = os.path.join(img_fold_A, name_A)
        if args.use_AB:
            name_B = name_A.replace('_A.', '_B.')
        else:
            name_B = name_A
        path_B = os.path.join(img_fold_B, name_B)
        # Silently skip pairs where either side is missing.
        if os.path.isfile(path_A) and os.path.isfile(path_B):
            name_AB = name_A
            if args.use_AB:
                name_AB = name_AB.replace('_A.', '.')  # remove _A
            path_AB = os.path.join(img_fold_AB, name_AB)
            im_A = cv2.imread(path_A)
            im_B = cv2.imread(path_B)
            # im_A = cv2.imread(path_A, cv2.CV_LOAD_IMAGE_COLOR)
            # im_B = cv2.imread(path_B, cv2.CV_LOAD_IMAGE_COLOR)
            # Concatenate horizontally (axis 1): A on the left, B on the right.
            im_AB = np.concatenate([im_A, im_B], 1)
            cv2.imwrite(path_AB, im_AB)
|
[
"balamuralim.1993@gmail.com"
] |
balamuralim.1993@gmail.com
|
b73e66f56dd25716dad74184c383b3a7b077bf13
|
aa9fc66c8b94f05d4651f243f6f21799f4c1fd80
|
/jump-game-vi/jump-game-vi.py
|
4eadd56dcea8503cbfdedc446f3ea6d98917e497
|
[] |
no_license
|
baranee-18/Data-Structures-and-Algorithms
|
3cd739ba3c0710835d5995a6ccf2b44f612f8352
|
5074bac42b9323b8e7353d533355ece18dd5f5f1
|
refs/heads/main
| 2023-08-23T23:06:59.028649
| 2021-10-19T19:21:43
| 2021-10-19T19:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
class Solution:
    def maxResult(self, nums: List[int], k: int) -> int:
        """LeetCode 1696 (Jump Game VI).

        Starting at index 0 and jumping at most k steps forward each time,
        return the maximum achievable sum of visited values.

        Sliding-window maximum of the DP scores via a max-heap (negated
        values) with lazy deletion of out-of-window indices.

        FIX over the original: the old code re-pushed the heap top on every
        iteration even when nothing was popped, duplicating entries and
        growing the heap unnecessarily.
        """
        heap = []  # entries: (-score, index); heap[0] is the best in-window score
        score = 0
        for i, num in enumerate(nums):
            # Discard scores that can no longer reach index i (lazy deletion).
            while heap and heap[0][1] + k < i:
                heapq.heappop(heap)
            # At i == 0 the heap is empty: the starting score is nums[0] itself.
            best = -heap[0][0] if heap else 0
            score = num + best
            heapq.heappush(heap, (-score, i))
        return score
|
[
"sivakumar.sk11865.sk@gmail.com"
] |
sivakumar.sk11865.sk@gmail.com
|
f7f9d2048aac2ff8422cdb78315139cfe63f6cc3
|
87a9706379670da62739b3c1fbbdd75edb5107b8
|
/alien_invasion/scoreboard.py
|
11f4eb35474043303f80b3f5ed7dcbf980ac7b77
|
[] |
no_license
|
zxbzxb180/python_work
|
ba21ab74f842e0d560a8bb192bb8a874d356b9e1
|
6406024e011aa06d1bda78d97cfecc47f7f2058c
|
refs/heads/master
| 2022-12-12T23:53:36.887963
| 2020-03-04T07:20:29
| 2020-03-04T07:20:29
| 194,494,744
| 0
| 0
| null | 2022-11-22T03:54:47
| 2019-06-30T08:48:44
|
Python
|
GB18030
|
Python
| false
| false
| 985
|
py
|
#coding=gbk
import pygame.font
class Scoreboard():
    """Class that displays scoring information on screen."""

    def __init__(self,ai_settings,screen,stats):
        """Initialise the attributes involved in rendering the score."""
        self.screen = screen
        self.screen_rect = screen.get_rect()
        self.ai_settings = ai_settings
        self.stats = stats
        # Font settings used when rendering the score text.
        self.text_color = (30,30,30)
        self.font = pygame.font.SysFont(None,48)
        # Prepare the initial score image.
        self.prep_score()

    def prep_score(self):
        """Render the current score into an image."""
        score_str = str(self.stats.score)
        self.score_image = self.font.render(score_str,True,self.text_color,self.ai_settings.bg_color)
        # Anchor the score at the top-right corner of the screen.
        self.score_rect = self.score_image.get_rect()
        self.score_rect.right = self.screen_rect.right - 20
        self.score_rect.top = 20

    def show_score(self):
        """Blit the rendered score image onto the screen."""
        self.screen.blit(self.score_image,self.score_rect)
|
[
"616529325@qq.com"
] |
616529325@qq.com
|
c518c6954e9b0640ead738942f5c31574b6e8035
|
3c2d4ed20da3aa3e045b617c787df68c7d0ddd1d
|
/src/drugex/__main__.py
|
50f052ac1a040383a0178b2488fc5a6b739f347c
|
[
"MIT"
] |
permissive
|
cthoyt/DrugEx
|
699ea37a86bfd0ed06e5c5112a68d5bd46ed05af
|
9e4d31adb2c65d0afc852948f502c79dcf8308a3
|
refs/heads/master
| 2020-06-07T22:08:26.799943
| 2019-06-21T16:38:20
| 2019-06-21T16:38:20
| 193,103,470
| 0
| 0
|
MIT
| 2019-06-21T13:34:04
| 2019-06-21T13:34:03
| null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# -*- coding: utf-8 -*-
"""Entrypoint module, in case you use `python -m drugex`.
Why does this file exist, and why ``__main__``? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
from drugex.cli import main
if __name__ == '__main__':
main()
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
232c6641ae1d5833e25fbf1e833963f1e1d7e53d
|
956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1
|
/monk/system_unit_tests/gluon/test_block_mobilenet_v2_inverted_linear_bottleneck.py
|
805df2c924d747b85329f2e810c0f1bdc52a05e7
|
[
"Apache-2.0"
] |
permissive
|
Aanisha/monk_v1
|
c24279b2b461df9b3de2984bae0e2583aba48143
|
c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72
|
refs/heads/master
| 2022-12-29T00:37:15.320129
| 2020-10-18T09:12:13
| 2020-10-18T09:12:13
| 286,278,278
| 0
| 0
|
Apache-2.0
| 2020-08-09T16:51:02
| 2020-08-09T16:51:02
| null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
import os
import sys
sys.path.append("../../../../monk_v1");
sys.path.append("../../../monk/");
import psutil
from gluon_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import mxnet as mx
import numpy as np
from gluon.losses.return_loss import load_loss
def test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict):
    """Smoke-test the MobileNet-v2 inverted linear bottleneck block builder.

    Builds a one-block network with the gluon backend, runs a single random
    forward pass on CPU, and records pass/fail/skip in ``system_dict``.

    :param system_dict: mutable dict of test-run counters and failure lists.
    :return: the updated ``system_dict``.
    """
    forward = True;
    test = "test_block_mobilenet_v2_inverted_linear_bottleneck";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            network = [];
            # One bottleneck block: 64 output channels, bottleneck width 4,
            # stride 1 (spatial size preserved).
            network.append(gtf.mobilenet_v2_inverted_linear_bottleneck_block(output_channels=64, bottleneck_width=4, stride=1));
            gtf.Compile_Network(network, use_gpu=False);
            # Random input: batch 1, 64 channels, 64x64 spatial.
            x = np.random.rand(1, 64, 64, 64);
            x = mx.nd.array(x);
            y = gtf.system_dict["local"]["model"].forward(x);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # Broad catch is deliberate: the harness records failures
            # instead of aborting the whole suite.
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
|
[
"abhishek4273@gmail.com"
] |
abhishek4273@gmail.com
|
004aa18f2e1b9effc7eca12b7058f92597767819
|
9acbf0279c38d11e89f16831e9c43b49badabb00
|
/IPTVPlayer/tsiplayer/addons/resources/hosters/uptostream.py
|
314392de9337b64f4093974ba2f8058e0b501c6c
|
[] |
no_license
|
dgbkn/e2iPlayer
|
4f101b87bc5f67bf14690d012a62cbe8755ab82c
|
e5f413ea032eb9012569d9d149a368a3e73d9579
|
refs/heads/master
| 2023-05-15T05:01:18.204256
| 2021-06-06T18:03:42
| 2021-06-06T18:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,329
|
py
|
# -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
#
import re
import json
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.config import GestionCookie
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog, VSlog, isMatrix
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.premiumHandler import cPremiumHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.util import Unquote
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'UpToStream'
self.__sFileName = self.__sDisplayName
self.oPremiumHandler = None
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'uptostream'
def isDownloadable(self):
return True
def isJDownloaderable(self):
return True
def getPattern(self):
return ''
def __getIdFromUrl(self):
if self.__sUrl[-4:] in '.mp4.avi.mkv':
return self.__sUrl.split('/')[3]
return self.__sUrl.split('/')[-1]
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
self.__sUrl = self.__sUrl.replace('iframe/', '')
self.__sUrl = self.__sUrl.replace('http:', 'https:')
def checkSubtitle(self, sHtmlContent):
if sHtmlContent:
Files = []
lab = []
for aEntry in sHtmlContent:
if aEntry["label"] == "French":
url = aEntry["src"]
if not url.startswith('http'):
url = 'http:' + url
Files.append(url)
else:
continue
return Files
return False
def checkUrl(self, sUrl):
return True
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
self.oPremiumHandler = cPremiumHandler('uptobox')
premium = self.oPremiumHandler.isPremiumModeAvailable()
api_call = False
SubTitle = ""
if premium:
self.oPremiumHandler.Authentificate()
else:
dialog().VSok('Ce hoster demande un login, meme gratuit.')
return False, False
cookies = GestionCookie().Readcookie("uptobox")
import requests, re
s = requests.Session()
s.headers.update({"Cookie": cookies})
r = s.get('https://uptobox.com/api/streaming?file_code=' + self.__sUrl.split('/')[3]).json()
r1 = s.get(r["data"]["user_url"]).text
tok = re.search('token.+?;.+?;(.+?)&', r1).group(1)
r1 = s.post("https://uptobox.com/api/user/pin/validate?token=" + tok,json={"pin":r["data"]["pin"]}).json()
s.headers.update({"Referer": "https://uptobox.com/pin?pin=" + r["data"]["pin"]})
r = s.get(r["data"]["check_url"]).json()["data"]
sPattern = "'(.+?)': {(.+?)}"
oParser = cParser()
aResult = oParser.parse(r["streamLinks"], sPattern)
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog
url = []
qua = []
api_call = False
for aEntry in aResult[1]:
QUAL = aEntry[0]
d = re.findall("'u*(.+?)': u*'(.+?)'",aEntry[1])
for aEntry1 in d:
url.append(aEntry1[1])
qua.append(QUAL + ' (' + aEntry1[0] + ')')
# Affichage du tableau
api_call = dialog().VSselectqual(qua, url)
SubTitle = self.checkSubtitle(r["subs"])
if (api_call):
if SubTitle:
return True, api_call, SubTitle
else:
return True, api_call
return False, False
|
[
"echosmart76@gmail.com"
] |
echosmart76@gmail.com
|
b4b1cae9c7e54d74e89f8afd4bcbdbde27236d80
|
562d4bf000dbb66cd7109844c972bfc00ea7224c
|
/addons/advertising/controllers/controllers.py
|
1868e3ae1ee47ecfdd4a0d92df979f22c6b5bda9
|
[] |
no_license
|
Mohamed33/odoo-efact-11-pos
|
e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059
|
de38355aea74cdc643a347f7d52e1d287c208ff8
|
refs/heads/master
| 2023-03-10T15:24:44.052883
| 2021-03-06T13:25:58
| 2021-03-06T13:25:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class Advertising(http.Controller):
# @http.route('/advertising/advertising/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/advertising/advertising/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('advertising.listing', {
# 'root': '/advertising/advertising',
# 'objects': http.request.env['advertising.advertising'].search([]),
# })
# @http.route('/advertising/advertising/objects/<model("advertising.advertising"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('advertising.object', {
# 'object': obj
# })
|
[
"root@vmi414107.contaboserver.net"
] |
root@vmi414107.contaboserver.net
|
ed83154aac965d7020394db30fc7d33772351c78
|
94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af
|
/055.py
|
3426c8ddb88f7c434c1f5ca842561dae076bd58f
|
[] |
no_license
|
huosan0123/leetcode-py
|
f1ec8226bae732369d4e1989b99ab0ba4b4061c4
|
22794e5e80f534c41ff81eb40072acaa1346a75c
|
refs/heads/master
| 2021-01-25T11:48:17.365118
| 2019-09-12T15:45:34
| 2019-09-12T15:45:34
| 93,934,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums or len(nums)==1:
return True
pre = nums[0]
for i in range(1, len(nums)):
if pre == 0:
return False
else:
pre = max(pre-1, nums[i])
return True
|
[
"noreply@github.com"
] |
huosan0123.noreply@github.com
|
97e996c0fe83b4c2b8a1bafa79f0a29358a094de
|
904a87f73eb0e3902a738e823f959cbad2f68a82
|
/plotClass/plotting/plotGroups_mer.py
|
dc83b9a7dba4eeb9344b2b365d782974bbc46fe8
|
[] |
no_license
|
ashrafkasem/hepML1Lep
|
2ccf167432e7d2a1550991137b7a247e044af1b1
|
9ed3b73887b36f26b9d4ca0243eedd3cac0c420e
|
refs/heads/master
| 2021-07-11T22:31:18.498721
| 2020-08-31T21:30:10
| 2020-08-31T21:30:10
| 193,732,937
| 1
| 4
| null | 2020-05-18T06:55:53
| 2019-06-25T15:17:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,881
|
py
|
import ROOT
All_files = {
'DiLepTT' :
{
'files': ['TTJets_DiLepton','TTJets_LO_HT'] ,
'select' : '&& DiLep_Flag == 1',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF*nISRttweight',
"fill": ROOT.TAttFill(ROOT.kRed, 1001),
"line": ROOT.TAttLine(ROOT.kRed, ROOT.kSolid, 1),
"marker": None,
"Label" : "t#bar{t} ll + jets",
"Stackable" : True
},
'SemiLepTT' :
{
'files': ['TTJets_SingleLeptonFrom','TTJets_LO_HT'] ,
'select' : '&& semiLep_Flag == 1',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF*nISRttweight',
"fill": ROOT.TAttFill(ROOT.kBlue-7, 1001),
"line": ROOT.TAttLine(ROOT.kBlue-7, ROOT.kSolid, 1),
"marker": None,
"Label" : "t#bar{t} l + jets",
"Stackable" : True
},
'Others' :
{
'files': ["TBar_tWch","TBar_tch_powheg","T_tWch","T_tWch_ext","T_tch_powheg","VVTo","WWTo","WZTo","ZZTo",'TTW','TTZ',"QCD_","WJetsToLNu_HT","DYJetsToLL"],
'select' : '',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF',
"fill": ROOT.TAttFill(ROOT.kOrange-3, 1001),
"line": ROOT.TAttLine(ROOT.kOrange-3, ROOT.kSolid, 1),
"marker": None,
"Label" : "Others",
"Stackable" : True
},
'Data' :
{
'files': ['SingleElectron','SingleMuon','MET_Run'] ,
'select' : '',
'scale' : '1',
"fill": None,
"line": None,
"marker": ROOT.TAttMarker(ROOT.kBlack, ROOT.kFullCircle, 0.7),
"Label" : "Data",
"Stackable" : False
}
}
dPhiCut = '&& ((LT < 350 && fabs(dPhi) > 1.0) || (350 < LT && LT < 600 && fabs(dPhi) > 0.75) || (600 < LT && fabs(dPhi) > 0.5))'
AntidPhiCut = '&& ((LT < 350 && fabs(dPhi) < 1.0) || (350 < LT && LT < 600 && fabs(dPhi) < 0.75) || (600 < LT && fabs(dPhi) < 0.5))'
ntopCut = '&& nTop_Total_Combined >= 2 '
AntintopCut = '&& nTop_Total_Combined < 1'
oldbins = {"LT12HT01": "(LT < 450) && (HT < 1000) " ,
"LT12HT23": "(LT < 450) && (HT > 1000) && (HT < 1500)" ,
"LT12HT4i": "(LT < 450) && (HT > 1500) " ,
"LT3HT01" : "(LT > 450) && (LT < 600) && (HT < 1000)" ,
"LT3HT23" : "(LT > 450) && (LT < 600) && (HT > 1000) && (HT < 1500)" ,
"LT3HT4i" : "(LT > 450) && (LT < 600) && (HT > 1500)" ,
"LT4HT01" : "(LT > 600) && (LT < 750) && (HT < 1000)" ,
"LT4HT23" : "(LT > 600) && (LT < 750) && (HT > 1000) && (HT < 1500)" ,
"LT4HT4i" : "(LT > 600) && (LT < 750) && (HT > 1500)" ,
"LT5iHT0i": "(LT > 750)" }
|
[
"ashraf.mohamed@cern.ch"
] |
ashraf.mohamed@cern.ch
|
d51a4bd7dc7436067f703bec0084d907b03f9157
|
a5ba631dddaf2912c309601f8fbdd3c5b494fe20
|
/src/azure-cli-core/tests/test_logging.py
|
14cddef8a729a9767cf3f2dedc6a9e237df9fe80
|
[
"MIT"
] |
permissive
|
saurabsa/azure-cli-old
|
37471020cd2af9a53e949e739643299f71037565
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
refs/heads/master
| 2023-01-09T04:00:15.642883
| 2018-04-23T21:40:04
| 2018-04-23T21:40:04
| 130,759,501
| 0
| 0
|
NOASSERTION
| 2022-12-27T14:59:06
| 2018-04-23T21:33:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import azure.cli.core.azlogging as azlogging
class TestLogging(unittest.TestCase):
# When running verbose level tests, we check that argv is empty
# as we expect _determine_verbose_level to remove consumed arguments.
def test_determine_verbose_level_default(self):
argv = []
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 0
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_verbose(self):
argv = ['--verbose']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 1
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_debug(self):
argv = ['--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_v_v_v_default(self):
argv = ['--verbose', '--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
# We still consumed the arguments
self.assertFalse(argv)
def test_determine_verbose_level_other_args_verbose(self):
argv = ['account', '--verbose']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 1
self.assertEqual(actual_level, expected_level)
# We consumed 1 argument
self.assertEqual(argv, ['account'])
def test_determine_verbose_level_other_args_debug(self):
argv = ['account', '--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
# We consumed 1 argument
self.assertEqual(argv, ['account'])
def test_get_az_logger(self):
az_logger = azlogging.get_az_logger()
self.assertEqual(az_logger.name, 'az')
def test_get_az_logger_module(self):
az_module_logger = azlogging.get_az_logger('azure.cli.module')
self.assertEqual(az_module_logger.name, 'az.azure.cli.module')
if __name__ == '__main__':
unittest.main()
|
[
"saurabsa@microsoft.com"
] |
saurabsa@microsoft.com
|
a2b971c67d0456fbbf6fd22640af49583f80fce2
|
e62d13d578ebbe3acc3713e3eb783c81c785f2a8
|
/myems-api/core/version.py
|
f1ef5dfaf78eabc8ce8847750632ade6fd4f7c29
|
[
"MIT"
] |
permissive
|
tianlinzhong/myems
|
c25d7ece4f1853bb4415e2cedfdc8cb9cf8ff991
|
07dd1eb8060f4145be66c8d1a20b5e064a68281b
|
refs/heads/master
| 2023-03-25T05:24:05.057248
| 2021-03-28T09:06:45
| 2021-03-28T09:06:45
| 340,333,276
| 2
| 0
|
MIT
| 2021-02-28T14:00:06
| 2021-02-19T10:22:32
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
import falcon
import simplejson as json
class VersionItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
result = {"version": 'MyEMS 1.1.0 (Community Edition)',
"release-date": '202103018',
"website": "https://myems.io"}
resp.body = json.dumps(result)
|
[
"13621160019@163.com"
] |
13621160019@163.com
|
67a45c24fd1b92104a81304a32b145bd2a77baa6
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2715.py
|
53c87fa3b23e56f4d4d2ff037c02e48d28108491
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
import math
f = open('/home/dexter/input1.in', 'r')
cases = int(f.readline())
for k in range (0, cases):
a = f.readline()
itms=a.split()
out=0
for i in range(int(itms[0]),int(itms[1])+1):
x=str(i)
y =x[::-1]
if(y==x):
x=int(x)
if((math.sqrt(x)-int(math.sqrt(x))) == 0):
x=str(int(math.sqrt(x)))
y =x[::-1]
if(y==x):
out+=1
print "Case #"+str(k+1)+": "+str(out)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
80c70e681d1be2636cc0167b75f54d09254d1b14
|
301c85e8f2391896b11c9f4cf9f440283865593e
|
/armstrong/spiders/spider.py
|
1e2f822c32a2f498a48612c4df8490fd7bf8d844
|
[] |
no_license
|
hristo-grudev/armstrong
|
513e5639c347c8a3ffc8df3cafd5860d2ab3fb81
|
8bbcad8a72f58456638c84369f72c985c93e3cc9
|
refs/heads/main
| 2023-03-29T11:33:06.484366
| 2021-04-07T07:51:26
| 2021-04-07T07:51:26
| 355,458,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
import scrapy
from scrapy.loader import ItemLoader
from ..items import ArmstrongItem
from itemloaders.processors import TakeFirst
class ArmstrongSpider(scrapy.Spider):
name = 'armstrong'
start_urls = ['https://www.armstrong.bank/connect/news-and-updates']
def parse(self, response):
post_links = response.xpath('//div[@class="news-item-text"]')
for post in post_links:
url = post.xpath('.//a[@data-link-type-id="page"]/@href').get()
date = post.xpath('.//div[@class="news-item-text-date"]//text()[normalize-space()]').get()
if url:
yield response.follow(url, self.parse_post, cb_kwargs={'date': date})
def parse_post(self, response, date):
title = response.xpath('//h1/text()').get()
description = response.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "mb-6", " " ))]//text()[normalize-space()]').getall()
description = [p.strip() for p in description if '{' not in p]
description = ' '.join(description).strip()
item = ItemLoader(item=ArmstrongItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
3d7b94751b9c6a8ebf732eec60f889bc243c3977
|
08ddce92744c78432b69409d197ad1393ca685aa
|
/weixin/Bot/test2.py
|
3537329a673c998099f7e0f15bc0b992e3d3d01a
|
[] |
no_license
|
baliguan163/PythonDemo
|
71255eb21850134b4b6afb2eeed948cc34326e7a
|
c4fe1b6ea36bec2c531244ef95c809e17b64b727
|
refs/heads/master
| 2021-01-02T08:13:18.809740
| 2019-05-19T16:28:16
| 2019-05-19T16:28:16
| 98,963,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
#-*-coding:utf-8-*-
__author__ = 'Administrator'
from wxpy import *
# wxpy的好友统计功能非常好用,可以很方便地统计好友的地理位置分布和性别分布。
# 下面的代码中,强哥统计了下自己的好友的分布情况,并打印出人数最多的10个地
bot = Bot(cache_path=True)
friends_stat = bot.friends().stats()
# print(friends_stat)
friend_loc = [] # 每一个元素是一个二元列表,分别存储地区和人数信息
for province, count in friends_stat["province"].items():
if province != "":
friend_loc.append([province, count])
# 对人数倒序排序
friend_loc.sort(key=lambda x: x[1], reverse=True)
print('--------------统计人数最多的10个地区-------------')
# 打印人数最多的10个地区
for item in friend_loc[:10]:
print(item[0], item[1])
print('------------------统计性别分布-------------------')
# 统计性别分布的代码如下
for sex, count in friends_stat["sex"].items():
# 1代表MALE, 2代表FEMALE
if sex == 1:
print(" MALE %d" % count)
elif sex == 2:
print("FEMALE %d" % count)
# 定位群
company_group = bot.groups().search('优惠券')[0]
print(company_group)
boss = company_group.search('阿杜')[0] #定位老板
print(boss)
# 将老板的消息转发到文件传输助手
@bot.register(company_group)
def forward_boss_message(msg):
print(msg.member)
print(msg)
if msg.member == boss:
print('消息转发:' + msg['Text'])
# 堵塞线程
embed()
|
[
"baliguan163@163.com"
] |
baliguan163@163.com
|
386a8bf05a7ce8388ed78b86e6713dc8bb4e3535
|
aba442afba026d2130c4aeca863308ca26e7e472
|
/tabular/src/autogluon/tabular/__init__.py
|
132b85f2fd121822af13adfd0428350ff276ff5c
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/autogluon
|
c8fd03a9bf7624911b13e90239e9260dd8885ddf
|
6af92e149491f6e5062495d87306b3625d12d992
|
refs/heads/master
| 2023-08-21T15:16:53.202431
| 2023-08-11T20:15:31
| 2023-08-11T20:15:31
| 228,360,888
| 0
| 0
|
Apache-2.0
| 2019-12-16T10:25:32
| 2019-12-16T10:25:30
| null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from autogluon.common.features.feature_metadata import FeatureMetadata
from autogluon.common.utils.log_utils import _add_stream_handler
from autogluon.core.dataset import TabularDataset
try:
from .version import __version__
except ImportError:
pass
from .predictor import TabularPredictor
_add_stream_handler()
|
[
"noreply@github.com"
] |
stjordanis.noreply@github.com
|
c68cdb6fed4d1c16def268726b0762202f070da8
|
f44b4e41d3b64fc64dc8f28cce1a42aac5715530
|
/metrics/plastic_analysis.py
|
66002d70c98efae0fbcf9a1785ee1d8a245228ca
|
[
"Apache-2.0"
] |
permissive
|
jmribeiro/PLASTIC-Algorithms
|
d4ba4dbae9fea15a446e6557b9fe58f06b687464
|
c59ad567a906f320220a09caff64c4a6273151f8
|
refs/heads/main
| 2022-12-31T00:16:10.108189
| 2020-10-20T22:06:11
| 2020-10-20T22:06:11
| 305,774,055
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
from agents.plastic.PLASTICAgent import PLASTICAgent
from agents.plastic.model.LearningPLASTICModel import LearningPLASTICModel
from agents.plastic.model.LearntPLASTICModel import LearntPLASTICModel
from agents.teammates.GreedyAgent import GreedyAgent
from agents.teammates.TeammateAwareAgent import TeammateAwareAgent
import numpy as np
from yaaf.evaluation import Metric
import scipy.stats
class PLASTICTeammate(PLASTICAgent):
def __init__(self, type, num_teammates, world_size):
super(PLASTICTeammate, self).__init__("Plastic teammate", num_teammates, world_size)
if type == "greedy":
self._underlying_agent = GreedyAgent(0, world_size)
elif type == "teammate aware" or type == "mixed":
self._underlying_agent = TeammateAwareAgent(0, world_size)
else:
raise ValueError()
def select_action_according_to_model(self, pursuit_state, most_likely_model):
return self._underlying_agent.action(pursuit_state.features())
def setup_learning_prior(self):
return LearningPLASTICModel(self.num_teammates)
def _load_prior_team(self, directory, name):
return LearntPLASTICModel(directory, name, self.num_teammates)
class PLASTICAnalyzer(Metric):
def __init__(self):
super(PLASTICAnalyzer, self).__init__("PLASTIC Analyzer")
self._entropy = []
self._beliefs = []
self._team_names = None
def reset(self):
self._entropy = []
def __call__(self, timestep):
info = timestep.info
for key in info:
if "Plastic" in key or key == "Adhoc":
agent_info = info[key]
belief_distribution = agent_info["belief distribution"]
if self._team_names is None:
self._team_names = list(belief_distribution.keys())
beliefs = np.array([belief_distribution[team] for team in self._team_names])
entropy = scipy.stats.entropy(beliefs)
self._beliefs.append(beliefs)
self._entropy.append(entropy)
return self._entropy[-1]
def result(self):
return np.array(self._entropy)
def team_names(self):
return self._team_names
def beliefs(self):
return np.array(self._beliefs)
|
[
"jmribeiro77209@gmail.com"
] |
jmribeiro77209@gmail.com
|
0ae73847354ad0243e92bc20077f9c2eef00d8b6
|
6227637b2b3e13e2d17d7dd2c954e879bc6947a8
|
/configs/bash/keyring.py
|
5509f45c1543c39f7696c29c13788dfe38180959
|
[] |
no_license
|
Owensa/confs
|
28c01e57984a9f8187740a19d95d9c51844c7a1d
|
f247448fbba3d873460a4f99228f372230f1b1bc
|
refs/heads/master
| 2021-04-15T04:36:03.755459
| 2018-03-31T22:31:50
| 2018-03-31T22:31:50
| 126,733,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
#!/usr/bin/env/ python3
import os
#Get archive key and get rid of strange apt behavior
def fetch():
os.system("wget -q -O - https://archive.kali.org/archive-key.asc | apt-key add && apt-get update >> bootstrap_log.md")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
392c843a677b5ebc71e265798518ab247c504ee7
|
164b499e14412e7e5d0b1f917922873a7b5d072c
|
/studyNote/python-2/cmd_serve.py
|
87aace6bc1f982e3ab23341bf7b8f5656e6c00df
|
[] |
no_license
|
liangliang115715/pythonStudyNote
|
f55293b0ad2ded21dbb6938ac82f7bee77e724ef
|
c36ef8c032ee8d85570d0f2234a26370a3709402
|
refs/heads/master
| 2023-01-09T19:14:16.076798
| 2019-10-16T10:59:48
| 2019-10-16T10:59:48
| 215,523,473
| 0
| 0
| null | 2023-01-04T12:31:28
| 2019-10-16T10:42:51
|
Python
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
#_author:
#date:
import socket
import subprocess
# 创建socket对象
sk=socket.socket()
# 为socket对象提供ip地址和端口,然后绑定
adress=("127.0.0.1",8000)
sk.bind(adress)
# 监听设置端口 等待客户端的请求
sk.listen(2)
while True:
print("waiting.....")
conn, addr = sk.accept()
print(addr)
while True:
try:
data=conn.recv(1024)
except Exception:
break
if not data:
break
# 将子进程转到主进程,并将执行结果存入obj对象内
obj=subprocess.Popen(str(data,"utf8"),shell=True,stdout=subprocess.PIPE)
# obj对象内存储的执行结果读出
cmd_result=obj.stdout.read()
result_len=bytes(str(len(cmd_result)),"utf8")
conn.sendall(result_len)
conn.sendall(cmd_result)
|
[
"ll@123.com"
] |
ll@123.com
|
5f570981e26c2bac4086a1ffe4a40b861456aeb1
|
687928e5bc8d5cf68d543005bb24c862460edcfc
|
/nssrc/com/citrix/netscaler/nitro/resource/stat/network/rnatip_stats.py
|
ff47d65c3c79e5bbfd9e2bf5f9e1a3baa6292bb4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
mbs91/nitro
|
c6c81665d6abd04de8b9f09554e5e8e541f4a2b8
|
be74e1e177f5c205c16126bc9b023f2348788409
|
refs/heads/master
| 2021-05-29T19:24:04.520762
| 2015-06-26T02:03:09
| 2015-06-26T02:03:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,255
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rnatip_stats(base_resource) :
""" Statistics for RNAT ipaddress resource.
"""
def __init__(self) :
self._Rnatip = ""
self._clearstats = ""
self._iptd = 0
self._iprnattotrxbytes = 0
self._iprnatrxbytesrate = 0
self._iprnattottxbytes = 0
self._iprnattxbytesrate = 0
self._iprnattotrxpkts = 0
self._iprnatrxpktsrate = 0
self._iprnattottxpkts = 0
self._iprnattxpktsrate = 0
self._iprnattottxsyn = 0
self._iprnattxsynrate = 0
self._iprnatcursessions = 0
@property
def Rnatip(self) :
"""Specifies the NAT IP address of the configured RNAT entry for which you want to see the statistics. If you do not specify an IP address, this displays the statistics for all the configured RNAT entries.<br/>Minimum length = 1.
"""
try :
return self._Rnatip
except Exception as e:
raise e
@Rnatip.setter
def Rnatip(self, Rnatip) :
"""Specifies the NAT IP address of the configured RNAT entry for which you want to see the statistics. If you do not specify an IP address, this displays the statistics for all the configured RNAT entries.
"""
try :
self._Rnatip = Rnatip
except Exception as e:
raise e
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def iprnatrxpktsrate(self) :
"""Rate (/s) counter for iprnattotrxpkts.
"""
try :
return self._iprnatrxpktsrate
except Exception as e:
raise e
@property
def iprnattxpktsrate(self) :
"""Rate (/s) counter for iprnattottxpkts.
"""
try :
return self._iprnattxpktsrate
except Exception as e:
raise e
@property
def iprnattottxpkts(self) :
"""Packets sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxpkts
except Exception as e:
raise e
@property
def iptd(self) :
"""Traffic domain for ipaddr.
"""
try :
return self._iptd
except Exception as e:
raise e
@property
def iprnattottxbytes(self) :
"""Bytes sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxbytes
except Exception as e:
raise e
@property
def iprnatcursessions(self) :
"""Currently active RNAT sessions started from this IP address.
"""
try :
return self._iprnatcursessions
except Exception as e:
raise e
@property
def iprnatrxbytesrate(self) :
"""Rate (/s) counter for iprnattotrxbytes.
"""
try :
return self._iprnatrxbytesrate
except Exception as e:
raise e
@property
def iprnattotrxbytes(self) :
"""Bytes received on this IP address during RNAT sessions.
"""
try :
return self._iprnattotrxbytes
except Exception as e:
raise e
@property
def iprnattxsynrate(self) :
"""Rate (/s) counter for iprnattottxsyn.
"""
try :
return self._iprnattxsynrate
except Exception as e:
raise e
@property
def iprnattxbytesrate(self) :
"""Rate (/s) counter for iprnattottxbytes.
"""
try :
return self._iprnattxbytesrate
except Exception as e:
raise e
@property
def iprnattotrxpkts(self) :
"""Packets received on this IP address during RNAT sessions.
"""
try :
return self._iprnattotrxpkts
except Exception as e:
raise e
@property
def iprnattottxsyn(self) :
"""Requests for connections sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxsyn
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rnatip_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rnatip
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.Rnatip) :
return str(self.Rnatip)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all rnatip_stats resources that are configured on netscaler.
"""
try :
obj = rnatip_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.Rnatip = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class rnatip_response(base_response) :
def __init__(self, length=1) :
self.rnatip = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rnatip = [rnatip_stats() for _ in range(length)]
|
[
"bensassimaha@gmail.com"
] |
bensassimaha@gmail.com
|
783de3fff23bcde48f9048bc5d4df16607e2da6c
|
39157aabbbab0f7824138a31ee26fbf88853e601
|
/users/migrations/0001_initial.py
|
2aeb55036fc284fbf105e0ea7fffddaaa53843ef
|
[
"MIT"
] |
permissive
|
CecileSerene/uptv
|
825d9bb0dc7e44cc8e7224632403b82f30443b07
|
47bd79b34d409405396e9640c18578837d45e91b
|
refs/heads/master
| 2020-04-10T22:59:43.281114
| 2018-11-06T12:22:32
| 2018-11-06T12:22:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,952
|
py
|
# Generated by Django 2.1.2 on 2018-10-12 08:34
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.ImageField(blank=True, null=True, upload_to='')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"florimond.manca@gmail.com"
] |
florimond.manca@gmail.com
|
bc0bcf60649a00c4d9f7212b42af2077cb311863
|
d954e2f74d1186c8e35be8ea579656513d8d3b98
|
/rllib/connectors/agent/obs_preproc.py
|
93f016e1e22a3f7275f5ed6e1bc017d7b3ecd56e
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
vakker/ray
|
a865de214e60f9e62d61c03ae7ce55ad6030f84c
|
de238dd626a48a16c8b3cd006f3482db75f63a83
|
refs/heads/master
| 2023-01-23T22:30:44.839942
| 2022-10-23T01:05:48
| 2022-10-23T01:05:48
| 171,845,804
| 0
| 1
|
Apache-2.0
| 2023-01-14T08:01:04
| 2019-02-21T09:54:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
from typing import Any
from ray.rllib.connectors.connector import (
AgentConnector,
ConnectorContext,
register_connector,
)
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.typing import AgentConnectorDataType
from ray.util.annotations import PublicAPI
# Bridging between current obs preprocessors and connector.
# We should not introduce any new preprocessors.
# TODO(jungong) : migrate and implement preprocessor library in Connector framework.
@PublicAPI(stability="alpha")
class ObsPreprocessorConnector(AgentConnector):
"""A connector that wraps around existing RLlib observation preprocessors.
This includes:
- OneHotPreprocessor for Discrete and Multi-Discrete spaces.
- GenericPixelPreprocessor and AtariRamPreprocessor for Atari spaces.
- TupleFlatteningPreprocessor and DictFlatteningPreprocessor for flattening
arbitrary nested input observations.
- RepeatedValuesPreprocessor for padding observations from RLlib Repeated
observation space.
"""
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
if hasattr(ctx.observation_space, "original_space"):
# ctx.observation_space is the space this Policy deals with.
# We need to preprocess data from the original observation space here.
obs_space = ctx.observation_space.original_space
else:
obs_space = ctx.observation_space
self._preprocessor = get_preprocessor(obs_space)(
obs_space, ctx.config.get("model", {})
)
def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
d = ac_data.data
assert (
type(d) == dict
), "Single agent data must be of type Dict[str, TensorStructType]"
if SampleBatch.OBS in d:
d[SampleBatch.OBS] = self._preprocessor.transform(d[SampleBatch.OBS])
if SampleBatch.NEXT_OBS in d:
d[SampleBatch.NEXT_OBS] = self._preprocessor.transform(
d[SampleBatch.NEXT_OBS]
)
return ac_data
def to_state(self):
return ObsPreprocessorConnector.__name__, None
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return ObsPreprocessorConnector(ctx)
register_connector(ObsPreprocessorConnector.__name__, ObsPreprocessorConnector)
|
[
"noreply@github.com"
] |
vakker.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.