blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15ed13c2acac4ae704432839fec63024737531b7 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/bhrrae003/util.py | e84a57a6a42bd391b9d939d114e902f3ee049313 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | """Raeesa Behardien
BHRRAE003
Assignment 7
Question 2
02 May 2014"""
def create_grid(grid):
"""create a 4x4 grid"""
for a in range(4):
grid.append([0,0,0,0])
return grid
def print_grid (grid):
"""print out a 4x4 grid in 5-width columns within a box"""
print("+--------------------+")
for a in range(4):
symbol="|"
for b in range(4):
val=str(grid[a][b])
if val=='0':
val=' '
symbol+=val+' '*(5-(len(val)))
symbol+='|'
print(symbol)
print("+--------------------+")
def check_lost (grid):
"""return True if there are no 0 values and no adjacent values that are equal; otherwise False"""
for a in range(4):
for b in range(4):
if grid[a][b]==0:
return False
for c in range(3):
if grid[a][c]==grid[a][c+1] or grid[c][a]==grid[c+1][a]:
return False
else: return True
def check_won (grid):
"""return True if a value>=32 is found in the grid; otherwise False"""
for a in range(4):
for b in range(4):
if grid[a][b]>=32:
return True
else: return False
def copy_grid (grid):
"""return a copy of the grid"""
for a in range(4):
for b in range(4):
grid[a][b]=str(grid[a][b])
#mirror to copy grid
for c in range(4):
grid[c]="/".join(grid[c])
symbol="*".join(grid)
mirror=symbol.split("*")
for d in range(4):
mirror[d]=mirror[d].split('/')
for e in range(4):
for f in range(4):
mirror[e][f]=eval(mirror[e][f])
for g in range(4):
grid[g]=grid[g].split('/')
for h in range(4):
for i in range(4):
grid[h][i]=eval(grid[h][i])
return mirror
def grid_equal (grid1, grid2):
"""check if 2 grids are equal - return boolean value"""
if grid1==grid2:
return True
else:
return False
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
29d310e981165585e9df37a87690f77880cb6a57 | fd21d6384ba36aa83d0c9f05f889bdbf8912551a | /a10sdk/core/vrrp/vrrp_a_interface_trunk.py | 6bbfc3c25628a4e18801f09572c963b7c1a24d0d | [
"Apache-2.0"
] | permissive | 0xtobit/a10sdk-python | 32a364684d98c1d56538aaa4ccb0e3a5a87ecd00 | 1ea4886eea3a1609b2ac1f81e7326758d3124dba | refs/heads/master | 2021-01-18T03:08:58.576707 | 2014-12-10T00:31:52 | 2014-12-10T00:31:52 | 34,410,031 | 0 | 0 | null | 2015-04-22T19:05:12 | 2015-04-22T19:05:12 | null | UTF-8 | Python | false | false | 2,065 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class Trunk(A10BaseClass):
"""Class Description::
VRRP-A interface trunk.
Class trunk supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param both: {"description": "both a router and server interface", "format": "flag", "default": 0, "optional": true, "not-list": ["router-interface", "server-interface"], "type": "number"}
:param vlan: {"description": "VLAN ID", "format": "number", "optional": true, "maximum": 4094, "minimum": 1, "not": "no-heartbeat", "type": "number"}
:param router_interface: {"description": "interface to upstream router", "format": "flag", "default": 0, "optional": true, "not-list": ["server-interface", "both"], "type": "number"}
:param no_heartbeat: {"description": "do not send out heartbeat packet from this interface", "format": "flag", "default": 0, "optional": true, "not": "vlan", "type": "number"}
:param server_interface: {"description": "interface to real server", "format": "flag", "default": 0, "optional": true, "not-list": ["router-interface", "both"], "type": "number"}
:param trunk_val: {"description": "Ethernet Interface", "format": "number", "type": "number", "maximum": 16, "minimum": 1, "optional": false}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/vrrp-a/interface/trunk/{trunk_val}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "trunk_val"]
self.b_key = "trunk"
self.a10_url="/axapi/v3/vrrp-a/interface/trunk/{trunk_val}"
self.DeviceProxy = ""
self.both = ""
self.vlan = ""
self.router_interface = ""
self.no_heartbeat = ""
self.server_interface = ""
self.trunk_val = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
5286ddc0d57db0e1cb2d944f3d00ffae3b12fec8 | c73165911c1e9f62178376ae1e860f42bdaf74f6 | /backend/apps/plugin/serializers/base.py | c974a9ad47e7ce09d3b70d296fbc010faa35fef2 | [
"MIT"
] | permissive | codelieche/erp | aa5994b0c79e99c07aaf3ea440e4cf4389d433b7 | 96861ff63a63a93918fbd5181ffb2646446d0eec | refs/heads/main | 2022-12-22T13:30:23.398639 | 2021-10-22T16:26:28 | 2021-10-22T16:26:28 | 171,668,277 | 0 | 0 | MIT | 2022-12-10T02:32:50 | 2019-02-20T12:22:17 | Python | UTF-8 | Python | false | false | 343 | py | # -*- coding:utf-8 -*-
from rest_framework import serializers
class PluginInfoSerializer(serializers.Serializer):
"""
插件信息Serailizer
"""
code = serializers.CharField(help_text="插件Code")
name = serializers.CharField(help_text="插件名称")
description = serializers.CharField(help_text="插件描述")
| [
"codelieche@gmail.com"
] | codelieche@gmail.com |
0626e2f2f2b884a02c2f86b3e380d86b73fd69f4 | 1608a43a29821106d361ab80ce61255d4a715f3a | /src/pretix/base/migrations/0012_remove_order_tickets.py | 7486be3f8e8355f81dd467e5a8b6d22c1d9cf596 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ilmjstrope/pretix | 873f1bb14eb1f8bf63de2d98295655ccea2c734a | 7fc56b77db16e7e0783a4ba52b8ed5ef09ce9558 | refs/heads/master | 2021-01-18T00:42:02.873888 | 2015-09-29T14:46:45 | 2015-09-29T14:46:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0011_auto_20150915_2020'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='tickets',
),
]
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
447d2060908ab3a461a172209dc183a9fae81b6d | b0532e7e48729702db60918a8ea44b72319dadc7 | /roomai/kuhnpoker/KuhnPokerAction.py | b7dcdecf26880b932be755ad1a6bc439b54099d5 | [
"MIT"
] | permissive | abcdcamey/RoomAI | 7186b39d2d55dafa98cf40288b73d2977087da41 | fe884645b65bff6205d089d24c508b5a37dfdf3b | refs/heads/master | 2020-03-25T20:16:12.254187 | 2018-08-02T15:33:39 | 2018-08-02T15:33:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import roomai.common
class KuhnPokerAction(roomai.common.AbstractAction):
'''
The KuhnPoker action used by the normal players. There are only two actions: bet and check. Examples of usages: \n
>> import roomai.kuhnpoker\n
>> action = roomai.kuhnpoker.KuhnPokerAction.lookup("bet")\n
>> action.key\n
"bet"\n
>> action = roomai.kuhnpoker.KuhnPokerAction.lookup("check")\n
>> action.key\n
"check"\n
'''
def __init__(self, key):
if key not in ["check","bet"]:
raise ValueError("The key for KuhnPokerAction must be in [\"check\",\"bet\"]")
super(KuhnPokerAction,self).__init__(key)
self.__key__ = key
def __get_key__(self):
return self.__key__
key = property(__get_key__, doc="The key of the KuhnPoker action, \"bet\" or \"check\".")
@classmethod
def lookup(cls, key):
return AllKuhnActions[key]
def __deepcopy__(self, memodict={}, newinstance = None):
return KuhnPokerAction.lookup(self.key)
AllKuhnActions = {"bet":KuhnPokerAction("bet"),"check":KuhnPokerAction("check")}
| [
"lili1987mail@gmail.com"
] | lili1987mail@gmail.com |
44392f80ccc1f2b66ba022fbbfaedec999972af8 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7580_run2L6.py | e7e36dc227bc5518683dc0475d734610c58036b9 | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7580', 'run2L6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2961/E2961_e4354087/s4409419_1904_2L6_s24', '/ifs/scratch/pimri/soccog/test_working/7580/run2L6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7580/run2L6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7580/run2L6'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', '7580_run2L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', '7580_run2L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"katherine@Katherines-MacBook-Pro.local"
] | katherine@Katherines-MacBook-Pro.local |
4a2279505e0d062e31700ce37d3373c049a1adec | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_001/ch35_2019_06_06_00_27_42_702661.py | 1c0d89cb25c60940d66f046a84d71724d1ce4de7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | deposito_inicial = float(input("Depósito inicial: "))
deposito_mensal = float(input("Depósito mensal: "))
taxa_de_juros = float(input("Taxa de juros: "))
total = deposito_inicial
juros = taxa_de_juros/100 + 1
mes = 0
while mes < 24:
total = total*juros + deposito_mensal
mes += 1
print("Saldo do mês {0} é de R${1:.2f}".format(mes, total))
print ("Total de rendimentos = R${0:.2f}".format(total-deposito_inicial-deposito_mensal*23)) | [
"you@example.com"
] | you@example.com |
136a3ffeda37fe653bc8b661374d35eefd307b4a | 71501709864eff17c873abbb97ffabbeba4cb5e3 | /llvm14.0.4/lldb/test/API/functionalities/scripted_process/dummy_scripted_process.py | 67850cf57a73dd66d6f168913268a148917aec6d | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | LEA0317/LLVM-VideoCore4 | d08ba6e6f26f7893709d3285bdbd67442b3e1651 | 7ae2304339760685e8b5556aacc7e9eee91de05c | refs/heads/master | 2022-06-22T15:15:52.112867 | 2022-06-09T08:45:24 | 2022-06-09T08:45:24 | 189,765,789 | 1 | 0 | NOASSERTION | 2019-06-01T18:31:29 | 2019-06-01T18:31:29 | null | UTF-8 | Python | false | false | 2,858 | py | import os,struct, signal
from typing import Any, Dict
import lldb
from lldb.plugins.scripted_process import ScriptedProcess
from lldb.plugins.scripted_process import ScriptedThread
class DummyScriptedProcess(ScriptedProcess):
def __init__(self, target: lldb.SBTarget, args : lldb.SBStructuredData):
super().__init__(target, args)
self.threads[0] = DummyScriptedThread(self, None)
def get_memory_region_containing_address(self, addr: int) -> lldb.SBMemoryRegionInfo:
return None
def get_thread_with_id(self, tid: int):
return {}
def get_registers_for_thread(self, tid: int):
return {}
def read_memory_at_address(self, addr: int, size: int) -> lldb.SBData:
data = lldb.SBData().CreateDataFromCString(
self.target.GetByteOrder(),
self.target.GetCodeByteSize(),
"Hello, world!")
return data
def get_loaded_images(self):
return self.loaded_images
def get_process_id(self) -> int:
return 42
def should_stop(self) -> bool:
return True
def is_alive(self) -> bool:
return True
def get_scripted_thread_plugin(self):
return DummyScriptedThread.__module__ + "." + DummyScriptedThread.__name__
class DummyScriptedThread(ScriptedThread):
def __init__(self, process, args):
super().__init__(process, args)
def get_thread_id(self) -> int:
return 0x19
def get_name(self) -> str:
return DummyScriptedThread.__name__ + ".thread-1"
def get_state(self) -> int:
return lldb.eStateStopped
def get_stop_reason(self) -> Dict[str, Any]:
return { "type": lldb.eStopReasonSignal, "data": {
"signal": signal.SIGINT
} }
def get_stackframes(self):
class ScriptedStackFrame:
def __init__(idx, cfa, pc, symbol_ctx):
self.idx = idx
self.cfa = cfa
self.pc = pc
self.symbol_ctx = symbol_ctx
symbol_ctx = lldb.SBSymbolContext()
frame_zero = ScriptedStackFrame(0, 0x42424242, 0x5000000, symbol_ctx)
self.frames.append(frame_zero)
return self.frame_zero[0:0]
def get_register_context(self) -> str:
return struct.pack(
'21Q', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)
def __lldb_init_module(debugger, dict):
if not 'SKIP_SCRIPTED_PROCESS_LAUNCH' in os.environ:
debugger.HandleCommand(
"process launch -C %s.%s" % (__name__,
DummyScriptedProcess.__name__))
else:
print("Name of the class that will manage the scripted process: '%s.%s'"
% (__name__, DummyScriptedProcess.__name__)) | [
"kontoshi0317@gmail.com"
] | kontoshi0317@gmail.com |
b7d9ff78558e217ce2ba72d504a2fc2154ae91b1 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNovelGilegatiCom.py | 555142d8fee58800bc37a387984e5f7fdc425216 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 519 | py | def extractNovelGilegatiCom(item):
'''
Parser for 'novel.gilegati.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Spirit Conductor', 'Spirit Conductor', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | [
"something@fake-url.com"
] | something@fake-url.com |
25c4685c444ce65edcdfff005e0060f97157f3b3 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/isosurface/colorbar/_tickcolor.py | 8d235833cab3bc7fb1364a4236a3091d952369eb | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 427 | py | import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="tickcolor", parent_name="isosurface.colorbar", **kwargs
):
super(TickcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
4f9b132d0127390b4ded48630da6093bf8a6a6c2 | 5946112229fe1d9a04b7536f076a656438fcd05b | /dev_env/lib/python3.8/site-packages/pygments/console.py | 6c024a8d3484cecf9ff30eea0e7135c8eace379a | [] | no_license | Gear-Droid/openCV_study_project | 3b117967eb8a28bb0c90088e1556fbc1d306a98b | 28c9a494680c4a280f87dd0cc87675dfb2262176 | refs/heads/main | 2023-05-14T14:27:42.284265 | 2021-06-05T00:16:09 | 2021-06-05T00:16:09 | 307,807,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "red", "green", "yellow", "blue",
"magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
"brightmagenta", "brightcyan", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%im" % (60 + x)
x += 1
del d, l, x
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
| [
"Vladi003@yandex.ru"
] | Vladi003@yandex.ru |
0f29d4819c4edbbd216beaccd65020d52f2aab4c | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/oinam_singh/myprogram/dfEx4.py | e6046a036d10a2b7017578704d751ac80e542276 | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import pandas as pd
data1={'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']}
df1=pd.DataFrame(data1,index=[0,1,2,3])
data2={'A':['A4','A5','A6','A7'],
'B':['B4','B5','B6','B7'],
'C':['C4','C5','C6','C7'],
'D':['D4','D5','D6','D7']
}
df2=pd.DataFrame(data2,index=[4,5,6,7])
data3={'A':['A8','A9','A10','A11'],
'B':['B8','B9','B10','B11'],
'C':['C8','C9','C10','C11'],
'D':['D8','D9','D10','D11']}
df3=pd.DataFrame(data3,index=[8,9,10,11])
dcon=pd.concat([df1,df2,df3])
print(dcon)
dcon1=pd.concat([df1,df2,df3],axis=1)
print(dcon1) | [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
86c175d1f1af29f44d196cc3b3948293dcccab2a | 01dad4d1d2ffaf2fa070e99fe828d42f59a9f9d1 | /src/pycrop2ml_ui/packages/SQ_Energy_Balance/src/openalea/Netradiationequivalentevaporation.py | 9b16174bb6d9032e26b78f9a1441551e454fadc7 | [
"BSD-3-Clause",
"MIT"
] | permissive | AgriculturalModelExchangeInitiative/Pycrop2ml_ui | 5e210facf9689348bb57c16060967118b7c5f49a | 3d5d2b87a74f0be306056b71808286922fef2945 | refs/heads/master | 2023-06-24T13:52:39.933728 | 2023-06-17T00:17:26 | 2023-06-17T00:17:26 | 193,912,881 | 0 | 4 | MIT | 2023-02-25T13:26:57 | 2019-06-26T13:44:34 | Jupyter Notebook | UTF-8 | Python | false | false | 2,389 | py | # coding: utf8
import numpy
from math import *
def model_netradiationequivalentevaporation(lambdaV = 2.454,
netRadiation = 1.566):
"""
- Description:
* Title: NetRadiationEquivalentEvaporation Model
* Author: Pierre Martre
* Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
Evapotranspiration and canopy and soil temperature calculations
* Institution: INRA/LEPSE Montpellier
* Abstract: It is given by dividing net radiation by latent heat of vaporization of water
- inputs:
* name: lambdaV
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 10
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 2.454
** inputtype : parameter
** unit : MJ kg-1
** description : latent heat of vaporization of water
* name: netRadiation
** min : 0
** default : 1.566
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : state
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m-2 d-1
** description : net radiation
- outputs:
* name: netRadiationEquivalentEvaporation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : g m-2 d-1
** description : net Radiation in Equivalent Evaporation
"""
netRadiationEquivalentEvaporation = netRadiation / lambdaV * 1000.0
return netRadiationEquivalentEvaporation | [
"ahmedmidingoyi@yahoo.fr"
] | ahmedmidingoyi@yahoo.fr |
64a511b95ea60b2cca1c7f11eb3400f1df6d6211 | 6a61667e176b06ccdef07e84d79b382b2fb491bb | /app/interviews/tests/views/interview.py | d5b9842a2c394d0c54c84f5d15b493e834871cf6 | [] | no_license | vsokoltsov/Interview360Server | 333f08f13b33ef88928b3e4b844f60e72ebec809 | 252b0ebd77eefbcc945a0efc3068cc3421f46d5f | refs/heads/master | 2022-12-11T05:38:01.310133 | 2019-03-24T17:47:09 | 2019-03-24T17:47:09 | 95,320,167 | 2 | 3 | null | 2022-12-08T04:54:08 | 2017-06-24T20:09:08 | Python | UTF-8 | Python | false | false | 3,216 | py | from . import APITestCase, datetime, Token, Company, HR, CANDIDATE
import ipdb
class InterviewViewSetTests(APITestCase):
"""Tests for InterviewViewSet class."""
fixtures = [
"skill.yaml",
"user.yaml",
"auth_token.yaml",
"company.yaml",
"vacancy.yaml",
"interview.yaml"
]
def setUp(self):
"""Set up test dependencies."""
self.company = Company.objects.first()
date = datetime.datetime.now() + datetime.timedelta(days=10)
self.hr = self.company.get_employees_with_role(HR)[-1]
self.vacancy = self.company.vacancy_set.first()
self.candidate = self.company.get_employees_with_role(CANDIDATE)[-1]
self.interview = self.vacancy.interviews.first()
date = datetime.datetime.now() + datetime.timedelta(days=10)
self.token = Token.objects.get(user=self.hr)
self.candidate_token = Token.objects.get(user=self.candidate)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.form_data = {
'candidate_email': self.candidate.email,
'vacancy_id': self.vacancy.id,
'interviewee_ids': [
self.hr.email
],
'assigned_at': date
}
self.url = "/api/v1/companies/{}/interviews/".format(self.company.id)
def test_success_list_receiving(self):
"""Test success receiving list of the interviews."""
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
# TODO Fix after rebuilding interview tests with factory
# def test_success_retrieve_action(self):
# """Test success receiving detail interview."""
#
# self.client.credentials(
# HTTP_AUTHORIZATION='Token ' + self.candidate_token.key
# )
# response = self.client.get(
# self.url + "{}/".format(self.interview.id), format='json'
# )
# self.assertEqual(response.status_code, 200)
def test_success_interview_creation(self):
"""Test success creation of the interview."""
response = self.client.post(self.url, self.form_data, format='json')
self.assertEqual(response.status_code, 201)
self.assertTrue('interview' in response.data)
def test_failed_interview_creation(self):
"""Test failed creation of the interview."""
response = self.client.post(self.url, {}, format='json')
self.assertEqual(response.status_code, 400)
def test_success_interview_update(self):
"""Test success Interview's instance update."""
response = self.client.put(
self.url + "{}/".format(self.interview.id), self.form_data,
format='json'
)
self.assertEqual(response.status_code, 200)
self.assertTrue('interview' in response.data)
def test_success_interview_delete(self):
"""Test success Interview's instance delete."""
response = self.client.delete(
self.url + "{}/".format(self.interview.id), format='json'
)
self.assertEqual(response.status_code, 204)
| [
"vforvad@gmail.com"
] | vforvad@gmail.com |
ef80458b3c26c42b8ba73347abdde6d49679144f | 7db93c328243cd2f6ffcabb66b0d148bb0e3d198 | /lintcode/07BinaryTree/155MinDepthOfBinaryTree.py | e838e2337dde723bea7750ab777d5826379f940f | [
"MIT"
] | permissive | zhaoxinlu/leetcode-algorithms | 62cc67efdc1b0e8514c83bb7643b369b4f681948 | f5e1c94c99628e7fb04ba158f686a55a8093e933 | refs/heads/master | 2021-05-11T23:47:43.385660 | 2018-04-25T08:27:57 | 2018-04-25T08:27:57 | 117,520,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-10
算法思想: 二叉树的最小深度
"""
"""
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: The root of binary tree
@return: An integer
"""
def minDepth(self, root):
# write your code here
if not root:
return 0
if root.left == None and root.right == None:
return 1
if root.left:
left = self.minDepth(root.left)
else:
return self.minDepth(root.right) + 1
if root.right:
right = self.minDepth(root.right)
else:
return left + 1
return min(left, right) + 1 | [
"446571703@qq.com"
] | 446571703@qq.com |
cf70a287c7007e7788a1deba514996acca6a6361 | d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1 | /BioinformaticsStronghold/inod/inod.py | f56d33de9bff5554c7a1f662eb81fcda0118c1c3 | [] | no_license | dswisher/rosalind | d6af5195cdbe03adb5a19ed60fcbf8c05beac784 | 4519740350e47202f7a45ce70e434f7ee15c6afc | refs/heads/master | 2021-08-09T02:58:17.131164 | 2017-11-12T01:26:26 | 2017-11-12T01:26:26 | 100,122,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py |
import sys
if len(sys.argv) != 2:
print "Enter the number of leaf nodes."
sys.exit(1)
n = int(sys.argv[1])
print n - 2
| [
"big.swish@gmail.com"
] | big.swish@gmail.com |
335adfd99ad95714abcd6661f54e15f6570b44c8 | 6bb80d482bfd0cd5feb6f2d37c7235a27b3466d6 | /pretrained-model/multispeaker-separation/fastsep-4-mel.py | 1c1ff8e391e1667dc0c0bdcf0ef294f05dbbc4d2 | [
"MIT"
] | permissive | dakmatt/malaya-speech | deadb00e1aa8a03593721c26457f35158e67d96d | 957cfb1952760c30d3b4a2a2e60b7f142394cbd3 | refs/heads/master | 2023-04-03T13:56:53.675046 | 2021-04-19T03:31:40 | 2021-04-19T03:31:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,562 | py | import os
import warnings
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
warnings.filterwarnings('ignore')
import tensorflow as tf
import malaya_speech
import numpy as np
import IPython.display as ipd
import matplotlib.pyplot as plt
import malaya_speech.augmentation.waveform as augmentation
from malaya_speech.train.model import fastsplit, fastspeech, fastvc
from malaya_speech.train.model import sepformer_old as sepformer
from malaya_speech.utils import tf_featurization
import malaya_speech.train as train
import random
import pickle
from glob import glob
from sklearn.utils import shuffle
sr = 22050
speakers_size = 4
def get_data(combined_path, speakers_size = 4, sr = 22050):
with open(combined_path, 'rb') as fopen:
combined = pickle.load(fopen)
y = []
for i in range(speakers_size):
with open(combined_path.replace('combined', str(i)), 'rb') as fopen:
y_ = pickle.load(fopen)
y.append(y_)
return combined, y
def to_mel(y):
mel = malaya_speech.featurization.universal_mel(y)
mel[mel <= np.log(1e-2)] = np.log(1e-2)
return mel
def generate():
combined = glob('split-speaker-22k-train/combined/*.pkl')
while True:
combined = shuffle(combined)
for i in range(len(combined)):
x, y = get_data(combined[i])
yield {'combined': x, 'y': y, 'length': [len(x)]}
def get_dataset(batch_size = 8):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{'combined': tf.float32, 'y': tf.float32, 'length': tf.int32},
output_shapes = {
'combined': tf.TensorShape([None, 80]),
'y': tf.TensorShape([speakers_size, None, 80]),
'length': tf.TensorShape([None]),
},
)
dataset = dataset.padded_batch(
batch_size,
padded_shapes = {
'combined': tf.TensorShape([None, 80]),
'y': tf.TensorShape([speakers_size, None, 80]),
'length': tf.TensorShape([None]),
},
padding_values = {
'combined': tf.constant(np.log(1e-2), dtype = tf.float32),
'y': tf.constant(np.log(1e-2), dtype = tf.float32),
'length': tf.constant(0, dtype = tf.int32),
},
)
return dataset
return get
total_steps = 10000000
def model_fn(features, labels, mode, params):
lengths = features['length'][:, 0]
config = malaya_speech.config.fastspeech_config
dim = 256
config['encoder_hidden_size'] = dim
config['decoder_hidden_size'] = dim
config['encoder_num_hidden_layers'] = 4
config['encoder_num_attention_heads'] = 4
config = fastspeech.Config(vocab_size = 1, **config)
transformer = lambda: sepformer.Encoder_FastSpeech(
config.encoder_self_attention_params
)
decoder = lambda: fastvc.Decoder(config.decoder_self_attention_params)
model = sepformer.Model_Mel(
transformer, transformer, decoder, activation = None
)
logits = model(features['combined'], lengths)
outputs = tf.transpose(logits, [1, 2, 0, 3])
loss = fastsplit.calculate_loss(
features['y'], outputs, lengths, C = speakers_size
)
tf.identity(loss, 'total_loss')
tf.summary.scalar('total_loss', loss)
global_step = tf.train.get_or_create_global_step()
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr = 0.0001,
num_train_steps = total_steps,
num_warmup_steps = 100000,
end_learning_rate = 0.00001,
weight_decay_rate = 0.001,
beta_1 = 0.9,
beta_2 = 0.98,
epsilon = 1e-6,
clip_norm = 1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode = mode, loss = loss, train_op = train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode = tf.estimator.ModeKeys.EVAL, loss = loss
)
return estimator_spec
train_hooks = [tf.train.LoggingTensorHook(['total_loss'], every_n_iter = 1)]
train_dataset = get_dataset()
save_directory = 'split-speaker-sepformer-mel'
train.run_training(
train_fn = train_dataset,
model_fn = model_fn,
model_dir = save_directory,
num_gpus = 1,
log_step = 1,
save_checkpoint_step = 3000,
max_steps = total_steps,
train_hooks = train_hooks,
eval_step = 0,
)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
a939e1424dc025b49be00eec0aa30b151b213231 | caf644aa3e6aa7551567f806481a0465870b7ad8 | /login/migrations/0001_initial.py | c58190f1c6d91c015ccb8552792bb1b1bcd7f4ff | [] | no_license | 270466585/restudy_dj | 86b79ec5924c9c998a2f6841a64509df21ccd885 | ba7295ecfd947e475cb328334cc70d68a49c3e51 | refs/heads/master | 2020-04-18T12:11:13.041477 | 2019-01-25T10:13:34 | 2019-01-25T10:13:34 | 167,526,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 2.1.5 on 2019-01-25 05:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the login app: creates the custom User table."""
    # First migration of this app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # Surrogate primary key generated by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                # NOTE(review): plain CharField -- presumably hashed by the
                # application layer before saving; confirm before reuse.
                ('password', models.CharField(max_length=256)),
                ('email', models.EmailField(max_length=254, unique=True)),
                # Two-choice field; stored values are 'male' / 'female'.
                ('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
                # Creation timestamp, set once on insert.
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                # Newest users first.
                'ordering': ['-c_time'],
            },
        ),
    ]
| [
"27066585@qq.com"
] | 27066585@qq.com |
2a4db2a394411b0b979fb3fcab54c78eda1e0084 | 34d6ec6c9a459ab592f82137927107f967831400 | /week01/6-plus-one.py | 0ba349debae40b719c8d4e01eaa728d962cee038 | [
"MIT"
] | permissive | MiracleWong/algorithm-learning-camp | 228605311597dc3c29f73d4fb6b7abedc65d05a7 | aa5bee8f12dc25992aaebd46647537633bf1207f | refs/heads/master | 2023-07-15T21:34:11.229006 | 2021-09-05T09:06:16 | 2021-09-05T09:06:16 | 379,647,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
digits = [str(i) for i in digits]
num = int(''.join(digits))
num += 1
num = str(num)
res = []
for i in num:
res.append(int(i))
return res | [
"cfwr1991@126.com"
] | cfwr1991@126.com |
d50dfca35776427690928466b7cbd9a5e88a3c3d | 1a80fa7faf79b34c6eff3fa08226964c04bba0c7 | /centipede.py | b8d68543cb206384d962841f40fd72365e48ca1e | [
"MIT"
] | permissive | brentru/CircuitPython_Centipede_Chromebook | 4d274efd375c59b7197f7b4dd35948a7e85a53ab | b4972aadbfb3890b7b9137373f5c11ae7dd8a727 | refs/heads/master | 2021-01-25T10:00:16.383835 | 2018-02-28T20:20:03 | 2018-02-28T20:20:03 | 123,333,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | """
Centipede for Chromebook for CircuitPython
Copyright (c) 2018, Brent Rubell for Adafruit Industries
Centipede_for_Chromebook_Enrollment by Amplified_Labs
Copyright (c) 2016, Amplified IT
See the full description at http://labs.amplifiedit.com/centipede
Support forums are available at https://plus.google.com/communities/100599537603662785064
Published under an MIT License https://opensource.org/licenses/MIT
"""
import time
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import board
import neopixel
import digitalio
# Modify the following to fit WiFi/Enrollment credentials:
wifi_name = "adafruit_ssid"
wifi_pass = "adafruit_password"
"""
wifi_security options:
0 = open
1 = WEP
2 = WPA
"""
wifi_security = 2
username = "circuit"
pasword = "python"  # NOTE(review): misspelling kept -- unseen code may reference it; confirm before renaming
kbd = Keyboard()
# american keyboard layout
layout = KeyboardLayoutUS(kbd)
# we're going to make this button compatible with the
# builtin A button on the Circuit Playground Express
start_btn = digitalio.DigitalInOut(board.D4)
# Bug fixes: Direction/Pull/DigitalInOut are only available through the
# imported digitalio module, and "start_button" was a typo for start_btn --
# as written, these lines raised NameError on import.
start_btn.direction = digitalio.Direction.INPUT
start_btn.pull = digitalio.Pull.UP
# using builtin cplayx led
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
def repeat_key(key, num_repeat):
    """Press and release *key* (a Keycode constant) *num_repeat* times.

    Bug fix: the original pressed ``keycode.key`` -- ``keycode`` is not
    defined anywhere (the import is ``Keycode``); the intent was clearly
    to press the *key* argument itself.
    """
    for _ in range(num_repeat):
        kbd.press(key)
        kbd.release_all()
        time.sleep(1)  # give the Chromebook UI time to react between presses
def wifi_config():
    """Drive the Chromebook WiFi dialog with emulated keystrokes.

    Tabs into the network menu, submits the SSID/security settings and
    finally opens the enrollment screen.  The sleeps are UI pacing and
    matter for reliability.

    Bug fixes: the original referenced undefined names ``TAB``, ``tab``,
    ``keycode``, ``keyboard`` and ``repeatKey`` -- every key constant now
    goes through the imported ``Keycode`` class and the helper is called
    by its real name ``repeat_key``.
    """
    repeat_key(Keycode.TAB, 3)
    kbd.press(Keycode.ENTER)
    kbd.release_all()
    # tab 2 times to open extra wifi settings
    # NOTE(review): the original comment said "up arrow" but the code sent
    # TAB; TAB is kept here -- confirm against the actual dialog.
    repeat_key(Keycode.TAB, 2)
    kbd.press(Keycode.ENTER)
    kbd.release_all()
    time.sleep(1)
    # SSID Config
    # TODO: split the ssid into strings so the keyboard can write it?
    time.sleep(1)
    kbd.press(Keycode.TAB)
    time.sleep(1)
    if wifi_security == 0:
        # open network: skip past the password field
        repeat_key(Keycode.TAB, 2)
    else:
        # type in wifi pass
        kbd.press(Keycode.ENTER)
        time.sleep(.1)
    time.sleep(10)
    kbd.press(Keycode.TAB)
    kbd.press(Keycode.ENTER)
    time.sleep(.2)
    # enter enrollment
    kbd.press(Keycode.ENTER)
    time.sleep(1)
# Main service loop: poll the trigger button and, on a press, run the two
# enrollment phases in sequence (WiFi join, then credential entry).
while True:
    time.sleep(4)
    if start_btn.value == 1:
        # run wifi config
        led.value = 1
        wifi_config()
        time.sleep(5)
        # wait for a second button press before phase two
        while start_btn.value != 1:
            time.sleep(1)
        led.value = 0
        # run credential config
        credential_config()
        # pulse the neopixel ring
| [
"robots199@me.com"
] | robots199@me.com |
2f20ea73c211654103ed77f7d94454777f5cfc0d | 37c5a0a8ee807ec7e40bd38d4ecb6d7a8d1e21cd | /src/python/serif/theory/icews_event_mention.py | 5aa8b7b09dae41264fbcd246ebabfc270080420a | [
"Apache-2.0"
] | permissive | BBN-E/text-open | 3321187f17b3fbc0317c95d32aa3a741c8e9769b | b486a66339a330e94d81850e6acb3a7e34df746e | refs/heads/main | 2023-06-10T09:45:57.952476 | 2023-05-25T18:44:11 | 2023-05-25T18:44:11 | 302,090,801 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from serif.theory.proposition import Proposition
from serif.theory.serif_theory import SerifTheory
from serif.theory.value_mention import ValueMention
from serif.xmlio import _SimpleAttribute, _ChildTheoryElementList, _ReferenceAttribute, _ReferenceListAttribute
class ICEWSEventMention(SerifTheory):
    """Serif theory object for a single ICEWS event mention.

    Purely declarative: each class attribute describes how one XML
    attribute / child element of the mention is (de)serialized.
    """
    # <ICEWSEventParticipant> child elements of this mention.
    participants = _ChildTheoryElementList('ICEWSEventParticipant')
    # Required string attributes of the mention element.
    event_code = _SimpleAttribute(is_required=True)
    event_tense = _SimpleAttribute(is_required=True)
    pattern_id = _SimpleAttribute(is_required=True)
    # Optional reference (by id) to the ValueMention carrying the event time.
    time_value_mention = _ReferenceAttribute('time_value_mention_id',
                                             cls=ValueMention,
                                             is_required=False)
    # Referenced Proposition objects, resolved from 'proposition_ids'.
    propositions = _ReferenceListAttribute('proposition_ids', cls=Proposition)
    original_event_id = _SimpleAttribute(is_required=False)
    # Optional boolean flag.
    is_reciprocal = _SimpleAttribute(bool, is_required=False)
| [
"hqiu@bbn.com"
] | hqiu@bbn.com |
b0e958e2cf63938be65865ac0cfdf533a47698b0 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /output/StudentProblem/10.21.12.1/7/1569572792.py | 26ebc69dac781be379b9c65043af3ac844948ef0 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | ============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 1 item
../../../../../tmp F [100%]
=================================== FAILURES ===================================
____________________________________ test_5 ____________________________________
def test_5():
> assert divisors(10) == [1, 2, 5, 10]
/private/tmp/blabla.py:17:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = 10
def divisors(x: int):
result = []
for i in range(x + 1):
> if not(x % i):
E ZeroDivisionError: integer division or modulo by zero
/private/tmp/blabla.py:11: ZeroDivisionError
=========================== short test summary info ============================
FAILED ../../../../../tmp/::test_5 - ZeroDivisionError: integer division or m...
============================== 1 failed in 0.05s ===============================
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
2abe504e9ab45cb335111ffdbc077fec444f5b0c | dab7eb86a8ffe3fcf012a851b2bf243ff7e06088 | /longestpossibleappna_3898/wsgi.py | d89f34b75ea5f57ca3a45a3dcb580dbe20c3e61a | [] | no_license | crowdbotics-apps/longestpossibleappna-3898 | 54760668be0c30a41fd6235232bf7782e70958a7 | dc57d9786b53d7018cca6256c28a8202625345ce | refs/heads/master | 2023-05-26T07:21:41.191231 | 2020-04-30T14:23:13 | 2020-04-30T14:23:13 | 260,232,643 | 0 | 0 | null | 2021-06-10T17:01:00 | 2020-04-30T14:22:47 | Python | UTF-8 | Python | false | false | 427 | py | """
WSGI config for longestpossibleappna_3898 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to this project's settings module unless one is already set.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'longestpossibleappna_3898.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) look up.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
737c77a191f20cede4d4431f84652b00e894ce30 | d2970ef359537f553e86dc05015b265611bd8f4f | /Aiden/Ceaser_Cypher.py | 7c503c72fa83a9975e1d05fd1c12ae791c0e2549 | [] | no_license | idcrypt3/camp_2019_07_07 | cc68c28f9c84a0ad6ac893cb65a0a48502a09af6 | 4c748b60f1553072dbda9d4d226b39a32548521f | refs/heads/master | 2020-06-17T08:23:30.734953 | 2019-07-17T16:29:55 | 2019-07-17T16:29:55 | 195,860,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | alphabet = "abcdefghijklmnopqrstuvwxyz"
# (Re)declare the working alphabet so this section is self-contained.
alphabet = "abcdefghijklmnopqrstuvwxyz"
def caesar_encrypt(message, key):
    """Return *message* Caesar-shifted by *key* positions.

    Characters found in *alphabet* are substituted from a rotated copy of
    it; anything else (spaces, digits, punctuation) passes through
    unchanged.  *key* is reduced modulo 26, which generalizes the original
    logic: zero, positive and negative keys in -25..25 behave exactly as
    before, while keys outside that range (previously mis-handled by the
    plain slicing) now wrap correctly.
    """
    shift = key % 26
    shifted = alphabet[shift:] + alphabet[:shift]
    pieces = []
    for ch in message:
        idx = alphabet.find(ch)
        pieces.append(ch if idx < 0 else shifted[idx])
    return "".join(pieces)
if __name__ == "__main__":
    # Same interactive behaviour as the original script.
    message = input("Please enter a secret message: ").lower()
    key = int(input("Please enter a number to shift by: "))
    print(caesar_encrypt(message, key))
| [
"idcrypt3@gmail.com"
] | idcrypt3@gmail.com |
25a6097e0dd1368a43cac42ba3c40ecfc7ad22aa | 03e91d7a923d94a4c0cd016e3c64cdefa7d0e1c5 | /order/migrations/0006_productinordermodel_image.py | 073bb3ff6d5e5c476b1fc8d43e10aeac1f91c923 | [] | no_license | win77g/irashop | 0e6afec6fd0397ee82484f718e90502cfc627efb | 9244c59ca69e263c24c9ad92ddf355b8f9ee4efc | refs/heads/master | 2023-06-30T03:17:09.333673 | 2021-08-05T21:10:26 | 2021-08-05T21:10:26 | 261,574,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.2.11 on 2020-04-05 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional ``image`` column to ProductInOrderModel."""
    dependencies = [
        ('order', '0005_productinordermodel_size'),
    ]
    operations = [
        migrations.AddField(
            model_name='productinordermodel',
            name='image',
            # Nullable 128-char text column (verbose name 'Фото'); presumably
            # stores a path/URL rather than binary data -- confirm usage.
            field=models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Фото'),
        ),
    ]
| [
"win21g@mail.ru"
] | win21g@mail.ru |
41b624dfe2a84d1fa0848874a189accdc719f795 | c61bcb0732a1c92bbed4195573e68393400c9fb7 | /suorganizer/suorganizer/settings.py | 3dac5bc8cb21252fd0fb73a1d3018c49f0cd77d6 | [] | no_license | paulhendricks/django-unleashed | ba181454a644d53a555adf814769965bf7a2fded | 209bb4f42e8d6856ff760f46f4338834d96d711d | refs/heads/master | 2021-01-20T18:36:33.677366 | 2016-07-24T23:11:48 | 2016-07-24T23:11:48 | 61,044,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | """
Django settings for suorganizer project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1z#y4!g0b%!3x+kt#nk0#0q$2!40xw-0%w_pec$7$^yow$)9mj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps:
    'organizer',
    'blog'
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'suorganizer.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'suorganizer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Development database: single-file SQLite stored next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"paul.hendricks.2013@owu.edu"
] | paul.hendricks.2013@owu.edu |
cc23921f539f87f2abaf47cf8abbe9bab1429e24 | c8f10dd7dbb1a4cf2e22f5fc1cef6affa68013f9 | /myproject/crm/mixins.py | 64dd03a65e19c9dd476331f195ed2c36fd15df26 | [] | no_license | olivx/jc-challenge | 72f016d47c31fa7c7d8c57222eb60861dbc397ef | 55e8c24231605dcaec22f0d24d133e1702daa0c5 | refs/heads/master | 2021-01-12T09:19:17.059412 | 2017-02-13T03:06:24 | 2017-02-13T03:06:24 | 81,324,849 | 0 | 0 | null | 2017-02-13T03:03:19 | 2017-02-08T11:52:31 | Python | UTF-8 | Python | false | false | 244 | py | # -*- coding: utf-8 -*-
class CounterMixin(object):
    """View mixin exposing the queryset size to templates as ``count``."""
    def get_context_data(self, **kwargs):
        # Build the normal context, then attach the object count.
        # (Old-style super() call kept for Python 2 compatibility.)
        context = super(CounterMixin, self).get_context_data(**kwargs)
        context['count'] = self.get_queryset().count()
        return context
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
0fa45dc47e6ff8104a20a15bfe36e559a5a7764e | d138deda43e36f6c79c5e3a9ef1cc62c6a92e881 | /python/paddle/amp/auto_cast.py | 441bc31b93684f94fd1dc36183679f493c03ada0 | [
"Apache-2.0"
] | permissive | seiriosPlus/Paddle | 51afd6f5c85c3ce41dd72953ee659d1539c19f90 | 9602a182b2a4979247c09df1ec283fc39cb4a981 | refs/heads/develop | 2021-08-16T16:05:10.848535 | 2020-12-27T15:15:19 | 2020-12-27T15:15:19 | 123,257,829 | 2 | 0 | Apache-2.0 | 2019-12-10T08:22:01 | 2018-02-28T08:57:42 | C++ | UTF-8 | Python | false | false | 2,562 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.amp import amp_guard
__all__ = ['auto_cast']
def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
    """
    Create a context which enables auto-mixed-precision(AMP) of operators
    executed in dynamic graph mode.
    If enabled, the input data type (float32 or float16) of each operator is
    decided by the autocast algorithm for better performance.

    Commonly, it is used together with `GradScaler` to achieve
    Auto-Mixed-Precision in imperative mode.

    This is a thin public wrapper: all the actual work is delegated to
    ``paddle.fluid.dygraph.amp.amp_guard``.

    Args:
        enable(bool, optional): Enable auto-mixed-precision or not. Default is True.
        custom_white_list(set|list, optional): The custom white_list. It's the set of ops that support
            fp16 calculation and are considered numerically-safe and performance-critical. These ops
            will be converted to fp16.
        custom_black_list(set|list, optional): The custom black_list. The set of ops that support fp16
            calculation and are considered numerically-dangerous and whose effects may also be
            observed in downstream ops. These ops will not be converted to fp16.

    Returns:
        A context manager; code executed inside it runs with AMP turned on
        (or off, when ``enable`` is False).

    Examples:

     .. code-block:: python

        import paddle

        conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
        data = paddle.rand([10, 3, 32, 32])

        with paddle.amp.auto_cast():
            conv = conv2d(data)
            print(conv.dtype) # FP16

        with paddle.amp.auto_cast(enable=False):
            conv = conv2d(data)
            print(conv.dtype) # FP32

        with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
            conv = conv2d(data)
            print(conv.dtype) # FP32

        a = paddle.rand([2,3])
        b = paddle.rand([2,3])
        with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
            c = a + b
            print(c.dtype) # FP16
    """
    return amp_guard(enable, custom_white_list, custom_black_list)
| [
"noreply@github.com"
] | seiriosPlus.noreply@github.com |
b991c27bef8a290364d95bb429d91db56f260232 | 8dffff5ff7f2645a50fd9846198e12e4c96a91da | /32-gcf.py | 40180be85f2c2280d2e4c6ac1c55700e901f6693 | [] | no_license | akshaypawar2508/Coderbyte-pythonSol | b233c5ee0c34e0413a26b24b423dae45342b9ade | 5c7d2028fe09fd02aad7808f88abc40fdea0f81e | refs/heads/master | 2022-01-03T09:44:18.635060 | 2014-07-31T13:32:08 | 2014-07-31T13:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | def Division(num1,num2):
    # Euclid's algorithm: repeatedly replace (a, b) with (b, a mod b)
    # until the remainder is zero; the survivor is the greatest common factor.
    while num2 != 0:
        num1, num2 = num2, num1%num2
    return num1
# keep this function call here
# to see how to enter arguments in Python scroll down
# NOTE(review): Division() takes two arguments but only the single string
# from raw_input() is passed, so this call raises TypeError as written;
# presumably the input must be split and converted first -- confirm the
# expected input format.  (Python 2 script: print statement / raw_input.)
print Division(raw_input())
| [
"xzhu15@illinois.edu"
] | xzhu15@illinois.edu |
3de488bf4eb42746d03ff642d52f553da3b0a0a9 | 38828c16d3f6f466fe416067a099e139ba85a441 | /imageupload/migrations/0008_auto_20181011_1407.py | 677150fe5f7c6d89d7755a39b13514f4ee8858fc | [] | no_license | finebrush/takeatrips | 20c46329af0135f1bc3773a179520f78d042fc53 | 8641a669f3daca646e915cd82a69d5d61ee7ab3d | refs/heads/master | 2022-12-30T08:21:17.191846 | 2018-11-07T15:29:23 | 2018-11-07T15:29:23 | 155,531,775 | 0 | 0 | null | 2022-12-08T02:59:00 | 2018-10-31T09:33:15 | JavaScript | UTF-8 | Python | false | false | 973 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-10-11 05:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefines three UploadedImage text fields (new defaults / verbose names)."""
    dependencies = [
        ('imageupload', '0007_auto_20181011_1404'),
    ]
    operations = [
        # Each AlterField swaps in a 255-char CharField with a Korean default.
        migrations.AlterField(
            model_name='uploadedimage',
            name='bs_thename',
            field=models.CharField(default='지역명', max_length=255, verbose_name='location name'),
        ),
        migrations.AlterField(
            model_name='uploadedimage',
            name='bs_title',
            field=models.CharField(default='소개 타이틀', max_length=255, verbose_name='Title of intro image'),
        ),
        migrations.AlterField(
            model_name='uploadedimage',
            name='bs_writer',
            field=models.CharField(default='소개 작가', max_length=255, verbose_name='writer of intro image'),
        ),
    ]
| [
"finebrush.mlab@gmail.com"
] | finebrush.mlab@gmail.com |
a348cd18fff127c70b3192663bfbcd78170a7dcf | 22d368661afd1ba00378d9da8eacadb86e2d4f95 | /vk/types/responses/__init__.py | 965c47e5089b6c9feb0aa36b023c7ff21b364cf9 | [
"MIT"
] | permissive | yilbegan/vk.py | e5e0887fde758e12577b394cd2636c48a5dc74be | 128029969edb57806b1d3d13a0a43613bc33abd3 | refs/heads/master | 2020-07-08T08:48:13.334133 | 2019-08-21T15:50:17 | 2019-08-21T15:50:17 | 203,623,364 | 3 | 1 | MIT | 2019-08-21T16:28:03 | 2019-08-21T16:28:03 | null | UTF-8 | Python | false | false | 358 | py | from . import others
from . import account
from . import apps
from . import appwidgets
from . import auth
from . import board
from . import database
from . import docs
from . import fave
from . import friends
from . import gift
from . import groups
from . import leadforms
from . import leads
from . import likes
from . import market
from . import messages
| [
"botyavs@gmail.com"
] | botyavs@gmail.com |
ae5aea420cf4046d5d1af7d4f13928738ec44541 | fdec477002fb0c5f013faf369d2a1e782172a1d6 | /shop/mainapp/api/api_views.py | 1b4b14641b6fdb7f493b43dfb8c352a846ffacea | [] | no_license | aimiranarzhigitova/API_projects | 19fb416479e5a76dab760f38621e643e2db609cb | 8256cc1bc8dc939453c61a39215e89dbd96fecb1 | refs/heads/master | 2023-05-16T08:52:51.209458 | 2021-06-06T09:44:53 | 2021-06-06T09:44:53 | 374,322,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | from collections import OrderedDict
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from .serializers import RegisterSerializer, UserSerializer, CategorySerializer, BaseProductSerializer, CustomerSerializer, CartProductSerializer, CartSerializers, OrderSerializer
from ..models import Category, Product, Customer, CartProduct, Cart, Order
from knox.models import AuthToken
# Register API
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer (user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class ProductPagination(PageNumberPagination):
page_size = 50
page_size_query_param = 'page_size'
max_page_size = 60
def get_paginated_response(self, data):
return Response(OrderedDict([
('objects_count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('items', data)
]))
class CategoryListApiView(ListCreateAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class CategoryApiView(RetrieveUpdateDestroyAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class ProductListApiView(ListCreateAPIView):
serializer_class = BaseProductSerializer
pagination_class = ProductPagination
queryset = Product.objects.all()
filter_backends = [SearchFilter]
search_fields = ['ip']
class ProductDetailApiView(RetrieveUpdateDestroyAPIView):
serializer_class = BaseProductSerializer
queryset = Product.objects.all()
class CustomersListApiView(ListAPIView):
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
class CartProductListApiView(ListAPIView):
serializer_class = CartProductSerializer
queryset = CartProduct.objects.all()
class CartListApiView(ListAPIView):
serializer_class = CartSerializers
queryset = Cart.objects.all()
class OrderListApiView(ListAPIView):
serializer_class = OrderSerializer
queryset = Order.objects.all() | [
"aymira.narzhigitova@gmail.com"
] | aymira.narzhigitova@gmail.com |
efe00f1898d63c1220007f55cbf52362ae1563d3 | e43906683d87683705670655bc185d113b797f9c | /spectrumFit/apr2018dijetgamma.py | 6d4cf1cd6b365cac3b63ca8791c060ce063d51bd | [] | no_license | Yvonne-Ng/GP | 1fcba24faa868c86bee71da26386600e94d179d9 | 7dba2626fd417d3b6e432160ed49f09980b59d1e | refs/heads/master | 2020-03-11T14:22:32.271495 | 2018-09-11T13:29:03 | 2018-09-11T13:29:03 | 130,051,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | from runFunctions import spectrumGlobalFit
if __name__=="__main__":
#-----------a template config file -------#
config={#-----Title
"title": "TrijetBtagged1",
"useScaled": False,
#-----fit range
"xMinFit": 300,
"xMaxFit": 1500,
"xMinGP": 300,
"xMaxGP": 1500,
#-----Spectrum file input
"dataFile": "/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/gp-toys/data/all/data/dijetgamma_mjj_g150_2j25_inclusive.h5",
"dataFileTDir": "",
"dataFileHist": "background_mjj_var",
#------put some placeholder file here
"officialFitFile":"/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/gp-toys/data/all/Step1_SearchPhase_Zprime_mjj_var.h5",
#-----Fit function
"fitFunction": 0, #0: UA2; 1: 4 params
#initial parameter for fitting
"initParam": (7438.410338225633, 0.24951051678754332, 102.55526846085624, -271.9876795034993),
#the range of the parameter value within which it is throwing from
"initFitParam": [10000,10,100,300], #None(default): (9.6, -1.67, 56.87,-75.877 )
# the allowed range of variable values
"initRange": [(2000, 8000.),(-10, 10),(-100, 600.),(-500, 300.)] } #None(default): [(-100000, 1000000.),(-100., 100.),(-100., 100.),(-100., 100.)]
spectrumGlobalFit.spectrumGlobalFit(config)
| [
"yvonne.ng@cern.ch"
] | yvonne.ng@cern.ch |
60bafb492156b02c296679d940270394ce35ffce | 683a90831bb591526c6786e5f8c4a2b34852cf99 | /HackerRank/Interview/Strings/2_AlternatingCharacters.py | a5e68da537d3ea7f3412c42110cbce7e63191e9a | [] | no_license | dbetm/cp-history | 32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d | 0ceeba631525c4776c21d547e5ab101f10c4fe70 | refs/heads/main | 2023-04-29T19:36:31.180763 | 2023-04-15T18:03:19 | 2023-04-15T18:03:19 | 164,786,056 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # https://www.hackerrank.com/challenges/alternating-characters/problem
# Tag(s): Greedy, strings
def alternatingCharacters(s):
flag_1 = 'A'
flag_2 = 'B'
x = 0
y = 0
n = len(s)
for i in range(n):
if s[i] == flag_1:
flag_1 = ('B' if flag_1 == 'A' else 'A')
else:
x += 1
if s[i] == flag_2:
flag_2 = ('B' if flag_2 == 'A' else 'A')
else:
y += 1
return min(x, y)
if __name__ == '__main__':
T = int(input())
for _ in range(T):
s = input()
print(alternatingCharacters(s))
| [
"davbetm@gmail.com"
] | davbetm@gmail.com |
260d7c448653d6a14a06b38e37e97db6a29a0c48 | c1db9d9bca3c908d5c30f3c02e7bc7bb2dc5b892 | /task/models.py | 1c1be9e97539791fc75e151a7adcf115623b147f | [] | no_license | rashidhamid139/Practice | 00e3aa1f3caa2648d8f62b1791687dd1313608ad | dcfe96a124687ec87545e34fb7021ef2d6e13bdb | refs/heads/master | 2023-03-17T13:27:13.719717 | 2021-03-04T16:28:56 | 2021-03-04T16:28:56 | 278,792,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from django.db import models
# Create your models here.
class Task(models.Model):
title = models.CharField(max_length=255)
date = models.DateTimeField(auto_now_add=True)
completed = models.BooleanField(default=False)
class Meta:
ordering = ['completed', 'date']
def __str__(self):
return self.title
| [
"rashidhamid139@gmail.com"
] | rashidhamid139@gmail.com |
6398b36c28197d9034cac3de143b8dbaa16bb367 | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /other_contests/SMTB2019/A.py | aa4532f9e5732cb8c1cbfa619c0ff436ae54baa8 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 367 | py | def solve(m1, d1, m2, d2):
if m1 == m2:
return 0
else:
return 1
def main():
m1, d1 = map(int, input().split())
m2, d2 = map(int, input().split())
res = solve(m1, d1, m2, d2)
print(res)
def test():
assert solve(11, 16, 11, 17) == 0
assert solve(11, 30, 12, 1) == 1
if __name__ == "__main__":
test()
main()
| [
"cashfeg@gmail.com"
] | cashfeg@gmail.com |
364c7fbdbdd853836d7faa2d48f0d96d450b696b | eec9299fd80ed057585e84e0f0e5b4d82b1ed9a7 | /comment/migrations/0002_auto_20181126_2237.py | f43103e474077979c288a083e9963fdafb9ec8e6 | [] | no_license | aimiliya/mysite | f51967f35c0297be7051d9f485dd0e59b8bb60c2 | b8e3b639de6c89fb8e6af7ee0092ee744a75be41 | refs/heads/master | 2020-04-08T19:06:36.539404 | 2018-12-01T08:05:18 | 2018-12-01T08:05:18 | 159,640,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # Generated by Django 2.1.3 on 2018-11-26 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='coment',
options={'ordering': ['-comment_time']},
),
migrations.AddField(
model_name='coment',
name='parent_id',
field=models.IntegerField(default=0),
),
]
| [
"951416267@qq.com"
] | 951416267@qq.com |
1b8186b33b4e154abe2da78ebfd54ce03d98b9f8 | 9433ce01c6e2906c694b6f0956a4640e1872d4d2 | /src/test/python/wdbd/test_girlant_down.py | e562510bb938cca541c18a206e5bb1b08ea78b43 | [] | no_license | shwdbd/python_codepool | fcd7950fc1339994186461ae18c34cee238938ee | 92a4fb61d060f9a545499b6b7f99a4dc211d5009 | refs/heads/master | 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 | Python | UTF-8 | Python | false | false | 2,250 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : test_girlant_down.py
@Time : 2020/02/03 19:50:02
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc : 影集下载功能单元测试
'''
import unittest
import wdbd.codepool.ant.girl_picture_ant as girl_ant
import wdbd.codepool.ant.girl_ci as ci
import os
import shutil
class Test_Download_SingleListPage(unittest.TestCase):
"""测试 下载单个列表页面
"""
url = 'https://www.meitulu.com/t/1386/' # 共5个
down_dir = r'temp_files\girl\\'
def tearDown(self):
if os.path.exists(self.down_dir):
shutil.rmtree(self.down_dir)
# os.removedir(self.down_dir)
return super().tearDown()
# 耗时太长,谨慎测试
def test_success(self):
"""
测试下载整个集合的情况
"""
count_of_set = 5
r = girl_ant.download_single_listpage(self.url, self.down_dir)
self.assertEqual(count_of_set, r)
# 检查下载的目录数量
self.assertEqual(count_of_set, len(os.listdir(self.down_dir)))
def test_fail(self):
"""测试 下载失败的情况
"""
err_url = 'xxxx'
err_dir = 'z:\\xxx\\'
# 测试,文件夹不存在的情况
r = girl_ant.download_single_listpage(self.url, err_dir)
self.assertEqual(0, r)
# 测试,url不存在的情况
r = girl_ant.download_single_listpage(err_url, self.down_dir)
self.assertEqual(0, r)
class Test_Download_SinglePage(unittest.TestCase):
"""测试下载单个影集
"""
down_dir = ci.DOWN_DIR
def tearDown(self):
if os.path.exists(self.down_dir):
shutil.rmtree(self.down_dir)
return super().tearDown()
def test_download_single(self):
"""测试 单个页面下载
"""
url = 'https://www.meitulu.com/item/15267.html'
name = '[YOUWU尤物馆] VOL.099 木木hanna - 性感黑丝吊袜写真'
r = girl_ant.download_single(url)
self.assertEqual(38, r) # 下载文件数
dw_dir = ci.DOWN_DIR + name + '\\'
self.assertTrue(os.path.exists(dw_dir)) # 生成的文件夹
| [
"shwangjj@163.com"
] | shwangjj@163.com |
c62a44507b5b34f7b2ce5401b569a0453dfa4af0 | b0b8d735473c79bae43d939a605bc60c07137b46 | /devices/readers.py | 5a9e606339baaa6e4646113c8ba67d05ebc78fee | [] | no_license | frnhr/plc_lines | 39e965d7481bde72c04bf2091497dfb0ec49198e | 60366cb5fd3b06d1558da921fe301fdb7a5d017e | refs/heads/master | 2022-10-05T08:27:23.669929 | 2020-05-19T13:12:31 | 2020-05-19T13:12:31 | 243,630,119 | 0 | 0 | null | 2022-09-30T01:21:53 | 2020-02-27T22:31:06 | Python | UTF-8 | Python | false | false | 1,668 | py | from __future__ import annotations
import json
from typing import Optional
from django.conf import settings
from pylogix.eip import Response, PLC
class ReaderError(RuntimeError):
"""Failed to read PLC device."""
SUCCESS_STATUSES = ("Success", "Partial transfer")
class ReaderBase:
def __init__(self, ip, variable) -> None:
self.ip = ip
self.variable = variable
def read(self) -> Optional[str]:
try:
response = self._read()
except NotImplementedError:
raise
except Exception as e:
raise ReaderError() from e
if response.Status not in SUCCESS_STATUSES:
raise ReaderError(response.Status)
# TODO Do we need to continue reading if get 0x06 Partial transfer?
return str(response.Value) if response is not None else None
def _read(self) -> Response:
raise NotImplementedError()
class FakeReader(ReaderBase):
    """Development stand-in for a real PLC.

    Serves canned responses from the JSON file named by
    ``settings.FAKE_READER_FILE`` (keyed by device IP); edit that file to
    change the value that gets "read".
    """

    def _read(self) -> Response:
        with open(settings.FAKE_READER_FILE) as handle:
            canned = json.load(handle)
        return Response(**canned[self.ip])
class PLCReader(ReaderBase):
    """Reads the configured variable from a real PLC via pylogix (Micro800 mode)."""

    def _read(self) -> Response:
        with PLC() as comm:
            comm.IPAddress = self.ip
            comm.Micro800 = True
            return comm.Read(self.variable)
| [
"fran.hrzenjak@gmail.com"
] | fran.hrzenjak@gmail.com |
eeb25bb99a16c36f21171b4e54186e08259a1435 | 7fdf37c8bb0fe575a28a996ccff08445777d7a59 | /image_server/wx_app/migrations/0014_img_fsize.py | d7fdb6906bba047ed956c7a82c573a9bf51fdede | [] | no_license | bushitan/image_str | 8285884b3aef06935023afa69d49bfc3baecaf2a | dca6f38cffe1f1d1c72a3a098bc4b106a4f5914d | refs/heads/master | 2020-05-21T19:19:39.403015 | 2017-07-20T08:38:31 | 2017-07-20T08:38:31 | 62,543,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wx_app', '0013_img_user_id'),
]
operations = [
migrations.AddField(
model_name='img',
name='fsize',
field=models.IntegerField(default=0, null=True, verbose_name=b'\xe6\x96\x87\xe4\xbb\xb6\xe5\xa4\xa7\xe5\xb0\x8f', blank=True),
),
]
| [
"373514952@qq.com"
] | 373514952@qq.com |
b6faa6b647be48cc1f8a41e6d699d2e4bcdb91c4 | 1897bb1a06572018eee4ef30b56e5e12425a4085 | /12306/1.29scrapy中的去重/project29/project29/spiders/custom.py | 8a63c14e438f4d34c404792c4fee3a83aaf2c93f | [] | no_license | xiaozhiqi2000/spider_advanced | 3f16e140b2f92206ad1ac0298ee0a94f57ad067d | 0a32fcb1fd409ae1bf686a7ed9809c2ee277dec7 | refs/heads/master | 2020-05-27T09:58:06.127519 | 2016-10-17T07:02:37 | 2016-10-17T07:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | # -*- coding: utf-8 -*-
import scrapy
import json
from scrapy.http.request import Request
class CustomSpider(scrapy.Spider):
    """Demo spider for scrapy's request de-duplication (dupefilter).

    ``parse`` deliberately yields several identical requests so the dropped
    duplicates show up in the debug log.
    """
    name = 'custom'
    start_urls = ['https://kyfw.12306.cn/otn/userCommon/allProvince']
    custom_settings = {
        # Log every request the duplicate filter drops.
        'DUPEFILTER_DEBUG': True,
        # Swap in the project's custom filter to compare behaviour.
        # 'DUPEFILTER_CLASS': "project29.custom_filter.CustomURLFilter"
    }
    def parse_e(self, response):
        # Echo which request actually got through the dupefilter.
        self.logger.info(response.url)
        self.logger.info(response.meta)
    def parse(self, response):
        self.logger.info("--------------------------")
        # The start URL returns JSON; log each province's Chinese name.
        j = json.loads(response.body)
        for prov in j["data"]:
            self.logger.info(prov["chineseName"])
        # One unique URL, then repeated wd=3 requests: the repeats should be
        # filtered. The differing ``meta`` payloads presumably do not affect
        # the request fingerprint -- TODO confirm which of these the active
        # dupefilter actually drops.
        yield Request(url='https://www.baidu.com/s?wd=1', callback = self.parse_e)
        yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e)
        yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e)
        yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"1"})
        yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"2"})
        yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"2"})
| [
"xiaozhiqi2015@live.com"
] | xiaozhiqi2015@live.com |
03c6825d137329f515bb5c6d91bfd057aefa5a1d | 1e08e2c0a1cd9677b35347b9aedd579e8676ee41 | /blog/migrations/0004_blogtagindexpage.py | da35bd1521b74920e4add159baf03d570b2e6dcf | [
"MIT"
] | permissive | tbrlpld/wagtail-gatsby-blog-backend | 023eb4db9166cc860990bbf0414712932508dfa1 | f68f1d9e2577d5271960f142bf37dcbcdac6767a | refs/heads/master | 2022-11-30T11:01:48.115493 | 2020-08-18T17:40:46 | 2020-08-18T17:40:46 | 284,381,118 | 0 | 0 | MIT | 2020-08-18T17:47:24 | 2020-08-02T03:14:21 | Python | UTF-8 | Python | false | false | 752 | py | # Generated by Django 2.2.13 on 2020-06-12 01:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the BlogTagIndexPage page type.

    The model adds no fields of its own; it is a bare subclass of
    wagtailcore.Page (see ``bases``).
    """
    dependencies = [
        ('wagtailcore', '0045_assign_unlock_grouppagepermission'),
        ('blog', '0003_auto_20200612_0111'),
    ]
    operations = [
        migrations.CreateModel(
            name='BlogTagIndexPage',
            fields=[
                # Multi-table-inheritance link to the underlying wagtail Page row.
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| [
"tibor@lpld.io"
] | tibor@lpld.io |
71263850c4721086a1a743b2e881050df61695dc | caf794c8f43560ef71ba189191d1d8313af3c6ba | /datamanagement/add_generic_dataset.py | 8efe998de5033214280312401e780df473631eeb | [] | no_license | sanansakura/sisyphus | 007ac7f23edb2bb84ebeb32f6af60796df134b75 | eb82e2e141e896bdc5a980c44f908e4f68f68696 | refs/heads/master | 2021-04-24T02:57:15.308297 | 2020-03-24T21:49:26 | 2020-03-24T21:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import click
import json
import ast
import pandas as pd
from utils.constants import LOGGING_FORMAT
from utils.runtime_args import parse_runtime_args
from dbclients.tantalus import TantalusApi
import datamanagement.templates as templates
logging.basicConfig(format=LOGGING_FORMAT, stream=sys.stderr, level=logging.INFO)
# Inputs every invocation must supply.
REQUIRED_FIELDS = ['filepaths', 'sample_id', 'library_id',
                   'storage_name', 'dataset_name', 'dataset_type']
# Inputs that may be omitted and are defaulted when absent.
OPTIONAL_FIELDS = ['tag_name', 'aligner', 'sequence_lane_pks', 'reference_genome']
class ListParameter(click.Option):
    """Click option whose value is parsed as a Python literal (e.g. "[1, 2]")."""
    def type_cast_value(self, ctx, value):
        try:
            return ast.literal_eval(value)
        # Only catch the errors literal_eval raises for bad input; the
        # original bare ``except`` also swallowed KeyboardInterrupt/SystemExit.
        except (ValueError, SyntaxError, TypeError):
            raise click.BadParameter(value)
@click.group()
def input_type():
    """Top-level CLI group: inputs come via the json_input or command_line sub-command."""
    pass
@input_type.command()
@click.argument('json_file')
@click.option('--update', is_flag=True)
def json_input(**kwargs):
    """Create a dataset from inputs supplied as JSON.

    JSON_FILE may be a path to a JSON file or a literal JSON document.
    """
    missing_input = False
    # Parse the input: try it as a file path first, then as an inline string.
    try:
        with open(kwargs['json_file']) as f:
            inputs = json.load(f)
    # Narrowed from a bare ``except``: file errors or bad JSON fall through
    # to inline parsing; anything else propagates.
    except (EnvironmentError, ValueError):
        inputs = json.loads(kwargs['json_file'])
    # Reject unknown argument names. ``iteritems()`` (Python 2 only) is gone
    # and the values were never used, so iterate the keys directly.
    for key in inputs:
        if key not in REQUIRED_FIELDS + OPTIONAL_FIELDS:
            raise Exception("Unrecognized input for {}".format(key))
    # Report every missing required argument before failing.
    for key in REQUIRED_FIELDS:
        if key not in inputs:
            logging.error("Missing input for {}".format(key))
            missing_input = True
    if missing_input:
        raise Exception("Please add missing inputs")
    # Fill in defaults for omitted optional arguments.
    for key in OPTIONAL_FIELDS:
        if key not in inputs:
            inputs[key] = [] if key == 'sequence_lane_pks' else None
    inputs["update"] = kwargs['update']
    # Hand the assembled arguments to the shared implementation.
    add_generic_dataset(**inputs)
@input_type.command()
@click.argument('filepaths', nargs=-1)
@click.argument('sample_id', nargs=1)
@click.argument('library_id', nargs=1)
@click.option('--storage_name')
@click.option('--dataset_name')
@click.option('--dataset_type')
@click.option('--tag_name')
@click.option('--aligner')
@click.option('--sequence_lane_pks', cls=ListParameter, default='[]')
@click.option('--reference_genome', type=click.Choice(['HG18', 'HG19']))
@click.option('--update', is_flag=True)
def command_line(**kwargs):
    """Create a dataset from inputs supplied directly on the command line."""
    missing_input = False
    # Report every empty required argument before failing.
    # ``items()`` replaces Python-2-only ``iteritems()``.
    for key, val in kwargs.items():
        if not val and key in REQUIRED_FIELDS:
            logging.error("Missing input for {}".format(key))
            missing_input = True
    if missing_input:
        raise Exception("Please add missing inputs")
    # Hand the assembled arguments to the shared implementation.
    add_generic_dataset(**kwargs)
def add_generic_dataset(**kwargs):
    """Register files in Tantalus and group them into a sequence dataset.

    Required kwargs: filepaths, sample_id, library_id, storage_name,
    dataset_name, dataset_type.
    Optional kwargs: tag_name, aligner, sequence_lane_pks, reference_genome,
    update.
    """
    tantalus_api = TantalusApi()

    file_resource_pks = []

    sample = tantalus_api.get(
        "sample",
        sample_id=kwargs['sample_id']
    )
    library = tantalus_api.get(
        "dna_library",
        library_id=kwargs['library_id']
    )
    # Register each file with the given storage, collecting resource ids.
    for filepath in kwargs['filepaths']:
        logging.info("Adding file resource for {} to Tantalus".format(filepath))
        resource, instance = tantalus_api.add_file(
            storage_name=kwargs['storage_name'],
            filepath=filepath,
            update=kwargs.get('update', False)
        )
        file_resource_pks.append(resource["id"])
    # Only look a tag up when one was actually supplied; both CLI entry
    # points pass tag_name=None when no tag is wanted, so the original
    # ``"tag_name" in kwargs`` test wrongly queried for name=None.
    if kwargs.get("tag_name"):
        tag = tantalus_api.get("tag", name=kwargs["tag_name"])
        tags = [tag["id"]]
    else:
        tags = []
    ref_genome = kwargs.get("reference_genome")
    aligner = kwargs.get("aligner")
    # Default to [] when absent (the original left sequence_pks unbound in
    # that case); list() makes this a real sequence on Python 3, where
    # map() returns a one-shot iterator.
    sequence_pks = list(map(str, kwargs.get("sequence_lane_pks") or []))
    # Create (or fetch) the dataset tying the pieces together.
    sequence_dataset = tantalus_api.get_or_create(
        "sequence_dataset",
        name=kwargs['dataset_name'],
        dataset_type=kwargs['dataset_type'],
        sample=sample["id"],
        library=library["id"],
        sequence_lanes=sequence_pks,
        file_resources=file_resource_pks,
        reference_genome=ref_genome,
        aligner=aligner,
        tags=tags,
    )
    logging.info("Succesfully created sequence dataset with ID {}".format(sequence_dataset["id"]))
# Dispatch to the selected click sub-command when run as a script.
if __name__=='__main__':
    input_type()
| [
"andrew.mcpherson@gmail.com"
] | andrew.mcpherson@gmail.com |
67b11257facdac50c09a31ffdcc0173abaefcf28 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_exhume.py | d04f4905b10af793887ef7eef394520e46e5b7d8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py |
# class header
class _EXHUME():
    """Word-base entry for the verb "exhume": metadata plus definitions."""

    def __init__(self):
        self.name = "EXHUME"
        self.definitions = [u'to remove a dead body from the ground after it has been buried']
        self.parents = []
        self.childen = []  # NOTE(review): likely a typo for "children"; kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.specie = 'verbs'

    def run(self, obj1=None, obj2=None):
        """Return the entry's JSON payload; the arguments are ignored.

        None defaults replace the original shared mutable-list defaults (a
        classic Python pitfall); behaviour is unchanged because the
        arguments are never used.
        """
        if obj1 is None:
            obj1 = []
        if obj2 is None:
            obj2 = []
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1a2483954aba597c54da8e7a9cd1c48efadc0a79 | 706f239f0df4586221e6a7aac001626ab531c224 | /src/client_libraries/python/dynamics/customerinsights/api/models/measure_metadata_py3.py | 35818f8ac79c086ff065198678dc705906946fb2 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Global19-atlassian-net/Dynamics365-CustomerInsights-Client-Libraries | 9681d258c649b005a2379d32b23d374695a6fca4 | 0ce81ae25e97c3b8de12b97963a8c765c0248238 | refs/heads/main | 2023-02-28T20:39:33.622885 | 2021-02-09T23:34:38 | 2021-02-09T23:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MeasureMetadata(Model):
    """Represents metadata for a measure (or KPI).
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar display_name:
    :vartype display_name: str
    :param name: Gets the unique name of the measure
    :type name: str
    :param description: Gets the description of the measure.
    :type description: str
    :param definition:
    :type definition: ~dynamics.customerinsights.api.models.MeasureDefinition
    :param latest_evaluation:
    :type latest_evaluation: ~dynamics.customerinsights.api.models.Evaluation
    :param output:
    :type output: ~dynamics.customerinsights.api.models.ScalarOutput
    :param evaluation_stats:
    :type evaluation_stats:
    ~dynamics.customerinsights.api.models.EvaluationStats
    :param error_description:
    :type error_description: ~dynamics.customerinsights.api.models.StringInfo
    :param sql_validation_stats:
    :type sql_validation_stats:
    ~dynamics.customerinsights.api.models.SqlValidationStats
    :param evaluation_history: Gets the evaluation history for the measure.
    (not persisted in store)
    :type evaluation_history:
    list[~dynamics.customerinsights.api.models.Evaluation]
    :param output_history: Gets the output history for the measure. (not
    persisted in store)
    :type output_history:
    list[~dynamics.customerinsights.api.models.ScalarOutput]
    :ivar version: Gets the version number of this object.
    :vartype version: long
    :ivar updated_by: Gets the UPN of the user who last updated this record.
    :vartype updated_by: str
    :ivar updated_utc: Gets the time the object was last updated.
    :vartype updated_utc: datetime
    :ivar created_by: Gets the email address of the user who created this
    record.
    :vartype created_by: str
    :ivar created_utc: Gets the time the object was initially created.
    :vartype created_utc: datetime
    :ivar instance_id: Gets the Customer Insights instance id associated with
    this object.
    :vartype instance_id: str
    """
    # Server-populated fields: read-only, rejected if sent by the client.
    _validation = {
        'display_name': {'readonly': True},
        'version': {'readonly': True},
        'updated_by': {'readonly': True},
        'updated_utc': {'readonly': True},
        'created_by': {'readonly': True},
        'created_utc': {'readonly': True},
        'instance_id': {'readonly': True},
    }
    # Wire name and msrest (de)serialization type for each model field.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'definition': {'key': 'definition', 'type': 'MeasureDefinition'},
        'latest_evaluation': {'key': 'latestEvaluation', 'type': 'Evaluation'},
        'output': {'key': 'output', 'type': 'ScalarOutput'},
        'evaluation_stats': {'key': 'evaluationStats', 'type': 'EvaluationStats'},
        'error_description': {'key': 'errorDescription', 'type': 'StringInfo'},
        'sql_validation_stats': {'key': 'sqlValidationStats', 'type': 'SqlValidationStats'},
        'evaluation_history': {'key': 'evaluationHistory', 'type': '[Evaluation]'},
        'output_history': {'key': 'outputHistory', 'type': '[ScalarOutput]'},
        'version': {'key': 'version', 'type': 'long'},
        'updated_by': {'key': 'updatedBy', 'type': 'str'},
        'updated_utc': {'key': 'updatedUtc', 'type': 'iso-8601'},
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_utc': {'key': 'createdUtc', 'type': 'iso-8601'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
    }
    def __init__(self, *, name: str=None, description: str=None, definition=None, latest_evaluation=None, output=None, evaluation_stats=None, error_description=None, sql_validation_stats=None, evaluation_history=None, output_history=None, **kwargs) -> None:
        super(MeasureMetadata, self).__init__(**kwargs)
        # Read-only fields start as None and are filled by the service.
        self.display_name = None
        self.name = name
        self.description = description
        self.definition = definition
        self.latest_evaluation = latest_evaluation
        self.output = output
        self.evaluation_stats = evaluation_stats
        self.error_description = error_description
        self.sql_validation_stats = sql_validation_stats
        self.evaluation_history = evaluation_history
        self.output_history = output_history
        self.version = None
        self.updated_by = None
        self.updated_utc = None
        self.created_by = None
        self.created_utc = None
        self.instance_id = None
| [
"michaelajohnston@mac.com"
] | michaelajohnston@mac.com |
a5201f74952396af8a36123428177ee24f8d8dd1 | c65d9b487df6cdbbe6c4cb773f262ac13270e095 | /engine_modules/corporation/migrations/0007_auto_20151030_1102.py | e9e515d8af2970235652b1329b532c59639fab9e | [] | no_license | Neamar/corporate | 40c254e068f84d59109b25c49a7f613b4b9c7cdc | 3029e2e46087172d7ac187309b771b275446d0ce | refs/heads/master | 2021-06-21T20:26:34.471294 | 2021-03-06T08:40:53 | 2021-03-06T08:40:53 | 15,422,111 | 5 | 0 | null | 2020-07-24T19:38:15 | 2013-12-24T16:57:52 | Python | UTF-8 | Python | false | false | 769 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: refresh the ``category`` choices on AssetDelta."""
    dependencies = [
        ('corporation', '0006_auto_20150813_1714'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assetdelta',
            name='category',
            # Choice labels are byte strings; several contain UTF-8-encoded
            # French text (e.g. "Domination/Perte sèche").
            field=models.CharField(max_length=15, choices=[(b'effect-first', b'Eff. premier'), (b'effect-last', b'Eff. dernier'), (b'effect-crash', b'Eff. crash'), (b'detroit-inc', b'Detroit, Inc.'), (b'sabotage', b'Sabotage'), (b'extraction', b'Extraction'), (b'datasteal', b'Datasteal'), (b'market*bubble', b'Domination/Perte s\xc3\xa8che'), (b'invisible-hand', b'Main Invisible'), (b'votes', b'Votes')]),
        ),
    ]
| [
"neamar@neamar.fr"
] | neamar@neamar.fr |
dd0572748eea6b61edb50906f94377efe2355281 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_39_r_3/replay_config.py | 11cfacf3dd3ff4ad4b224792869d3fc7f377c640 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_39_r_3/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
pass_through_whitelisted_messages=True)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
f52e97d68bc016b621734e17240755ff95ec2b80 | 215fe73df20c3d44214c8693434617210f0aba9e | /barViz.py | c2bc1f3a0d9ef171f11b4e9ce1fb62d58ee4ca9c | [] | no_license | Schuck9/Game_Transition | 1c5ae2e902b6f6ae6ec636143edb377d7c010546 | 44450ddd0161578231d4a340f348c2f8d9dcfb64 | refs/heads/master | 2022-07-18T21:36:00.091202 | 2020-05-19T01:36:39 | 2020-05-19T01:36:39 | 260,609,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
A simple implementation of Ultimatum Game visualization
@date: 2020.5.18
@author: Tingyu Mo
"""
import numpy as np
import pandas as pd
import os
import time
import fractions
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
def bar_viz(data_path, save_name="T1.jpg", title='RG_D_EF_w0.1_u0.001 '):
    """Draw paired offer (p) / demand (q) bar charts from an Excel sheet.

    Parameters
    ----------
    data_path : str
        Excel file with ``p`` and ``q`` columns (one row per mode).
    save_name : str, optional
        Output image file name, written to the current working directory.
        (Previously hard-coded to "T1.jpg".)
    title : str, optional
        Title placed above the plot. (Previously hard-coded.)
    """
    # Load the data and drop rows missing either column.
    data = pd.read_excel(data_path)
    save_path = os.path.join(os.getcwd(), save_name)
    # print(list(data.p))
    data.dropna(subset=['p'], inplace=True)
    data.dropna(subset=['q'], inplace=True)
    # Interleave the bars: p at odd x positions, q at even ones.
    plt.bar([1,3,5,7,9],list(data.p),label="Offer(p)")
    plt.bar([2,4,6,8,10],list(data.q),label="Demond(q)")
    plt.xlabel('mode')
    plt.ylabel('Offer Or Demond')
    meta_element = np.arange(10)
    # NOTE(review): tick positions run 0..9 while the bars sit at 1..10 --
    # the labels may be shifted one slot left; confirm against the figure.
    ax_label = [" ","0.5/1"," "," "," ","0.5"," "," "," "," 1"]
    plt.xticks(meta_element,ax_label,fontsize=16)
    plt.legend()
    plt.title(title)
    plt.savefig(save_path)
    print("Figure has been saved to: ",save_path)
    # No-op under the non-interactive Agg backend selected at import time;
    # kept for parity when a GUI backend is used instead.
    plt.show()
# Render the default histogram workbook when run directly.
if __name__ == '__main__':
    # RecordName ='2020-03-03-09-14-20'
    # time_option = "all"
    # pq_distribution_viz(RecordName,time_option)
    # avg_pq_viz()
    data_path ='./Hist.xlsx'
bar_viz(data_path) | [
"noreply@github.com"
] | Schuck9.noreply@github.com |
bc1188fe3e82e135991a580d3245a4232bca1a39 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/Analysis/LMVis/tcHist.py | 83371f074e9cd421f12f1495b9a26991ead89c65 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 1,682 | py | #!/usr/bin/python
##################
# tcHist.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
from pylab import *
import scipy as sp
def doTCHist(xvals, yvals, xbins, ybins, sat=1):
    """Render a two-colour 2-D histogram as an RGB array.

    Hue encodes the bin-coordinate ratio X/(X+Y) through the RdYlGn
    colormap; brightness encodes log10 bin density, scaled by *sat* and
    clipped to 1.

    NOTE(review): ``log10`` and ``cm`` come from the module's
    ``from pylab import *``; the ``sp.*`` calls are numpy functions that
    older scipy re-exported and modern scipy no longer provides -- confirm
    before upgrading dependencies.
    """
    h = sp.histogram2d(xvals,yvals,[xbins,ybins])[0]
    # Log-compress counts (+1 keeps empty bins finite); transpose to image order.
    lh = log10(h + 1).T
    #print lh.shape
    X,Y = sp.meshgrid(xbins[:-1], ybins[:-1])
    # Colour by the X/(X+Y) fraction, clamped to [0, 1].
    c = cm.RdYlGn(sp.minimum(sp.maximum(X/(X + Y), 0),1))
    #print c.shape
    sc = sp.minimum(sat*lh/lh.max(), 1)
    # Drop the alpha channel and scale each RGB channel by the density term.
    r = c[:,:,:3]
    r[:,:,0] = r[:,:,0]*sc
    r[:,:,1] = r[:,:,1]*sc
    r[:,:,2] = r[:,:,2]*sc
    return r
def doInvTCHist(xvals, yvals, xbins, ybins, sat=1):
    """Inverted-colour variant of doTCHist (dark-on-light rendering).

    Works in complement space: the colormap output is inverted before
    density scaling and inverted back on return (``1 - r``).

    NOTE(review): ``log10`` and ``cm`` come from ``from pylab import *``;
    the ``sp.*`` numpy aliases are gone from modern scipy -- confirm
    before upgrading dependencies.
    """
    h = sp.histogram2d(xvals,yvals,[xbins,ybins])[0]
    # Log-compress counts (+1 keeps empty bins finite); transpose to image order.
    lh = log10(h + 1).T
    #print lh.shape
    X,Y = sp.meshgrid(xbins[:-1], ybins[:-1])
    # Complement of the ratio colour, clamped to [0, 1].
    c = 1 - cm.RdYlGn(sp.minimum(sp.maximum(X/(X + Y), 0),1))
    #print c.shape
    sc = sp.minimum(sat*lh/lh.max(), 1)
    r = c[:,:,:3]
    r[:,:,0] = r[:,:,0]*sc
    r[:,:,1] = r[:,:,1]*sc
    r[:,:,2] = r[:,:,2]*sc
    return 1-r
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
352ccb19b87d55b6408f0744e1b2d22f5f1fa9a8 | 3cf1535ce25f3f0a71bfd5c7697b0efd1a9ce08c | /Experiments/forontiers_jupyter/pipe_utils.py | 40ad6099d3b3d895a5d271f28ead36f985a5adde | [
"MIT"
] | permissive | junhull/Resic | 11ac59a2caf4399822f6280fee41275a7a1fd5a8 | 30b96870713a3dfb356122cb71576dd00be60329 | refs/heads/main | 2023-05-31T11:30:11.469612 | 2021-06-25T17:34:29 | 2021-06-25T17:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | import os
from tkinter import filedialog
from tkinter import *
import itertools
import subprocess
from functools import partial
import Processing.genome_3nt as genome_3nt
# TODO note that orchestration is done in functions with clear explanation
def all_genome_pair_combinations():
    """Yield every unordered pair of distinct nucleotides from A, C, G, T."""
    for pair in itertools.combinations("ACGT", 2):
        yield pair
def genome_3nt_factory(from_nt, to_nt):
    """Build the (pre, post) 3-nt genome processing callables for one
    nucleotide pairing, each with nt_replacement bound to [from_nt, to_nt].
    """
    return (
        partial(genome_3nt.pre, nt_replacement=[from_nt, to_nt]),
        partial(genome_3nt.post, nt_replacement=[from_nt, to_nt]),
    )
# TODO: stub -- replace with a call to the real transcriptome function.
def transcriptom_func(one, two):
    """Placeholder: announce itself and echo the two arguments swapped."""
    print("placeholder: transcriptome")
    return (two, one)
def genome_3nt_all_combination_spec():
    """Return [('X_Y', pre, post), ...] for every unordered pair X, Y of
    distinct nucleotides, where pre/post are the 3-nt genome processing
    functions for mapping X to Y.
    """
    spec = []
    for from_nt, to_nt in all_genome_pair_combinations():
        pre, post = genome_3nt_factory(from_nt, to_nt)
        spec.append(("%s_%s" % (from_nt, to_nt), pre, post))
    return spec
# file selection screen function
def files_selector():
    """Open a file-picker dialog and return the chosen file names as a list."""
    window = Tk()
    picked = filedialog.askopenfilenames(
        initialdir=os.getcwd(),
        title="Select files",
        filetypes=(("all files", "*.*"), ("fastq files", "*.fastq"),
                   ("pileup files", "*.pileup"), ("fasta files", "*.fasta")),
    )
    names = list(window.tk.splitlist(picked))
    window.destroy()
    return names
def file_selector():
    """Open a file-picker dialog and return the single chosen file name."""
    window = Tk()
    chosen = filedialog.askopenfilename(
        initialdir=os.getcwd(),
        title="Select files",
        filetypes=(("all files", "*.*"), ("fastq files", "*.fastq"),
                   ("pileup files", "*.pileup"), ("fasta files", "*.fasta")),
    )
    window.destroy()
    return chosen
def folder_selector():
    """Open a directory-picker dialog and return the chosen folder path."""
    window = Tk()
    chosen = filedialog.askdirectory()
    window.destroy()
    return chosen
def print_structure(startpath):
    """Pretty-print the directory tree rooted at *startpath*.

    Each level is indented four spaces; directory names get a trailing '/'.
    """
    for dirpath, _dirnames, filenames in os.walk(startpath):
        depth = dirpath.replace(startpath, '').count(os.sep)
        print('{}{}/'.format(' ' * 4 * depth, os.path.basename(dirpath)))
        file_indent = ' ' * 4 * (depth + 1)
        for filename in filenames:
            print('{}{}'.format(file_indent, filename))
def call_process(command):
    """Run *command* through the shell and print "error" on a non-zero exit."""
    # NOTE(review): shell=True executes the string via the shell -- only safe
    # for trusted, program-constructed command strings.
    res = subprocess.call(command, shell=True)
    if res:
print("error") | [
"you@example.com"
] | you@example.com |
64f56a690ecf18c897dc21fc3c4486792d2c8951 | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/client/grr_response_client/local/binary_whitelist.py | 8c4e5b27c32b76d489f50185d4ea98a673d345b2 | [
"MIT",
"Apache-2.0"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/env python
"""Deployment-specific whitelisted binaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def IsExecutionWhitelisted(cmd, args):
    """Report whether a binary/argument pair is allowed to execute.

    client_utils_common.py calls this to permit site-specific binaries;
    this deployment whitelists nothing, so every request is rejected.

    Args:
      cmd: Canonical path to the binary.
      args: List of arguments to be passed to the binary.

    Returns:
      Bool, True if it is whitelisted.
    """
    del cmd, args  # Unused: nothing is whitelisted here.
    return False
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
f4a5f6ff610a2ef870fbcd75f625332f734d48f0 | 7f52618136c8d9b9ba0ce8f89f3fcc90c4e6feb7 | /csa_new/csa_new/doctype/umpire/test_umpire.py | 680f95b417bffcbca637abc61e953926fe0cf808 | [
"MIT"
] | permissive | Jishnu70055/user_management | 7ade7f196f974ea0b3ddb220e3fca49665d9de3b | 82d3d2c85a62c7f1162633c164cb7d50e229d2fd | refs/heads/main | 2023-07-06T14:03:00.213723 | 2021-08-10T12:42:10 | 2021-08-10T12:42:10 | 394,649,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # Copyright (c) 2021, sd and Contributors
# See license.txt
# import frappe
import unittest
class TestUmpire(unittest.TestCase):
    """Placeholder test case for the Umpire doctype; no checks yet."""
| [
"jishnudq70055@gmail.com"
] | jishnudq70055@gmail.com |
cfd15ad4fd1f86eb6e93d87b51cc4e985e79c39d | 3d9ec6fddb04669104091b2f3b6f8fd0617db64d | /api/cloud_provider/compute_model.py | 0665ec64b08f60672fbcf8a1d2059be952f4c802 | [
"Apache-2.0"
] | permissive | lixianseng-limeidong/KubeOperator | b4403de8b38de54b5d0f153b19da38e6870283b3 | 124d7d9320a563d7f9f82c82bb762b523f921567 | refs/heads/master | 2020-08-07T17:34:35.196221 | 2019-10-08T03:12:20 | 2019-10-08T03:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import logging
import os
import yaml
from fit2ansible.settings import CLOUDS_RESOURCE_DIR
logger = logging.getLogger(__name__)
# Module-level cache of compute model definitions, filled by load_compute_model().
compute_models = []


def load_compute_model():
    """Load compute model metadata from compute_model_meta.yml into the
    module-level ``compute_models`` list."""
    with open(os.path.join(CLOUDS_RESOURCE_DIR, 'compute_model_meta.yml')) as f:
        logger.info('Load compute model meta')
        # safe_load: the meta file is plain data, and yaml.load without an
        # explicit Loader is unsafe and deprecated since PyYAML 5.1.
        compute_models.extend(yaml.safe_load(f))
| [
"scydeai@qq.com"
] | scydeai@qq.com |
07f03ddd26a9d4f0c1263c883b35fa596a4e0cf2 | 517a7be46b56c7a9658255dc9d2abc5872ead589 | /Line_sweep/the_skyline_problem.py | 1ce50e56c6eb0758eaccfbaa556cedda5445be58 | [] | no_license | timwangmusic/Algorithm-Design | d1f1f7cdd6b769edbb0fa0d6cf5ddd641568baeb | 5aea290f55ec80d733c596fd6fa595adac776b97 | refs/heads/master | 2022-10-03T12:23:26.134172 | 2020-05-22T00:38:47 | 2020-05-22T03:55:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | import collections
import heapq
from typing import List
"""
Skyline problem
A city's skyline is the outer contour of the silhouette formed by all the buildings in that city when viewed
from a distance. Now suppose you are given the locations and height of all the buildings,
output the skyline formed by these buildings collectively.
Computational complexity:
O(N*logN) where N is number of buildings
"""
def getSkyline(buildings: List[List[int]]) -> List[List[int]]:
    """Compute the skyline of *buildings*.

    Each building is [left, right, height]; the result is the list of
    [x, height] key points from left to right, ending at ground level 0.

    Sweep line over start/end events with a lazily-pruned max-heap of the
    currently open heights; O(N log N) for N buildings.
    """
    # Build the event list: (x, height, is_start).
    points = []
    for left, right, height in buildings:
        points.append((left, height, True))
        points.append((right, height, False))
    heap = []  # max heap of open heights (stored negated)
    # Sort by x only (key is the 1-tuple p[:1]); all events sharing an x are
    # applied together below, so their relative order does not matter.
    points.sort(key=lambda p: p[:1])
    counter = collections.Counter()  # live multiplicity of each height in heap
    start, end = 0, 0
    res = []
    prev_h = None
    # For each unique x value, apply every event there, then measure height.
    while start < len(points):
        x = points[start][0]
        while end < len(points):
            ex, eh, is_building_start = points[end]
            if ex > x:
                break
            if is_building_start:
                counter[eh] += 1
                heapq.heappush(heap, -eh)
            else:
                counter[eh] -= 1
            end += 1
        # Lazily discard heights whose buildings have all closed.
        while heap and counter[-heap[0]] == 0:
            heapq.heappop(heap)
        # BUG FIX: once the last building closes, the heap is empty and the
        # original ``-heap[0]`` raised IndexError; the height there is 0,
        # which also emits the final ground-level key point.
        cur_h = -heap[0] if heap else 0
        if cur_h != prev_h:
            res.append([x, cur_h])
            prev_h = cur_h
        start = end
    return res
| [
"weihewang2012@gmail.com"
] | weihewang2012@gmail.com |
df9d3e526751041179700b307c8cfb940b7c8a4b | e2426d7c01500ca4a2df4e4555f217f957baf957 | /cows/xml/util.py | 9a9fbbd3fc3cc0537e00ab8b27933d2b0ba9c54a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | cedadev/cows | 959a5e1ad220cfe0cce48a2131d6971106c765aa | db9ed729c886b271ce85355b97e39243081e8246 | refs/heads/master | 2020-03-16T15:17:45.710584 | 2018-05-09T10:35:47 | 2018-05-09T10:36:37 | 132,736,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Elementtree convenience utilities
@author: Stephen Pascoe
"""
def find_text(node, path):
    """Return the text of the first element matching *path*, or None."""
    match = node.find(path)
    return match.text if match is not None else None
def findall_text(node, path):
    """Collect the text of every element matching *path*, in document order."""
    return [elem.text for elem in node.findall(path)]
def find_with(node, path, func):
    """Return func(n) for the first element n matching *path*, or None."""
    match = node.find(path)
    return None if match is None else func(match)
def findall_with(node, path, func):
    """Apply *func* to every element matching *path* and return the results."""
    return [func(elem) for elem in node.findall(path)]
| [
"ag.stephens@stfc.ac.uk"
] | ag.stephens@stfc.ac.uk |
864325413a3e37a779f35facb6b7925229555615 | fac6e2aeba6873719a349f3f088a22183f92466c | /oracle/python/protectOracle/protectOracle.py | 00a0c61070908e63a21adc4ecbae136f980d078c | [] | no_license | geoffaskam/scripts | b8cd177f19c9990f93317d245d8ea87fe4cbfff3 | 1909d2dad935f10a26992a17541407b07c6b7884 | refs/heads/master | 2023-08-20T14:26:36.900054 | 2021-09-13T10:32:11 | 2021-09-13T10:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,570 | py | #!/usr/bin/env python
"""Protect Oracle"""
# usage:
# ./protectOracle.py -v mycluster \
# -u myuser \
# -d mydomain.net \
# -p 'My Policy' \
# -j 'My New Job' \
# -z 'America/New_York' \
# -s myserver.mydomain.net \
# -db mydb
# import pyhesity wrapper module
from pyhesity import *
### command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True) # Cohesity cluster name or IP
parser.add_argument('-u', '--username', type=str, required=True) # Cohesity Username
parser.add_argument('-d', '--domain', type=str, default='local') # Cohesity User Domain
parser.add_argument('-j', '--jobname', type=str, required=True) # name of protection job
parser.add_argument('-p', '--policyname', type=str) # name of protection policy
parser.add_argument('-s', '--servername', type=str, required=True) # name of server to protect
parser.add_argument('-db', '--dbname', type=str) # name of DB to protect
parser.add_argument('-t', '--starttime', type=str, default='20:00') # job start time
parser.add_argument('-z', '--timezone', type=str, default='America/Los_Angeles') # timezone for job
parser.add_argument('-is', '--incrementalsla', type=int, default=60) # incremental SLA minutes
parser.add_argument('-fs', '--fullsla', type=int, default=120) # full SLA minutes
parser.add_argument('-sd', '--storagedomain', type=str, default='DefaultStorageDomain') # storage domain
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
jobname = args.jobname
policyname = args.policyname
servername = args.servername
dbname = args.dbname
starttime = args.starttime
timezone = args.timezone
incrementalsla = args.incrementalsla
fullsla = args.fullsla
storagedomain = args.storagedomain
# parse starttime
try:
(hour, minute) = starttime.split(':')
except Exception:
print('starttime is invalid!')
exit(1)
# authenticate
apiauth(vip, username, domain)
# find storage domain
sd = [sd for sd in api('get', 'viewBoxes') if sd['name'].lower() == storagedomain.lower()]
if len(sd) < 1:
print("Storage domain %s not found!" % storagedomain)
exit(1)
sdid = sd[0]['id']
# get oracle sources
sources = api('get', 'protectionSources?environments=kOracle')
# find policy
if policyname is not None:
policy = [p for p in api('get', 'protectionPolicies') if p['name'].lower() == policyname.lower()]
if len(policy) < 1:
print('Policy %s not found!' % policyname)
exit(1)
else:
policy = policy[0]
# find existing job
newJob = False
job = [j for j in api('get', 'protectionJobs?environments=kOracle&isActive=true&isDeleted=false') if j['name'].lower() == jobname.lower()]
if len(job) < 1:
if policyname is not None:
newJob = True
# create new job
job = {
"policyId": policy['id'],
"viewBoxId": sdid,
"createRemoteView": False,
"priority": "kMedium",
"incrementalProtectionSlaTimeMins": 60,
"alertingPolicy": [
"kFailure"
],
"sourceSpecialParameters": [],
"fullProtectionSlaTimeMins": 120,
"timezone": timezone,
"qosType": "kBackupHDD",
"environment": "kOracle",
"startTime": {
"minute": int(minute),
"hour": int(hour)
},
"parentSourceId": sources[0]['protectionSource']['id'],
"name": jobname,
"sourceIds": [],
"indexingPolicy": {
"disableIndexing": True
}
}
else:
print('Job %s not found!' % jobname)
exit(1)
else:
job = job[0]
# find server to add to job
server = [s for s in sources[0]['nodes'] if s['protectionSource']['name'].lower() == servername]
if len(server) < 1:
print('Server %s not found!' % servername)
exit(1)
serverId = server[0]['protectionSource']['id']
job['sourceIds'].append(serverId)
if dbname is not None:
# find db to add to job
db = [a for a in server[0]['applicationNodes'] if a['protectionSource']['name'].lower() == dbname.lower()]
if len(db) < 1:
print("Database %s not found!" % dbname)
exit(1)
dbIds = [db[0]['protectionSource']['id']]
print('Adding %s/%s to protection job %s...' % (servername, dbname, jobname))
else:
# or add all dbs to job
dbIds = [a['protectionSource']['id'] for a in server[0]['applicationNodes']]
print('Adding %s/* to protection job %s...' % (servername, jobname))
# update dblist for server
sourceSpecialParameter = [s for s in job['sourceSpecialParameters'] if s['sourceId'] == serverId]
if len(sourceSpecialParameter) < 1:
job['sourceSpecialParameters'].append({"sourceId": serverId, "oracleSpecialParameters": {"applicationEntityIds": dbIds}})
else:
for dbId in dbIds:
sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds'].append(dbId)
sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds'] = list(set(sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds']))
job['sourceIds'] = list(set(job['sourceIds']))
if newJob is True:
# create new job
result = api('post', 'protectionJobs', job)
else:
# update existing job
result = api('put', 'protectionJobs/%s' % job['id'], job)
| [
"bseltzer@cohesity.com"
] | bseltzer@cohesity.com |
439d402a75de7bc30ffde90e79926a8c711ed6fc | f88f900c0384f6da82eeb749371ad44115527700 | /course-book/09-matching/0911-sift.py | 22566d262cac06e2ecb7a1028026128497681c01 | [] | no_license | aaron-kr/learning-opencv | eff382e8f0c822400f765451d57b192a63cd1b74 | 158239f0140569aec519fc1fbf255c54ef2567d2 | refs/heads/main | 2023-08-21T11:02:49.775425 | 2021-10-27T00:04:01 | 2021-10-27T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | # 0911.py
# SIFT = Scale Invariant Feature Transform
import cv2
import numpy as np
#1
def distance(f1,f2):
x1,y1 = f1.pt
x2,y2 = f2.pt
return np.sqrt((x2-x1) ** 2 + (y2-y1) ** 2)
def filteringByDistance(kp, distE = 0.5):
size = len(kp)
mask = np.arange(1, size + 1).astype(np.bool8) # all True
for i, f1 in enumerate(kp):
if not mask[i]:
continue
else: # True
for j, f2 in enumerate(kp):
if i == j:
continue
if distance(f1,f2) < distE:
mask[j] = False
np_kp = np.array(kp)
return list(np_kp[mask])
#2
src = cv2.imread('../../img/chessboard.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
## siftF = cv2.SIFT_create()
siftF = cv2.SIFT_create(edgeThreshold = 80)
kp = siftF.detect(gray)
print('len(kp) = ', len(kp))
#3
kp = sorted(kp, key = lambda f: f.response, reverse = True)
## filtered_kp = list(filter(lambda f: f.response > 0.01, kp))
filtered_kp = filteringByDistance(kp, 10)
print('len(filtered_kp) = ', len(filtered_kp))
kp, des = siftF.compute(gray, filtered_kp)
print('des.shape = ', des.shape)
print('des.dtype = ', des.dtype)
print('des = ', des)
#4
dst2 = cv2.drawKeypoints(gray, filtered_kp, None, color = (0,0,255))
for f in filtered_kp:
x,y = f.pt
size = f.size
rect = ((x,y), (size,size), f.angle)
box = cv2.boxPoints(rect).astype(np.int32)
cv2.polylines(dst2, [box], True, (0,255,0), 2)
cv2.circle(dst2, (round(x), round(y)), round(f.size / 2), (255,0,0), 2)
cv2.imshow('dst2', dst2)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"jekkilekki@gmail.com"
] | jekkilekki@gmail.com |
bf2e7bde9751dd59167e44ff617162cc3d743610 | 482ed16cd1c8d721e98a9c460555802f7cce8906 | /run-tests/t226.py | c3179519a2fe24c123647a0afa2d80f4d0ce9940 | [
"MIT"
] | permissive | forkcodeaiyc/skulpt_parser | ea2347b2a452476854cf03412474fae63bca31c0 | dd592e9b91bcbbe0c5cfdb5c2da0fb5ae604a428 | refs/heads/master | 2023-09-04T11:32:09.760317 | 2021-10-11T22:58:18 | 2021-10-11T22:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | for const in (1, 2, 3):
print(const)
def f():
for const in (1, 2, 3):
print(const)
for object in (1, 2, 3):
print(object)
instanceof = 5
void = 6
var = 7
delete = 8
switch = 9
default = 10
catch = 11
print((instanceof, void, var, delete, switch, default, catch))
f()
| [
"albert-jan.nijburg@babylonhealth.com"
] | albert-jan.nijburg@babylonhealth.com |
1f42f6a26b04a814d2e50725049e505104c4b7cb | b9286e5866373b777739f8a13ee06afa810451d5 | /antisocial/main/tests/test_models.py | 2a872f2af2ccf6ce8b1c891b839a08896ddffdcb | [
"BSD-3-Clause"
] | permissive | peicheng/antisocial | ff476d965e5f37cf038c88b13146ffe30dcd8e27 | 7ba8da6aa58ee20e5f01870b30a62d478cc707c9 | refs/heads/master | 2021-01-12T12:18:25.290271 | 2016-10-25T13:27:34 | 2016-10-25T21:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | from django.test import TestCase
from antisocial.main.models import Feed, Entry, extract_published
from datetime import datetime, timedelta
def feed_factory():
return Feed.objects.create(
url="http://example.com/",
guid="1234",
title="test feed",
last_fetched=datetime.now(),
last_failed=datetime.now(),
next_fetch=datetime.now() + timedelta(hours=1)
)
def entry_factory(f):
return Entry.objects.create(
feed=f,
guid="entry1234",
title="test entry",
link="http://example.com/entry",
author="test author",
published=datetime.now(),
)
class DummyFeed(object):
feed = dict(guid="foo")
class DictObj(object):
def __init__(self, **kwargs):
self._d = kwargs
for k, v in kwargs.items():
setattr(self, k, v)
def __iter__(self):
return iter(self._d)
class TestHelpers(TestCase):
def test_extract_published_default(self):
r = extract_published(dict())
self.assertIsNotNone(r)
class TestFeed(TestCase):
def test_try_fetch(self):
f = feed_factory()
f.try_fetch()
self.assertEqual(f.backoff, 0)
def test_update_guid(self):
f = feed_factory()
f.update_guid(DummyFeed())
self.assertEqual(f.guid, "foo")
def test_update_etag(self):
f = feed_factory()
d = DictObj(etag='new one')
f.update_etag(d)
self.assertEqual(f.etag, 'new one')
def test_update_modified(self):
f = feed_factory()
d = DictObj(modified='new one')
f.update_modified(d)
self.assertEqual(f.modified, 'new one')
def test_update_entry_already_exists(self):
f = feed_factory()
e = entry_factory(f)
c = Entry.objects.count()
f.update_entry(dict(guid=e.guid))
# no new ones created
self.assertEqual(c, Entry.objects.count())
| [
"anders@columbia.edu"
] | anders@columbia.edu |
bae7da5bd3bf39389bc7010564d8a48d2f341187 | 0377a4135f9e8940809a62186b229295bed9e9bc | /剑指offer/素数对/solution.py | 8f7cd0843bb2c0886a8583a50ff0752e5a9d95fa | [] | no_license | neko-niko/leetcode | 80f54a8ffa799cb026a7f60296de26d59a0826b0 | 311f19641d890772cc78d5aad9d4162dedfc20a0 | refs/heads/master | 2021-07-10T10:24:57.284226 | 2020-09-13T11:28:45 | 2020-09-13T11:28:45 | 198,792,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # 给定一个正整数,编写程序计算有多少对质数的和等于输入的这个正整数,并输出结果。输入值小于1000。
# 如,输入为10, 程序应该输出结果为2。(共有两对质数的和为10,分别为(5,5),(3,7))
import math
def Solution(num):
cot = 0
for i in range(1, (num // 2)+1):
if Judge(i):
if Judge(num-i):
cot += 1
return cot
def Judge(num):
if num <= 2:
return False
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
else:
continue
return True
print(Solution(10000000)) | [
"2361253285@qq.com"
] | 2361253285@qq.com |
863df0383c9787e542ce56e905fdafa519dba0cc | adecd0537a42ae728db1dea2b754c503dc533f9f | /docs/cycles.py | b92db1c1aa11ae72731c03e42d231df14990ecaa | [
"MIT"
] | permissive | mickaellalande/proplot | 596ab4a2c9c820b64b38bf6f54ccda440e98fe4a | 31bdb57f88190dc64f70bbd4d784b1af69ec36fc | refs/heads/master | 2022-12-17T12:00:43.428061 | 2020-09-23T15:04:10 | 2020-09-23T15:04:10 | 287,268,054 | 0 | 0 | MIT | 2020-08-13T12:02:26 | 2020-08-13T12:02:26 | null | UTF-8 | Python | false | false | 7,362 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles:
#
# Color cycles
# ============
#
# ProPlot defines **color cycles** as color palettes comprising sets of
# *distinct colors*. Unlike :ref:`colormaps <Colormaps>`, interpolation
# between these colors may not make sense. Color cycles are generally used
# with bar plots, line plots, and other distinct plot elements. ProPlot's
# named color cycles are actually registered as `~proplot.colors.ListedColormap`
# instances so that they can be `used with categorical data\
# <https://journals.ametsoc.org/view-large/figure/9538246/bams-d-13-00155_1-f5.tif>`__.
# Much more commonly, we build `property cycles\
# <https://matplotlib.org/3.1.0/tutorials/intermediate/color_cycle.html>`__
# from the `~proplot.colors.ListedColormap` colors using the
# `~proplot.constructor.Cycle` constructor function or by
# :ref:`drawing samples <ug_cycles_new>` from continuous colormaps.
#
# ProPlot adds several features to help you use color cycles effectively in
# your figures. This section documents the new registered color cycles,
# explains how to make and modify colormaps, and shows how to apply them to
# your plots.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_included:
#
# Included color cycles
# ---------------------
#
# Use `~proplot.demos.show_cycles` to generate a table of the color cycles
# registered by default and loaded from your ``~/.proplot/cycles`` folder.
# You can make your own color cycles using the `~proplot.constructor.Cycle`
# constructor function.
# %%
import proplot as plot
fig, axs = plot.show_cycles()
# %% [raw] raw_mimetype="text/restructuredtext"
# Changing the color cycle
# ------------------------
#
# You can make and apply new property cyclers with the
# `~proplot.constructor.Cycle` constructor function. Various plotting
# commands like `~matplotlib.axes.Axes.plot` and
# `~matplotlib.axes.Axes.scatter` now accept a `cycle` keyword arg, which is
# passed to `~proplot.constructor.Cycle` (see
# `~proplot.axes.cycle_changer`). To save your color cycle data and use
# it every time ProPlot is imported, simply pass ``save=True`` to
# `~proplot.constructor.Cycle`. If you want to change the global property
# cycler, pass a *name* to the :rcraw:`cycle` setting or pass the result of
# `~proplot.constructor.Cycle` to the :rcraw:`axes.prop_cycle` setting (see
# the :ref:`configuration guide <ug_config>`).
# %%
import numpy as np
lw = 5
state = np.random.RandomState(51423)
data = (state.rand(12, 6) - 0.45).cumsum(axis=0)
kwargs = {'legend': 'b', 'labels': list('abcdef')}
# Modify the default color cycle
plot.rc.cycle = '538'
fig, axs = plot.subplots(ncols=3, axwidth=1.9)
axs.format(suptitle='Changing the color cycle')
ax = axs[0]
ax.plot(data, lw=lw, **kwargs)
ax.format(title='Global color cycle')
# Pass the cycle to a plotting command
ax = axs[1]
ax.plot(data, cycle='qual1', lw=lw, **kwargs)
ax.format(title='Local color cycle')
# As above but draw each line individually
# Note that the color cycle is not reset with each plot call
ax = axs[2]
labels = kwargs['labels']
for i in range(data.shape[1]):
ax.plot(data[:, i], cycle='qual1', legend='b', label=labels[i], lw=lw)
ax.format(title='With multiple plot calls')
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_new:
#
# Making new color cycles
# -----------------------
#
# You can make new color cycles with the `~proplot.constructor.Cycle`
# constructor function. One great way to make cycles is by sampling a
# colormap! Just pass the colormap name to `~proplot.constructor.Cycle`, and
# optionally specify the number of samples you want to draw as the last
# positional argument (e.g. ``plot.Cycle('Blues', 5)``).
#
# Positional arguments passed to `~proplot.constructor.Cycle` are interpreted
# by the `~proplot.constructor.Colormap` constructor, and the resulting
# colormap is sampled at discrete values. To exclude near-white colors on the
# end of a colormap, pass e.g. ``left=x`` to `~proplot.constructor.Cycle`, or
# supply a plotting command with e.g. ``cycle_kw={'left': x}``. See
# the :ref:`colormaps section <ug_cmaps>` for details.
#
# In the below example, several cycles are constructed from scratch, and the
# lines are referenced with colorbars and legends. Note that ProPlot allows
# you to :ref:`generate colorbars from lists of lines <ug_cbars>`.
# %%
import proplot as plot
import numpy as np
fig, axs = plot.subplots(ncols=2, share=0, axwidth=2.3)
state = np.random.RandomState(51423)
data = (20 * state.rand(10, 21) - 10).cumsum(axis=0)
# Cycle from on-the-fly monochromatic colormap
ax = axs[0]
lines = ax.plot(data[:, :5], cycle='plum', cycle_kw={'fade': 85}, lw=5)
fig.colorbar(lines, loc='b', col=1, values=np.arange(0, len(lines)))
fig.legend(lines, loc='b', col=1, labels=np.arange(0, len(lines)))
ax.format(title='Cycle from color')
# Cycle from registered colormaps
ax = axs[1]
cycle = plot.Cycle('blues', 'reds', 'oranges', 15, left=0.1)
lines = ax.plot(data[:, :15], cycle=cycle, lw=5)
fig.colorbar(lines, loc='b', col=2, values=np.arange(0, len(lines)), locator=2)
fig.legend(lines, loc='b', col=2, labels=np.arange(0, len(lines)), ncols=4)
ax.format(
title='Cycle from merged colormaps',
suptitle='Color cycles from colormaps'
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_other:
#
# Cycles of other properties
# --------------------------
#
# `~proplot.constructor.Cycle` can also generate cyclers that change
# properties other than color. Below, a single-color dash style cycler is
# constructed and applied to the axes locally. To apply it globally, simply
# use ``plot.rc['axes.prop_cycle'] = cycle``.
# %%
import proplot as plot
import numpy as np
import pandas as pd
# Create cycle that loops through 'dashes' Line2D property
cycle = plot.Cycle(dashes=[(1, 0.5), (1, 1.5), (3, 0.5), (3, 1.5)])
# Generate sample data
state = np.random.RandomState(51423)
data = (state.rand(20, 4) - 0.5).cumsum(axis=0)
data = pd.DataFrame(data, columns=pd.Index(['a', 'b', 'c', 'd'], name='label'))
# Plot data
fig, ax = plot.subplots(axwidth=2.6, aspect=1)
ax.format(suptitle='Plot without color cycle')
obj = ax.plot(
data, lw=3, cycle=cycle, legend='ul',
legend_kw={'ncols': 2, 'handlelength': 3}
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_dl:
#
# Downloading color cycles
# ------------------------
#
# There are plenty of online interactive tools for generating and testing
# color cycles, including
# `i want hue <http://tools.medialab.sciences-po.fr/iwanthue/index.php>`__,
# `coolers <https://coolors.co>`__, and
# `viz palette <https://projects.susielu.com/viz-palette>`__.
#
# To add color cycles downloaded from any of these sources, save the cycle
# data to a file in your ``~/.proplot/cycles`` folder and call
# `~proplot.config.register_cycles` (or restart your python session), or use
# `~proplot.colors.ListedColormap.from_file`. The file name is used as the
# registered cycle name. See `~proplot.colors.ListedColormap.from_file` for a
# table of valid file extensions.
| [
"lukelbd@gmail.com"
] | lukelbd@gmail.com |
0bf18a73e83e14446d208b46726489c1f8870061 | e99dfc900052272f89d55f2fd284389de2cf6a73 | /apostello/loaddotenv.py | 4a4189db4fdd1e2b38c5c29fcd1b383351778925 | [
"MIT"
] | permissive | armenzg/apostello | a3e6ca3d34917608af79fbab4134ee4de1f5e8ee | 1827547b5a8cf94bf1708bb4029c0b0e834416a9 | refs/heads/master | 2021-01-18T18:16:02.364837 | 2017-03-22T20:34:21 | 2017-03-22T20:34:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #!/usr/bin/env python
import os
from dotenv import load_dotenv
def loaddotenv():
"""Load env vars from .env file."""
fname = '.env'
dotenv_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', fname)
)
load_dotenv(dotenv_path)
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
7bf14c0c23f8195723424bf053dcaecb469ab14c | 8e90a7759ec7143427823547e0fbff58e0343aaa | /training_api/application/paths/services/path_service.py | a82588bb1dc00c46b4dd497a83fefd245a8bb3a8 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | BMW-InnovationLab/BMW-TensorFlow-Training-GUI | 646a6f86f26887e94351b4c572b7fe7f0842f75c | 06531dae14365986c86baf735fd149317f4bb67a | refs/heads/master | 2023-07-20T01:48:27.299962 | 2023-07-12T15:22:22 | 2023-07-12T15:22:22 | 227,429,492 | 1,030 | 198 | Apache-2.0 | 2023-05-22T17:40:23 | 2019-12-11T18:06:11 | Python | UTF-8 | Python | false | false | 528 | py | import json
from domain.models.paths import Paths
from domain.services.contract.abstract_path_service import AbstractPathService
class PathService(AbstractPathService):
"""
A class used to get paths from path.json and return object of type Paths
...
"""
def __init__(self):
with open("./assets/paths.json", 'r') as paths_file:
json_path = json.load(paths_file)
self.paths: Paths = Paths.parse_obj(json_path)
def get_paths(self) -> Paths:
return self.paths
| [
"Daniel.Jess@bmw.de"
] | Daniel.Jess@bmw.de |
bec7895cdef7c093a11c9933c559bcc908c7a1b2 | 3839400cb89316ce591667f17c0f72c85b16b242 | /misc/mutalyzer-comparison/bin/hgvs-g-to-c | d346afda01cd8ec0c8a126fa0bcb3f0c43d687a3 | [
"Apache-2.0"
] | permissive | HealthVivo/hgvs-1 | 73f768345fb2144c1c737a00436e524c22a9423d | 26aba8877791b0f94f1e14a5a49c60bcdaf2e6fd | refs/heads/master | 2020-12-13T01:28:10.625165 | 2014-09-03T05:51:43 | 2014-09-03T05:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""compare hgvs and mutalyzer for c. and g. variants provided on the command line
The comparison has two basic cases:
1) When a c. variant is provided, it is converted to g. by both hgvs
and mutalyzer and the results are compared.
2) When a g. variant is provided, it is converted to c. variants for
all transcripts available by each tool. The members of each
result set with matching accessions are compared.
"""
import argparse
import codecs
import csv
import logging
import os
import sys
import hgvs.parser
import hgvs.dataproviders.uta
from hgvs.variantmapper import EasyVariantMapper
defaults = {
'uta-dsn': 'postgresql://localhost/uta',
}
fieldnames = ['Input Variant','Errors','Chromosomal Variant','Coding Variant(s)']
def parse_args(argv):
# parse command line for configuration files
ap = argparse.ArgumentParser(
description = __doc__,
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
)
ap.add_argument('--variant-file', '-f')
ap.add_argument('--verbose', '-v', default=0, action='count')
args = ap.parse_args(argv)
return args
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
opts = parse_args(sys.argv[1:])
if opts.verbose:
logger.setLevel(logging.INFO if opts.verbose == 1 else logging.DEBUG)
hp = hgvs.parser.Parser()
dp = hgvs.dataproviders.uta.connect(defaults['uta-dsn'])
evm = hgvs.variantmapper.EasyVariantMapper(hdp=dp)
print( "\t".join(fieldnames) )
in_fh = codecs.open(opts.variant_file,encoding='utf-8') if opts.variant_file else sys.stdin
for hgvs_g in in_fh:
hgvs_g = hgvs_g.strip()
if hgvs_g.startswith("#"):
continue
try:
var_g = hp.parse_hgvs_variant(hgvs_g)
assert var_g.type == 'g'
var_cs = [ evm.g_to_c(var_g,ac) for ac in evm.relevant_transcripts(var_g) ]
print("\t".join([hgvs_g,'',hgvs_g]+map(str,var_cs)))
except hgvs.exceptions.HGVSError as exc:
logger.error(hgvs_g, exc_info=1)
except Exception as e:
logger.error(hgvs_g, exc_info=1)
sys.exit(1)
| [
"reecehart@gmail.com"
] | reecehart@gmail.com | |
74ddc9a8dc922ee096e065d875cfc1e898f7a31d | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-SystemConfiguration/PyObjCTest/test_scpreferences.py | ee461b1fa5e0c25b9106f2e1df010723e61a60ec | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | from PyObjCTools.TestSupport import *
from SystemConfiguration import *
#from SecurityFoundation import SFAuthorization
class TestSCPreferences (TestCase):
def testConstants(self):
self.assertEqual(kSCPreferencesNotificationCommit, 1<<0)
self.assertEqual(kSCPreferencesNotificationApply, 1<<1)
def testFunctions(self):
self.assertIsInstance(SCPreferencesGetTypeID(), (int, long))
ref = SCPreferencesCreate(None, "pyobjc.test", "pyobjc.test")
self.assertIsInstance(ref, SCPreferencesRef)
self.assertResultIsBOOL(SCPreferencesLock)
self.assertArgIsBOOL(SCPreferencesLock, 1)
v = SCPreferencesLock(ref, False)
self.assertIsInstance(v, bool)
self.assertResultIsBOOL(SCPreferencesUnlock)
v = SCPreferencesUnlock(ref)
self.assertIsInstance(v, bool)
self.assertResultIsBOOL(SCPreferencesCommitChanges)
v = SCPreferencesCommitChanges(ref)
self.assertIsInstance(v, bool)
self.assertResultIsBOOL(SCPreferencesApplyChanges)
v = SCPreferencesApplyChanges(ref)
self.assertIsInstance(v, bool)
r = SCPreferencesGetSignature(ref)
self.assertIsInstance(r, CFDataRef)
r = SCPreferencesCopyKeyList(ref)
self.assertIsInstance(r, CFArrayRef)
l = []
def callback(ref, key, ctx):
l.append([ref, key, ctx])
ctx = object()
v = SCPreferencesSetCallback(ref, callback, ctx)
self.assertTrue(v is True)
self.assertResultIsBOOL(SCPreferencesAddValue)
r = SCPreferencesAddValue(ref, "use_python3", False)
self.assertTrue(r is True)
v = SCPreferencesGetValue(ref, "use_python3")
self.assertTrue(v is False)
v = SCPreferencesGetValue(ref, "use_python4")
self.assertTrue(v is None)
self.assertResultIsBOOL(SCPreferencesSetValue)
r = SCPreferencesSetValue(ref, "use_python3", "on newyearsday")
self.assertTrue(r is True)
self.assertResultIsBOOL(SCPreferencesRemoveValue)
r = SCPreferencesRemoveValue(ref, "use_python3")
self.assertResultIsBOOL(SCPreferencesScheduleWithRunLoop)
rl = CFRunLoopGetCurrent()
r = SCPreferencesScheduleWithRunLoop(ref, rl, kCFRunLoopCommonModes)
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1.0, False)
self.assertResultIsBOOL(SCPreferencesUnscheduleFromRunLoop)
r = SCPreferencesUnscheduleFromRunLoop(ref, rl, kCFRunLoopCommonModes)
SCPreferencesSynchronize(ref)
def testSecurityIntegreation(self):
self.assertResultIsCFRetained(SCPreferencesCreateWithAuthorization)
@min_os_level('10.6')
def testFunctions10_6(self):
SCPreferencesSetDispatchQueue
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
136a39269006d806aaa99a0bac22de80bf3cab16 | 5a0b6cc5f6e0c19503fb4eb713cfa5c8a6f657d1 | /apps/accounts/forms.py | a777ce3364ff9763a8e79ad60147081e419d5410 | [] | no_license | ehoversten/django_user_auth | 5069780a70c3c7ce8707b107907f45e9a98e0583 | 91f5bc75f5bcc95c714f55f6ebcb2b4b50083d47 | refs/heads/master | 2020-04-02T03:15:27.770591 | 2018-10-21T23:47:48 | 2018-10-21T23:47:48 | 153,954,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
email = forms.EmailField(required=True)
# you can set extra validations here to prevent is_valid from succeeding f you don't want it to.
first_name = forms.CharField(max_length=30,required=True)
last_name = forms.CharField(max_length=30,required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
def save(self, commit=True):
user = super(UserCreateForm, self).save(commit=False)
# let's say we wanted to make our data all caps, we could do that here!
user.email = self.cleaned_data["email"]
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
if commit:
user.save()
return user
| [
"ehoversten@gmail.com"
] | ehoversten@gmail.com |
97e39489a1008862592dc919c505fe0a8e088228 | d346c1e694e376c303f1b55808d90429a1ad3c3a | /medium/284.Peeking_Iterator.py | 1fc3679df11e721e85b5a1d2040e23cc759c49a1 | [] | no_license | littleliona/leetcode | 3d06bc27c0ef59b863a2119cd5222dc94ed57b56 | 789d8d5c9cfd90b872be4a4c35a34a766d95f282 | refs/heads/master | 2021-01-19T11:52:11.938391 | 2018-02-19T03:01:47 | 2018-02-19T03:01:47 | 88,000,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | # Below is the interface for Iterator, which is already defined for you.
#
class Iterator(object):
def __init__(self, nums):
"""
Initializes an iterator object to the beginning of a list.
:type nums: List[int]
"""
self.L = []
for i,num in enumerate(nums):
def hasNext(self):
"""
Returns true if the iteration has more elements.
:rtype: bool
"""
def next(self):
"""
Returns the next element in the iteration.
:rtype: int
"""
class PeekingIterator(object):
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
def next(self):
"""
:rtype: int
"""
def hasNext(self):
"""
:rtype: bool
"""
# Your PeekingIterator object will be instantiated and called as such:
iter = PeekingIterator(Iterator(nums))
while iter.hasNext():
val = iter.peek() # Get the next element but not advance the iterator.
iter.next() # Should return the same value as [val].
| [
"aria@Arias-MacBook-Pro.local"
] | aria@Arias-MacBook-Pro.local |
c5a1e157aea842650f2144ec231bb7166234b266 | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/operators/vector/test_minimum_ad_001.py | 7d7952f9c327d7e59295b101f0f6af6ed9cc7880 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 2,560 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from base import TestBase
from test_run.minimum_ad_run import minimum_ad_run
class TestCase(TestBase):
def setup(self):
"""set test case """
case_name = "test_minimum_ad_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= %s Setup case============", self.casename)
self.testarg = [
# testflag,opfuncname,testRunArgs, setdimArgs
("minimum_ad_001", minimum_ad_run, ((2, 2, 2), "int32", True, True)),
("minimum_ad_002", minimum_ad_run, ((2, 2), "float16", True, False)),
("minimum_ad_003", minimum_ad_run, ((2, 3, 3, 4), "int32", False, True)),
]
self.testarg_rpc_cloud = [
# testflag,opfuncname,testRunArgs, setdimArgs
("minimum_ad_001", minimum_ad_run, ((2, 3, 3, 4), "float32", False, True)),
("minimum_ad_002", minimum_ad_run, ((2, 2, 1), "float16", True, True)),
("minimum_ad_003", minimum_ad_run, ((2, 3, 3, 4), "int32", False, True)),
("minimum_ad_004", minimum_ad_run, ((16, 16), "float16", True, False)),
("minimum_ad_005", minimum_ad_run, ((8, 16), "int32", True, True)),
]
@pytest.mark.rpc_mini
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_ascend_training
def test_run(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg)
@pytest.mark.rpc_cloud
@pytest.mark.env_onecard
@pytest.mark.platform_x86_ascend_training
def test_run_rpc_cloud(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= %s Setup case============", self.casename) | [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
3059be9fd2c16b15f1b6fae6da39e0af08430466 | 1f7847055332e16614f5358f0ec39b39bb9a66a7 | /exercises/14_generators/test_task_14_1a.py | 4b50c494e9e7b9b19e0aae6f7a719e0deb3d277b | [] | no_license | satperm/advpyneng-examples-exercises | 6641dae31fa7f44db7e99547bc70d740988f21b9 | 6b12c320cace1d303dae38ddba9b19550a8708ec | refs/heads/master | 2022-12-14T09:28:48.255804 | 2020-09-06T14:14:42 | 2020-09-06T14:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import time
import pytest
import task_14_1a
from collections.abc import Generator
import sys
sys.path.append('..')
from common_functions import check_function_exists, check_function_params
def test_func_created():
'''Проверяем, что функция создана'''
check_function_exists(task_14_1a, 'get_intf_ip')
def test_get_intf_ip_is_generator():
return_value = task_14_1a.get_intf_ip('config_r1.txt')
assert isinstance(return_value, Generator), "Надо создать генератор"
def test_get_intf_ip_yield_value():
return_value = task_14_1a.get_intf_ip('config_r1.txt')
all_results = list(return_value)
assert ('Loopback0', '10.1.1.1', '255.255.255.255') in all_results, "Функция вернула неправильный результат"
def test_get_intf_ip_new_file(tmpdir):
config = (
'!\n'
'!\n'
'interface Loopback0\n'
' ip address 192.168.10.1 255.255.255.255\n'
'!\n'
'interface Ethernet0/1\n'
' no ip address\n'
'!\n'
'interface Ethernet0/2\n'
' description To P_r9 Ethernet0/2\n'
' ip address 192.168.20.1 255.255.255.0\n'
' mpls traffic-eng tunnels\n'
'!\n'
'ip access-list standard LDP\n'
' permit 192.168.20.0 0.0.0.255\n'
'!\n')
correct_results = sorted([
('Loopback0', '192.168.10.1', '255.255.255.255'),
('Ethernet0/2', '192.168.20.1', '255.255.255.0')])
# записываем строку config во временный файл
dest_filename = tmpdir.mkdir("test_tasks").join("task_14_1a.txt")
dest_filename.write(config)
# проверяем результат
return_value = task_14_1a.get_intf_ip(dest_filename)
assert sorted(return_value) == correct_results, "Функция вернула неправильный результат"
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
8d66c48f1cf17ed6e117de35119616e410c9e269 | 3b2867636844ab4b402ef091b61222a5870bae6e | /year2017/day14.py | 765292b38d049a9d76ff935743617f5776e13d1b | [] | no_license | feigaoxyz/adventofcode | f992478ff6518930a60b9d4e15e5902d1f208f06 | 18918e16709eef833544f48d8c1a46c93d950000 | refs/heads/master | 2021-06-21T18:33:45.229440 | 2021-01-12T16:10:03 | 2021-01-12T16:10:03 | 51,060,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | from common import load_input
from day10 import knot_hash_full
PART1_DOC = """Part 1:
For i in 0..127, how many '1' in 128 knot hashes (day 10) of
strings "INPUT-$i"?
"""
PART2_DOC = """Part 2:
Return number of connected components (4-direction) of 1's
"""
def disk_grid_recover(raw: str) -> list:
grid = []
for i in range(128):
hash = knot_hash_full('{}-{}'.format(raw, i))
grid.append([int(c) for c in '{:0>128b}'.format(int('0x' + hash, 16))])
return grid
def fn_p1(raw):
return sum(map(sum, disk_grid_recover(raw)))
def fn_p2(raw):
grid = disk_grid_recover(raw)
return len(connected_components(grid))
def connected_components(grid):
remain: set = set([(r, c)
for r in range(len(grid)) for c in range(len(grid[r]))
if grid[r][c] == 1])
marked: set = set()
ccs = []
while remain:
ccs.append(set())
p = remain.pop()
working = {p}
while working:
t = working.pop()
marked.add(t)
ccs[-1].add(t)
for dr, dc in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
r, c = t[0] + dr, t[1] + dc
if (r, c) in remain:
remain.remove((r, c))
working.add((r, c))
# print(len(marked), len(ccs))
return ccs
def test_connected_component():
assert len(connected_components([[1, 0], [0, 1]])) == 2
assert len(connected_components([[1, 1], [0, 1]])) == 1
assert len(connected_components([[1, 1, 1], [0, 0, 1], [0, 1, 1]])) == 1
pass
if __name__ == '__main__':
example = """flqrgnkx
""".strip()
input_data = load_input(__file__.split('.')[0] + '_in.txt').strip()
# print("Part 1 example:", fn_p1(example)) # 8108
# print("Part 1:", fn_p1(input_data)) # 8216
print("Part 2 example:", fn_p2(example)) # 1242
print("Part 2:", fn_p2(input_data)) # 1139
| [
"contact@feigao.org"
] | contact@feigao.org |
0603ccdfdf7026c83b1b6adbeb8206cdd97687ee | 69371d185a807c2754c460e7c7ccf8debe1bf384 | /src/robot/parsing/lexerwrapper.py | 97cc47e501426872e8e91563105988f3798536ef | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | eternalconcert/robotframework | bf5b8df519642fe383ba82f15f2b4c4f467a5c5e | 802f6a4986a34a6f64f7b48467d0a38f2b14fdd8 | refs/heads/master | 2020-07-17T09:38:35.126356 | 2019-09-04T20:13:47 | 2019-09-04T20:13:47 | 205,995,467 | 2 | 0 | NOASSERTION | 2019-09-03T05:25:15 | 2019-09-03T05:25:14 | null | UTF-8 | Python | false | false | 2,301 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from robot.errors import DataError
from robot.output import LOGGER
from robot.utils import Utf8Reader, get_error_message
from .restreader import read_rest
PROCESS_CURDIR = True
class LexerWrapper(object):
def __init__(self, lexer, source):
self.source = source
self.curdir = os.path.dirname(source).replace('\\', '\\\\')
lexer.input(self._read(source))
self.tokens = lexer.get_tokens()
def _read(self, path):
try:
# IronPython handles BOM incorrectly if not using binary mode:
# https://ironpython.codeplex.com/workitem/34655
with open(path, 'rb') as data:
if os.path.splitext(path)[1].lower() in ('.rest', '.rst'):
return read_rest(data)
return Utf8Reader(data).read()
except:
raise DataError(get_error_message())
def token(self):
"""Adapter for yacc.yacc"""
token = next(self.tokens, None)
if token and token.type == token.ERROR:
self._report_error(token)
return self._next_token_after_eos()
if token and '${CURDIR}' in token.value and PROCESS_CURDIR:
token.value = token.value.replace('${CURDIR}', self.curdir)
return token
def _report_error(self, token):
# TODO: add line number
LOGGER.error("Error in file '%s': %s" % (self.source, token.error))
def _next_token_after_eos(self):
while True:
token = self.token()
if token is None:
return None
if token.type == token.EOS:
return self.token()
| [
"peke@iki.fi"
] | peke@iki.fi |
1ffd905a0dc4a4c67f0015ba2c5379758054c570 | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY100~199/DAY148-LEET1011-CapacityToShipPackagesWithinDDays/joohyuk.py | 2fff9fa5c66fb2651695712afd4d3d1c98716ac2 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import sys
si = sys.stdin.readline
def solve(weights,d):
t=sum(weights)
s,e=0,t
while s<e:
m=(s+e)//2
hold,c=0,0
for e in weights:
if hold<m:
hold+=e
else:
hold=e
c+=1
if c<d:
e=m
else:
s=m
print(s)
| [
"noreply@github.com"
] | tachyon83.noreply@github.com |
4e60cf2724fa2e5c06ab75f0f32b8a440d656ec1 | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/minigame/TwoDCamera.py | 5e1022d1ec3b0d80cd7d937278f60e318d768b98 | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.minigame import ToonBlitzGlobals
import math
class TwoDCamera(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDCamera')
def __init__(self, camera):
self.notify.debug('Constructing TwoDCamera with %s' % camera)
self.camera = camera
self.cameraSideView = ToonBlitzGlobals.CameraStartingPosition
self.threeQuarterOffset = 2
self.changeFacingInterval = None
self.ivalControllingCamera = False
self.accept('avatarOrientationChanged', self.setupChangeFacingInterval)
def onstage(self):
self.camera.reparentTo(render)
p = self.cameraSideView
self.camera.setPosHpr(render, p[0], p[1], p[2], p[3], p[4], p[5])
self.camera.setX(
render,
base.localAvatar.getX(render) + self.threeQuarterOffset)
def destroy(self):
self.ignore('avatarOrientationChanged')
p = self.cameraSideView
self.camera.setPosHpr(render, p[0], p[1], p[2], p[3], p[4], p[5])
def update(self):
if not self.ivalControllingCamera:
camX = base.localAvatar.getX(render) - math.sin(
base.localAvatar.getH(render) * math.pi /
180) * self.threeQuarterOffset
self.camera.setX(render, camX)
def clearChangeFacingInterval(self):
if self.changeFacingInterval:
self.changeFacingInterval.pause()
del self.changeFacingInterval
self.changeFacingInterval = None
def setupChangeFacingInterval(self, newHeading):
self.clearChangeFacingInterval()
self.newHeading = newHeading
self.changeFacingInterval = LerpFunc(self.myLerpPos, duration=5.0)
self.changeFacingInterval.start()
def myLerpPos(self, t):
self.ivalControllingCamera = True
finalCamX = base.localAvatar.getX(render) - math.sin(
self.newHeading * math.pi / 180) * self.threeQuarterOffset
diffX = finalCamX - self.camera.getX(render)
self.camera.setX(render, self.camera.getX(render) + diffX * t)
if math.fabs(self.camera.getX(render) - finalCamX) < 0.01:
self.notify.debug('giving up camera control')
self.camera.setX(render, finalCamX)
self.ivalControllingCamera = False
self.clearChangeFacingInterval()
| [
"47166977+peppythegod@users.noreply.github.com"
] | 47166977+peppythegod@users.noreply.github.com |
bc4ea94f6b02b9e24b146d3a2061fc53211512ef | 3ec4823d1cf7197da0fe086613383c0d2f85ba7b | /Lesson 7 function/7.4_positional_arguments.py | 4c675f46308702213181bef6274075a9463ee4dc | [] | no_license | JamCrumpet/Lesson-notes | 268f114d420cd55ec3c87c9334814a6e8398b6e6 | 501ef9687be8da4205a640fbc391444ebd65a15d | refs/heads/master | 2022-12-16T05:58:35.413156 | 2020-09-16T14:52:19 | 2020-09-16T14:52:19 | 288,780,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # when calling a function python will match each argument in the function call with a parameter ...
# ... in the function definition
# the simplest way to do this is bases on the order of the argument provided
# values match up this way are called positional argument
# for example consider a function that displays info about pets
# the function tell use what kind of animal each pet is the the pets name
def describe_pet(animal_type, pet_name): # two types of argument the pet name and type
"""Display information about a pet"""
print("\nI have a " + animal_type + ".")
print("My " + animal_type + "'s name is " + pet_name.title() + ".")
describe_pet("Dog", "Wes") | [
"noreply@github.com"
] | JamCrumpet.noreply@github.com |
3acd80e802c20c4c87da449e8fecd10a0d1bbc5d | 1595ffcb2e59f511cabf4b9dc2b8de66862cc5a2 | /run_phase1.py | 9edf2f184e13eb252df596eb0256db2ff85e9163 | [] | no_license | littlepretty/SensorPGM | ab741064f2c9d8c5a6c8a917ef235250f80829eb | 06c8c980b0724f18247dab31a545c504f99e045b | refs/heads/master | 2021-01-14T12:44:42.769800 | 2016-05-05T14:39:09 | 2016-05-05T14:39:09 | 52,915,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,346 | py | #!/usr/bin/env python
import csv
import logging as lg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from python_log_indenter import IndentedLoggerAdapter
def learnModel(filename, n=48, m=50):
f = open(filename, 'rb')
reader = csv.reader(f)
day1 = []
day2 = []
day3 = []
for row in reader:
day1.append([float(x) for x in row[:n]])
day2.append([float(x) for x in row[n:n*2]])
day3.append([float(x) for x in row[n*2:n*3]])
"""learn parameters for m*n random variables"""
means = np.zeros((m, n))
stdevs = np.zeros((m, n))
for i in range(0, m):
for j in range(0, n):
row = [day1[i][j], day2[i][j], day3[i][j]]
means[i][j] = np.mean(row)
stdevs[i][j] = np.std(row) / np.sqrt(len(row) - 1)
log.debug(str(means[:1]))
log.debug(str(stdevs[:1]))
return means, stdevs
def windowInferenceError(day, means, b_cnt, n=96, m=50):
error = []
f = open('w%d.csv' % b_cnt, 'wb')
writer = csv.writer(f)
writer.writerow(title)
infer_data = np.zeros((m, n))
for i in range(0, n):
test_data = day[:, i]
infer_data[:, i] = means[:, i % 48]
window_start = int(i * b_cnt) % m
window_size = b_cnt
log.debug(str(range(window_start, window_start + window_size)))
"""replace inferred data with test data for these inside window"""
for k in range(window_start, window_start + window_size):
index = k % m
infer_data[index, i] = test_data[index]
"""absolute error for time i"""
error_i = np.subtract(test_data, infer_data[:, i])
error_i = np.absolute(error_i)
error.append(error_i)
for i in range(0, m):
row = [x for x in infer_data[i, :]]
row.insert(0, i)
writer.writerow(row)
return error
def findLargestK(error, budget, m=50):
max_indices = []
indices = range(0, m)
log.debug(str(error))
for index in indices:
if len(max_indices) == budget:
break
count = 0
for j in range(0, m):
if error[index] > error[j]:
count += 1
if count >= m - budget:
max_indices.append(index)
log.debug('read sensors %s' % str(max_indices))
log.debug('#sensors = %d' % len(max_indices))
return max_indices
def varianceInferenceError(day, means, stdevs, b_cnt, n=96, m=50):
error = []
f = open('v%d.csv' % b_cnt, 'wb')
writer = csv.writer(f)
writer.writerow(title)
infer_data = np.zeros((m, n))
for i in range(0, n):
test_data = day[:, i]
infer_data[:, i] = means[:, i % 48]
"""find maximum variances' index"""
variance = stdevs[:, i % 48]
max_indices = findLargestK(variance, b_cnt, m)
"""replace most variant data with test data"""
for index in max_indices:
infer_data[index, i] = test_data[index]
"""absolute error for time i"""
error_i = np.subtract(test_data, infer_data[:, i])
error_i = np.absolute(error_i)
error.append(error_i)
for i in range(0, m):
row = [x for x in infer_data[i, :]]
row.insert(0, i)
writer.writerow(row)
return error
def inferenceTest(filename, means, stdevs, n=96, m=50):
f = open(filename, 'rb')
reader = csv.reader(f)
data = np.array(list(reader)).astype('float')
win_avg_errors = []
var_avg_errors = []
for cnt in budget_cnts:
total_err = windowInferenceError(data, means, cnt)
win_avg_err = np.sum(total_err) / (len(total_err) * len(total_err[0]))
log.info('Window Inference for %.2f budget' % cnt)
log.debug('error matrix \n' + str(total_err))
log.add().info('avg error = ' + str(win_avg_err))
log.sub()
win_avg_errors.append(win_avg_err)
total_err = varianceInferenceError(data, means, stdevs, cnt)
var_avg_err = np.sum(total_err) / (len(total_err) * len(total_err[0]))
log.info('Variance Inference for %.2f budget' % cnt)
log.debug('error matrix \n' + str(total_err))
log.add().info('avg error = ' + str(var_avg_err))
log.sub()
var_avg_errors.append(var_avg_err)
return win_avg_errors, var_avg_errors
def plotAvgError(win, var):
matplotlib.rc('font', size=18)
index = np.arange(len(budget_cnts))
bar_width = 0.27
fig, ax = plt.subplots()
rect1 = ax.bar(index, win, bar_width, color='b', hatch='/')
rect2 = ax.bar(index + bar_width, var, bar_width, color='r', hatch='\\')
ax.set_xlim([-0.5, 5])
ax.set_ylabel('Mean Absolute Error')
ax.set_xlabel('Budget Count')
ax.set_xticks(index + bar_width)
ax.set_xticklabels(('0', '5', '10', '20', '25'))
ax.legend((rect1[0], rect2[0]), ('Window', 'Variance'))
plt.savefig('%s_err.eps' % topic, format='eps',
bbox_inches='tight')
# plt.show()
def main(train_file, test_file):
means, stdevs = learnModel(train_file)
win, var = inferenceTest(test_file, means, stdevs)
print win
print var
plotAvgError(win, var)
if __name__ == '__main__':
# lg.basicConfig(level=lg.DEBUG)
lg.basicConfig(level=lg.INFO)
log = IndentedLoggerAdapter(lg.getLogger(__name__))
title = ['sensors', 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,
11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0,
16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0,
21.5, 22.0, 22.5, 23.0, 23.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5,
3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5,
9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5,
14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5,
19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0]
budget_cnts = [20]
budget_cnts = [0, 5, 10, 20, 25]
log.info('Processing Temperature')
log.add()
topic = 'temperature'
main('intelTemperatureTrain.csv', 'intelTemperatureTest.csv')
log.sub()
log.info('Processing Humidity')
log.add()
topic = 'humidity'
main('intelHumidityTrain.csv', 'intelHumidityTest.csv')
log.sub()
| [
"littlepretty881203@gmail.com"
] | littlepretty881203@gmail.com |
e4e7ba162eaf4533e33f22af3e2304d322d02af4 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4150/codes/1585_1015.py | 7c3f11b65c17fa5d163e69c25b1d75d669bc1e27 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
a = int(input("valor 1: "))
b = int(input("valor 2: "))
c = int(input("valor 3: "))
minimo = (min(a,b,c))
maximo = (max(a,b,c))
intermediario1 = (a+b+c)
intermediario = intermediario1 - ( minimo+maximo)
print(minimo)
print(intermediario)
print(maximo)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
9e09f4b3ae1d0853cbe16395aa8aa8d3699e6d1e | 306d2a92fb331aec6ddf0794b538d6e3385a0df9 | /app/model/account.py | 8f9c579c1ff38d8f743848f6c95fcc6bd5f55c7f | [] | no_license | Zarinabonu/ForceApp | f343d3a52aee08890230c5425c9e238df99c5a7f | 13f8e8613999c4850fc6f0bfcec66f897eecbe4a | refs/heads/master | 2020-12-10T08:00:25.072289 | 2020-01-20T13:14:07 | 2020-01-20T13:14:07 | 233,540,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | from random import randint, randrange
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
class Account(models.Model):
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
f_name = models.CharField(max_length=100, null=True, blank=True)
l_name = models.CharField(max_length=100, null=True, blank=True)
m_name = models.CharField(max_length=100, null=True, blank=True)
phone = models.IntegerField(null=True, blank=True)
photo = models.ImageField(null=True, blank=True)
address = models.CharField(max_length=200, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
@receiver(post_save, sender=Account)
def create_user(sender, instance, created, **kwargs):
if created:
l = instance.l_name
f = instance.f_name
username = l+f
value = randrange(100, 999)
u = User.objects.create(username=username)
u.set_password(value)
u.save()
instance.user = u
instance.save()
@receiver(post_save, sender=User)
def create_token(sender, instance, created, **kwargs):
if created:
token = Token.objects.create(user=instance)
| [
"zarinabonu199924@gmail.com"
] | zarinabonu199924@gmail.com |
fe65fbb3d6367aae8acb39fc61f23ca80d548b1a | aa2157e595b89c3512857e41fee16e8b11d7a657 | /Fresher Lavel Logical Programms/Between Two number print prime number.py | 94f23ce6807fc887991b4303536dbc6271acaff8 | [] | no_license | biswaranjanroul/Python-Logical-Programms | efee6276eea3eafab9ee6b6e7e0910b715a504d1 | 152dcecf2ecae7891a11769f250a4dc8d9d6b15f | refs/heads/master | 2022-12-15T07:37:45.978218 | 2020-09-17T13:24:53 | 2020-09-17T13:24:53 | 296,326,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | lower=int(input("Enter lower range number:"))
upper=int(input("Enter upper range number:"))
for num in range(lower,upper+1):
if num>1:
for i in range(2,num):
if (num % i)==0:
break
else:
print(num)
| [
"biswaranjanroul2@gmail.com"
] | biswaranjanroul2@gmail.com |
3df314f92c0af4d60757e92c6b59b97b7bd43315 | 0de67c078e00b9f43bfd6c4ddb1f4ffd153f8b7e | /clubs_website/settings.py | 88f227d2f0e998d94739536e11fb99e1cbeb5270 | [] | no_license | jreinstra/menlo-clubs | c249d754942a2a49e2ebae5914a1f81a27f845ef | ff50d07e6220a537d5de42c18ae73c845d8c35d7 | refs/heads/master | 2020-04-26T11:05:12.537802 | 2015-11-18T22:18:11 | 2015-11-18T22:18:11 | 39,816,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py | """
Django settings for clubs_website project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ip-09p46cq61uibshu7r0=+fe-1smc4&%sq9@b=%hb1k5ck039'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'clubs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'clubs_website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'clubs_website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Heroku settings below
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {}
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
try:
from local_settings import *
except ImportError:
pass
| [
"jreinstra@gmail.com"
] | jreinstra@gmail.com |
6ed5d3c73ea9d4460a02feac33d5553147dbe7f1 | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /setups/cuda9.2/setup_all.py | b015fd581d34eec93edcca09d13adfb412600491 | [
"Apache-2.0"
] | permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 1,358 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="monk_cuda92", # Replace with your own username
version="0.0.1",
author="Tessellate Imaging",
author_email="abhishek@tessellateimaging.com",
description="Monk Classification Library - Cuda92 - backends - pytorch, keras, gluon",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Tessellate-Imaging/monk_v1",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Environment :: GPU :: NVIDIA CUDA :: 9.2",
],
install_requires=[
'scipy',
'scikit-learn',
'scikit-image',
'opencv-python',
'pillow==6.0.0',
'tqdm',
'gpustat',
'psutil',
'pandas',
'GPUtil',
'mxnet-cu92==1.5.1',
'gluoncv==0.6',
'torch==1.4.0',
'torchvision==0.5.0',
'keras==2.2.5',
'tensorflow-gpu==1.12.0',
'torch==1.4.0',
'tabulate',
'netron',
'networkx',
'matplotlib',
'pylg',
'ipywidgets'
],
python_requires='>=3.6',
)
| [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
db97c2663b86fb19553854a8de36158cd641b64b | a5638a2ff9381b1a5b804eab3d90db04e9614c4f | /Python/Easy/multiplesOfANumber.py | 3ab47faf75bd90882b94721a9d2e81973e7a609f | [] | no_license | jeffthemaximum/CodeEval | c9e1fe5a966718a627d72e3f73f9c1bddb42c0ef | e2e85e7564a711c2ae83acbcab6b5c67023b3659 | refs/heads/master | 2021-01-17T07:21:25.490894 | 2016-07-18T23:54:22 | 2016-07-18T23:54:22 | 40,488,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | import sys
with open(sys.argv[1], 'r') as input:
test_cases = input.read().strip().splitlines()
for test in test_cases:
x = test.split(',') #an array
i = 1
flag = True
while flag:
if (int(x[1]) * i) > int(x[0]):
print int(x[1]) * i
flag = False
else:
i = i + 1
continue
break | [
"frey.maxim@gmail.com"
] | frey.maxim@gmail.com |
0cadc9640709c0c2dd78d9014603c391ed1cf5fa | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /045_functions/009_functools_module/examples/13-chain.py | d7e02c6a1ee13cce91b500f4c51f840784da80d8 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 172 | py | """Пример использования функции chain модуля itertools"""
from itertools import chain
for i in chain(range(2), range(3)):
print(i)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
9a46dd5c59b0f01363d42c4eead8dcff3f0a4dbf | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/insert_20200610160638.py | 4b1c20b5c255f499b25e5a60015365369be8bac8 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # nums is a list
# find where n is to be inserted
# soo,you loop through the array
# the array is sorted
# to know the position you should check whethere n is greater than nums[i]
# continue the loop as you check
def Insert(nums,n):
for i in range(len(nums)):
print(nums[i])
# if n in nums[i]:
# print(i)
Insert([1,3,5,6], 2)
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
6a7a82c9c0a99d0b647ecff1e459848d4a2ac2e3 | 41f960a830752877bf2248bb2c620491752ccfe5 | /fork/multipprocessing.py | 1fe7394082645c192a8111fbf878346eed726efa | [] | no_license | drakhero/python_base | 368617032d2d3d5388f22de2cb2ca0af81de00ec | 90848ef630ab607a1b5563f773e1b4ca7eaef08f | refs/heads/master | 2020-04-24T09:51:09.596139 | 2019-02-23T03:52:35 | 2019-02-23T03:52:35 | 171,875,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from multiprocessing import Process
import time
#事件1
def fun1():
print("子进程做事件1")
#创建1个进程
p = Process(target=fun1)
#进程启动,执行fun1函数中的代码
p.start()
time.sleep(1)
#父进程
print('父进程在做事')
| [
"17504336124@163.com"
] | 17504336124@163.com |
ad80d80796527bb59b38b55a1a1ec677fb086100 | 94c4a1a14cc9e68584912340c8b2fd54686dd638 | /Day9/select_Ftp/server/server.py | e50b72d485949db91c8b08747249b21954a9e665 | [] | no_license | Anne19953/LearnPython | 33da4ae57b3aed9cb687567958cafa8a55ff2b7b | a2bcb620ed453ff802862ae31efd0e8c159d8bfe | refs/heads/master | 2020-08-09T00:25:31.839253 | 2019-10-10T02:53:45 | 2019-10-10T02:53:45 | 192,083,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | #!/usr/bin/env python
# coding:utf-8
"""
Name : server.py
Author : anne
Time : 2019-08-27 17:28
Desc:
"""
import os
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import socket
import selectors
class selectTtpServer:
def __init__(self):
self.dic = {}
self.hasReceived = 0
self.sel = selectors.DefaultSelector()
self.create_socket()
self.handle()
#注册socket
def create_socket(self):
server = socket.socket()
server.bind(('127.0.0.1',8885))
server.listen(5)
server.setblocking(False) #设置为非阻塞
self.sel.register(server,selectors.EVENT_READ,self.accept)
print('服务端已开启,等待用户链接。。。')
#监听
def handle(self):
while True:
events = self.sel.select() # 监听
for key, mask in events:
callback = key.data # 第一次是accept函数的地址,如果监听到是conn变化就是read函数的地址
callback(key.fileobj, mask) # 执行accept(),key.fileobj 就是socket,
# 执行read(),key.fileobj 就是conn
def accept(self,sock,mask):
conn, addr = sock.accept()
print('accepted', conn, 'from', addr)
conn.setblocking(False)
self.sel.register(conn, selectors.EVENT_READ, self.read) # 将conn与read函数进行绑定
self.dic[conn] = {}
def read(self,conn,mask):
try:
if not self.dic[conn]:
data = conn.recv(1024)
cmd,filename,filesize = str(data,encoding='utf-8').split('|')
self.dic = {conn:{'cmd':cmd,'filename':filename,'filesize':int(filesize)}}
if cmd == 'put':
conn.send(bytes('OK',encoding='utf-8'))
if self.dic[conn]['cmd'] == 'get':
file = os.path.join(BASE_DIR,'download',filename)
if os.path.exists(file):
filesize = os.path.getsize(file)
send_info = '%s|%s'%('YES',filesize)
conn.send(bytes(send_info,encoding='utf-8'))
else:
send_info = '%s|%s'%('NO',0)
conn.send(bytes(send_info,encoding='utf-8'))
else:
if self.dic[conn].get('cmd',None):
cmd = self.dic[conn].get('cmd')
if hasattr(self,cmd):
func = getattr(self,cmd)
func(conn)
else:
print('error cmd!')
except Exception as e:
print('error',e)
self.sel.unregister(conn)
conn.close()
def put(self,conn):
fileName = self.dic[conn]['filename']
fileSize = self.dic[conn]['filesize']
path = os.path.join(BASE_DIR,'upload',fileName)
recv_data = conn.recv(1024)
self.hasReceived += len(recv_data)
with open(path,'ab') as f:
f.write(recv_data)
if fileSize == self.hasReceived:
if conn in self.dic.keys():
self.dic[conn] = {}
print('%s 上传完毕!'%fileName)
def get(self,conn):
pass
if __name__ == '__main__':
selectTtpServer()
| [
"anne@199534.com"
] | anne@199534.com |
b7f03924a22aa5e1a6b5585208e2c3461ac89f15 | bf64d19174ef332f39e2d8210f3eb4f783262554 | /lib/networks/Resnet18_fcn_classifier_test.py | af272fabe9f6981c3d58530213a26620b2263652 | [] | no_license | juzisedefeimao/cv | 3e4dd7deee471321e071ca996769fc3b65481993 | fb9e9292030481f5a26efde4003fb83d37a34962 | refs/heads/master | 2020-05-30T14:29:13.253563 | 2019-06-02T01:08:53 | 2019-06-02T01:08:53 | 189,791,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,027 | py | from lib.networks.network import Network
import numpy as np
from lib.networks.netconfig import cfg
import tensorflow as tf
cls_num = cfg.ZLRM.TRAIN.CLASSIFY_NUM
class Resnet18_fcn_classifier_test(Network):
def __init__(self):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None ,None, 3], name='data')
self.layers = {'data': self.data}
self.setup()
def setup(self):
bn_trainable = False
(
self.feed('data')
.conv(7, 7, 64, 2, 2, name='conv1', relu=False, trainable=True)
.batch_normalization(name='bn1', relu=True, trainable=bn_trainable)
.max_pool(3, 3, 2, 2, name='pool1', padding='VALID')
)
# ======================变换形状适应第一组模块=======================
(
self.feed('pool1')
.conv(1, 1, 64, 1, 1, name='transform1_conv', relu=False, trainable=True)
.batch_normalization(name='transform1_bn', relu=False, trainable=bn_trainable)
)
# ======================第一组模块===========================
(
self.feed('pool1')
.conv(3, 3, 64, 1, 1, name='res1_1_conv1', relu=False, trainable=True)
.batch_normalization(name='res1_1_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 64, 1, 1, name='res1_1_conv2', relu=False, trainable=True)
.batch_normalization(name='res1_1_bn2', relu=False, trainable=bn_trainable)
)
(
self.feed('transform1_bn', 'res1_1_bn2')
.add(name='res1_1_add')
.relu(name='res1_1_relu')
.conv(3, 3, 64, 1, 1, name='res1_2_conv1', relu=False, trainable=True)
.batch_normalization(name='res1_2_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 64, 1, 1, name='res1_2_conv2', relu=False, trainable=True)
.batch_normalization(name='res1_2_bn2', relu=False, trainable=bn_trainable)
)
# ======================计算残差变换形状适应第二组模块=======================
(
self.feed('transform1_bn', 'res1_2_bn2')
.add(name='res1_2_add')
.relu(name='res1_2_relu')
.conv(1, 1, 128, 2, 2, name='transform2_conv', relu=False, trainable=True)
.batch_normalization(name='transform2_bn', relu=False, trainable=bn_trainable)
)
# ======================第二组模块===========================
(
self.feed('res1_2_relu')
.conv(3, 3, 128, 2, 2, name='res2_1_conv1', relu=False, trainable=True)
.batch_normalization(name='res2_1_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 128, 1, 1, name='res2_1_conv2', relu=False, trainable=True)
.batch_normalization(name='res2_1_bn2', relu=False, trainable=bn_trainable)
)
(
self.feed('transform2_bn', 'res2_1_bn2')
.add(name='res2_1_add')
.relu(name='res2_1_relu')
.conv(3, 3, 128, 1, 1, name='res2_2_conv1', relu=False, trainable=True)
.batch_normalization(name='res2_2_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 128, 1, 1, name='res2_2_conv2', relu=False, trainable=True)
.batch_normalization(name='res2_2_bn2', relu=True, trainable=bn_trainable)
)
# ======================计算残差变换形状适应第三组模块=======================
(
self.feed('transform2_bn', 'res2_2_bn2')
.add(name='res2_2_add')
.relu(name='res2_2_relu')
.conv(1, 1, 256, 2, 2, name='transform3_conv', relu=False, trainable=True)
.batch_normalization(name='transform3_bn', relu=False, trainable=bn_trainable)
)
# ======================第三组模块===========================
(
self.feed('res2_2_relu')
.conv(3, 3, 256, 2, 2, name='res3_1_conv1', relu=False, trainable=True)
.batch_normalization(name='res3_1_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 256, 1, 1, name='res3_1_conv2', relu=False, trainable=True)
.batch_normalization(name='res3_1_bn2', relu=True, trainable=bn_trainable)
)
(
self.feed('transform3_bn', 'res3_1_bn2')
.add(name='res3_1_add')
.relu(name='res3_1_relu')
.conv(3, 3, 256, 1, 1, name='res3_2_conv1', relu=False, trainable=True)
.batch_normalization(name='res3_2_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 256, 1, 1, name='res3_2_conv2', relu=False, trainable=True)
.batch_normalization(name='res3_2_bn2', relu=True, trainable=bn_trainable)
)
# ======================计算残差变换形状适应第四组模块=======================
(
self.feed('transform3_bn', 'res3_2_bn2')
.add(name='res3_2_add')
.relu(name='res3_2_relu')
.conv(1, 1, 512, 2, 2, name='transform4_conv', relu=False, trainable=True)
.batch_normalization(name='transform4_bn', relu=False, trainable=bn_trainable)
)
# ======================第四组模块===========================
(
self.feed('res3_2_relu')
.conv(3, 3, 512, 2, 2, name='res4_1_conv1', relu=False, trainable=True)
.batch_normalization(name='res4_1_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 512, 1, 1, name='res4_1_conv2', relu=False, trainable=True)
.batch_normalization(name='res4_1_bn2', relu=True, trainable=bn_trainable)
)
(
self.feed('transform4_bn', 'res4_1_bn2')
.add(name='res4_1_add')
.relu(name='res4_1_relu')
.conv(3, 3, 512, 1, 1, name='res4_2_conv1', relu=False, trainable=True)
.batch_normalization(name='res4_2_bn1', relu=True, trainable=bn_trainable)
.conv(3, 3, 512, 1, 1, name='res4_2_conv2', relu=False, trainable=True)
.batch_normalization(name='res4_2_bn2', relu=True, trainable=bn_trainable)
)
# ======================计算残差变换结束模块=======================
(
self.feed('transform4_bn', 'res4_2_bn2')
.add(name='res4_2_add')
.relu(name='res4_2_relu')
.conv(1, 1, cls_num * cfg.ZLRM.PSROIPOOL * cfg.ZLRM.PSROIPOOL, 1, 1, name='fcn_cls', trainable=True)
.ps_pool(output_dim=cls_num, group_size=cfg.ZLRM.PSROIPOOL, name='pspooled_cls_rois')
.avg_pool(cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, name='cls_score')
.softmax(name='cls_prob')
) | [
"17696272096@163.com"
] | 17696272096@163.com |
05c69d8e932f1b7de373e2b187bfd9d583ee9ff4 | ac3093b2b1b37244fbd10f6eee4de22fa50911da | /links/forms.py | cecf01dc0104575fdf01dc26ebe776d62ce94999 | [
"MIT"
] | permissive | moshthepitt/product.co.ke | 7b5e7b18d0fd2673be52455dbfbcbbeecf2b9224 | 41b32c4019f30ce9483b4d84f335450f45f0e1cb | refs/heads/master | 2021-01-14T11:53:06.899243 | 2016-05-12T13:02:22 | 2016-05-12T13:02:22 | 57,992,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML
from crispy_forms.bootstrap import Field, FormActions
from .models import Link
class LinkForm(forms.ModelForm):
    """Model form for creating/editing a ``Link``.

    Rendered with django-crispy-forms (Bootstrap layout); overrides the
    ``description`` field to cap it at 750 characters and use a textarea.
    """
    description = forms.CharField(
        max_length=750,
        # NOTE(review): "cahracters" typo is user-visible runtime help text;
        # left unchanged here because string literals are program behavior.
        help_text=_("A short description. Please limit to 750 cahracters."),
        widget=forms.Textarea
    )
    class Meta:
        model = Link
        fields = ['title', 'link', 'description']
    def __init__(self, *args, **kwargs):
        """Build the crispy-forms helper and layout used for rendering."""
        super(LinkForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_id = 'link-form'
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('title'),
            Field('link'),
            Field('description'),
            FormActions(
                Submit('submit', _('Save'), css_class='btn-success'),
                # Cancel is a plain link back to the "home" URL.
                HTML(
                    "<a class='btn btn-default' href='{% url \"home\" %}'>Cancel</a>")
            )
        )
| [
"kelvin@jayanoris.com"
] | kelvin@jayanoris.com |
5175579f392fdd01f9d697c958ad775828bc60e6 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_Lag1Trend/cycle_0/ar_/test_artificial_1024_None_Lag1Trend_0__20.py | df4b1542088823a0e850d7fad698d78d1c3a2ab6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 265 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
5670329fa6b2b9d2c32ff04c5c850e4875756d9a | cfa2417f07259e512a1bbface4f1f4ccd66502c6 | /test/test_Util/test_ifu_util.py | e96fc21970bd9cf8f6e5e013a2f6847d8ecdd5c7 | [
"BSD-3-Clause"
] | permissive | jiwoncpark/hierArc | 3779439533d3c9c5fe2e687f4bdf737dfc7673e8 | 3f31c0ae7540387fe98f778035d415c3cff38756 | refs/heads/master | 2021-05-18T21:32:45.590675 | 2020-12-23T00:01:01 | 2020-12-23T00:01:01 | 251,431,028 | 0 | 0 | NOASSERTION | 2020-03-30T21:20:08 | 2020-03-30T21:20:08 | null | UTF-8 | Python | false | false | 788 | py | import numpy as np
import numpy.testing as npt
import pytest
from hierarc.Util import ifu_util
class TestIFUUtil(object):
    """Tests for :mod:`hierarc.Util.ifu_util`."""
    def setup(self):
        # No shared fixtures needed; kept for pytest's setup hook.
        pass
    def test_radial_dispersion(self):
        """binned_total on uniform maps yields one value per radial bin."""
        num = 10
        # Uniform 10x10 test maps: zero dispersion, unit weights/velocity/flux.
        dispersion_map = np.zeros((num, num))
        weight_map_disp = np.ones((num, num))
        velocity_map = np.ones((num, num))
        weight_map_v = np.ones((num, num))
        r_bins = np.linspace(0, 5, 5)
        fiber_scale = 1
        flux_map = np.ones((num, num))
        disp_r, error_r = ifu_util.binned_total(dispersion_map, weight_map_disp, velocity_map, weight_map_v, flux_map, fiber_scale, r_bins)
        # One binned value per interval between consecutive bin edges.
        assert len(disp_r) == len(r_bins) - 1
        # NOTE(review): expected value 1 presumably combines the dispersion
        # and velocity maps; verify against binned_total's definition.
        npt.assert_almost_equal(disp_r, 1, decimal=6)
if __name__ == '__main__':
pytest.main()
| [
"sibirrer@gmail.com"
] | sibirrer@gmail.com |
9c5cf35ff34488e229c78f5ead98107df7ee0731 | 2352bc07e12b0256913559cf3485a360569ccd5e | /Practice/code_class/Crossin-practices/python_weekly_question/capitalize_words.py | ec65131874c754c9acc1776b29306e73cfbd2694 | [] | no_license | Dis-count/Python_practice | 166ae563be7f6d99a12bdc0e221c550ef37bd4fd | fa0cae54e853157a1d2d78bf90408c68ce617c1a | refs/heads/master | 2022-12-12T03:38:24.091529 | 2021-12-22T09:51:59 | 2021-12-22T09:51:59 | 224,171,833 | 2 | 1 | null | 2022-12-08T05:29:38 | 2019-11-26T11:07:00 | Jupyter Notebook | UTF-8 | Python | false | false | 292 | py | #-*- coding:utf-8 -*-
quote = "How can mirrors be real if our eyes aren't real"
def fuc(string):
    """Capitalize the first letter of every whitespace-separated word.

    Splits *string* on runs of whitespace, applies ``str.capitalize`` to
    each word (first letter upper-cased, the rest lower-cased) and joins
    the words back with single spaces. Consecutive whitespace in the
    input therefore collapses to one space.
    """
    # Bug fix: the original joined an undefined name ``c_n_s`` (NameError
    # on every call); join the list of capitalized words actually built.
    return ' '.join(word.capitalize() for word in string.split())
print(fuc(quote))
| [
"492193947@qq.com"
] | 492193947@qq.com |
93a7ca5e3dd067ae8ba260b3d7caf5fabfba15e3 | d62e0bf740c8b9ee96dd161d6f1ed2e6a01616fc | /examples/twisted/wamp/pubsub/simple/example1/server.py | f9f14248acac91914594c4f5e85446bc83aa22fc | [
"Python-2.0",
"Apache-2.0"
] | permissive | devbazy/AutobahnPython | 4ff867e84811fb1c43083a139f4184824e1df8d2 | f14ca62fd59e15e078796e88153c26cb2f54a35a | refs/heads/master | 2020-12-31T05:39:39.824777 | 2014-01-19T10:02:17 | 2014-01-19T10:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp import WampServerFactory, \
WampServerProtocol
class MyPubSubServerProtocol(WampServerProtocol):
   """
   Protocol class for our simple demo WAMP server.

   Acts as a pure PubSub broker: when a client session opens it registers
   one fixed topic URI and relays events published to that topic.
   """
   def onSessionOpen(self):
      ## When the WAMP session to a client has been established,
      ## register a single fixed URI as PubSub topic that our
      ## message broker will handle
      ##
      self.registerForPubSub("http://example.com/myEvent1")
if __name__ == '__main__':
   log.startLogging(sys.stdout)
   ## our WAMP/WebSocket server
   ## (WAMP v1 endpoint on ws://localhost:9000, protocol debug enabled)
   ##
   wampFactory = WampServerFactory("ws://localhost:9000", debugWamp = True)
   wampFactory.protocol = MyPubSubServerProtocol
   listenWS(wampFactory)
   ## our Web server (for static Web content)
   ## (serves the current directory over HTTP on port 8080)
   ##
   webFactory = Site(File("."))
   reactor.listenTCP(8080, webFactory)
   ## run the Twisted network reactor
   ## (blocks until the process is stopped)
   ##
   reactor.run()
| [
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
a1103dcb4e7eb7b56f15ff380ba3acb5cd6f83bb | 521c1beeb2776161ae6d550be35cd0c180887129 | /elvis/utils/xmlparse.py | 2b88d726b7ac85815abba0ae6279ea0aa4354d38 | [] | no_license | elvis2workspace/CustomLibrary | 601b552792ac2c33beeb709474f857c82793ac7e | 6449eea8aa99ca1172f54b669d97703d36132ce3 | refs/heads/master | 2021-01-23T21:33:05.617871 | 2017-09-26T01:57:48 | 2017-09-26T01:57:48 | 58,983,388 | 0 | 1 | null | 2016-12-06T09:56:14 | 2016-05-17T02:22:14 | Python | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python
#coding=utf-8
'''
Created on 2016年1月27日
@author: zhang.xiuhai
'''
import re
import urllib
local = 'D:\\testdir\\'
def getHtml(url):
    """Fetch *url* and return the raw response body (Python 2 urllib)."""
    page = urllib.urlopen(url)  # file-like object for the remote URL; read it like a local file
    html = page.read()
    return html
def getImg(html):
    """Find all ``.jpg`` URLs in *html* and download them into ``local``.

    Files are saved as 0.jpg, 1.jpg, ... with :func:`callbackfunc`
    reporting download progress (Python 2 ``urllib.urlretrieve``).
    """
    # NOTE(review): this matches res="...jpg" attributes, which is unusual
    # (pages typically use src="..."); confirm against the target page.
    reg = r'res="(.+?\.jpg)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    x = 0
    for imgurl in imglist:
        # Download each image to the local directory with a sequential
        # numeric name; the callback prints progress as it goes.
        urllib.urlretrieve(imgurl, local+'%s.jpg' % x, callbackfunc)
        x+=1
    #return imglist
def callbackfunc(blocknum, blocksize, totalsize):
    '''urlretrieve progress hook: print the percent downloaded.

    @blocknum: number of blocks transferred so far
    @blocksize: size of each block in bytes
    @totalsize: total size of the remote file in bytes
    '''
    # NOTE(review): raises ZeroDivisionError if the server reports
    # totalsize == 0 -- tolerable for this script, but worth confirming.
    percent = 100.0*blocknum*blocksize/totalsize
    if percent > 100:
        # The final block usually overshoots the file size; clamp to 100%.
        percent = 100
    print "%.2f%%"% percent
if __name__ == '__main__':
html = getHtml("http://image.baidu.com/")
print html
# for item in getImg(html):
# print item
print getImg(html) | [
"xiuhai5052@hotmail.com"
] | xiuhai5052@hotmail.com |
09f2f4cbd1def8e671fdebdfccf32af887d19f4a | eb6f6e4fd2b358805d8b41c883a27c80067f748c | /chapter8/material.py | c8635923273822c0e8dd71b349921c2a7273c105 | [] | no_license | candyer/Ray-tracing | a4b5637ccb63eec0bddf5533abf54c6d0b164c57 | d121380f9f35ad4ad596bec1d58b4021ba022f58 | refs/heads/master | 2021-07-13T03:04:37.938321 | 2017-10-18T18:41:20 | 2017-10-18T18:41:20 | 105,039,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py |
from abc import ABCMeta, abstractmethod
class Material:
    """Abstract base for surface materials in the ray tracer.

    Concrete materials must implement :meth:`scatter`; :meth:`reflect`
    is a shared geometric helper available to all of them.
    """
    # Python-2-style ABC declaration, preserved as written in the file.
    __metaclass__ = ABCMeta

    @abstractmethod
    def scatter(self, ray_in, rec, attenuation, scattered):
        """Scatter *ray_in* at hit record *rec*; supplied by subclasses."""
        pass

    def reflect(self, v, n):
        """Mirror-reflect vector *v* about the (unit) normal *n*."""
        twice_projection = v.dot(n) * 2
        return v - n * twice_projection
| [
"candyer@users.noreply.github.com"
] | candyer@users.noreply.github.com |
158e10f3fc5f523e30714eeeb904b4f920555b5a | 25bb4e760769cc483a20f27b6312698891dce034 | /python/Sets/py-set-difference-operation-English.py | c13712f5d892bff5d1b28e0d29f3bfb1088ffb44 | [] | no_license | rangaeeeee/codes-hackerrank | e13d22adff1ef74974e34251d9bfac6cfd36f2b0 | ce7fdf7f336c10164fd2f779d4ed3713849d7c2b | refs/heads/master | 2021-01-19T17:07:28.451983 | 2017-09-01T18:05:33 | 2017-09-01T18:05:33 | 101,049,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | n = int(input())
nSet = set(map(int,input().split()))
b = int(input())
bSet = set(map(int,input().split()))
print(len(nSet.difference(bSet))) | [
"rrangarajan.85@gmail.com"
] | rrangarajan.85@gmail.com |
5d532060d925ffa94c8657a4ad75368b5d37e3ac | 1b3ed8b5b474e8346cf19279e3cec33ea5dc9c94 | /quick_start2/new_task.py | b8d4c3e9a85c91feeb7f6ed040d23c5d50a61f90 | [] | no_license | valerydmitrieva/rabbitmq_quick-start | 4b1d94b1fbe521b80adfd17dfdaf8afb7935548a | 8d9e46b96379b0957d9b85097d028d9162d1595e | refs/heads/master | 2020-04-19T04:46:45.405471 | 2019-01-28T13:57:45 | 2019-01-28T13:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import sys
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='',
routing_key='task_queue',
body=message,
properties=pika.BasicProperties(delivery_mode=2,))
print("[x] Sent %r" % (message,))
connection.close() | [
"test@example.com"
] | test@example.com |
6ceda38c5b9c5ecefdcce21508c0ceb213a35c2e | 517600ba1ab1e7781b26df1439227f4569746d5a | /blog/migrations/0010_auto_20200521_2035.py | 76f4c00cec504ac5b01278e71bcabf23473394ae | [] | no_license | Smartybrainy/myworldgist | 885d86c838881ace6bced2492a46169dbd33b19d | 592f404c1047eccbbc8dad4b83032ffafb8d797a | refs/heads/master | 2023-01-21T19:29:47.208445 | 2020-12-05T00:52:10 | 2020-12-05T00:52:10 | 315,742,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.5 on 2020-05-21 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20200520_1119'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='added_date',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"smartyleey@gmail.com"
] | smartyleey@gmail.com |
588a1a3d27e75aaae0ccf3b96f5c76030132b90f | e3887534dffc1b5d1d97d4a6a030b22d46ae3a5c | /shah_entp_erpnext/config/docs.py | e89464f41a4e1144fe21c69dd17ae27bad91116f | [
"MIT"
] | permissive | anandpdoshi/shah_entp_erpnext | 5da4ba98a6e668b5ec970e0c361e081fa14ebdf8 | 7c19c629188b8f1e3449fd6f3a5a0ee371d3158c | refs/heads/master | 2016-08-12T05:24:51.194469 | 2016-04-24T17:46:57 | 2016-04-24T17:46:57 | 55,857,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/shah_entp_erpnext"
# docs_base_url = "https://[org_name].github.io/shah_entp_erpnext"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Shah Enterprises ERPNext Extension"
| [
"anand@erpnext.com"
] | anand@erpnext.com |
ceb1b4e0137321f1a236b1f2b9cd803efd2087b6 | 77fa2374a6d119a5a1890857ff16fc9f15004882 | /clishdeonly_15466/settings.py | 4a31aac7d924862912ff89cc47a756534ff99607 | [] | no_license | crowdbotics-apps/clishdeonly-15466 | b8ec985c6d819a3296c1763c0f38efacca2f6c7c | 5d136cbfab7f37755d2eae2ac0e54c9fcb912623 | refs/heads/master | 2022-12-10T12:37:08.378715 | 2020-04-05T16:16:50 | 2020-04-05T16:16:50 | 253,275,964 | 0 | 0 | null | 2022-12-08T09:29:33 | 2020-04-05T16:09:50 | Python | UTF-8 | Python | false | false | 5,487 | py | """
Django settings for clishdeonly_15466 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "clishdeonly_15466.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "clishdeonly_15466.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
fa9a1379d28d85b2fc26b90fae36448da8f77891 | 18f8abb90efece37949f5b5758c7752b1602fb12 | /py/django_tools/django-haystack/haystack/management/commands/clear_index.py | d99fb5bb36e64bb0017632e707949d2afb342da0 | [
"BSD-3-Clause",
"MIT"
] | permissive | marceltoben/evandrix.github.com | caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516 | abc3fbfb34f791f84e9a9d4dc522966421778ab2 | refs/heads/master | 2021-08-02T06:18:12.953567 | 2011-08-23T16:49:33 | 2011-08-23T16:49:33 | 2,267,457 | 3 | 5 | null | 2021-07-28T11:39:25 | 2011-08-25T11:18:56 | C | UTF-8 | Python | false | false | 1,778 | py | from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from haystack.constants import DEFAULT_ALIAS
class Command(BaseCommand):
    """Management command that wipes the Haystack search index.

    Prompts for confirmation unless ``--noinput`` is given; ``--using``
    selects which configured Haystack connection to clear.
    (Legacy Python 2 / optparse-style Django command.)
    """
    help = "Clears out the search index completely."
    base_options = (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='If provided, no prompts will be issued to the user and the data will be wiped out.'
        ),
        make_option("-u", "--using", action="store", type="string", dest="using", default=DEFAULT_ALIAS,
            help='If provided, chooses a connection to work with.'
        ),
    )
    option_list = BaseCommand.option_list + base_options
    def handle(self, **options):
        """Clears out the search index completely."""
        # Imported lazily so this module can load before Haystack
        # connections are configured.
        from haystack import connections
        self.verbosity = int(options.get('verbosity', 1))
        self.using = options.get('using')
        if options.get('interactive', True):
            # Interactive safety gate: destructive, irreversible operation.
            print
            print "WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'." % self.using
            print "Your choices after this are to restore from backups or rebuild via the `rebuild_index` command."
            yes_or_no = raw_input("Are you sure you wish to continue? [y/N] ")
            print
            if not yes_or_no.lower().startswith('y'):
                print "No action taken."
                sys.exit()
        if self.verbosity >= 1:
            print "Removing all documents from your index because you said so."
        # Delegate the actual wipe to the selected connection's backend.
        backend = connections[self.using].get_backend()
        backend.clear()
        if self.verbosity >= 1:
            print "All documents removed."
| [
"evandrix@gmail.com"
] | evandrix@gmail.com |
853bbc1268691dba67496264c74d9d41b009b1e5 | a84bc49dba142c4047a23d85917f411f42e196aa | /doc/src/web4sa/src-web4sa/apps/flask_apps/vib5/generate.py | 7abac724856713c9b47a14879f7a847d3b93ceb1 | [] | no_license | TZer0/web4sciapps | 40297ec4efd7bcad58514bfd891b6090eff7ff1f | cdcda068bbb44929a48d221410c635fa4d73da9c | refs/heads/master | 2021-01-22T08:59:27.682367 | 2014-09-23T11:45:05 | 2014-09-23T11:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from parampool.generator.flask import generate
from compute import compute_gamma
generate(compute_gamma, default_field='FloatField', enable_login=True)
| [
"hpl@simula.no"
] | hpl@simula.no |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.